repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/mod.rs | crates/ruff_linter/src/rules/ruff/rules/mod.rs | pub(crate) use access_annotations_from_class_dict::*;
pub(crate) use ambiguous_unicode_character::*;
pub(crate) use assert_with_print_message::*;
pub(crate) use assignment_in_assert::*;
pub(crate) use asyncio_dangling_task::*;
pub(crate) use class_with_mixed_type_vars::*;
pub(crate) use collection_literal_concatenation::*;
pub(crate) use dataclass_enum::*;
pub(crate) use decimal_from_float_literal::*;
pub(crate) use default_factory_kwarg::*;
pub(crate) use explicit_f_string_type_conversion::*;
pub(crate) use falsy_dict_get_fallback::*;
pub(crate) use function_call_in_dataclass_default::*;
pub(crate) use if_key_in_dict_del::*;
pub(crate) use implicit_classvar_in_dataclass::*;
pub(crate) use implicit_optional::*;
pub(crate) use in_empty_collection::*;
pub(crate) use incorrectly_parenthesized_tuple_in_subscript::*;
pub(crate) use indented_form_feed::*;
pub(crate) use invalid_assert_message_literal_argument::*;
pub(crate) use invalid_formatter_suppression_comment::*;
pub(crate) use invalid_index_type::*;
pub(crate) use invalid_pyproject_toml::*;
pub(crate) use invalid_rule_code::*;
pub(crate) use invalid_suppression_comment::*;
pub(crate) use legacy_form_pytest_raises::*;
pub(crate) use logging_eager_conversion::*;
pub(crate) use map_int_version_parsing::*;
pub(crate) use missing_fstring_syntax::*;
pub(crate) use mutable_class_default::*;
pub(crate) use mutable_dataclass_default::*;
pub(crate) use mutable_fromkeys_value::*;
pub(crate) use needless_else::*;
pub(crate) use never_union::*;
pub(crate) use non_empty_init_module::*;
pub(crate) use non_octal_permissions::*;
pub(crate) use none_not_at_end_of_union::*;
pub(crate) use parenthesize_chained_operators::*;
pub(crate) use post_init_default::*;
pub(crate) use property_without_return::*;
pub(crate) use pytest_raises_ambiguous_pattern::*;
pub(crate) use quadratic_list_summation::*;
pub(crate) use redirected_noqa::*;
pub(crate) use redundant_bool_literal::*;
pub(crate) use sort_dunder_all::*;
pub(crate) use sort_dunder_slots::*;
pub(crate) use starmap_zip::*;
pub(crate) use static_key_dict_comprehension::*;
#[cfg(any(feature = "test-rules", test))]
pub(crate) use test_rules::*;
pub(crate) use unmatched_suppression_comment::*;
pub(crate) use unnecessary_cast_to_int::*;
pub(crate) use unnecessary_iterable_allocation_for_first_element::*;
pub(crate) use unnecessary_key_check::*;
pub(crate) use unnecessary_literal_within_deque_call::*;
pub(crate) use unnecessary_nested_literal::*;
pub(crate) use unnecessary_regular_expression::*;
pub(crate) use unnecessary_round::*;
pub(crate) use unraw_re_pattern::*;
pub(crate) use unsafe_markup_use::*;
pub(crate) use unused_async::*;
pub(crate) use unused_noqa::*;
pub(crate) use unused_unpacked_variable::*;
pub(crate) use used_dummy_variable::*;
pub(crate) use useless_if_else::*;
pub(crate) use zip_instead_of_pairwise::*;
mod access_annotations_from_class_dict;
mod ambiguous_unicode_character;
mod assert_with_print_message;
mod assignment_in_assert;
mod asyncio_dangling_task;
mod class_with_mixed_type_vars;
mod collection_literal_concatenation;
mod confusables;
mod dataclass_enum;
mod decimal_from_float_literal;
mod default_factory_kwarg;
mod explicit_f_string_type_conversion;
mod falsy_dict_get_fallback;
mod function_call_in_dataclass_default;
mod if_key_in_dict_del;
mod implicit_classvar_in_dataclass;
mod implicit_optional;
mod in_empty_collection;
mod incorrectly_parenthesized_tuple_in_subscript;
mod indented_form_feed;
mod invalid_assert_message_literal_argument;
mod invalid_formatter_suppression_comment;
mod invalid_index_type;
mod invalid_pyproject_toml;
mod invalid_rule_code;
mod invalid_suppression_comment;
mod legacy_form_pytest_raises;
mod logging_eager_conversion;
mod map_int_version_parsing;
mod missing_fstring_syntax;
mod mutable_class_default;
mod mutable_dataclass_default;
mod mutable_fromkeys_value;
mod needless_else;
mod never_union;
mod non_empty_init_module;
mod non_octal_permissions;
mod none_not_at_end_of_union;
mod parenthesize_chained_operators;
mod post_init_default;
mod property_without_return;
mod pytest_raises_ambiguous_pattern;
mod quadratic_list_summation;
mod redirected_noqa;
mod redundant_bool_literal;
mod sequence_sorting;
mod sort_dunder_all;
mod sort_dunder_slots;
mod starmap_zip;
mod static_key_dict_comprehension;
mod suppression_comment_visitor;
#[cfg(any(feature = "test-rules", test))]
pub(crate) mod test_rules;
mod unmatched_suppression_comment;
mod unnecessary_cast_to_int;
mod unnecessary_iterable_allocation_for_first_element;
mod unnecessary_key_check;
mod unnecessary_literal_within_deque_call;
mod unnecessary_nested_literal;
mod unnecessary_regular_expression;
mod unnecessary_round;
mod unraw_re_pattern;
mod unsafe_markup_use;
mod unused_async;
mod unused_noqa;
mod unused_unpacked_variable;
mod used_dummy_variable;
mod useless_if_else;
mod zip_instead_of_pairwise;
/// The kind of source location a piece of text occurs in.
///
/// NOTE(review): consumers are among the rule modules re-exported above
/// (variant names suggest string/docstring/comment contexts) — confirm usage
/// at the call sites.
#[derive(Clone, Copy)]
pub(crate) enum Context {
    /// Inside a string literal.
    String,
    /// Inside a docstring.
    Docstring,
    /// Inside a comment.
    Comment,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/asyncio_dangling_task.rs | crates/ruff_linter/src/rules/ruff/rules/asyncio_dangling_task.rs | use std::fmt;
use ast::Stmt;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, Expr};
use ruff_python_semantic::{Scope, SemanticModel, analyze::typing};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for `asyncio.create_task` and `asyncio.ensure_future` calls
/// that do not store a reference to the returned result.
///
/// ## Why is this bad?
/// Per the `asyncio` documentation, the event loop only retains a weak
/// reference to tasks. If the task returned by `asyncio.create_task` and
/// `asyncio.ensure_future` is not stored in a variable, or a collection,
/// or otherwise referenced, it may be garbage collected at any time. This
/// can lead to unexpected and inconsistent behavior, as your tasks may or
/// may not run to completion.
///
/// ## Example
/// ```python
/// import asyncio
///
/// for i in range(10):
/// # This creates a weak reference to the task, which may be garbage
/// # collected at any time.
/// asyncio.create_task(some_coro(param=i))
/// ```
///
/// Use instead:
/// ```python
/// import asyncio
///
/// background_tasks = set()
///
/// for i in range(10):
/// task = asyncio.create_task(some_coro(param=i))
///
/// # Add task to the set. This creates a strong reference.
/// background_tasks.add(task)
///
/// # To prevent keeping references to finished tasks forever,
/// # make each task remove its own reference from the set after
/// # completion:
/// task.add_done_callback(background_tasks.discard)
/// ```
///
/// ## References
/// - [_The Heisenbug lurking in your async code_](https://textual.textualize.io/blog/2023/02/11/the-heisenbug-lurking-in-your-async-code/)
/// - [The Python Standard Library](https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.247")]
pub(crate) struct AsyncioDanglingTask {
    /// Rendered receiver of the call: the literal `"asyncio"` for
    /// module-level calls, or the event-loop variable's name.
    expr: String,
    /// Which task-spawning method was called (`create_task`/`ensure_future`).
    method: Method,
}
impl Violation for AsyncioDanglingTask {
    #[derive_message_formats]
    fn message(&self) -> String {
        // E.g. "Store a reference to the return value of `asyncio.create_task`".
        let AsyncioDanglingTask { expr, method } = self;
        format!("Store a reference to the return value of `{expr}.{method}`")
    }
}
/// RUF006
///
/// Reports a diagnostic when `expr` is a call that spawns an `asyncio` task
/// whose return value is discarded.
pub(crate) fn asyncio_dangling_task(checker: &Checker, expr: &Expr, semantic: &SemanticModel) {
    let Expr::Call(ast::ExprCall { func, .. }) = expr else {
        return;
    };

    // Ex) `asyncio.create_task(...)` or `asyncio.ensure_future(...)`
    let module_level_method = semantic
        .resolve_qualified_name(func)
        .and_then(|qualified_name| match qualified_name.segments() {
            ["asyncio", "create_task"] => Some(Method::CreateTask),
            ["asyncio", "ensure_future"] => Some(Method::EnsureFuture),
            _ => None,
        });
    if let Some(method) = module_level_method {
        checker.report_diagnostic(
            AsyncioDanglingTask {
                expr: "asyncio".to_string(),
                method,
            },
            expr.range(),
        );
        return;
    }

    // Ex) `loop = asyncio.get_event_loop(); loop.create_task(...)`
    let Expr::Attribute(ast::ExprAttribute { attr, value, .. }) = func.as_ref() else {
        return;
    };
    if attr != "create_task" {
        return;
    }
    let Expr::Name(name) = value.as_ref() else {
        return;
    };
    // The receiver must resolve to an assignment from one of the event-loop
    // constructors.
    let is_event_loop = typing::resolve_assignment(value, semantic).is_some_and(|qualified_name| {
        matches!(
            qualified_name.segments(),
            [
                "asyncio",
                "get_event_loop" | "get_running_loop" | "new_event_loop"
            ]
        )
    });
    if is_event_loop {
        checker.report_diagnostic(
            AsyncioDanglingTask {
                expr: name.id.to_string(),
                method: Method::CreateTask,
            },
            expr.range(),
        );
    }
}
/// RUF006
///
/// Flags assignments in `scope` whose value is a dangling `asyncio` task and
/// whose binding is itself never used.
pub(crate) fn asyncio_dangling_binding(scope: &Scope, checker: &Checker) {
    let semantic = checker.semantic();
    for binding_id in scope.binding_ids() {
        // If the binding itself is used, or it's not an assignment, skip it.
        let binding = semantic.binding(binding_id);
        if binding.is_used()
            || binding.is_global()
            || binding.is_nonlocal()
            || !binding.kind.is_assignment()
        {
            continue;
        }
        // Otherwise, flag any dangling tasks, including those that are shadowed, as in:
        // ```python
        // if x > 0:
        //     task = asyncio.create_task(make_request())
        // else:
        //     task = asyncio.create_task(make_request())
        // ```
        for binding_id in std::iter::successors(Some(binding_id), |id| scope.shadowed_binding(*id))
        {
            // Apply the same used/global/nonlocal/non-assignment filter to each
            // shadowed binding in the chain.
            let binding = semantic.binding(binding_id);
            if binding.is_used()
                || binding.is_global()
                || binding.is_nonlocal()
                || !binding.kind.is_assignment()
            {
                continue;
            }
            // Resolve the statement that created the binding, if any.
            let Some(source) = binding.source else {
                continue;
            };
            match semantic.statement(source) {
                // Single-target assignment: `task = ...`.
                Stmt::Assign(ast::StmtAssign { value, targets, .. }) if targets.len() == 1 => {
                    asyncio_dangling_task(checker, value, semantic);
                }
                // Annotated assignment with a value: `task: T = ...`.
                Stmt::AnnAssign(ast::StmtAnnAssign {
                    value: Some(value), ..
                }) => asyncio_dangling_task(checker, value, semantic),
                _ => {}
            }
        }
    }
}
/// The `asyncio` task-spawning method that produced the dangling task.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
enum Method {
    CreateTask,
    EnsureFuture,
}
impl fmt::Display for Method {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Render the method exactly as it is spelled in Python source.
        fmt.write_str(match self {
            Method::CreateTask => "create_task",
            Method::EnsureFuture => "ensure_future",
        })
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/invalid_formatter_suppression_comment.rs | crates/ruff_linter/src/rules/ruff/rules/invalid_formatter_suppression_comment.rs | use std::fmt::Display;
use smallvec::SmallVec;
use ast::{StmtClassDef, StmtFunctionDef};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, AnyNodeRef, helpers::comment_indentation_after};
use ruff_python_trivia::{SuppressionKind, indentation_at_offset};
use ruff_text_size::{Ranged, TextLen, TextRange};
use crate::Locator;
use crate::checkers::ast::Checker;
use crate::fix::edits::delete_comment;
use crate::{AlwaysFixableViolation, Fix};
use super::suppression_comment_visitor::{
CaptureSuppressionComment, SuppressionComment, SuppressionCommentData,
SuppressionCommentVisitor,
};
/// ## What it does
/// Checks for formatter suppression comments that are ineffective or incompatible
/// with Ruff's formatter.
///
/// ## Why is this bad?
/// Suppression comments that do not actually prevent formatting could cause unintended changes
/// when the formatter is run.
///
/// ## Example
/// In the following example, all suppression comments would cause
/// a rule violation.
///
/// ```python
/// def decorator():
/// pass
///
///
/// @decorator
/// # fmt: off
/// def example():
/// if True:
/// # fmt: skip
/// expression = [
/// # fmt: off
/// 1,
/// 2,
/// ]
/// # yapf: disable
/// # fmt: on
/// # yapf: enable
/// ```
///
/// ## Fix safety
///
/// This fix is always marked as unsafe because it deletes the invalid suppression comment,
/// rather than trying to move it to a valid position, which the user more likely intended.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.12.0")]
pub(crate) struct InvalidFormatterSuppressionComment {
    /// Why the formatter would ignore this suppression comment.
    reason: IgnoredReason,
}
impl AlwaysFixableViolation for InvalidFormatterSuppressionComment {
    #[derive_message_formats]
    fn message(&self) -> String {
        // `IgnoredReason` renders as a human-readable clause via `Display`.
        format!(
            "This suppression comment is invalid because {}",
            self.reason
        )
    }
    fn fix_title(&self) -> String {
        "Remove this comment".to_string()
    }
}
/// RUF028
///
/// Collects all formatter suppression comments in the file, then flags every
/// one that the formatter would ignore, offering to delete it.
pub(crate) fn ignored_formatter_suppression_comment(checker: &Checker, suite: &ast::Suite) {
    let locator = checker.locator();
    // Keep only comments that parse as suppression comments (`fmt: off` etc.).
    let suppression_comments: SmallVec<[SuppressionComment; 8]> = checker
        .comment_ranges()
        .into_iter()
        .filter_map(|range| {
            let kind = SuppressionKind::from_comment(locator.slice(range))?;
            Some(SuppressionComment { range, kind })
        })
        .collect();
    if suppression_comments.is_empty() {
        return;
    }

    let mut comments = UselessSuppressionComments::new(locator);
    SuppressionCommentVisitor::new(suppression_comments.into_iter(), &mut comments, locator)
        .visit(suite);
    comments.sort();

    for (range, reason) in comments.ignored_comments() {
        checker
            .report_diagnostic(InvalidFormatterSuppressionComment { reason }, range)
            .set_fix(Fix::unsafe_edit(delete_comment(range, locator)));
    }
}
/// Accumulates suppression comments that the formatter would ignore, paired
/// with the reason each one is ignored.
struct UselessSuppressionComments<'src, 'loc> {
    /// Flagged comment ranges with the reason each was rejected.
    captured: Vec<(TextRange, IgnoredReason)>,
    /// Source locator, used to inspect indentation around comments.
    locator: &'loc Locator<'src>,
}
impl<'src, 'loc> UselessSuppressionComments<'src, 'loc> {
    /// Creates an empty collector that reads source context through `locator`.
    fn new(locator: &'loc Locator<'src>) -> Self {
        Self {
            captured: vec![],
            locator,
        }
    }
    /// This function determines whether or not `comment` is a useful suppression comment.
    /// If it isn't, it will give a reason why the comment is ignored. See [`IgnoredReason`] for more.
    fn check_suppression_comment(
        &self,
        comment: &SuppressionCommentData,
    ) -> Result<(), IgnoredReason> {
        // Check if the comment is inside of an expression (or other
        // non-statement node); suppression only works at statement level.
        if comment
            .enclosing
            .is_some_and(|n| !is_valid_enclosing_node(n))
        {
            return Err(IgnoredReason::InNonStatement);
        }
        // A `fmt: skip` comment must trail a line; an own-line skip is ignored.
        if comment.kind == SuppressionKind::Skip && !comment.line_position.is_end_of_line() {
            return Err(IgnoredReason::SkipHasToBeTrailing);
        }
        // An own-line `fmt: off`/`fmt: on` between a function/class decorator
        // and its `def`/`class` name is ignored.
        if comment.kind == SuppressionKind::Off || comment.kind == SuppressionKind::On {
            if let Some(
                AnyNodeRef::StmtClassDef(StmtClassDef {
                    name,
                    decorator_list,
                    ..
                })
                | AnyNodeRef::StmtFunctionDef(StmtFunctionDef {
                    name,
                    decorator_list,
                    ..
                }),
            ) = comment.enclosing
            {
                if comment.line_position.is_own_line() && comment.range.start() < name.start() {
                    if let Some(decorator) = decorator_list.first() {
                        if decorator.end() < comment.range.start() {
                            return Err(IgnoredReason::AfterDecorator);
                        }
                    }
                }
            }
        }
        // An own-line `fmt: off` directly above an alternate body (e.g. an
        // `else:` block) is ignored unless its indentation matches the
        // preceding statement's.
        if comment.kind == SuppressionKind::Off && comment.line_position.is_own_line() {
            if let (Some(enclosing), Some(preceding), Some(following)) =
                (comment.enclosing, comment.preceding, comment.following)
            {
                if following.is_first_statement_in_alternate_body(enclosing) {
                    // Compare the comment's indentation with the preceding
                    // statement's indentation.
                    let comment_indentation = comment_indentation_after(
                        preceding,
                        comment.range,
                        self.locator.contents(),
                    );
                    let preceding_indentation =
                        indentation_at_offset(preceding.start(), self.locator.contents())
                            .unwrap_or_default()
                            .text_len();
                    if comment_indentation != preceding_indentation {
                        return Err(IgnoredReason::FmtOffAboveBlock);
                    }
                }
            }
        }
        // A `fmt: on` must be on its own line; a trailing `fmt: on` is ignored.
        if comment.kind == SuppressionKind::On {
            // Ensure the comment is not a trailing comment
            if !comment.line_position.is_own_line() {
                return Err(IgnoredReason::FmtOnCannotBeTrailing);
            }
        }
        Ok(())
    }
    /// Sorts captured comments by start offset so diagnostics come out in
    /// source order.
    fn sort(&mut self) {
        self.captured.sort_by_key(|(t, _)| t.start());
    }
    /// Iterates over the captured `(range, reason)` pairs.
    fn ignored_comments(&self) -> impl Iterator<Item = (TextRange, IgnoredReason)> + '_ {
        self.captured.iter().map(|(r, i)| (*r, *i))
    }
}
impl<'src> CaptureSuppressionComment<'src> for UselessSuppressionComments<'src, '_> {
    /// Records the comment only when it fails the suppression-validity check.
    fn capture(&mut self, comment: SuppressionCommentData<'src>) {
        if let Err(reason) = self.check_suppression_comment(&comment) {
            self.captured.push((comment.range, reason));
        }
    }
}
/// Why the formatter would ignore a given suppression comment.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum IgnoredReason {
    InNonStatement,
    AfterDecorator,
    SkipHasToBeTrailing,
    FmtOnCannotBeTrailing,
    FmtOffAboveBlock,
}
impl Display for IgnoredReason {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Each variant renders as the "because ..." clause of the diagnostic.
        let clause = match self {
            Self::InNonStatement => {
                "it cannot be in an expression, pattern, argument list, or other non-statement"
            }
            Self::AfterDecorator => "it cannot be after a decorator",
            Self::SkipHasToBeTrailing => "it cannot be on its own line",
            Self::FmtOnCannotBeTrailing => "it cannot be at the end of a line",
            Self::FmtOffAboveBlock => "it cannot be directly above an alternate body",
        };
        f.write_str(clause)
    }
}
/// Checks if an enclosing node is allowed to enclose a suppression comment.
///
/// Modules, statements, and statement-like containers (match cases, except
/// handlers, decorators, `elif`/`else` clauses) are valid; expression,
/// pattern, and other sub-statement nodes are not. The match is deliberately
/// exhaustive — no `_` arm — so that adding a new AST node variant forces a
/// decision here.
const fn is_valid_enclosing_node(node: AnyNodeRef) -> bool {
    match node {
        // Statement-level nodes and statement-like containers.
        AnyNodeRef::ModModule(_)
        | AnyNodeRef::ModExpression(_)
        | AnyNodeRef::StmtFunctionDef(_)
        | AnyNodeRef::StmtClassDef(_)
        | AnyNodeRef::StmtReturn(_)
        | AnyNodeRef::StmtDelete(_)
        | AnyNodeRef::StmtTypeAlias(_)
        | AnyNodeRef::StmtAssign(_)
        | AnyNodeRef::StmtAugAssign(_)
        | AnyNodeRef::StmtAnnAssign(_)
        | AnyNodeRef::StmtFor(_)
        | AnyNodeRef::StmtWhile(_)
        | AnyNodeRef::StmtIf(_)
        | AnyNodeRef::StmtWith(_)
        | AnyNodeRef::StmtMatch(_)
        | AnyNodeRef::StmtRaise(_)
        | AnyNodeRef::StmtTry(_)
        | AnyNodeRef::StmtAssert(_)
        | AnyNodeRef::StmtImport(_)
        | AnyNodeRef::StmtImportFrom(_)
        | AnyNodeRef::StmtGlobal(_)
        | AnyNodeRef::StmtNonlocal(_)
        | AnyNodeRef::StmtExpr(_)
        | AnyNodeRef::StmtPass(_)
        | AnyNodeRef::StmtBreak(_)
        | AnyNodeRef::StmtContinue(_)
        | AnyNodeRef::StmtIpyEscapeCommand(_)
        | AnyNodeRef::ExceptHandlerExceptHandler(_)
        | AnyNodeRef::MatchCase(_)
        | AnyNodeRef::Decorator(_)
        | AnyNodeRef::ElifElseClause(_) => true,
        // Expressions, patterns, parameters, and other sub-statement syntax.
        AnyNodeRef::ExprBoolOp(_)
        | AnyNodeRef::ExprNamed(_)
        | AnyNodeRef::ExprBinOp(_)
        | AnyNodeRef::ExprUnaryOp(_)
        | AnyNodeRef::ExprLambda(_)
        | AnyNodeRef::ExprIf(_)
        | AnyNodeRef::ExprDict(_)
        | AnyNodeRef::ExprSet(_)
        | AnyNodeRef::ExprListComp(_)
        | AnyNodeRef::ExprSetComp(_)
        | AnyNodeRef::ExprDictComp(_)
        | AnyNodeRef::ExprGenerator(_)
        | AnyNodeRef::ExprAwait(_)
        | AnyNodeRef::ExprYield(_)
        | AnyNodeRef::ExprYieldFrom(_)
        | AnyNodeRef::ExprCompare(_)
        | AnyNodeRef::ExprCall(_)
        | AnyNodeRef::InterpolatedElement(_)
        | AnyNodeRef::InterpolatedStringLiteralElement(_)
        | AnyNodeRef::InterpolatedStringFormatSpec(_)
        | AnyNodeRef::ExprFString(_)
        | AnyNodeRef::ExprTString(_)
        | AnyNodeRef::ExprStringLiteral(_)
        | AnyNodeRef::ExprBytesLiteral(_)
        | AnyNodeRef::ExprNumberLiteral(_)
        | AnyNodeRef::ExprBooleanLiteral(_)
        | AnyNodeRef::ExprNoneLiteral(_)
        | AnyNodeRef::ExprEllipsisLiteral(_)
        | AnyNodeRef::ExprAttribute(_)
        | AnyNodeRef::ExprSubscript(_)
        | AnyNodeRef::ExprStarred(_)
        | AnyNodeRef::ExprName(_)
        | AnyNodeRef::ExprList(_)
        | AnyNodeRef::ExprTuple(_)
        | AnyNodeRef::ExprSlice(_)
        | AnyNodeRef::ExprIpyEscapeCommand(_)
        | AnyNodeRef::PatternMatchValue(_)
        | AnyNodeRef::PatternMatchSingleton(_)
        | AnyNodeRef::PatternMatchSequence(_)
        | AnyNodeRef::PatternMatchMapping(_)
        | AnyNodeRef::PatternMatchClass(_)
        | AnyNodeRef::PatternMatchStar(_)
        | AnyNodeRef::PatternMatchAs(_)
        | AnyNodeRef::PatternMatchOr(_)
        | AnyNodeRef::PatternArguments(_)
        | AnyNodeRef::PatternKeyword(_)
        | AnyNodeRef::Comprehension(_)
        | AnyNodeRef::Arguments(_)
        | AnyNodeRef::Parameters(_)
        | AnyNodeRef::Parameter(_)
        | AnyNodeRef::ParameterWithDefault(_)
        | AnyNodeRef::Keyword(_)
        | AnyNodeRef::Alias(_)
        | AnyNodeRef::WithItem(_)
        | AnyNodeRef::TypeParams(_)
        | AnyNodeRef::TypeParamTypeVar(_)
        | AnyNodeRef::TypeParamTypeVarTuple(_)
        | AnyNodeRef::TypeParamParamSpec(_)
        | AnyNodeRef::FString(_)
        | AnyNodeRef::TString(_)
        | AnyNodeRef::StringLiteral(_)
        | AnyNodeRef::BytesLiteral(_)
        | AnyNodeRef::Identifier(_) => false,
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/if_key_in_dict_del.rs | crates/ruff_linter/src/rules/ruff/rules/if_key_in_dict_del.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{CmpOp, Expr, ExprName, ExprSubscript, Stmt, StmtIf};
use ruff_python_semantic::analyze::typing;
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Applicability, Edit, Fix};
/// The expression used as the dictionary key.
type Key = Expr;
/// The name expression referring to the dictionary.
type Dict = ExprName;
/// ## What it does
/// Checks for `if key in dictionary: del dictionary[key]`.
///
/// ## Why is this bad?
/// To remove a key-value pair from a dictionary, it's more concise to use `.pop(..., None)`.
///
/// ## Example
///
/// ```python
/// if key in dictionary:
/// del dictionary[key]
/// ```
///
/// Use instead:
///
/// ```python
/// dictionary.pop(key, None)
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as safe, unless the if statement contains comments.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.10.0")]
pub(crate) struct IfKeyInDictDel;
impl AlwaysFixableViolation for IfKeyInDictDel {
    #[derive_message_formats]
    fn message(&self) -> String {
        // The message is static: the rule matches one exact code shape.
        "Use `pop` instead of `key in dict` followed by `del dict[key]`".to_string()
    }
    fn fix_title(&self) -> String {
        "Replace `if` statement with `.pop(..., None)`".to_string()
    }
}
/// RUF051
///
/// Matches `if key in dictionary: del dictionary[key]` and offers to replace
/// the whole statement with `dictionary.pop(key, None)`.
pub(crate) fn if_key_in_dict_del(checker: &Checker, stmt: &StmtIf) {
    // Only a bare `if` — any `elif`/`else` clause disqualifies the rewrite.
    if !stmt.elif_else_clauses.is_empty() {
        return;
    }
    // The body must consist of exactly one `del` statement.
    let [Stmt::Delete(delete)] = &stmt.body[..] else {
        return;
    };
    // `if key in dictionary:` — extract the dict and key from the test.
    let Some((test_dict, test_key)) = extract_dict_and_key_from_test(&stmt.test) else {
        return;
    };
    // `del dictionary[key]` — extract the dict and key from the delete target.
    let Some((del_dict, del_key)) = extract_dict_and_key_from_del(&delete.targets) else {
        return;
    };
    // Both sides must name the same dictionary and the same key.
    if !is_same_key(test_key, del_key) || !is_same_dict(test_dict, del_dict) {
        return;
    }
    // Only rewrite when the object is known (via type inference) to be a `dict`.
    if !typing::is_known_to_be_of_type_dict(checker.semantic(), test_dict) {
        return;
    }
    let fix = replace_with_dict_pop_fix(checker, stmt, test_dict, test_key);
    checker
        .report_diagnostic(IfKeyInDictDel, delete.range)
        .set_fix(fix);
}
/// Matches `key in dictionary` and returns `(dictionary, key)`.
fn extract_dict_and_key_from_test(test: &Expr) -> Option<(&Dict, &Key)> {
    // Must be a comparison expression...
    let Expr::Compare(comp) = test else {
        return None;
    };
    // ...whose single operator is `in`...
    let [CmpOp::In] = comp.ops.as_ref() else {
        return None;
    };
    // ...with a plain name as its only right-hand operand.
    let [Expr::Name(dict)] = comp.comparators.as_ref() else {
        return None;
    };
    Some((dict, &comp.left))
}
/// Matches `del dictionary[key]` (single target) and returns `(dictionary, key)`.
fn extract_dict_and_key_from_del(targets: &[Expr]) -> Option<(&Dict, &Key)> {
    // Exactly one delete target, and it must be a subscript expression.
    let [Expr::Subscript(ExprSubscript { value, slice, .. })] = targets else {
        return None;
    };
    // The subscripted object must be a plain name.
    match value.as_ref() {
        Expr::Name(dict) => Some((dict, slice)),
        _ => None,
    }
}
/// Whether the key tested with `in` and the key deleted are provably the same
/// expression: identical names, or equal simple literals.
fn is_same_key(test: &Key, del: &Key) -> bool {
    match (test, del) {
        (Expr::Name(ExprName { id: lhs, .. }), Expr::Name(ExprName { id: rhs, .. })) => {
            lhs.as_str() == rhs.as_str()
        }
        // `None` and `...` are singletons; matching kinds are always equal.
        (Expr::NoneLiteral(..), Expr::NoneLiteral(..))
        | (Expr::EllipsisLiteral(..), Expr::EllipsisLiteral(..)) => true,
        (Expr::BooleanLiteral(lhs), Expr::BooleanLiteral(rhs)) => lhs.value == rhs.value,
        (Expr::NumberLiteral(lhs), Expr::NumberLiteral(rhs)) => lhs.value == rhs.value,
        // Compare literal contents element-wise, independent of quoting style.
        (Expr::BytesLiteral(lhs), Expr::BytesLiteral(rhs)) => {
            lhs.value.bytes().eq(rhs.value.bytes())
        }
        (Expr::StringLiteral(lhs), Expr::StringLiteral(rhs)) => {
            lhs.value.chars().eq(rhs.value.chars())
        }
        // Anything else (calls, attributes, ...) may have side effects or
        // differ at runtime, so treat it as not-the-same.
        _ => false,
    }
}
/// Whether both name expressions refer to the same (textual) name.
fn is_same_dict(test: &Dict, del: &Dict) -> bool {
    test.id.as_str() == del.id.as_str()
}
/// Builds the fix replacing the entire `if` statement with
/// `dict.pop(key, None)`, splicing in the original source text of the
/// dictionary and key expressions.
fn replace_with_dict_pop_fix(checker: &Checker, stmt: &StmtIf, dict: &Dict, key: &Key) -> Fix {
    let locator = checker.locator();
    let replacement = format!("{}.pop({}, None)", locator.slice(dict), locator.slice(key));
    let edit = Edit::range_replacement(replacement, stmt.range);
    // Replacing the statement would drop any comments inside it, so the fix
    // is only safe when no comment intersects the statement's range.
    let applicability = if checker.comment_ranges().intersects(stmt.range) {
        Applicability::Unsafe
    } else {
        Applicability::Safe
    };
    Fix::applicable_edit(edit, applicability)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/never_union.rs | crates/ruff_linter/src/rules/ruff/rules/never_union.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, Expr, ExprBinOp, Operator};
use ruff_python_semantic::{SemanticModel, analyze::typing::traverse_union};
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for uses of `typing.NoReturn` and `typing.Never` in union types.
///
/// ## Why is this bad?
/// `typing.NoReturn` and `typing.Never` are special types, used to indicate
/// that a function never returns, or that a type has no values.
///
/// Including `typing.NoReturn` or `typing.Never` in a union type is redundant,
/// as, e.g., `typing.Never | T` is equivalent to `T`.
///
/// ## Example
///
/// ```python
/// from typing import Never
///
///
/// def func() -> Never | int: ...
/// ```
///
/// Use instead:
///
/// ```python
/// def func() -> int: ...
/// ```
///
/// ## References
/// - [Python documentation: `typing.Never`](https://docs.python.org/3/library/typing.html#typing.Never)
/// - [Python documentation: `typing.NoReturn`](https://docs.python.org/3/library/typing.html#typing.NoReturn)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.2.0")]
pub(crate) struct NeverUnion {
    /// Which of `NoReturn`/`Never` appeared in the union.
    never_like: NeverLike,
    /// Whether the union was spelled `X | Y` (PEP 604) or `Union[X, Y]`.
    union_like: UnionLike,
}
impl Violation for NeverUnion {
    // A fix is offered except in the `Never | None` case (see `never_union`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        let Self {
            never_like,
            union_like,
        } = self;
        match union_like {
            UnionLike::PEP604 => {
                format!("`{never_like} | T` is equivalent to `T`")
            }
            UnionLike::TypingUnion => {
                format!("`Union[{never_like}, T]` is equivalent to `T`")
            }
        }
    }
    fn fix_title(&self) -> Option<String> {
        let Self { never_like, .. } = self;
        Some(format!("Remove `{never_like}`"))
    }
}
/// RUF020
///
/// Flags `typing.NoReturn`/`typing.Never` appearing inside a union, in both
/// the `X | Y` (PEP 604) and `typing.Union[X, Y]` spellings.
pub(crate) fn never_union(checker: &Checker, expr: &Expr) {
    match expr {
        // Ex) `typing.NoReturn | int`
        Expr::BinOp(ast::ExprBinOp {
            op: Operator::BitOr,
            left,
            right,
            range: _,
            node_index: _,
        }) => {
            // Analyze the left-hand side of the `|` operator.
            if let Some(never_like) = NeverLike::from_expr(left, checker.semantic()) {
                let mut diagnostic = checker.report_diagnostic(
                    NeverUnion {
                        never_like,
                        union_like: UnionLike::PEP604,
                    },
                    left.range(),
                );
                // Avoid producing code that would raise an exception when
                // `Never | None` would be fixed to `None | None`.
                // Instead do not provide a fix. No action needed for `typing.Union`,
                // as `Union[None, None]` is valid Python.
                // See https://github.com/astral-sh/ruff/issues/14567.
                if !is_pep604_union_with_bare_none(checker.semantic()) {
                    // Replace the whole `X | T` expression with the other operand.
                    diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                        checker.locator().slice(right.as_ref()).to_string(),
                        expr.range(),
                    )));
                }
            }
            // Analyze the right-hand side of the `|` operator.
            if let Some(never_like) = NeverLike::from_expr(right, checker.semantic()) {
                let mut diagnostic = checker.report_diagnostic(
                    NeverUnion {
                        never_like,
                        union_like: UnionLike::PEP604,
                    },
                    right.range(),
                );
                // Same `None | None` caveat as above.
                if !is_pep604_union_with_bare_none(checker.semantic()) {
                    diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                        checker.locator().slice(left.as_ref()).to_string(),
                        expr.range(),
                    )));
                }
            }
        }
        // Ex) `typing.Union[typing.NoReturn, int]`
        Expr::Subscript(ast::ExprSubscript {
            value,
            slice,
            ctx: _,
            range: _,
            node_index: _,
        }) if checker.semantic().match_typing_expr(value, "Union") => {
            let Expr::Tuple(tuple_slice) = &**slice else {
                return;
            };
            // Analyze each element of the `Union`.
            for elt in tuple_slice {
                if let Some(never_like) = NeverLike::from_expr(elt, checker.semantic()) {
                    // Collect the other elements of the `Union`.
                    let rest: Vec<Expr> = tuple_slice
                        .iter()
                        .filter(|other| *other != elt)
                        .cloned()
                        .collect();
                    // Ignore, e.g., `typing.Union[typing.NoReturn]`.
                    if rest.is_empty() {
                        return;
                    }
                    let mut diagnostic = checker.report_diagnostic(
                        NeverUnion {
                            never_like,
                            union_like: UnionLike::TypingUnion,
                        },
                        elt.range(),
                    );
                    diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                        if let [only] = rest.as_slice() {
                            // Ex) `typing.Union[typing.NoReturn, int]` -> `int`
                            checker.locator().slice(only).to_string()
                        } else {
                            // Ex) `typing.Union[typing.NoReturn, int, str]` -> `typing.Union[int, str]`
                            checker
                                .generator()
                                .expr(&Expr::Subscript(ast::ExprSubscript {
                                    value: value.clone(),
                                    slice: Box::new(Expr::Tuple(ast::ExprTuple {
                                        elts: rest,
                                        ctx: ast::ExprContext::Load,
                                        range: TextRange::default(),
                                        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                                        parenthesized: true,
                                    })),
                                    ctx: ast::ExprContext::Load,
                                    range: TextRange::default(),
                                    node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                                }))
                        },
                        expr.range(),
                    )));
                }
            }
        }
        _ => {}
    }
}
/// How the offending union was spelled in source.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum UnionLike {
    /// E.g., `typing.Union[int, str]`
    TypingUnion,
    /// E.g., `int | str`
    PEP604,
}
/// Which "bottom"-like typing special form was found inside the union.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum NeverLike {
    /// E.g., `typing.NoReturn`
    NoReturn,
    /// E.g., `typing.Never`
    Never,
}
impl NeverLike {
fn from_expr(expr: &Expr, semantic: &ruff_python_semantic::SemanticModel) -> Option<Self> {
let qualified_name = semantic.resolve_qualified_name(expr)?;
if semantic.match_typing_qualified_name(&qualified_name, "NoReturn") {
Some(NeverLike::NoReturn)
} else if semantic.match_typing_qualified_name(&qualified_name, "Never") {
Some(NeverLike::Never)
} else {
None
}
}
}
impl std::fmt::Display for NeverLike {
    /// Renders the bare symbol name (`NoReturn`/`Never`) as used in messages.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            NeverLike::NoReturn => "NoReturn",
            NeverLike::Never => "Never",
        })
    }
}
/// Return `true` if this union is a [PEP 604 union] that contains `None`,
/// e.g. `int | Never | None`.
///
/// Autofixing these unions can be dangerous,
/// as `None | None` results in a runtime exception in Python.
///
/// [PEP 604 union]: https://docs.python.org/3/library/stdtypes.html#types-union
fn is_pep604_union_with_bare_none(semantic: &SemanticModel) -> bool {
    // Walk up the enclosing expressions (skipping the current one) while they
    // are still `|` operations, keeping the outermost enclosing union.
    let enclosing_pep604_union = semantic
        .current_expressions()
        .skip(1)
        .take_while(|expr| {
            matches!(
                expr,
                Expr::BinOp(ExprBinOp {
                    op: Operator::BitOr,
                    ..
                })
            )
        })
        .last();
    let Some(enclosing_pep604_union) = enclosing_pep604_union else {
        return false;
    };
    // Scan every member of that union for a bare `None` literal.
    let mut union_contains_bare_none = false;
    traverse_union(
        &mut |expr, _| {
            if matches!(expr, Expr::NoneLiteral(_)) {
                union_contains_bare_none = true;
            }
        },
        semantic,
        enclosing_pep604_union,
    );
    union_contains_bare_none
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/incorrectly_parenthesized_tuple_in_subscript.rs | crates/ruff_linter/src/rules/ruff/rules/incorrectly_parenthesized_tuple_in_subscript.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{Expr, ExprSubscript, PythonVersion};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for consistent style regarding whether nonempty tuples in subscripts
/// are parenthesized.
///
/// The exact nature of this violation depends on the setting
/// [`lint.ruff.parenthesize-tuple-in-subscript`]. By default, the use of
/// parentheses is considered a violation.
///
/// This rule is not applied inside "typing contexts" (type annotations,
/// type aliases and subscripted class bases), as these have their own specific
/// conventions around them.
///
/// ## Why is this bad?
/// It is good to be consistent and, depending on the codebase, one or the other
/// convention may be preferred.
///
/// ## Example
///
/// ```python
/// directions = {(0, 1): "North", (1, 0): "East", (0, -1): "South", (-1, 0): "West"}
/// directions[(0, 1)]
/// ```
///
/// Use instead (with default setting):
///
/// ```python
/// directions = {(0, 1): "North", (1, 0): "East", (0, -1): "South", (-1, 0): "West"}
/// directions[0, 1]
/// ```
///
/// ## Options
/// - `lint.ruff.parenthesize-tuple-in-subscript`
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.5.7")]
pub(crate) struct IncorrectlyParenthesizedTupleInSubscript {
    // Mirrors the `lint.ruff.parenthesize-tuple-in-subscript` setting:
    // `true` means parentheses are required, `false` means they are forbidden.
    prefer_parentheses: bool,
}

impl AlwaysFixableViolation for IncorrectlyParenthesizedTupleInSubscript {
    #[derive_message_formats]
    fn message(&self) -> String {
        // The message direction flips with the configured preference.
        if self.prefer_parentheses {
            "Use parentheses for tuples in subscripts".to_string()
        } else {
            "Avoid parentheses for tuples in subscripts".to_string()
        }
    }

    fn fix_title(&self) -> String {
        if self.prefer_parentheses {
            "Parenthesize tuple".to_string()
        } else {
            "Remove parentheses".to_string()
        }
    }
}
/// RUF031
///
/// Reports (and always fixes) nonempty tuple subscripts whose parenthesization
/// disagrees with the configured preference.
pub(crate) fn subscript_with_parenthesized_tuple(checker: &Checker, subscript: &ExprSubscript) {
    // `true` => parentheses required; `false` => parentheses forbidden.
    let prefer_parentheses = checker.settings().ruff.parenthesize_tuple_in_subscript;
    let Expr::Tuple(tuple_subscript) = &*subscript.slice else {
        return;
    };
    // Already in the preferred style, or empty (an empty tuple has no bare form).
    if tuple_subscript.parenthesized == prefer_parentheses || tuple_subscript.is_empty() {
        return;
    }
    // We should not handle single starred expressions
    // (regardless of `prefer_parentheses`)
    if matches!(&tuple_subscript.elts[..], &[Expr::Starred(_)]) {
        return;
    }
    // Adding parentheses in the presence of a slice leads to a syntax error.
    if prefer_parentheses && tuple_subscript.iter().any(Expr::is_slice_expr) {
        return;
    }
    // Removing parentheses in the presence of unpacking leads
    // to a syntax error in Python 3.10.
    // This is no longer a syntax error starting in Python 3.11
    // see https://peps.python.org/pep-0646/#change-1-star-expressions-in-indexes
    if checker.target_version() <= PythonVersion::PY310
        && !prefer_parentheses
        && tuple_subscript.iter().any(Expr::is_starred_expr)
    {
        return;
    }
    // subscripts in annotations, type definitions or class bases are typing subscripts.
    // These have their own special conventions; skip applying the rule in these cases.
    let semantic = checker.semantic();
    if semantic.in_annotation() || semantic.in_type_definition() || semantic.in_class_base() {
        return;
    }
    let locator = checker.locator();
    let source_range = subscript.slice.range();
    let new_source = if prefer_parentheses {
        // Wrap the tuple's existing source text in parentheses.
        format!("({})", locator.slice(source_range))
    } else {
        // Strip the first and last source characters — assumes the slice's
        // range starts/ends at the parentheses when `parenthesized` is set.
        locator.slice(source_range)[1..source_range.len().to_usize() - 1].to_string()
    };
    let edit = Edit::range_replacement(new_source, source_range);
    checker
        .report_diagnostic(
            IncorrectlyParenthesizedTupleInSubscript { prefer_parentheses },
            source_range,
        )
        .set_fix(Fix::safe_edit(edit));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/unraw_re_pattern.rs | crates/ruff_linter/src/rules/ruff/rules/unraw_re_pattern.rs | use std::fmt::{Display, Formatter};
use std::str::FromStr;
use ruff_diagnostics::Applicability;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{
BytesLiteral, Expr, ExprBytesLiteral, ExprCall, ExprStringLiteral, PythonVersion, StringLiteral,
};
use ruff_python_semantic::{Modules, SemanticModel};
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Reports the following `re` and `regex` calls when
/// their first arguments are not raw strings:
///
/// - For `regex` and `re`: `compile`, `findall`, `finditer`,
/// `fullmatch`, `match`, `search`, `split`, `sub`, `subn`.
/// - `regex`-specific: `splititer`, `subf`, `subfn`, `template`.
///
/// ## Why is this bad?
/// Regular expressions should be written
/// using raw strings to avoid double escaping.
///
/// ## Fix safety
/// The fix is unsafe if the string/bytes literal contains an escape sequence because the fix alters
/// the runtime value of the literal while retaining the regex semantics.
///
/// For example
/// ```python
/// # Literal is `1\n2`.
/// re.compile("1\n2")
///
/// # Literal is `1\\n2`, but the regex library will interpret `\\n` and will still match a newline
/// # character as before.
/// re.compile(r"1\n2")
/// ```
///
/// ## Fix availability
/// A fix is not available if either
/// * the argument is a string with a (no-op) `u` prefix (e.g., `u"foo"`) as the prefix is
/// incompatible with the raw prefix `r`
/// * the argument is a string or bytes literal with an escape sequence that has a different
/// meaning in the context of a regular expression such as `\b`, which is word boundary or
/// backspace in a regex, depending on the context, but always a backspace in string and bytes
/// literals.
///
/// ## Example
///
/// ```python
/// re.compile("foo\\bar")
/// ```
///
/// Use instead:
///
/// ```python
/// re.compile(r"foo\bar")
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.8.0")]
pub(crate) struct UnrawRePattern {
    // Module the call was resolved to (`re` or `regex`).
    module: RegexModule,
    // Name of the called function, e.g. `compile`.
    func: String,
    // Whether the offending first argument is a string or a bytes literal.
    kind: PatternKind,
}

impl Violation for UnrawRePattern {
    // A fix is only offered when the literal can be safely made raw.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let Self { module, func, kind } = &self;
        let call = format!("`{module}.{func}()`");
        match kind {
            PatternKind::String => format!("First argument to {call} is not raw string"),
            PatternKind::Bytes => format!("First argument to {call} is not raw bytes literal"),
        }
    }

    fn fix_title(&self) -> Option<String> {
        match self.kind {
            PatternKind::String => Some("Replace with raw string".to_string()),
            PatternKind::Bytes => Some("Replace with raw bytes literal".to_string()),
        }
    }
}
/// The regular-expression module a call was resolved to.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum RegexModule {
    Re,
    Regex,
}

impl RegexModule {
    /// Whether `name` is a function of this module whose first argument is a
    /// regex pattern. The `regex` package supports a superset of `re`'s API.
    fn is_function_taking_pattern(self, name: &str) -> bool {
        matches!(
            name,
            "compile" | "findall" | "finditer" | "fullmatch" | "match" | "search" | "split"
                | "sub" | "subn"
        ) || (self == Self::Regex && matches!(name, "splititer" | "subf" | "subfn" | "template"))
    }
}

impl Display for RegexModule {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Re => f.write_str("re"),
            Self::Regex => f.write_str("regex"),
        }
    }
}

impl FromStr for RegexModule {
    type Err = ();

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "re" {
            Ok(Self::Re)
        } else if s == "regex" {
            Ok(Self::Regex)
        } else {
            Err(())
        }
    }
}
/// Whether the offending pattern argument is a `str` or a `bytes` literal.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum PatternKind {
    String,
    Bytes,
}
/// RUF039
pub(crate) fn unraw_re_pattern(checker: &Checker, call: &ExprCall) {
    let semantic = checker.semantic();
    // Fast path: bail unless `re` or `regex` was seen in this module at all.
    if !semantic.seen_module(Modules::RE) && !semantic.seen_module(Modules::REGEX) {
        return;
    }
    // Resolve the callee to a known pattern-taking function of `re`/`regex`.
    let Some((module, func)) = regex_module_and_func(semantic, call.func.as_ref()) else {
        return;
    };
    // Only the first positional argument (the pattern) is inspected; each part
    // of the literal (e.g. implicit concatenation) is checked independently.
    match call.arguments.args.as_ref().first() {
        Some(Expr::StringLiteral(ExprStringLiteral { value, .. })) => {
            value
                .iter()
                .for_each(|part| check_string(checker, part, module, func));
        }
        Some(Expr::BytesLiteral(ExprBytesLiteral { value, .. })) => {
            value
                .iter()
                .for_each(|part| check_bytes(checker, part, module, func));
        }
        _ => {}
    }
}
/// Resolve `expr` to a two-segment qualified name `<module>.<func>` where
/// `<module>` is `re` or `regex` and `<func>` takes a pattern as its first
/// argument. Returns `None` for anything else.
fn regex_module_and_func<'model>(
    semantic: &SemanticModel<'model>,
    expr: &'model Expr,
) -> Option<(RegexModule, &'model str)> {
    let qualified_name = semantic.resolve_qualified_name(expr)?;
    // Exactly two segments: module name plus function name.
    if let [module, func] = qualified_name.segments() {
        let module = RegexModule::from_str(module).ok()?;
        if !module.is_function_taking_pattern(func) {
            return None;
        }
        return Some((module, func));
    }
    None
}
/// Report (and, when possible, fix) a non-raw string literal used as a regex pattern.
fn check_string(checker: &Checker, literal: &StringLiteral, module: RegexModule, func: &str) {
    // Already a raw string: nothing to report.
    if literal.flags.prefix().is_raw() {
        return;
    }
    let kind = PatternKind::String;
    let func = func.to_string();
    let range = literal.range;
    let mut diagnostic = checker.report_diagnostic(UnrawRePattern { module, func, kind }, range);
    // The diagnostic stands even when no fix can be offered.
    let Some(applicability) = raw_string_applicability(checker, literal) else {
        return;
    };
    // The fix simply prepends the `r` prefix to the literal.
    diagnostic.set_fix(Fix::applicable_edit(
        Edit::insertion("r".to_string(), literal.range().start()),
        applicability,
    ));
}
/// Check how safe it is to prepend the `r` prefix to the string.
///
/// ## Returns
/// * `None` if the prefix cannot be added,
/// * `Some(a)` if it can be added with applicability `a`.
fn raw_string_applicability(checker: &Checker, literal: &StringLiteral) -> Option<Applicability> {
    if literal.flags.prefix().is_unicode() {
        // The (no-op) `u` prefix is a syntax error when combined with `r`
        return None;
    }
    // `\N` is only treated as raw-safe when targeting Python 3.8+ — presumably
    // because older `re` versions don't recognize `\N{...}` escapes in patterns.
    let allow_named_unicode = checker.target_version() >= PythonVersion::PY38;
    raw_applicability(checker, literal.range(), |escaped| {
        matches!(
            escaped,
            Some('a' | 'f' | 'n' | 'r' | 't' | 'u' | 'U' | 'v' | 'x')
        ) || (allow_named_unicode && escaped == Some('N'))
    })
}
/// Report (and, when possible, fix) a non-raw bytes literal used as a regex pattern.
fn check_bytes(checker: &Checker, literal: &BytesLiteral, module: RegexModule, func: &str) {
    // Already a raw bytes literal: nothing to report.
    if literal.flags.prefix().is_raw() {
        return;
    }
    let kind = PatternKind::Bytes;
    let func = func.to_string();
    let range = literal.range;
    let mut diagnostic = checker.report_diagnostic(UnrawRePattern { module, func, kind }, range);
    // The diagnostic stands even when no fix can be offered.
    let Some(applicability) = raw_byte_applicability(checker, literal) else {
        return;
    };
    // The fix simply prepends the `r` prefix to the literal.
    diagnostic.set_fix(Fix::applicable_edit(
        Edit::insertion("r".to_string(), literal.range().start()),
        applicability,
    ));
}
/// Check how safe it is to prepend the `r` prefix to the bytes literal.
///
/// ## Returns
/// * `None` if the prefix cannot be added,
/// * `Some(a)` if it can be added with applicability `a`.
fn raw_byte_applicability(checker: &Checker, literal: &BytesLiteral) -> Option<Applicability> {
    // Note: unlike the string variant, `\u`/`\U`/`\N` are absent — those
    // escapes have no meaning in bytes literals.
    raw_applicability(checker, literal.range(), |escaped| {
        matches!(escaped, Some('a' | 'f' | 'n' | 'r' | 't' | 'v' | 'x'))
    })
}
/// Shared escape-sequence scan for string and bytes literals.
///
/// Scans the literal's source text: every backslash must be followed by a
/// character accepted by `match_allowed_escape_sequence`, otherwise the fix is
/// withheld (`None`). If at least one backslash was seen, the fix is `Unsafe`
/// (the literal's runtime value changes); otherwise it is `Safe`.
fn raw_applicability(
    checker: &Checker,
    literal_range: TextRange,
    match_allowed_escape_sequence: impl Fn(Option<char>) -> bool,
) -> Option<Applicability> {
    let mut found_slash = false;
    let mut chars = checker.locator().slice(literal_range).chars().peekable();
    while let Some(char) = chars.next() {
        if char == '\\' {
            found_slash = true;
            // Turning `"\uXXXX"` into `r"\uXXXX"` is behaviorally equivalent when passed
            // to `re`, however, it's not exactly the same runtime value.
            // Similarly, for the other escape sequences.
            // NOTE(review): the peeked character is not consumed, so in `\\` the
            // second backslash is itself examined as an escape introducer on the
            // next iteration — doubled backslashes are conservatively unfixable.
            if !match_allowed_escape_sequence(chars.peek().copied()) {
                // If the next character is not one of the whitelisted ones, we likely cannot safely turn
                // this into a raw string.
                return None;
            }
        }
    }
    Some(if found_slash {
        Applicability::Unsafe
    } else {
        Applicability::Safe
    })
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/quadratic_list_summation.rs | crates/ruff_linter/src/rules/ruff/rules/quadratic_list_summation.rs | use anyhow::Result;
use itertools::Itertools;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::parenthesized_range;
use ruff_python_ast::{self as ast, Arguments, Expr};
use ruff_python_semantic::SemanticModel;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::importer::ImportRequest;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for the use of `sum()` to flatten lists of lists, which has
/// quadratic complexity.
///
/// ## Why is this bad?
/// The use of `sum()` to flatten lists of lists is quadratic in the number of
/// lists, as `sum()` creates a new list for each element in the summation.
///
/// Instead, consider using another method of flattening lists to avoid
/// quadratic complexity. The following methods are all linear in the number of
/// lists:
///
/// - `functools.reduce(operator.iadd, lists, [])`
/// - `list(itertools.chain.from_iterable(lists))`
/// - `[item for sublist in lists for item in sublist]`
///
/// When fixing relevant violations, Ruff defaults to the `functools.reduce`
/// form, which outperforms the other methods in [microbenchmarks].
///
/// ## Example
/// ```python
/// lists = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
/// joined = sum(lists, [])
/// ```
///
/// Use instead:
/// ```python
/// import functools
/// import operator
///
///
/// lists = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
/// functools.reduce(operator.iadd, lists, [])
/// ```
///
/// ## Fix safety
///
/// This fix is always marked as unsafe because `sum` uses the `__add__` magic method while
/// `operator.iadd` uses the `__iadd__` magic method, and these behave differently on lists.
/// The former requires the right summand to be a list, whereas the latter allows for any iterable.
/// Therefore, the fix could inadvertently cause code that previously raised an error to silently
/// succeed. Moreover, the fix could remove comments from the original code.
///
/// ## References
/// - [_How Not to Flatten a List of Lists in Python_](https://mathieularose.com/how-not-to-flatten-a-list-of-lists-in-python)
/// - [_How do I make a flat list out of a list of lists?_](https://stackoverflow.com/questions/952914/how-do-i-make-a-flat-list-out-of-a-list-of-lists/953097#953097)
///
/// [microbenchmarks]: https://github.com/astral-sh/ruff/issues/5073#issuecomment-1591836349
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.285")]
pub(crate) struct QuadraticListSummation;

impl AlwaysFixableViolation for QuadraticListSummation {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Avoid quadratic list summation".to_string()
    }

    fn fix_title(&self) -> String {
        // The fix rewrites `sum(lists, [])` to `functools.reduce(operator.iadd, lists, [])`.
        "Replace with `functools.reduce`".to_string()
    }
}
/// RUF017
pub(crate) fn quadratic_list_summation(checker: &Checker, call: &ast::ExprCall) {
    let ast::ExprCall {
        func,
        arguments,
        range,
        node_index: _,
    } = call;
    // The iterable being summed is the first positional argument.
    let Some(iterable) = arguments.args.first() else {
        return;
    };
    let semantic = checker.semantic();
    // Only flag the `sum` builtin, not a local redefinition of the name.
    if !semantic.match_builtin_expr(func, "sum") {
        return;
    }
    // Only `sum(..., [])` / `sum(..., list())` exhibits the quadratic
    // list-concatenation pattern this rule targets.
    if !start_is_empty_list(arguments, semantic) {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(QuadraticListSummation, *range);
    diagnostic.try_set_fix(|| convert_to_reduce(iterable, call, checker));
}
/// Generate a [`Fix`] to convert a `sum()` call to a `functools.reduce()` call.
fn convert_to_reduce(iterable: &Expr, call: &ast::ExprCall, checker: &Checker) -> Result<Fix> {
    // Bind `functools.reduce` and `operator.iadd`, reusing existing imports
    // where possible; each may produce an import edit.
    let (reduce_edit, reduce_binding) = checker.importer().get_or_import_symbol(
        &ImportRequest::import("functools", "reduce"),
        call.start(),
        checker.semantic(),
    )?;
    let (iadd_edit, iadd_binding) = checker.importer().get_or_import_symbol(
        &ImportRequest::import("operator", "iadd"),
        iterable.start(),
        checker.semantic(),
    )?;
    // Preserve any parentheses around the iterable argument in the rewrite.
    let iterable = checker.locator().slice(
        parenthesized_range(iterable.into(), (&call.arguments).into(), checker.tokens())
            .unwrap_or(iterable.range()),
    );
    Ok(Fix::unsafe_edits(
        Edit::range_replacement(
            format!("{reduce_binding}({iadd_binding}, {iterable}, [])"),
            call.range(),
        ),
        // `dedup` avoids emitting two identical edits (e.g. duplicate
        // insertions at the same location).
        [reduce_edit, iadd_edit].into_iter().dedup(),
    ))
}
/// Returns `true` if the `start` argument to a `sum()` call is an empty list.
///
/// Both the literal `[]` and an argument-less `list()` call count.
fn start_is_empty_list(arguments: &Arguments, semantic: &SemanticModel) -> bool {
    // `start` may be passed by keyword or as the second positional argument.
    arguments
        .find_argument_value("start", 1)
        .is_some_and(|start| match start {
            Expr::List(list) => list.is_empty() && list.ctx.is_load(),
            Expr::Call(ast::ExprCall {
                func, arguments, ..
            }) => arguments.is_empty() && semantic.match_builtin_expr(func, "list"),
            _ => false,
        })
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/mutable_class_default.rs | crates/ruff_linter/src/rules/ruff/rules/mutable_class_default.rs | use rustc_hash::FxHashSet;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, Stmt};
use ruff_python_semantic::analyze::typing::{is_immutable_annotation, is_mutable_expr};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::ruff::helpers::{
dataclass_kind, has_default_copy_semantics, is_class_var_annotation, is_final_annotation,
is_special_attribute,
};
/// ## What it does
/// Checks for mutable default values in class attributes.
///
/// ## Why is this bad?
/// Mutable default values share state across all instances of the class,
/// while not being obvious. This can lead to bugs when the attributes are
/// changed in one instance, as those changes will unexpectedly affect all
/// other instances.
///
/// Generally speaking, you probably want to avoid having mutable default
/// values in the class body at all; instead, these variables should usually
/// be initialized in `__init__`. However, other possible fixes for the issue
/// can include:
/// - Explicitly annotating the variable with [`typing.ClassVar`][ClassVar] to
/// indicate that it is intended to be shared across all instances.
/// - Using an immutable data type (e.g. a tuple instead of a list)
/// for the default value.
///
/// ## Example
///
/// ```python
/// class A:
/// variable_1: list[int] = []
/// variable_2: set[int] = set()
/// variable_3: dict[str, int] = {}
/// ```
///
/// Use instead:
///
/// ```python
/// class A:
/// def __init__(self) -> None:
/// self.variable_1: list[int] = []
/// self.variable_2: set[int] = set()
/// self.variable_3: dict[str, int] = {}
/// ```
///
/// Or:
///
/// ```python
/// from typing import ClassVar
///
///
/// class A:
/// variable_1: ClassVar[list[int]] = []
/// variable_2: ClassVar[set[int]] = set()
/// variable_3: ClassVar[dict[str, int]] = {}
/// ```
///
/// Or:
///
/// ```python
/// class A:
/// variable_1: list[int] | None = None
/// variable_2: set[int] | None = None
/// variable_3: dict[str, int] | None = None
/// ```
///
/// Or:
///
/// ```python
/// from collections.abc import Sequence, Mapping, Set as AbstractSet
/// from types import MappingProxyType
///
///
/// class A:
/// variable_1: Sequence[int] = ()
/// variable_2: AbstractSet[int] = frozenset()
/// variable_3: Mapping[str, int] = MappingProxyType({})
/// ```
///
/// [ClassVar]: https://docs.python.org/3/library/typing.html#typing.ClassVar
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.273")]
pub(crate) struct MutableClassDefault;

impl Violation for MutableClassDefault {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Plain `Violation`: no automatic fix is offered for this rule.
        "Mutable class attributes should be annotated with `typing.ClassVar`".to_string()
    }
}
/// RUF012
///
/// Walks the class body, flagging class attributes assigned mutable defaults
/// that are not annotated as `ClassVar`, `Final`, or an immutable type.
pub(crate) fn mutable_class_default(checker: &Checker, class_def: &ast::StmtClassDef) {
    // Collect any `ClassVar`s we find in case they get reassigned later.
    let mut class_var_targets = FxHashSet::default();
    for statement in &class_def.body {
        match statement {
            Stmt::AnnAssign(ast::StmtAnnAssign {
                annotation,
                target,
                value: Some(value),
                ..
            }) => {
                // Remember `x: ClassVar[...] = ...` targets so that later bare
                // `x = ...` reassignments in the same body are not flagged.
                if let ast::Expr::Name(ast::ExprName { id, .. }) = target.as_ref() {
                    if is_class_var_annotation(annotation, checker.semantic()) {
                        class_var_targets.insert(id);
                    }
                }
                if !is_special_attribute(target)
                    && is_mutable_expr(value, checker.semantic())
                    && !is_class_var_annotation(annotation, checker.semantic())
                    && !is_final_annotation(annotation, checker.semantic())
                    && !is_immutable_annotation(annotation, checker.semantic(), &[])
                {
                    // Skip dataclasses (and dataclass-like classes) entirely for
                    // this statement.
                    if dataclass_kind(class_def, checker.semantic()).is_some() {
                        continue;
                    }
                    // Avoid, e.g., Pydantic and msgspec models, which end up copying defaults on instance creation.
                    // NOTE(review): this arm uses `return` (abandoning the rest of
                    // the class) while the dataclass check above uses `continue`
                    // — confirm the asymmetry is intentional.
                    if has_default_copy_semantics(class_def, checker.semantic()) {
                        return;
                    }
                    checker.report_diagnostic(MutableClassDefault, value.range());
                }
            }
            Stmt::Assign(ast::StmtAssign { value, targets, .. }) => {
                // Unannotated assignment: flag unless every target is either a
                // special attribute or a previously-recorded `ClassVar`.
                if !targets.iter().all(|target| {
                    is_special_attribute(target)
                        || target
                            .as_name_expr()
                            .is_some_and(|name| class_var_targets.contains(&name.id))
                }) && is_mutable_expr(value, checker.semantic())
                {
                    // Avoid, e.g., Pydantic and msgspec models, which end up copying defaults on instance creation.
                    if has_default_copy_semantics(class_def, checker.semantic()) {
                        return;
                    }
                    checker.report_diagnostic(MutableClassDefault, value.range());
                }
            }
            _ => (),
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/default_factory_kwarg.rs | crates/ruff_linter/src/rules/ruff/rules/default_factory_kwarg.rs | use anyhow::Result;
use ast::Keyword;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::is_constant;
use ruff_python_ast::token::Tokens;
use ruff_python_ast::{self as ast, Expr};
use ruff_text_size::Ranged;
use crate::Locator;
use crate::checkers::ast::Checker;
use crate::fix::edits::{Parentheses, remove_argument};
use crate::fix::snippet::SourceCodeSnippet;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for incorrect usages of `default_factory` as a keyword argument when
/// initializing a `defaultdict`.
///
/// ## Why is this bad?
/// The `defaultdict` constructor accepts a callable as its first argument.
/// For example, it's common to initialize a `defaultdict` with `int` or `list`
/// via `defaultdict(int)` or `defaultdict(list)`, to create a dictionary that
/// returns `0` or `[]` respectively when a key is missing.
///
/// The default factory _must_ be provided as a positional argument, as all
/// keyword arguments to `defaultdict` are interpreted as initial entries in
/// the dictionary. For example, `defaultdict(foo=1, bar=2)` will create a
/// dictionary with `{"foo": 1, "bar": 2}` as its initial entries.
///
/// As such, `defaultdict(default_factory=list)` will create a dictionary with
/// `{"default_factory": list}` as its initial entry, instead of a dictionary
/// that returns `[]` when a key is missing. Specifying a `default_factory`
/// keyword argument is almost always a mistake, and one that type checkers
/// can't reliably detect.
///
/// ## Fix safety
/// This rule's fix is marked as unsafe, as converting `default_factory` from a
/// keyword to a positional argument will change the behavior of the code, even
/// if the keyword argument was used erroneously.
///
/// ## Example
/// ```python
/// defaultdict(default_factory=int)
/// defaultdict(default_factory=list)
/// ```
///
/// Use instead:
/// ```python
/// defaultdict(int)
/// defaultdict(list)
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.5.0")]
pub(crate) struct DefaultFactoryKwarg {
    // Source text of the `default_factory=...` keyword, echoed in the fix title.
    default_factory: SourceCodeSnippet,
}

impl Violation for DefaultFactoryKwarg {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "`default_factory` is a positional-only argument to `defaultdict`".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        let DefaultFactoryKwarg { default_factory } = self;
        // Fall back to a generic title when `full_display` yields nothing
        // (presumably a snippet too long to show inline).
        if let Some(default_factory) = default_factory.full_display() {
            Some(format!("Replace with `defaultdict({default_factory})`"))
        } else {
            Some("Use positional argument".to_string())
        }
    }
}
/// RUF026
pub(crate) fn default_factory_kwarg(checker: &Checker, call: &ast::ExprCall) {
    // If the call isn't a `defaultdict` constructor, return.
    if !checker
        .semantic()
        .resolve_qualified_name(call.func.as_ref())
        .is_some_and(|qualified_name| {
            matches!(qualified_name.segments(), ["collections", "defaultdict"])
        })
    {
        return;
    }
    // If the user provided a positional argument for `default_factory`, return.
    if !call.arguments.args.is_empty() {
        return;
    }
    // If the user didn't provide a `default_factory` keyword argument, return.
    let Some(keyword) = call.arguments.find_keyword("default_factory") else {
        return;
    };
    // If the value is definitively not callable, return.
    // (A non-callable value suggests the keyword really was meant as an entry.)
    if is_non_callable_value(&keyword.value) {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(
        DefaultFactoryKwarg {
            default_factory: SourceCodeSnippet::from_str(checker.locator().slice(keyword)),
        },
        call.range(),
    );
    diagnostic
        .try_set_fix(|| convert_to_positional(call, keyword, checker.locator(), checker.tokens()));
}
/// Returns `true` if a value is definitively not callable (e.g., `1` or `[]`).
fn is_non_callable_value(value: &Expr) -> bool {
    // Constants (numbers, strings, `None`, …) are never callable.
    if is_constant(value) {
        return true;
    }
    // Neither are literal displays, comprehensions, slices, or (f/t-)strings.
    matches!(
        value,
        Expr::List(_)
            | Expr::Dict(_)
            | Expr::Set(_)
            | Expr::Tuple(_)
            | Expr::Slice(_)
            | Expr::ListComp(_)
            | Expr::SetComp(_)
            | Expr::DictComp(_)
            | Expr::Generator(_)
            | Expr::FString(_)
            | Expr::TString(_)
    )
}
/// Generate an [`Expr`] to replace `defaultdict(default_factory=callable)` with
/// `defaultdict(callable)`.
///
/// For example, given `defaultdict(default_factory=list)`, generate `defaultdict(list)`.
fn convert_to_positional(
    call: &ast::ExprCall,
    default_factory: &Keyword,
    locator: &Locator,
    tokens: &Tokens,
) -> Result<Fix> {
    if call.arguments.len() == 1 {
        // Ex) `defaultdict(default_factory=list)`
        // Only one argument: replace the whole keyword with its bare value.
        Ok(Fix::unsafe_edit(Edit::range_replacement(
            locator.slice(&default_factory.value).to_string(),
            default_factory.range(),
        )))
    } else {
        // Ex) `defaultdict(member=1, default_factory=list)`
        // First, remove the `default_factory` keyword argument.
        let removal_edit = remove_argument(
            default_factory,
            &call.arguments,
            Parentheses::Preserve,
            locator.contents(),
            tokens,
        )?;
        // Second, insert the value as the first positional argument.
        let insertion_edit = Edit::insertion(
            format!("{}, ", locator.slice(&default_factory.value)),
            call.arguments
                .arguments_source_order()
                .next()
                .ok_or_else(|| anyhow::anyhow!("`default_factory` keyword argument not found"))?
                .start(),
        );
        Ok(Fix::unsafe_edits(insertion_edit, [removal_edit]))
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/in_empty_collection.rs | crates/ruff_linter/src/rules/ruff/rules/in_empty_collection.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, CmpOp, Expr, helpers::is_empty_f_string};
use ruff_python_semantic::SemanticModel;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for membership tests on empty collections (such as `list`, `tuple`, `set` or `dict`).
///
/// ## Why is this bad?
/// If the collection is always empty, the check is unnecessary, and can be removed.
///
/// ## Example
///
/// ```python
/// if 1 not in set():
/// print("got it!")
/// ```
///
/// Use instead:
///
/// ```python
/// print("got it!")
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.11.9")]
pub(crate) struct InEmptyCollection;

impl Violation for InEmptyCollection {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Plain `Violation`: no automatic fix is offered for this rule.
        "Unnecessary membership test on empty collection".to_string()
    }
}
/// RUF060
pub(crate) fn in_empty_collection(checker: &Checker, compare: &ast::ExprCompare) {
    // A membership test has exactly one operator, which must be `in`/`not in`…
    let [CmpOp::In | CmpOp::NotIn] = &*compare.ops else {
        return;
    };
    // …and exactly one right-hand operand.
    let [collection] = &*compare.comparators else {
        return;
    };
    // Flag the whole comparison when the right-hand side is provably empty.
    if is_empty(collection, checker.semantic()) {
        checker.report_diagnostic(InEmptyCollection, compare.range());
    }
}
/// Whether `expr` is a provably empty collection: an empty literal display, an
/// empty (f-)string/bytes, an argument-less builtin constructor call, or a
/// `set(...)`/`frozenset(...)` call over another empty collection.
fn is_empty(expr: &Expr, semantic: &SemanticModel) -> bool {
    // Constructors that accept an (empty) iterable argument.
    const SET_CONSTRUCTORS: [&str; 2] = ["set", "frozenset"];
    // Constructors that are empty when called with no arguments at all.
    const EMPTY_CONSTRUCTORS: [&str; 8] = [
        "list",
        "tuple",
        "set",
        "frozenset",
        "dict",
        "bytes",
        "bytearray",
        "str",
    ];
    match expr {
        Expr::List(ast::ExprList { elts, .. }) => elts.is_empty(),
        Expr::Tuple(ast::ExprTuple { elts, .. }) => elts.is_empty(),
        Expr::Set(ast::ExprSet { elts, .. }) => elts.is_empty(),
        Expr::Dict(ast::ExprDict { items, .. }) => items.is_empty(),
        Expr::BytesLiteral(ast::ExprBytesLiteral { value, .. }) => value.is_empty(),
        Expr::StringLiteral(ast::ExprStringLiteral { value, .. }) => value.is_empty(),
        Expr::FString(fstring) => is_empty_f_string(fstring),
        Expr::Call(ast::ExprCall {
            func, arguments, ..
        }) => {
            if arguments.is_empty() {
                // e.g. `dict()`, `str()`, `frozenset()` — empty by construction.
                EMPTY_CONSTRUCTORS
                    .iter()
                    .any(|name| semantic.match_builtin_expr(func, name))
            } else {
                // e.g. `set([])` — a set constructor fed an empty collection.
                arguments.find_positional(0).is_some_and(|arg| {
                    is_empty(arg, semantic)
                        && SET_CONSTRUCTORS
                            .iter()
                            .any(|name| semantic.match_builtin_expr(func, name))
                })
            }
        }
        _ => false,
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/useless_if_else.rs | crates/ruff_linter/src/rules/ruff/rules/useless_if_else.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_ast::comparable::ComparableExpr;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for useless `if`-`else` conditions with identical arms.
///
/// ## Why is this bad?
/// Useless `if`-`else` conditions add unnecessary complexity to the code without
/// providing any logical benefit. Assigning the value directly is clearer.
///
/// ## Example
/// ```python
/// foo = x if y else x
/// ```
///
/// Use instead:
/// ```python
/// foo = x
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.9.0")]
pub(crate) struct UselessIfElse;

impl Violation for UselessIfElse {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Plain `Violation`: no automatic fix is offered for this rule.
        "Useless `if`-`else` condition".to_string()
    }
}
/// RUF034
///
/// Flags ternary expressions whose two arms are structurally identical.
pub(crate) fn useless_if_else(checker: &Checker, if_expr: &ast::ExprIf) {
    // Compare the arms structurally; the condition itself is irrelevant.
    if ComparableExpr::from(&if_expr.body) == ComparableExpr::from(&if_expr.orelse) {
        checker.report_diagnostic(UselessIfElse, if_expr.range);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/unnecessary_nested_literal.rs | crates/ruff_linter/src/rules/ruff/rules/unnecessary_nested_literal.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{AnyNodeRef, Expr, ExprContext, ExprSubscript, ExprTuple};
use ruff_python_semantic::analyze::typing::traverse_literal;
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::{Applicability, Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for unnecessary nested `Literal`.
///
/// ## Why is this bad?
/// Prefer using a single `Literal`, which is equivalent and more concise.
///
/// Parameterization of literals by other literals is supported as an ergonomic
/// feature as proposed in [PEP 586], to enable patterns such as:
/// ```python
/// ReadOnlyMode = Literal["r", "r+"]
/// WriteAndTruncateMode = Literal["w", "w+", "wt", "w+t"]
/// WriteNoTruncateMode = Literal["r+", "r+t"]
/// AppendMode = Literal["a", "a+", "at", "a+t"]
///
/// AllModes = Literal[ReadOnlyMode, WriteAndTruncateMode,
/// WriteNoTruncateMode, AppendMode]
/// ```
///
/// As a consequence, type checkers also support nesting of literals
/// which is less readable than a flat `Literal`:
/// ```python
/// AllModes = Literal[Literal["r", "r+"], Literal["w", "w+", "wt", "w+t"],
/// Literal["r+", "r+t"], Literal["a", "a+", "at", "a+t"]]
/// ```
///
/// ## Example
/// ```python
/// AllModes = Literal[
/// Literal["r", "r+"],
/// Literal["w", "w+", "wt", "w+t"],
/// Literal["r+", "r+t"],
/// Literal["a", "a+", "at", "a+t"],
/// ]
/// ```
///
/// Use instead:
/// ```python
/// AllModes = Literal[
/// "r", "r+", "w", "w+", "wt", "w+t", "r+", "r+t", "a", "a+", "at", "a+t"
/// ]
/// ```
///
/// or assign the literal to a variable as in the first example.
///
/// ## Fix safety
/// The fix for this rule is marked as unsafe when the `Literal` slice is split
/// across multiple lines and some of the lines have trailing comments.
///
/// ## References
/// - [Typing documentation: Legal parameters for `Literal` at type check time](https://typing.python.org/en/latest/spec/literal.html#legal-parameters-for-literal-at-type-check-time)
///
/// [PEP 586]: https://peps.python.org/pep-0586/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.10.0")]
pub(crate) struct UnnecessaryNestedLiteral;

impl Violation for UnnecessaryNestedLiteral {
    // A fix is only offered when the outer annotation is a subscript
    // expression; see `unnecessary_nested_literal` below.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "Unnecessary nested `Literal`".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        Some("Replace with flattened `Literal`".to_string())
    }
}
/// RUF041: Flag `Literal` annotations that nest further `Literal`s (e.g.
/// `Literal[Literal[1], 2]`) and, where possible, offer a fix that flattens
/// them into a single `Literal[1, 2]`.
pub(crate) fn unnecessary_nested_literal<'a>(checker: &Checker, literal_expr: &'a Expr) {
    let mut is_nested = false;
    let mut nodes: Vec<&Expr> = Vec::new();

    // A single traversal both detects nesting and collects the flattened leaf
    // expressions needed for the fix (the original code traversed twice). A
    // leaf whose parent is not the outermost `Literal` must belong to an
    // inner, nested `Literal`.
    traverse_literal(
        &mut |expr: &'a Expr, parent: &'a Expr| {
            if !AnyNodeRef::ptr_eq(parent.into(), literal_expr.into()) {
                is_nested = true;
            }
            nodes.push(expr);
        },
        checker.semantic(),
        literal_expr,
    );

    if !is_nested {
        return;
    }

    let mut diagnostic = checker.report_diagnostic(UnnecessaryNestedLiteral, literal_expr.range());

    // Create a [`Fix`] that flattens all nodes. Only a subscript expression
    // (`Literal[...]`) can be rewritten this way.
    if let Expr::Subscript(subscript) = literal_expr {
        let subscript = Expr::Subscript(ExprSubscript {
            slice: Box::new(if let [elt] = nodes.as_slice() {
                // A single element needs no tuple: `Literal[Literal[1]]` -> `Literal[1]`.
                (*elt).clone()
            } else {
                Expr::Tuple(ExprTuple {
                    elts: nodes.into_iter().cloned().collect(),
                    range: TextRange::default(),
                    node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                    ctx: ExprContext::Load,
                    parenthesized: false,
                })
            }),
            value: subscript.value.clone(),
            range: TextRange::default(),
            node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            ctx: ExprContext::Load,
        });
        // Regenerating the annotation drops any comments inside it, so the fix
        // is only safe when no comment ranges intersect the annotation.
        let fix = Fix::applicable_edit(
            Edit::range_replacement(checker.generator().expr(&subscript), literal_expr.range()),
            if checker.comment_ranges().intersects(literal_expr.range()) {
                Applicability::Unsafe
            } else {
                Applicability::Safe
            },
        );
        diagnostic.set_fix(fix);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/implicit_optional.rs | crates/ruff_linter/src/rules/ruff/rules/implicit_optional.rs | use std::fmt;
use anyhow::{Context, Result};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::name::Name;
use ruff_python_ast::{self as ast, Expr, Operator, Parameters};
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
use ruff_python_ast::PythonVersion;
use crate::rules::ruff::typing::type_hint_explicitly_allows_none;
/// ## What it does
/// Checks for the use of implicit `Optional` in type annotations when the
/// default parameter value is `None`.
///
/// If [`lint.future-annotations`] is set to `true`, `from __future__ import
/// annotations` will be added if doing so would allow using the `|` operator on
/// a Python version before 3.10.
///
/// ## Why is this bad?
/// Implicit `Optional` is prohibited by [PEP 484]. It is confusing and
/// inconsistent with the rest of the type system.
///
/// It's recommended to use `Optional[T]` instead. For Python 3.10 and later,
/// you can also use `T | None`.
///
/// ## Example
/// ```python
/// def foo(arg: int = None):
/// pass
/// ```
///
/// Use instead:
/// ```python
/// from typing import Optional
///
///
/// def foo(arg: Optional[int] = None):
/// pass
/// ```
///
/// Or, for Python 3.10 and later:
/// ```python
/// def foo(arg: int | None = None):
/// pass
/// ```
///
/// If you want to use the `|` operator in Python 3.9 and earlier, you can
/// use future imports:
/// ```python
/// from __future__ import annotations
///
///
/// def foo(arg: int | None = None):
/// pass
/// ```
///
/// ## Limitations
///
/// Type aliases are not supported and could result in false negatives.
/// For example, the following code will not be flagged:
/// ```python
/// Text = str | bytes
///
///
/// def foo(arg: Text = None):
/// pass
/// ```
///
/// ## Options
/// - `target-version`
/// - `lint.future-annotations`
///
/// ## Fix safety
///
/// This fix is always marked as unsafe because it can change the behavior of code that relies on
/// type hints, and it assumes the default value is always appropriateβwhich might not be the case.
///
/// [PEP 484]: https://peps.python.org/pep-0484/#union-types
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.273")]
pub(crate) struct ImplicitOptional {
    // Which replacement the fix would produce (`T | None` vs. `Optional[T]`);
    // surfaced to the user via the fix title.
    conversion_type: ConversionType,
}

impl Violation for ImplicitOptional {
    // A fix is offered for unquoted annotations, and for quoted annotations
    // only when the parsed annotation is "simple"; see `implicit_optional`.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "PEP 484 prohibits implicit `Optional`".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        let ImplicitOptional { conversion_type } = self;
        Some(format!("Convert to `{conversion_type}`"))
    }
}
/// The replacement form used when rewriting an implicit-`Optional` annotation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ConversionType {
    /// Rewrite with the PEP 604 union operator, e.g. `str | None`.
    BinOpOr,
    /// Rewrite with `typing.Optional`, e.g. `typing.Optional[str]`.
    Optional,
}

impl fmt::Display for ConversionType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let placeholder = match self {
            Self::BinOpOr => "T | None",
            Self::Optional => "Optional[T]",
        };
        f.write_str(placeholder)
    }
}

impl From<PythonVersion> for ConversionType {
    fn from(target_version: PythonVersion) -> Self {
        // `X | None` syntax is only available from Python 3.10 onwards.
        match target_version {
            version if version >= PythonVersion::PY310 => Self::BinOpOr,
            _ => Self::Optional,
        }
    }
}
/// Generate a [`Fix`] for the given [`Expr`] as per the [`ConversionType`].
///
/// # Errors
/// Returns an error if the `Optional` symbol cannot be made available at the
/// annotation's location.
fn generate_fix(checker: &Checker, conversion_type: ConversionType, expr: &Expr) -> Result<Fix> {
    match conversion_type {
        ConversionType::BinOpOr => {
            // Build `<expr> | None` and render it back to source text.
            let new_expr = Expr::BinOp(ast::ExprBinOp {
                left: Box::new(expr.clone()),
                op: Operator::BitOr,
                right: Box::new(Expr::NoneLiteral(ast::ExprNoneLiteral::default())),
                range: TextRange::default(),
                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            });
            let content = checker.generator().expr(&new_expr);
            let edit = Edit::range_replacement(content, expr.range());
            if checker.target_version() < PythonVersion::PY310 {
                // Before 3.10 the `|` union syntax is only usable in
                // annotations under `from __future__ import annotations`, so
                // attach that import as part of the fix.
                Ok(Fix::unsafe_edits(
                    edit,
                    [checker.importer().add_future_import()],
                ))
            } else {
                Ok(Fix::unsafe_edit(edit))
            }
        }
        ConversionType::Optional => {
            let importer = checker
                .typing_importer("Optional", PythonVersion::lowest())
                .context("Optional should be available on all supported Python versions")?;
            let (import_edit, binding) = importer.import(expr.start())?;
            // Build `<binding>[<expr>]`, where `binding` is however `Optional`
            // is reachable in this module after the import edit.
            let new_expr = Expr::Subscript(ast::ExprSubscript {
                range: TextRange::default(),
                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                value: Box::new(Expr::Name(ast::ExprName {
                    id: Name::new(binding),
                    ctx: ast::ExprContext::Store,
                    range: TextRange::default(),
                    node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                })),
                slice: Box::new(expr.clone()),
                ctx: ast::ExprContext::Load,
            });
            let content = checker.generator().expr(&new_expr);
            Ok(Fix::unsafe_edits(
                Edit::range_replacement(content, expr.range()),
                [import_edit],
            ))
        }
    }
}
/// RUF013
///
/// Flags non-variadic parameters whose default is `None` but whose annotation
/// does not explicitly allow `None`. Handles both quoted (string) and
/// unquoted annotations.
pub(crate) fn implicit_optional(checker: &Checker, parameters: &Parameters) {
    for parameter in parameters.iter_non_variadic_params() {
        // Only parameters defaulting to `None` are candidates.
        let Some(Expr::NoneLiteral(_)) = parameter.default() else {
            continue;
        };
        let Some(annotation) = parameter.annotation() else {
            continue;
        };

        if let Expr::StringLiteral(string_expr) = annotation {
            // Quoted annotation: parse the string contents as a type
            // expression first; unparsable annotations are skipped entirely.
            if let Ok(parsed_annotation) = checker.parse_type_annotation(string_expr) {
                let Some(expr) = type_hint_explicitly_allows_none(
                    parsed_annotation.expression(),
                    checker,
                    checker.target_version(),
                ) else {
                    continue;
                };
                let conversion_type = checker.target_version().into();

                let mut diagnostic =
                    checker.report_diagnostic(ImplicitOptional { conversion_type }, expr.range());
                // Only offer a fix for annotations whose parsed kind is
                // "simple"; for other kinds the text rewrite is skipped.
                if parsed_annotation.kind().is_simple() {
                    diagnostic.try_set_fix(|| generate_fix(checker, conversion_type, expr));
                }
            }
        } else {
            // Unquoted annotation.
            let Some(expr) =
                type_hint_explicitly_allows_none(annotation, checker, checker.target_version())
            else {
                continue;
            };
            // NOTE(review): unlike the quoted branch above, this branch also
            // honors `lint.future-annotations` — presumably because a future
            // import cannot change how a *string* annotation is evaluated at
            // runtime; confirm against the rule docs if touching this.
            let conversion_type = if checker.target_version() >= PythonVersion::PY310
                || checker.settings().future_annotations
            {
                ConversionType::BinOpOr
            } else {
                ConversionType::Optional
            };

            let mut diagnostic =
                checker.report_diagnostic(ImplicitOptional { conversion_type }, expr.range());
            diagnostic.try_set_fix(|| generate_fix(checker, conversion_type, expr));
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/assignment_in_assert.rs | crates/ruff_linter/src/rules/ruff/rules/assignment_in_assert.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::Binding;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for named assignment expressions (e.g., `x := 0`) in `assert`
/// statements.
///
/// ## Why is this bad?
/// Named assignment expressions (also known as "walrus operators") are used to
/// assign a value to a variable as part of a larger expression.
///
/// Named assignments are syntactically valid in `assert` statements. However,
/// when the Python interpreter is run under the `-O` flag, `assert` statements
/// are not executed. In this case, the named assignment will also be ignored,
/// which may result in unexpected behavior (e.g., undefined variable
/// accesses).
///
/// ## Example
/// ```python
/// assert (x := 0) == 0
/// print(x)
/// ```
///
/// Use instead:
/// ```python
/// x = 0
/// assert x == 0
/// print(x)
/// ```
///
/// The rule avoids flagging named expressions that define variables which are
/// only referenced from inside `assert` statements; the following will not
/// trigger the rule:
/// ```python
/// assert (x := y**2) > 42, f"Expected >42 but got {x}"
/// ```
///
/// Nor will this:
/// ```python
/// assert (x := y**2) > 42
/// assert x < 1_000_000
/// ```
///
/// ## References
/// - [Python documentation: `-O`](https://docs.python.org/3/using/cmdline.html#cmdoption-O)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.2.0")]
pub(crate) struct AssignmentInAssert;

// No fix is offered for this rule: this impl provides only a message, and
// removing the walrus automatically could change program behavior.
impl Violation for AssignmentInAssert {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Avoid assignment expressions in `assert` statements".to_string()
    }
}
/// RUF018
pub(crate) fn assignment_in_assert(checker: &Checker, binding: &Binding) {
    // Only bindings created inside an `assert` statement are relevant.
    if !binding.in_assert_statement() {
        return;
    }

    let semantic = checker.semantic();

    // The binding must originate from a named expression (walrus operator).
    let Some(named_expr) = binding
        .expression(semantic)
        .and_then(|expr| expr.as_named_expr())
    else {
        return;
    };

    // Flag the walrus only when some reference to the binding escapes the
    // `assert` statement(s): under `python -O` the assignment never runs, so
    // such a reference would observe an undefined name.
    let escapes_assert = binding
        .references()
        .any(|reference| !semantic.reference(reference).in_assert_statement());

    if escapes_assert {
        checker.report_diagnostic(AssignmentInAssert, named_expr.range());
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/redundant_bool_literal.rs | crates/ruff_linter/src/rules/ruff/rules/redundant_bool_literal.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::Expr;
use ruff_python_semantic::analyze::typing::traverse_literal;
use ruff_text_size::Ranged;
use bitflags::bitflags;
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for `Literal[True, False]` type annotations.
///
/// ## Why is this bad?
/// `Literal[True, False]` can be replaced with `bool` in type annotations,
/// which has the same semantic meaning but is more concise and readable.
///
/// `bool` type has exactly two constant instances: `True` and `False`. Static
/// type checkers such as [mypy] treat `Literal[True, False]` as equivalent to
/// `bool` in a type annotation.
///
/// ## Example
/// ```python
/// from typing import Literal
///
/// x: Literal[True, False]
/// y: Literal[True, False, "hello", "world"]
/// ```
///
/// Use instead:
/// ```python
/// from typing import Literal
///
/// x: bool
/// y: Literal["hello", "world"] | bool
/// ```
///
/// ## Fix safety
/// The fix for this rule is marked as unsafe, as it may change the semantics of the code.
/// Specifically:
///
/// - Type checkers may not treat `bool` as equivalent when overloading boolean arguments
/// with `Literal[True]` and `Literal[False]` (see, e.g., [#14764] and [#5421]).
/// - `bool` is not strictly equivalent to `Literal[True, False]`, as `bool` is
/// a subclass of `int`, and this rule may not apply if the type annotations are used
/// in a numeric context.
///
/// Further, the `Literal` slice may contain trailing-line comments which the fix would remove.
///
/// ## References
/// - [Typing documentation: Legal parameters for `Literal` at type check time](https://typing.python.org/en/latest/spec/literal.html#legal-parameters-for-literal-at-type-check-time)
/// - [Python documentation: Boolean type - `bool`](https://docs.python.org/3/library/stdtypes.html#boolean-type-bool)
///
/// [mypy]: https://github.com/python/mypy/blob/master/mypy/typeops.py#L985
/// [#14764]: https://github.com/python/mypy/issues/14764
/// [#5421]: https://github.com/microsoft/pyright/issues/5421
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.8.0")]
pub(crate) struct RedundantBoolLiteral {
    // Whether the `Literal` contains members other than `True`/`False`. When
    // true, the suggested replacement is `Literal[...] | bool` and no
    // automatic fix is offered.
    seen_others: bool,
}

impl Violation for RedundantBoolLiteral {
    // A fix is only available when the entire `Literal` collapses to `bool`.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        if self.seen_others {
            "`Literal[True, False, ...]` can be replaced with `Literal[...] | bool`".to_string()
        } else {
            "`Literal[True, False]` can be replaced with `bool`".to_string()
        }
    }

    fn fix_title(&self) -> Option<String> {
        Some(if self.seen_others {
            "Replace with `Literal[...] | bool`".to_string()
        } else {
            "Replace with `bool`".to_string()
        })
    }
}
/// RUF038
pub(crate) fn redundant_bool_literal<'a>(checker: &Checker, literal_expr: &'a Expr) {
    // Fast path: without a `typing` import seen, no `Literal` can be in play.
    if !checker.semantic().seen_typing() {
        return;
    }

    // Record which kinds of members appear in the `Literal` slice.
    let mut seen_expr = BooleanLiteral::empty();
    let mut find_bools = |expr: &'a Expr, _parent: &'a Expr| {
        let expr_type = match expr {
            Expr::BooleanLiteral(boolean_expr) if boolean_expr.value => BooleanLiteral::TRUE,
            Expr::BooleanLiteral(_) => BooleanLiteral::FALSE,
            _ => BooleanLiteral::OTHER,
        };
        seen_expr.insert(expr_type);
    };
    traverse_literal(&mut find_bools, checker.semantic(), literal_expr);

    // Both `True` and `False` must be present for the rule to apply.
    if !seen_expr.contains(BooleanLiteral::TRUE | BooleanLiteral::FALSE) {
        return;
    }

    let seen_others = seen_expr.contains(BooleanLiteral::OTHER);
    let mut diagnostic =
        checker.report_diagnostic(RedundantBoolLiteral { seen_others }, literal_expr.range());

    // Provide a [`Fix`] only when the complete `Literal` can be replaced with
    // `bool` and the builtin `bool` is not shadowed. Applying the fix can
    // leave an unused import to be fixed by the `unused-import` rule.
    if !seen_others && checker.semantic().has_builtin_binding("bool") {
        diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
            "bool".to_string(),
            literal_expr.range(),
        )));
    }
}
bitflags! {
    /// Tracks which member kinds have been observed while walking a `Literal`.
    #[derive(Default, Debug)]
    struct BooleanLiteral: u8 {
        /// A literal `True` member was seen.
        const TRUE = 1 << 0;
        /// A literal `False` member was seen.
        const FALSE = 1 << 1;
        /// Any non-boolean member was seen.
        const OTHER = 1 << 2;
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/assert_with_print_message.rs | crates/ruff_linter/src/rules/ruff/rules/assert_with_print_message.rs | use ruff_python_ast::{self as ast, Expr, Stmt};
use ruff_text_size::{Ranged, TextRange};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for uses of `assert expression, print(message)`.
///
/// ## Why is this bad?
/// If an `assert x, y` assertion fails, the Python interpreter raises an
/// `AssertionError`, and the evaluated value of `y` is used as the contents of
/// that assertion error. The `print` function always returns `None`, however,
/// so the evaluated value of a call to `print` will always be `None`.
///
/// Using a `print` call in this context will therefore output the message to
/// `stdout`, before raising an empty `AssertionError(None)`. Instead, remove
/// the `print` and pass the message directly as the second expression,
/// allowing `stderr` to capture the message in a well-formatted context.
///
/// ## Example
/// ```python
/// assert False, print("This is a message")
/// ```
///
/// Use instead:
/// ```python
/// assert False, "This is a message"
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe, as changing the second expression
/// will result in a different `AssertionError` message being raised, as well as
/// a change in `stdout` output.
///
/// ## References
/// - [Python documentation: `assert`](https://docs.python.org/3/reference/simple_stmts.html#the-assert-statement)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.8.0")]
pub(crate) struct AssertWithPrintMessage;

// `AlwaysFixableViolation`: every report of this rule carries a fix, which
// rewrites the whole `assert` statement (see `assert_with_print_message`).
impl AlwaysFixableViolation for AssertWithPrintMessage {
    #[derive_message_formats]
    fn message(&self) -> String {
        "`print()` call in `assert` statement is likely unintentional".to_string()
    }

    fn fix_title(&self) -> String {
        "Remove `print`".to_owned()
    }
}
/// RUF030
///
/// Checks whether the `msg` argument to an `assert` statement is a call to the
/// built-in `print`, and if so, replaces the message with the arguments to the
/// `print` call.
pub(crate) fn assert_with_print_message(checker: &Checker, stmt: &ast::StmtAssert) {
    let Some(Expr::Call(call)) = stmt.msg.as_deref() else {
        return;
    };

    // We have to check that the `print` call is a call to the *built-in*
    // `print` function, not a user-defined shadow of it.
    if !checker.semantic().match_builtin_expr(&call.func, "print") {
        return;
    }

    // This is the confirmed rule condition.
    let mut diagnostic = checker.report_diagnostic(AssertWithPrintMessage, call.range());

    // We have to replace the entire statement, as the `print` could be empty
    // and thus `call.range()` will cease to exist.
    let replacement = Stmt::Assert(ast::StmtAssert {
        test: stmt.test.clone(),
        msg: print_arguments::to_expr(&call.arguments, checker).map(Box::new),
        range: TextRange::default(),
        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
    });
    diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
        checker.generator().stmt(&replacement),
        stmt.range(),
    )));
}
/// Extracts the arguments from a `print` call and converts them to some kind of string
/// expression.
///
/// Three cases are handled:
/// - if there are no arguments, return `None` so that `diagnostic` can remove `msg` from `assert`;
/// - if all of `print` arguments including `sep` are string literals, return a `Expr::StringLiteral`;
/// - otherwise, return a `Expr::FString`.
mod print_arguments {
use itertools::Itertools;
use ruff_python_ast::{
Arguments, ConversionFlag, Expr, ExprFString, FString, FStringFlags, FStringValue,
InterpolatedElement, InterpolatedStringElement, InterpolatedStringElements,
InterpolatedStringLiteralElement, StringLiteral, StringLiteralFlags,
};
use ruff_text_size::TextRange;
use crate::checkers::ast::Checker;
/// Converts an expression to a list of `FStringElement`s.
///
/// Three cases are handled:
/// - if the expression is a string literal, each part of the string will be converted to a
/// `FStringLiteralElement`.
/// - if the expression is an f-string, the elements will be returned as-is.
/// - otherwise, the expression will be wrapped in a `FStringExpressionElement`.
    fn expr_to_fstring_elements(expr: &Expr) -> Vec<InterpolatedStringElement> {
        match expr {
            // If the expression is a string literal, convert each part to a
            // `FStringLiteralElement`. (A string literal may consist of several
            // implicitly-concatenated parts.)
            Expr::StringLiteral(string) => string
                .value
                .iter()
                .map(|part| {
                    InterpolatedStringElement::Literal(InterpolatedStringLiteralElement {
                        value: part.value.clone(),
                        range: TextRange::default(),
                        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                    })
                })
                .collect(),
            // If the expression is an f-string, return its elements as-is.
            Expr::FString(fstring) => fstring.value.elements().cloned().collect(),
            // Otherwise, return the expression as a single
            // `FStringExpressionElement` wrapping the expression, so it will be
            // interpolated at runtime.
            expr => vec![InterpolatedStringElement::Interpolation(
                InterpolatedElement {
                    expression: Box::new(expr.clone()),
                    debug_text: None,
                    conversion: ConversionFlag::None,
                    format_spec: None,
                    range: TextRange::default(),
                    node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                },
            )],
        }
    }
/// Converts a list of `FStringElement`s to a list of `StringLiteral`s.
///
/// If any of the elements are not string literals, `None` is returned.
///
/// This is useful (in combination with [`expr_to_fstring_elements`]) for
/// checking if the `sep` and `args` arguments to `print` are all string
/// literals.
    fn fstring_elements_to_string_literals<'a>(
        mut elements: impl ExactSizeIterator<Item = &'a InterpolatedStringElement>,
        flags: StringLiteralFlags,
    ) -> Option<Vec<StringLiteral>> {
        // `try_fold` short-circuits to `None` on the first non-literal element.
        elements.try_fold(Vec::with_capacity(elements.len()), |mut acc, element| {
            if let InterpolatedStringElement::Literal(literal) = element {
                acc.push(StringLiteral {
                    value: literal.value.clone(),
                    flags,
                    range: TextRange::default(),
                    node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                });
                Some(acc)
            } else {
                // An interpolation means the input cannot be represented as a
                // plain string literal.
                None
            }
        })
    }
/// Converts the `sep` and `args` arguments to a [`Expr::StringLiteral`].
///
/// This function will return [`None`] if any of the arguments are not string literals,
/// or if there are no arguments at all.
    fn args_to_string_literal_expr<'a>(
        args: impl ExactSizeIterator<Item = &'a Vec<InterpolatedStringElement>>,
        sep: impl ExactSizeIterator<Item = &'a InterpolatedStringElement>,
        flags: StringLiteralFlags,
    ) -> Option<Expr> {
        // If there are no arguments, short-circuit and return `None`.
        if args.len() == 0 {
            return None;
        }

        // Attempt to convert the `sep` and `args` arguments to string literals.
        // We need to maintain `args` as a Vec of Vecs, as the first Vec represents
        // the arguments to the `print` call, and the inner Vecs represent the elements
        // of a concatenated string literal. (e.g. "text", "text" "text") The `sep` will
        // be inserted only between the outer Vecs.
        let (Some(sep), Some(args)) = (
            fstring_elements_to_string_literals(sep, flags),
            args.map(|arg| fstring_elements_to_string_literals(arg.iter(), flags))
                .collect::<Option<Vec<_>>>(),
        ) else {
            // If any of the arguments are not string literals, return None.
            return None;
        };

        // Put the `sep` into a single Rust `String`.
        let sep_string = sep
            .into_iter()
            .map(|string_literal| string_literal.value)
            .join("");

        // Join the `args` with the `sep`: concatenated parts within one
        // argument are joined directly; arguments are joined by `sep`.
        let combined_string = args
            .into_iter()
            .map(|string_literals| {
                string_literals
                    .into_iter()
                    .map(|string_literal| string_literal.value)
                    .join("")
            })
            .join(&sep_string);

        // Build a single string literal carrying the requested quote flags.
        Some(Expr::from(StringLiteral {
            value: combined_string.into(),
            flags,
            range: TextRange::default(),
            node_index: ruff_python_ast::AtomicNodeIndex::NONE,
        }))
    }
/// Converts the `sep` and `args` arguments to a [`Expr::FString`].
///
/// This function will only return [`None`] if there are no arguments at all.
///
/// ## Note
/// This function will always return an f-string, even if all arguments are string literals.
/// This can produce unnecessary f-strings.
///
/// Also note that the iterator arguments of this function are consumed,
/// as opposed to the references taken by [`args_to_string_literal_expr`].
    fn args_to_fstring_expr(
        mut args: impl ExactSizeIterator<Item = Vec<InterpolatedStringElement>>,
        sep: impl ExactSizeIterator<Item = InterpolatedStringElement>,
        flags: FStringFlags,
    ) -> Option<Expr> {
        // If there are no arguments, short-circuit and return `None`.
        let first_arg = args.next()?;
        // Buffer the separator elements once; they are cloned in between each
        // subsequent argument.
        let sep = sep.collect::<Vec<_>>();

        // Interleave: first_arg, sep, arg2, sep, arg3, ...
        let fstring_elements = args.fold(first_arg, |mut elements, arg| {
            elements.extend(sep.clone());
            elements.extend(arg);
            elements
        });

        Some(Expr::FString(ExprFString {
            value: FStringValue::single(FString {
                elements: InterpolatedStringElements::from(fstring_elements),
                flags,
                range: TextRange::default(),
                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            }),
            range: TextRange::default(),
            node_index: ruff_python_ast::AtomicNodeIndex::NONE,
        }))
    }
/// Attempts to convert the `print` arguments to a suitable string expression.
///
/// If the `sep` argument is provided, it will be used as the separator between
/// arguments. Otherwise, a space will be used.
///
/// `end` and `file` keyword arguments are ignored, as they don't affect the
/// output of the `print` statement.
///
/// ## Returns
///
/// - [`Some`]<[`Expr::StringLiteral`]> if all arguments including `sep` are string literals.
/// - [`Some`]<[`Expr::FString`]> if any of the arguments are not string literals.
/// - [`None`] if the `print` contains no positional arguments at all.
    pub(super) fn to_expr(arguments: &Arguments, checker: &Checker) -> Option<Expr> {
        // Convert the `sep` keyword argument into `FStringElement`s.
        let sep = arguments
            .find_keyword("sep")
            .and_then(
                // If the `sep` argument is `None`, treat this as default behavior.
                |keyword| {
                    if let Expr::NoneLiteral(_) = keyword.value {
                        None
                    } else {
                        Some(&keyword.value)
                    }
                },
            )
            .map(expr_to_fstring_elements)
            .unwrap_or_else(|| {
                // Default separator: a single space, mirroring `print`.
                vec![InterpolatedStringElement::Literal(
                    InterpolatedStringLiteralElement {
                        range: TextRange::default(),
                        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                        value: " ".into(),
                    },
                )]
            });

        // Convert every positional argument into `FStringElement`s.
        let args = arguments
            .args
            .iter()
            .map(expr_to_fstring_elements)
            .collect::<Vec<_>>();

        // Attempt to convert the `sep` and `args` arguments to a string literal,
        // falling back to an f-string if the arguments are not all string literals.
        args_to_string_literal_expr(args.iter(), sep.iter(), checker.default_string_flags())
            .or_else(|| {
                args_to_fstring_expr(
                    args.into_iter(),
                    sep.into_iter(),
                    checker.default_fstring_flags(),
                )
            })
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/post_init_default.rs | crates/ruff_linter/src/rules/ruff/rules/post_init_default.rs | use anyhow::Context;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_ast::token::parenthesized_range;
use ruff_python_semantic::{Scope, ScopeKind};
use ruff_python_trivia::{indentation_at_offset, textwrap};
use ruff_source_file::LineRanges;
use ruff_text_size::Ranged;
use crate::{Edit, Fix, FixAvailability, Violation};
use crate::{checkers::ast::Checker, importer::ImportRequest};
use crate::rules::ruff::helpers::{DataclassKind, dataclass_kind};
/// ## What it does
/// Checks for `__post_init__` dataclass methods with parameter defaults.
///
/// ## Why is this bad?
/// Adding a default value to a parameter in a `__post_init__` method has no
/// impact on whether the parameter will have a default value in the dataclass's
/// generated `__init__` method. To create an init-only dataclass parameter with
/// a default value, you should use an `InitVar` field in the dataclass's class
/// body and give that `InitVar` field a default value.
///
/// As the [documentation] states:
///
/// > Init-only fields are added as parameters to the generated `__init__()`
/// > method, and are passed to the optional `__post_init__()` method. They are
/// > not otherwise used by dataclasses.
///
/// ## Example
/// ```python
/// from dataclasses import InitVar, dataclass
///
///
/// @dataclass
/// class Foo:
/// bar: InitVar[int] = 0
///
/// def __post_init__(self, bar: int = 1, baz: int = 2) -> None:
/// print(bar, baz)
///
///
/// foo = Foo() # Prints '0 2'.
/// ```
///
/// Use instead:
/// ```python
/// from dataclasses import InitVar, dataclass
///
///
/// @dataclass
/// class Foo:
/// bar: InitVar[int] = 1
/// baz: InitVar[int] = 2
///
/// def __post_init__(self, bar: int, baz: int) -> None:
/// print(bar, baz)
///
///
/// foo = Foo() # Prints '1 2'.
/// ```
///
/// ## Fix safety
///
/// This fix is always marked as unsafe because, although switching to `InitVar` is usually correct,
/// it is incorrect when the parameter is not intended to be part of the public API or when the value
/// is meant to be shared across all instances.
///
/// ## References
/// - [Python documentation: Post-init processing](https://docs.python.org/3/library/dataclasses.html#post-init-processing)
/// - [Python documentation: Init-only variables](https://docs.python.org/3/library/dataclasses.html#init-only-variables)
///
/// [documentation]: https://docs.python.org/3/library/dataclasses.html#init-only-variables
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.9.0")]
pub(crate) struct PostInitDefault;

impl Violation for PostInitDefault {
    // A fix is only sometimes available: fixes stop being offered once a
    // parameter cannot be converted (see `post_init_default`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "`__post_init__` method with argument defaults".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        Some("Use `dataclasses.InitVar` instead".to_string())
    }
}
/// RUF033
pub(crate) fn post_init_default(checker: &Checker, function_def: &ast::StmtFunctionDef) {
    if &function_def.name != "__post_init__" {
        return;
    }

    // The rule only applies to methods defined in the body of a *stdlib*
    // `@dataclass` class.
    let current_scope = checker.semantic().current_scope();
    let ScopeKind::Class(class_def) = current_scope.kind else {
        return;
    };
    if !matches!(
        dataclass_kind(class_def, checker.semantic()),
        Some((DataclassKind::Stdlib, _))
    ) {
        return;
    }

    let mut can_fix = true;
    for parameter in function_def.parameters.iter_non_variadic_params() {
        let Some(default) = parameter.default() else {
            continue;
        };

        let mut diagnostic = checker.report_diagnostic(PostInitDefault, default.range());
        if can_fix {
            diagnostic.try_set_fix(|| {
                use_initvar(current_scope, function_def, parameter, default, checker)
            });
            // Stop offering fixes as soon as one parameter cannot be fixed.
            // Otherwise, we risk a syntax error (a parameter without a default
            // following a parameter with a default).
            can_fix = diagnostic.fix().is_some();
        }
    }
}
/// Generate a [`Fix`] to transform a `__post_init__` default argument into a
/// `dataclasses.InitVar` pseudo-field.
///
/// # Errors
/// Returns an error if a field with the parameter's name already exists in the
/// class body, or if `dataclasses.InitVar` cannot be imported and bound.
fn use_initvar(
    current_scope: &Scope,
    post_init_def: &ast::StmtFunctionDef,
    parameter_with_default: &ast::ParameterWithDefault,
    default: &ast::Expr,
    checker: &Checker,
) -> anyhow::Result<Fix> {
    let parameter = &parameter_with_default.parameter;
    if current_scope.has(&parameter.name) {
        return Err(anyhow::anyhow!(
            "Cannot add a `{}: InitVar` field to the class body, as a field by that name already exists",
            parameter.name
        ));
    }

    // Ensure that `dataclasses.InitVar` is accessible. For example,
    // + `from dataclasses import InitVar`
    let (import_edit, initvar_binding) = checker.importer().get_or_import_symbol(
        &ImportRequest::import("dataclasses", "InitVar"),
        default.start(),
        checker.semantic(),
    )?;

    let locator = checker.locator();
    // Widen the default's range to include any surrounding parentheses so the
    // deletion and the copied text both cover the full expression.
    let default_loc = parenthesized_range(
        default.into(),
        parameter_with_default.into(),
        checker.tokens(),
    )
    .unwrap_or(default.range());

    // Delete the default value. For example,
    // - def __post_init__(self, foo: int = 0) -> None: ...
    // + def __post_init__(self, foo: int) -> None: ...
    let default_edit = Edit::deletion(parameter.end(), default_loc.end());

    // Add `dataclasses.InitVar` field to class body, reusing the parameter's
    // annotation text (if any) and the original default expression's text.
    let content = {
        let default = locator.slice(default_loc);
        let parameter_name = locator.slice(&parameter.name);
        let line_ending = checker.stylist().line_ending().as_str();
        if let Some(annotation) = &parameter
            .annotation()
            .map(|annotation| locator.slice(annotation))
        {
            format!("{parameter_name}: {initvar_binding}[{annotation}] = {default}{line_ending}")
        } else {
            format!("{parameter_name}: {initvar_binding} = {default}{line_ending}")
        }
    };
    // Indent the new field like the `__post_init__` method and insert it on
    // the line directly above the method.
    let indentation = indentation_at_offset(post_init_def.start(), checker.source())
        .context("Failed to calculate leading indentation of `__post_init__` method")?;
    let content = textwrap::indent_first_line(&content, indentation);
    let initvar_edit = Edit::insertion(
        content.into_owned(),
        locator.line_start(post_init_def.start()),
    );

    Ok(Fix::unsafe_edits(import_edit, [default_edit, initvar_edit]))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/property_without_return.rs | crates/ruff_linter/src/rules/ruff/rules/property_without_return.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::identifier::Identifier;
use ruff_python_ast::visitor::{Visitor, walk_expr, walk_stmt};
use ruff_python_ast::{Expr, Stmt, StmtFunctionDef};
use ruff_python_semantic::analyze::{function_type, visibility};
use crate::checkers::ast::Checker;
use crate::{FixAvailability, Violation};
/// ## What it does
/// Detects class `@property` methods that do not have a `return` statement.
///
/// ## Why is this bad?
/// Property methods are expected to return a computed value; a missing `return` in a property usually indicates an implementation mistake.
///
/// ## Example
/// ```python
/// class User:
/// @property
/// def full_name(self):
/// f"{self.first_name} {self.last_name}"
/// ```
///
/// Use instead:
/// ```python
/// class User:
/// @property
/// def full_name(self):
/// return f"{self.first_name} {self.last_name}"
/// ```
///
/// ## References
/// - [Python documentation: The property class](https://docs.python.org/3/library/functions.html#property)
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.14.7")]
pub(crate) struct PropertyWithoutReturn {
    // Name of the offending property method, interpolated into the message.
    name: String,
}

impl Violation for PropertyWithoutReturn {
    // No autofix is offered for this rule.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::None;

    #[derive_message_formats]
    fn message(&self) -> String {
        let Self { name } = self;
        format!("`{name}` is a property without a `return` statement")
    }
}
/// RUF066
pub(crate) fn property_without_return(checker: &Checker, function_def: &StmtFunctionDef) {
    let semantic = checker.semantic();

    // Stub files and protocol/abstract methods legitimately omit `return`.
    if checker.source_type.is_stub() {
        return;
    }
    if semantic.in_protocol_or_abstract_method() {
        return;
    }

    let StmtFunctionDef {
        decorator_list,
        body,
        name,
        ..
    } = function_def;

    // Only plain `@property` methods are in scope; overloads and stub-like
    // bodies are exempt.
    if !visibility::is_property(decorator_list, [], semantic) {
        return;
    }
    if visibility::is_overload(decorator_list, semantic) {
        return;
    }
    if function_type::is_stub(function_def, semantic) {
        return;
    }

    // Scan the body for `return`/`raise`/`yield`/`yield from`; only report
    // when none of them is present.
    let mut search = PropertyVisitor::default();
    search.visit_body(body);
    if !search.found {
        checker.report_diagnostic(
            PropertyWithoutReturn {
                name: name.to_string(),
            },
            function_def.identifier(),
        );
    }
}
/// Walks a property body looking for any construct that produces or signals
/// a value: `return`, `raise`, `yield`, or `yield from`.
#[derive(Default)]
struct PropertyVisitor {
    // Set once a qualifying statement/expression is seen; traversal
    // short-circuits afterwards.
    found: bool,
}

// NOTE: We search for the presence of a `yield`/`yield from`/`raise`/`return`
// statement or expression, as having one of those indicates that there is
// likely no implementation mistake.
impl Visitor<'_> for PropertyVisitor {
    fn visit_expr(&mut self, expr: &Expr) {
        if self.found {
            return;
        }
        if matches!(expr, Expr::Yield(_) | Expr::YieldFrom(_)) {
            self.found = true;
        } else {
            walk_expr(self, expr);
        }
    }

    fn visit_stmt(&mut self, stmt: &Stmt) {
        if self.found {
            return;
        }
        match stmt {
            Stmt::Return(_) | Stmt::Raise(_) => self.found = true,
            // Do not recurse into nested functions; they're evaluated separately.
            Stmt::FunctionDef(_) => {}
            _ => walk_stmt(self, stmt),
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/access_annotations_from_class_dict.rs | crates/ruff_linter/src/rules/ruff/rules/access_annotations_from_class_dict.rs | use crate::checkers::ast::Checker;
use crate::{FixAvailability, Violation};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{Expr, ExprCall, ExprSubscript, PythonVersion};
use ruff_text_size::Ranged;
/// ## What it does
/// Checks for uses of `foo.__dict__.get("__annotations__")` or
/// `foo.__dict__["__annotations__"]` on Python 3.10+ and Python < 3.10 when
/// [typing-extensions](https://docs.astral.sh/ruff/settings/#lint_typing-extensions)
/// is enabled.
///
/// ## Why is this bad?
/// Starting with Python 3.14, directly accessing `__annotations__` via
/// `foo.__dict__.get("__annotations__")` or `foo.__dict__["__annotations__"]`
/// will only return annotations if the class is defined under
/// `from __future__ import annotations`.
///
/// Therefore, it is better to use dedicated library functions like
/// `annotationlib.get_annotations` (Python 3.14+), `inspect.get_annotations`
/// (Python 3.10+), or `typing_extensions.get_annotations` (for Python < 3.10 if
/// [typing-extensions](https://pypi.org/project/typing-extensions/) is
/// available).
///
/// The benefits of using these functions include:
/// 1. **Avoiding Undocumented Internals:** They provide a stable, public API,
/// unlike direct `__dict__` access which relies on implementation details.
/// 2. **Forward-Compatibility:** They are designed to handle changes in
/// Python's annotation system across versions, ensuring your code remains
/// robust (e.g., correctly handling the Python 3.14 behavior mentioned
/// above).
///
/// See [Python Annotations Best Practices](https://docs.python.org/3.14/howto/annotations.html)
/// for alternatives.
///
/// ## Example
///
/// ```python
/// foo.__dict__.get("__annotations__", {})
/// # or
/// foo.__dict__["__annotations__"]
/// ```
///
/// On Python 3.14+, use instead:
/// ```python
/// import annotationlib
///
/// annotationlib.get_annotations(foo)
/// ```
///
/// On Python 3.10+, use instead:
/// ```python
/// import inspect
///
/// inspect.get_annotations(foo)
/// ```
///
/// On Python < 3.10 with [typing-extensions](https://pypi.org/project/typing-extensions/)
/// installed, use instead:
/// ```python
/// import typing_extensions
///
/// typing_extensions.get_annotations(foo)
/// ```
///
/// ## Fix safety
///
/// No autofix is currently provided for this rule.
///
/// ## Fix availability
///
/// No autofix is currently provided for this rule.
///
/// ## References
/// - [Python Annotations Best Practices](https://docs.python.org/3.14/howto/annotations.html)
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.12.1")]
pub(crate) struct AccessAnnotationsFromClassDict {
    // Target Python version, used to pick the suggested replacement API.
    python_version: PythonVersion,
}

impl Violation for AccessAnnotationsFromClassDict {
    // No autofix is offered for this rule.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::None;

    #[derive_message_formats]
    fn message(&self) -> String {
        // Suggest the most specific helper available on the target version:
        // `annotationlib` (3.14+), `inspect` (3.10+), else `typing_extensions`.
        let suggestion = if self.python_version >= PythonVersion::PY314 {
            "annotationlib.get_annotations"
        } else if self.python_version >= PythonVersion::PY310 {
            "inspect.get_annotations"
        } else {
            "typing_extensions.get_annotations"
        };
        format!("Use `{suggestion}` instead of `__dict__` access")
    }
}
/// RUF063
pub(crate) fn access_annotations_from_class_dict_with_get(checker: &Checker, call: &ExprCall) {
    let python_version = checker.target_version();
    let typing_extensions = checker.settings().typing_extensions;

    // Only apply this rule for Python 3.10 and newer unless `typing-extensions` is enabled.
    if python_version < PythonVersion::PY310 && !typing_extensions {
        return;
    }

    // Expected pattern: `foo.__dict__.get("__annotations__"[, <default>])`,
    // where `call` is the `.get(...)` part.

    // The callee must be an attribute access spelling `<receiver>.get` ...
    let Some(get_attribute) = call.func.as_attribute_expr() else {
        return;
    };
    if get_attribute.attr.as_str() != "get" {
        return;
    }

    // ... and `<receiver>` must itself be an attribute access on `__dict__`.
    if !get_attribute
        .value
        .as_attribute_expr()
        .is_some_and(|attr| attr.attr.as_str() == "__dict__")
    {
        return;
    }

    // `.get()` must be called with one or two positional arguments (the key
    // plus an optional default, whose value does not affect the match) and no
    // keyword arguments.
    if !call.arguments.keywords.is_empty() || call.arguments.len() > 2 {
        return;
    }
    let Some(first_arg) = call.arguments.find_positional(0) else {
        return;
    };

    // The key must be the string literal "__annotations__".
    if first_arg
        .as_string_literal_expr()
        .is_some_and(|literal| literal.value.to_str() == "__annotations__")
    {
        checker.report_diagnostic(
            AccessAnnotationsFromClassDict { python_version },
            call.range(),
        );
    }
}
/// RUF063
pub(crate) fn access_annotations_from_class_dict_by_key(
    checker: &Checker,
    subscript: &ExprSubscript,
) {
    let python_version = checker.target_version();
    let typing_extensions = checker.settings().typing_extensions;

    // Only apply this rule for Python 3.10 and newer unless `typing-extensions` is enabled.
    if python_version < PythonVersion::PY310 && !typing_extensions {
        return;
    }

    // Expected pattern: `foo.__dict__["__annotations__"]`.

    // The subscript key must be the string literal "__annotations__".
    let key_is_annotations = subscript
        .slice
        .as_string_literal_expr()
        .is_some_and(|literal| literal.value.to_str() == "__annotations__");

    // The subscripted value must be an attribute access on `__dict__`.
    let value_is_dunder_dict = subscript
        .value
        .as_attribute_expr()
        .is_some_and(|attr| attr.attr.as_str() == "__dict__");

    if key_is_annotations && value_is_dunder_dict {
        checker.report_diagnostic(
            AccessAnnotationsFromClassDict { python_version },
            subscript.range(),
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/legacy_form_pytest_raises.rs | crates/ruff_linter/src/rules/ruff/rules/legacy_form_pytest_raises.rs | use itertools::{Either, Itertools};
use ruff_diagnostics::{Edit, Fix};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, AtomicNodeIndex, Expr, Stmt, StmtExpr, StmtWith, WithItem};
use ruff_python_semantic::SemanticModel;
use ruff_python_trivia::{has_leading_content, has_trailing_content, leading_indentation};
use ruff_source_file::UniversalNewlines;
use ruff_text_size::{Ranged, TextRange};
use std::fmt;
use crate::{FixAvailability, Violation, checkers::ast::Checker};
/// ## What it does
/// Checks for non-contextmanager use of `pytest.raises`, `pytest.warns`, and `pytest.deprecated_call`.
///
/// ## Why is this bad?
/// The context-manager form is more readable, easier to extend, and supports additional kwargs.
///
/// ## Example
/// ```python
/// import pytest
///
///
/// excinfo = pytest.raises(ValueError, int, "hello")
/// pytest.warns(UserWarning, my_function, arg)
/// pytest.deprecated_call(my_deprecated_function, arg1, arg2)
/// ```
///
/// Use instead:
/// ```python
/// import pytest
///
///
/// with pytest.raises(ValueError) as excinfo:
/// int("hello")
/// with pytest.warns(UserWarning):
/// my_function(arg)
/// with pytest.deprecated_call():
/// my_deprecated_function(arg1, arg2)
/// ```
///
/// ## References
/// - [`pytest` documentation: `pytest.raises`](https://docs.pytest.org/en/latest/reference/reference.html#pytest-raises)
/// - [`pytest` documentation: `pytest.warns`](https://docs.pytest.org/en/latest/reference/reference.html#pytest-warns)
/// - [`pytest` documentation: `pytest.deprecated_call`](https://docs.pytest.org/en/latest/reference/reference.html#pytest-deprecated-call)
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.12.0")]
pub(crate) struct LegacyFormPytestRaises {
    // Which pytest helper (`raises`/`warns`/`deprecated_call`) was called in
    // legacy (non-context-manager) form.
    context_type: PytestContextType,
}

impl Violation for LegacyFormPytestRaises {
    // A fix is only offered when the surrounding statement can be rewritten
    // safely; see `legacy_raises_warns_deprecated_call`.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        format!(
            "Use context-manager form of `pytest.{}()`",
            self.context_type
        )
    }

    fn fix_title(&self) -> Option<String> {
        Some(format!(
            "Use `pytest.{}()` as a context-manager",
            self.context_type
        ))
    }
}
/// The `pytest` helper that a legacy-form call corresponds to.
#[derive(PartialEq, Clone, Copy)]
enum PytestContextType {
    Raises,
    Warns,
    DeprecatedCall,
}

impl fmt::Display for PytestContextType {
    /// Renders the helper's `pytest` attribute name (e.g. `raises`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Self::Raises => "raises",
            Self::Warns => "warns",
            Self::DeprecatedCall => "deprecated_call",
        })
    }
}
impl PytestContextType {
    /// Resolve `func` to one of the `pytest` helpers with a legacy form, if any.
    fn from_expr_name(func: &Expr, semantic: &SemanticModel) -> Option<Self> {
        let qualified_name = semantic.resolve_qualified_name(func)?;
        match qualified_name.segments() {
            ["pytest", "raises"] => Some(Self::Raises),
            ["pytest", "warns"] => Some(Self::Warns),
            ["pytest", "deprecated_call"] => Some(Self::DeprecatedCall),
            _ => None,
        }
    }

    /// Name and position of the "expected" argument (`expected_exception` /
    /// `expected_warning`), when the helper takes one.
    fn expected_arg(self) -> Option<(&'static str, usize)> {
        match self {
            Self::Raises => Some(("expected_exception", 0)),
            Self::Warns => Some(("expected_warning", 0)),
            Self::DeprecatedCall => None,
        }
    }

    /// Name and position of the legacy `func` argument.
    fn func_arg(self) -> (&'static str, usize) {
        let position = match self {
            Self::Raises | Self::Warns => 1,
            Self::DeprecatedCall => 0,
        };
        ("func", position)
    }
}
/// RUF061
pub(crate) fn legacy_raises_warns_deprecated_call(checker: &Checker, call: &ast::ExprCall) {
    let semantic = checker.semantic();

    // Only `pytest.raises`/`pytest.warns`/`pytest.deprecated_call` calls are in scope.
    let Some(context_type) = PytestContextType::from_expr_name(&call.func, semantic) else {
        return;
    };

    // The call is only in legacy form when a callable is passed as the `func`
    // argument; the context-manager form omits it.
    let (func_arg_name, func_arg_position) = context_type.func_arg();
    if call
        .arguments
        .find_argument(func_arg_name, func_arg_position)
        .is_none()
    {
        return;
    }

    let mut diagnostic =
        checker.report_diagnostic(LegacyFormPytestRaises { context_type }, call.range());

    // Only offer a fix when the statement occupies its line(s) alone, so the
    // replacement `with` statement can be substituted for the full range.
    let stmt = semantic.current_statement();
    if !has_leading_content(stmt.start(), checker.source())
        && !has_trailing_content(stmt.end(), checker.source())
    {
        if let Some(with_stmt) = try_fix_legacy_call(context_type, stmt, semantic) {
            let generated = checker.generator().stmt(&Stmt::With(with_stmt));
            // Re-indent every generated line after the first to match the
            // original statement's leading indentation.
            let first_line = checker.locator().line_str(stmt.start());
            let indentation = leading_indentation(first_line);
            let mut indented = String::new();
            for (idx, line) in generated.universal_newlines().enumerate() {
                if idx == 0 {
                    indented.push_str(&line);
                } else {
                    indented.push_str(checker.stylist().line_ending().as_str());
                    indented.push_str(indentation);
                    indented.push_str(&line);
                }
            }
            diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
                indented,
                stmt.range(),
            )));
        }
    }
}
/// Attempt to build the context-manager (`with`) replacement for a statement
/// containing a legacy-form call. Returns `None` when the statement shape is
/// not one that can be rewritten safely.
fn try_fix_legacy_call(
    context_type: PytestContextType,
    stmt: &Stmt,
    semantic: &SemanticModel,
) -> Option<StmtWith> {
    match stmt {
        Stmt::Expr(StmtExpr { value, .. }) => {
            let call = value.as_call_expr()?;
            // Handle two patterns for legacy calls:
            // 1. Direct usage: `pytest.raises(ZeroDivisionError, func, 1, b=0)`
            // 2. With match method: `pytest.raises(ZeroDivisionError, func, 1, b=0).match("division by zero")`
            //
            // The second branch specifically looks for raises().match() pattern which only exists for
            // `raises` (not `warns`/`deprecated_call`) since only `raises` returns an ExceptionInfo
            // object with a .match() method. We need to preserve this match condition when converting
            // to context manager form.
            if PytestContextType::from_expr_name(&call.func, semantic) == Some(context_type) {
                generate_with_statement(context_type, call, None, None, None)
            } else if let PytestContextType::Raises = context_type {
                let inner_raises_call = call
                    .func
                    .as_attribute_expr()
                    .filter(|expr_attribute| &expr_attribute.attr == "match")
                    .and_then(|expr_attribute| expr_attribute.value.as_call_expr())
                    .filter(|inner_call| {
                        PytestContextType::from_expr_name(&inner_call.func, semantic)
                            == Some(PytestContextType::Raises)
                    })?;
                // The first positional argument of `.match(...)`, if any,
                // becomes the `match=` keyword on the context manager.
                let match_arg = call.arguments.args.first();
                generate_with_statement(context_type, inner_raises_call, match_arg, None, None)
            } else {
                None
            }
        }
        Stmt::Assign(ast::StmtAssign { targets, value, .. }) => {
            let call = value.as_call_expr().filter(|call| {
                PytestContextType::from_expr_name(&call.func, semantic) == Some(context_type)
            })?;
            // For `raises`, a single assignment target becomes the `as`
            // binding of the `with` statement; for `warns`/`deprecated_call`,
            // the targets are instead assigned from the function call inside
            // the `with` body.
            let (optional_vars, assign_targets) = match context_type {
                PytestContextType::Raises => {
                    let [target] = targets.as_slice() else {
                        return None;
                    };
                    (Some(target), None)
                }
                PytestContextType::Warns | PytestContextType::DeprecatedCall => {
                    (None, Some(targets.as_slice()))
                }
            };
            generate_with_statement(context_type, call, None, optional_vars, assign_targets)
        }
        _ => None,
    }
}
/// Generate the [`StmtWith`] equivalent of the given legacy `pytest` call.
///
/// - `match_arg`: argument of a trailing `.match(...)` call, emitted as a
///   `match=` keyword on the context manager.
/// - `optional_vars`: target for the `with ... as <vars>` binding.
/// - `assign_targets`: targets assigned from the function call inside the body.
///
/// Returns `None` when an expected argument is missing from the legacy call.
fn generate_with_statement(
    context_type: PytestContextType,
    legacy_call: &ast::ExprCall,
    match_arg: Option<&Expr>,
    optional_vars: Option<&Expr>,
    assign_targets: Option<&[Expr]>,
) -> Option<StmtWith> {
    // `expected_exception`/`expected_warning`, when the helper takes one.
    let expected = if let Some((name, position)) = context_type.expected_arg() {
        Some(legacy_call.arguments.find_argument_value(name, position)?)
    } else {
        None
    };

    let (func_arg_name, func_arg_position) = context_type.func_arg();
    let func = legacy_call
        .arguments
        .find_argument_value(func_arg_name, func_arg_position)?;

    // All remaining arguments belong to the wrapped function call; skip the
    // leading "expected" argument (when present) and the `func` argument.
    let (func_args, func_keywords): (Vec<_>, Vec<_>) = legacy_call
        .arguments
        .arguments_source_order()
        .skip(if expected.is_some() { 2 } else { 1 })
        .partition_map(|arg_or_keyword| match arg_or_keyword {
            ast::ArgOrKeyword::Arg(expr) => Either::Left(expr.clone()),
            ast::ArgOrKeyword::Keyword(keyword) => Either::Right(keyword.clone()),
        });

    // The context-manager expression, e.g. `pytest.raises(ValueError, match=...)`.
    let context_call = ast::ExprCall {
        node_index: AtomicNodeIndex::NONE,
        range: TextRange::default(),
        func: legacy_call.func.clone(),
        arguments: ast::Arguments {
            node_index: AtomicNodeIndex::NONE,
            range: TextRange::default(),
            args: expected.cloned().as_slice().into(),
            keywords: match_arg
                .map(|expr| ast::Keyword {
                    node_index: AtomicNodeIndex::NONE,
                    // Take range from the original expression so that the keyword
                    // argument is generated after positional arguments
                    range: expr.range(),
                    arg: Some(ast::Identifier::new("match", TextRange::default())),
                    value: expr.clone(),
                })
                .as_slice()
                .into(),
        },
    };

    // The call that moves into the `with` body, e.g. `int("hello")`.
    let func_call = ast::ExprCall {
        node_index: AtomicNodeIndex::NONE,
        range: TextRange::default(),
        func: Box::new(func.clone()),
        arguments: ast::Arguments {
            node_index: AtomicNodeIndex::NONE,
            range: TextRange::default(),
            args: func_args.into(),
            keywords: func_keywords.into(),
        },
    };

    // Either `targets = func(...)` or a bare `func(...)` expression statement.
    let body = if let Some(assign_targets) = assign_targets {
        Stmt::Assign(ast::StmtAssign {
            node_index: AtomicNodeIndex::NONE,
            range: TextRange::default(),
            targets: assign_targets.to_vec(),
            value: Box::new(func_call.into()),
        })
    } else {
        Stmt::Expr(StmtExpr {
            node_index: AtomicNodeIndex::NONE,
            range: TextRange::default(),
            value: Box::new(func_call.into()),
        })
    };

    Some(StmtWith {
        node_index: AtomicNodeIndex::NONE,
        range: TextRange::default(),
        is_async: false,
        items: vec![WithItem {
            node_index: AtomicNodeIndex::NONE,
            range: TextRange::default(),
            context_expr: context_call.into(),
            optional_vars: optional_vars.map(|var| Box::new(var.clone())),
        }],
        body: vec![body],
    })
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/non_empty_init_module.rs | crates/ruff_linter/src/rules/ruff/rules/non_empty_init_module.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, Expr, Stmt};
use ruff_python_semantic::analyze::typing::is_type_checking_block;
use ruff_text_size::Ranged;
use crate::{Violation, checkers::ast::Checker};
/// ## What it does
///
/// Detects the presence of code in `__init__.py` files.
///
/// ## Why is this bad?
///
/// `__init__.py` files are often empty or only contain simple code to modify a module's API. As
/// such, it's easy to overlook them and their possible side effects when debugging.
///
/// ## Example
///
/// Instead of defining `MyClass` directly in `__init__.py`:
///
/// ```python
/// """My module docstring."""
///
///
/// class MyClass:
/// def my_method(self): ...
/// ```
///
/// move the definition to another file, import it, and include it in `__all__`:
///
/// ```python
/// """My module docstring."""
///
/// from submodule import MyClass
///
/// __all__ = ["MyClass"]
/// ```
///
/// Code in `__init__.py` files is also run at import time and can cause surprising slowdowns. To
/// disallow any code in `__init__.py` files, you can enable the
/// [`lint.ruff.strictly-empty-init-modules`] setting. In this case:
///
/// ```python
/// from submodule import MyClass
///
/// __all__ = ["MyClass"]
/// ```
///
/// the only fix is entirely emptying the file:
///
/// ```python
/// ```
///
/// ## Details
///
/// In non-strict mode, this rule allows several common patterns in `__init__.py` files:
///
/// - Imports
/// - Assignments to `__all__`, `__path__`, `__version__`, and `__author__`
/// - Module-level and attribute docstrings
/// - `if TYPE_CHECKING` blocks
/// - [PEP-562] module-level `__getattr__` and `__dir__` functions
///
/// ## Options
///
/// - [`lint.ruff.strictly-empty-init-modules`]
///
/// ## References
///
/// - [`flake8-empty-init-modules`](https://github.com/samueljsb/flake8-empty-init-modules/)
///
/// [PEP-562]: https://peps.python.org/pep-0562/
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.14.11")]
pub(crate) struct NonEmptyInitModule {
    // Mirrors the `lint.ruff.strictly-empty-init-modules` setting; selects
    // the stricter message text.
    strictly_empty_init_modules: bool,
}

impl Violation for NonEmptyInitModule {
    #[derive_message_formats]
    fn message(&self) -> String {
        if self.strictly_empty_init_modules {
            "`__init__` module should not contain any code".to_string()
        } else {
            "`__init__` module should only contain docstrings and re-exports".to_string()
        }
    }
}
/// RUF067
pub(crate) fn non_empty_init_module(checker: &Checker, stmt: &Stmt) {
    // Only `__init__` modules are in scope.
    if !checker.in_init_module() {
        return;
    }

    let semantic = checker.semantic();

    // Only flag top-level statements
    if !semantic.at_top_level() {
        return;
    }

    let strictly_empty_init_modules = checker.settings().ruff.strictly_empty_init_modules;
    // The allowances below apply only in the default (non-strict) mode; with
    // `strictly-empty-init-modules` enabled, every top-level statement is flagged.
    if !strictly_empty_init_modules {
        // Even though module-level attributes are disallowed, we still allow attribute docstrings
        // to avoid needing two `noqa` comments in a case like:
        //
        // ```py
        // MY_CONSTANT = 1  # noqa: RUF067
        // "A very important constant"
        // ```
        if semantic.in_pep_257_docstring() || semantic.in_attribute_docstring() {
            return;
        }

        match stmt {
            // Allow imports
            Stmt::Import(_) | Stmt::ImportFrom(_) => return,
            // Allow PEP-562 module `__getattr__` and `__dir__`
            Stmt::FunctionDef(func) if matches!(&*func.name, "__getattr__" | "__dir__") => return,
            // Allow `TYPE_CHECKING` blocks
            Stmt::If(stmt_if) if is_type_checking_block(stmt_if, semantic) => return,
            _ => {}
        }

        if let Some(assignment) = Assignment::from_stmt(stmt) {
            // Allow assignments to `__all__`.
            //
            // TODO(brent) should we allow additional cases here? Beyond simple assignments, you could
            // also append or extend `__all__`.
            //
            // This is actually going slightly beyond the upstream rule already, which only checks for
            // `Stmt::Assign`.
            if assignment.is_assignment_to("__all__") {
                return;
            }

            // Allow legacy namespace packages with assignments like:
            //
            // ```py
            // __path__ = __import__('pkgutil').extend_path(__path__, __name__)
            // ```
            if assignment.is_assignment_to("__path__") && assignment.is_pkgutil_extend_path() {
                return;
            }

            // Allow assignments to `__version__`.
            if assignment.is_assignment_to("__version__") {
                return;
            }

            // Allow assignments to `__author__`.
            if assignment.is_assignment_to("__author__") {
                return;
            }
        }
    }

    checker.report_diagnostic(
        NonEmptyInitModule {
            strictly_empty_init_modules,
        },
        stmt.range(),
    );
}
/// Any assignment statement, including plain assignment, annotated assignments, and augmented
/// assignments.
struct Assignment<'a> {
    // Left-hand-side target expressions (multiple for chained assignments).
    targets: &'a [Expr],
    // Right-hand-side value; `None` for a bare annotated declaration (`x: int`).
    value: Option<&'a Expr>,
}
impl<'a> Assignment<'a> {
    /// Extract the targets and value from any flavor of assignment statement;
    /// returns `None` for non-assignment statements.
    fn from_stmt(stmt: &'a Stmt) -> Option<Self> {
        let (targets, value) = match stmt {
            Stmt::Assign(ast::StmtAssign { targets, value, .. }) => {
                (targets.as_slice(), Some(&**value))
            }
            Stmt::AnnAssign(ast::StmtAnnAssign { target, value, .. }) => {
                (std::slice::from_ref(&**target), value.as_deref())
            }
            Stmt::AugAssign(ast::StmtAugAssign { target, value, .. }) => {
                (std::slice::from_ref(&**target), Some(&**value))
            }
            _ => return None,
        };
        Some(Self { targets, value })
    }

    /// Returns whether all of the assignment targets match `name`.
    ///
    /// For example, both of the following would be allowed for a `name` of `__all__`:
    ///
    /// ```py
    /// __all__ = ["foo"]
    /// __all__ = __all__ = ["foo"]
    /// ```
    ///
    /// but not:
    ///
    /// ```py
    /// __all__ = another_list = ["foo"]
    /// ```
    fn is_assignment_to(&self, name: &str) -> bool {
        self.targets
            .iter()
            .all(|target| target.as_name_expr().is_some_and(|expr| expr.id == name))
    }

    /// Returns `true` if the value being assigned is a call to `pkgutil.extend_path`.
    ///
    /// For example, both of the following would return true:
    ///
    /// ```py
    /// __path__ = __import__('pkgutil').extend_path(__path__, __name__)
    /// __path__ = other.extend_path(__path__, __name__)
    /// ```
    ///
    /// We're intentionally a bit less strict here, not requiring that the receiver of the
    /// `extend_path` call is the typical `__import__('pkgutil')` or `pkgutil`.
    fn is_pkgutil_extend_path(&self) -> bool {
        // The assigned value must be a call expression...
        let Some(Expr::Call(ast::ExprCall {
            func: extend_func,
            arguments: extend_arguments,
            ..
        })) = self.value
        else {
            return false;
        };

        // ...whose callee is an attribute access (`<receiver>.<attr>`).
        let Expr::Attribute(ast::ExprAttribute {
            attr: maybe_extend_path,
            ..
        }) = &**extend_func
        else {
            return false;
        };

        // Test that this is an `extend_path(__path__, __name__)` call
        if maybe_extend_path != "extend_path" {
            return false;
        }
        let Some(Expr::Name(path)) = extend_arguments.find_argument_value("path", 0) else {
            return false;
        };
        let Some(Expr::Name(name)) = extend_arguments.find_argument_value("name", 1) else {
            return false;
        };
        path.id() == "__path__" && name.id() == "__name__"
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/unnecessary_cast_to_int.rs | crates/ruff_linter/src/rules/ruff/rules/unnecessary_cast_to_int.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::{Tokens, parenthesized_range};
use ruff_python_ast::{Arguments, Expr, ExprCall};
use ruff_python_semantic::SemanticModel;
use ruff_python_semantic::analyze::type_inference::{NumberLike, PythonType, ResolvedPythonType};
use ruff_python_trivia::{CommentRanges, lines_after_ignoring_trivia};
use ruff_source_file::LineRanges;
use ruff_text_size::{Ranged, TextRange};
use crate::Locator;
use crate::checkers::ast::Checker;
use crate::rules::ruff::rules::unnecessary_round::{
InferredType, NdigitsValue, RoundedValue, rounded_and_ndigits,
};
use crate::{AlwaysFixableViolation, Applicability, Edit, Fix};
/// ## What it does
/// Checks for `int` conversions of values that are already integers.
///
/// ## Why is this bad?
/// Such a conversion is unnecessary.
///
/// ## Known problems
/// This rule may produce false positives for `round`, `math.ceil`, `math.floor`,
/// and `math.trunc` calls when values override the `__round__`, `__ceil__`, `__floor__`,
/// or `__trunc__` operators such that they don't return an integer.
///
/// ## Example
///
/// ```python
/// int(len([]))
/// int(round(foo, None))
/// ```
///
/// Use instead:
///
/// ```python
/// len([])
/// round(foo)
/// ```
///
/// ## Fix safety
/// The fix for `round`, `math.ceil`, `math.floor`, and `math.trunc` is unsafe
/// because removing the `int` conversion can change the semantics for values
/// overriding the `__round__`, `__ceil__`, `__floor__`, or `__trunc__` dunder methods
/// such that they don't return an integer.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.10.0")]
// Field-less violation: both the message and the fix title are constant.
pub(crate) struct UnnecessaryCastToInt;

impl AlwaysFixableViolation for UnnecessaryCastToInt {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Value being cast to `int` is already an integer".to_string()
    }

    fn fix_title(&self) -> String {
        "Remove unnecessary `int` call".to_string()
    }
}
/// RUF046
pub(crate) fn unnecessary_cast_to_int(checker: &Checker, call: &ExprCall) {
    // Restrict to `int(<single positional argument>)`.
    let Some(argument) = single_argument_to_int_call(call, checker.semantic()) else {
        return;
    };

    // The cast is removable when the argument is statically known to be an
    // integer, or is a call to a function known to return one (in which case
    // the applicability depends on the specific function).
    let is_integer_atom = matches!(
        ResolvedPythonType::from(argument),
        ResolvedPythonType::Atom(PythonType::Number(NumberLike::Integer))
    );
    let applicability = if is_integer_atom {
        Some(Applicability::Safe)
    } else if let Expr::Call(inner_call) = argument {
        call_applicability(checker, inner_call)
    } else {
        None
    };
    let Some(applicability) = applicability else {
        return;
    };

    let fix = unwrap_int_expression(
        call,
        argument,
        applicability,
        checker.semantic(),
        checker.locator(),
        checker.tokens(),
        checker.comment_ranges(),
        checker.source(),
    );

    checker
        .report_diagnostic(UnnecessaryCastToInt, call.range())
        .set_fix(fix);
}
/// Creates a fix that replaces `int(expression)` with `expression`.
///
/// Parenthesizes the replacement when removing the `int(...)` wrapper could
/// change parsing (the call has an expression parent, the argument is a named
/// expression, or it spans multiple lines), and downgrades the fix to unsafe
/// when the deletion would drop comments.
#[allow(clippy::too_many_arguments)]
fn unwrap_int_expression(
    call: &ExprCall,
    argument: &Expr,
    applicability: Applicability,
    semantic: &SemanticModel,
    locator: &Locator,
    tokens: &Tokens,
    comment_ranges: &CommentRanges,
    source: &str,
) -> Fix {
    let content = if let Some(range) =
        parenthesized_range(argument.into(), (&call.arguments).into(), tokens)
    {
        // The argument is already parenthesized inside the call; keep those
        // parentheses in the replacement.
        locator.slice(range).to_string()
    } else {
        let parenthesize = semantic.current_expression_parent().is_some()
            || argument.is_named_expr()
            || locator.count_lines(argument.range()) > 0;
        if parenthesize && !has_own_parentheses(argument, tokens, source) {
            format!("({})", locator.slice(argument.range()))
        } else {
            locator.slice(argument.range()).to_string()
        }
    };

    // Since we're deleting the complement of the argument range within
    // the call range, we have to check both ends for comments.
    //
    // For example:
    // ```python
    // int(  # comment
    //     round(
    //         42.1
    //     )  # comment
    // )
    // ```
    let applicability = {
        let call_to_arg_start = TextRange::new(call.start(), argument.start());
        let arg_to_call_end = TextRange::new(argument.end(), call.end());
        if comment_ranges.intersects(call_to_arg_start)
            || comment_ranges.intersects(arg_to_call_end)
        {
            Applicability::Unsafe
        } else {
            applicability
        }
    };

    let edit = Edit::range_replacement(content, call.range());
    Fix::applicable_edit(edit, applicability)
}
/// Returns `Some` if `call` in `int(call(...))` is a method that returns an `int`
/// and `None` otherwise. The returned [`Applicability`] reflects how confident
/// we are that removing the cast preserves behavior.
fn call_applicability(checker: &Checker, inner_call: &ExprCall) -> Option<Applicability> {
    let (func, arguments) = (&inner_call.func, &inner_call.arguments);

    // Only calls that resolve to a known builtin/stdlib function qualify.
    let qualified_name = checker.semantic().resolve_qualified_name(func)?;

    match qualified_name.segments() {
        // Always returns a strict instance of `int`
        ["" | "builtins", "len" | "id" | "hash" | "ord" | "int"]
        | [
            "math",
            "comb" | "factorial" | "gcd" | "lcm" | "isqrt" | "perm",
        ] => Some(Applicability::Safe),

        // Depends on `ndigits` and `number.__round__`
        ["" | "builtins", "round"] => round_applicability(arguments, checker.semantic()),

        // Depends on `__ceil__`/`__floor__`/`__trunc__`
        ["math", "ceil" | "floor" | "trunc"] => Some(Applicability::Unsafe),

        _ => None,
    }
}
fn single_argument_to_int_call<'a>(
call: &'a ExprCall,
semantic: &SemanticModel,
) -> Option<&'a Expr> {
let ExprCall {
func, arguments, ..
} = call;
if !semantic.match_builtin_expr(func, "int") {
return None;
}
if !arguments.keywords.is_empty() {
return None;
}
let [argument] = &*arguments.args else {
return None;
};
Some(argument)
}
/// Determines the [`Applicability`] for a `round(..)` call.
///
/// The Applicability depends on the `ndigits` and the number argument:
/// exact (`Equivalent`) `int`/`float` inferences yield a safe fix, while
/// merely `AssignableTo` inferences (where a subclass could override
/// `__round__`) yield an unsafe one.
fn round_applicability(arguments: &Arguments, semantic: &SemanticModel) -> Option<Applicability> {
    let (_rounded, rounded_value, ndigits_value) = rounded_and_ndigits(arguments, semantic)?;

    match (rounded_value, ndigits_value) {
        // ```python
        // int(round(2, -1))
        // int(round(2, 0))
        // int(round(2))
        // int(round(2, None))
        // ```
        (
            RoundedValue::Int(InferredType::Equivalent),
            NdigitsValue::LiteralInt { .. }
            | NdigitsValue::Int(InferredType::Equivalent)
            | NdigitsValue::NotGivenOrNone,
        ) => Some(Applicability::Safe),

        // ```python
        // int(round(2.0))
        // int(round(2.0, None))
        // ```
        (RoundedValue::Float(InferredType::Equivalent), NdigitsValue::NotGivenOrNone) => {
            Some(Applicability::Safe)
        }

        // ```python
        // a: int = 2  # or True
        // int(round(a, -2))
        // int(round(a, 1))
        // int(round(a))
        // int(round(a, None))
        // ```
        (
            RoundedValue::Int(InferredType::AssignableTo),
            NdigitsValue::LiteralInt { .. }
            | NdigitsValue::Int(InferredType::Equivalent)
            | NdigitsValue::NotGivenOrNone,
        ) => Some(Applicability::Unsafe),

        // ```python
        // a: float = 2.0
        // int(round(a))
        // int(round(a, None))
        // int(round(x))
        // int(round(x, None))
        // ```
        (
            RoundedValue::Float(InferredType::AssignableTo) | RoundedValue::Other,
            NdigitsValue::NotGivenOrNone,
        ) => Some(Applicability::Unsafe),

        _ => None,
    }
}
/// Returns `true` if the given [`Expr`] has its own parentheses (e.g., `()`, `[]`, `{}`),
/// i.e., removing a surrounding call's parentheses would still leave the
/// expression syntactically self-delimited.
fn has_own_parentheses(expr: &Expr, tokens: &Tokens, source: &str) -> bool {
    match expr {
        // Comprehensions and display literals always carry their own brackets.
        Expr::ListComp(_)
        | Expr::SetComp(_)
        | Expr::DictComp(_)
        | Expr::List(_)
        | Expr::Set(_)
        | Expr::Dict(_) => true,
        Expr::Call(call_expr) => {
            // A call where the function and parenthesized
            // argument(s) appear on separate lines
            // requires outer parentheses. That is:
            // ```
            // (f
            // (10))
            // ```
            // is different than
            // ```
            // f
            // (10)
            // ```
            let func_end =
                parenthesized_range(call_expr.func.as_ref().into(), call_expr.into(), tokens)
                    .unwrap_or(call_expr.func.range())
                    .end();
            lines_after_ignoring_trivia(func_end, source) == 0
        }
        Expr::Subscript(subscript_expr) => {
            // Same as above
            let subscript_end = parenthesized_range(
                subscript_expr.value.as_ref().into(),
                subscript_expr.into(),
                tokens,
            )
            .unwrap_or(subscript_expr.value.range())
            .end();
            lines_after_ignoring_trivia(subscript_end, source) == 0
        }
        // Generators and tuples only count if they were written with explicit
        // parentheses in the source.
        Expr::Generator(generator) => generator.parenthesized,
        Expr::Tuple(tuple) => tuple.parenthesized,
        _ => false,
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/mutable_fromkeys_value.rs | crates/ruff_linter/src/rules/ruff/rules/mutable_fromkeys_value.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::name::Name;
use ruff_python_ast::{self as ast, Expr};
use ruff_python_semantic::analyze::typing::is_mutable_expr;
use ruff_python_codegen::Generator;
use ruff_text_size::Ranged;
use ruff_text_size::TextRange;
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for mutable objects passed as a value argument to `dict.fromkeys`.
///
/// ## Why is this bad?
/// All values in the dictionary created by the `dict.fromkeys` method
/// refer to the same instance of the provided object. If that object is
/// modified, all values are modified, which can lead to unexpected behavior.
/// For example, if the empty list (`[]`) is provided as the default value,
/// all values in the dictionary will use the same list; as such, appending to
/// any one entry will append to all entries.
///
/// Instead, use a comprehension to generate a dictionary with distinct
/// instances of the default value.
///
/// ## Example
/// ```python
/// cities = dict.fromkeys(["UK", "Poland"], [])
/// cities["UK"].append("London")
/// cities["Poland"].append("Poznan")
/// print(cities) # {'UK': ['London', 'Poznan'], 'Poland': ['London', 'Poznan']}
/// ```
///
/// Use instead:
/// ```python
/// cities = {country: [] for country in ["UK", "Poland"]}
/// cities["UK"].append("London")
/// cities["Poland"].append("Poznan")
/// print(cities) # {'UK': ['London'], 'Poland': ['Poznan']}
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe, as the edit will change the behavior of
/// the program by using a distinct object for every value in the dictionary,
/// rather than a shared mutable instance. In some cases, programs may rely on
/// the previous behavior.
///
/// ## References
/// - [Python documentation: `dict.fromkeys`](https://docs.python.org/3/library/stdtypes.html#dict.fromkeys)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.5.0")]
pub(crate) struct MutableFromkeysValue;
impl Violation for MutableFromkeysValue {
    // A rewrite to a dict comprehension may accompany the diagnostic.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        "Do not pass mutable objects as values to `dict.fromkeys`".to_string()
    }
    fn fix_title(&self) -> Option<String> {
        Some("Replace with comprehension".to_string())
    }
}
/// RUF024: flag `dict.fromkeys(keys, <mutable>)` and offer a dict-comprehension rewrite.
pub(crate) fn mutable_fromkeys_value(checker: &Checker, call: &ast::ExprCall) {
    let Expr::Attribute(ast::ExprAttribute { value, attr, .. }) = call.func.as_ref() else {
        return;
    };
    // Check that the call is to `dict.fromkeys`.
    if attr != "fromkeys" {
        return;
    }
    let semantic = checker.semantic();
    if !semantic.match_builtin_expr(value, "dict") {
        return;
    }
    // Check that the value parameter is a mutable object.
    // Requires exactly two positional arguments: the keys and the value.
    let [keys, value] = &*call.arguments.args else {
        return;
    };
    if !is_mutable_expr(value, semantic) {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(MutableFromkeysValue, call.range());
    // Unsafe: the rewrite gives every key a distinct value object, whereas
    // `dict.fromkeys` shares a single instance across all keys.
    diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
        generate_dict_comprehension(keys, value, checker.generator()),
        call.range(),
    )));
}
/// Render the replacement expression `{key: value for key in keys}`, where
/// `keys` and `value` come from the original `dict.fromkeys` call.
fn generate_dict_comprehension(keys: &Expr, value: &Expr, generator: Generator) -> String {
    // The comprehension variable: a plain load of the name `key`.
    let key = ast::ExprName {
        id: Name::new_static("key"),
        ctx: ast::ExprContext::Load,
        range: TextRange::default(),
        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
    };
    // The single generator clause: `for key in keys`.
    let generators = vec![ast::Comprehension {
        target: key.clone().into(),
        iter: keys.clone(),
        ifs: Vec::new(),
        range: TextRange::default(),
        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
        is_async: false,
    }];
    // Assemble the dict comprehension and render it to source text.
    generator.expr(
        &ast::ExprDictComp {
            key: Box::new(key.into()),
            value: Box::new(value.clone()),
            generators,
            range: TextRange::default(),
            node_index: ruff_python_ast::AtomicNodeIndex::NONE,
        }
        .into(),
    )
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/unnecessary_key_check.rs | crates/ruff_linter/src/rules/ruff/rules/unnecessary_key_check.rs | use ruff_python_ast::comparable::ComparableExpr;
use ruff_python_ast::{self as ast, BoolOp, CmpOp, Expr};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::contains_effect;
use ruff_python_ast::token::parenthesized_range;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for unnecessary key checks prior to accessing a dictionary.
///
/// ## Why is this bad?
/// When working with dictionaries, the `get` method can be used to access a
/// value without having to check if the dictionary contains the relevant key,
/// returning `None` if the key is not present.
///
/// ## Example
/// ```python
/// if "key" in dct and dct["key"]:
///     ...
/// ```
///
/// Use instead:
/// ```python
/// if dct.get("key"):
///     ...
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.2.0")]
pub(crate) struct UnnecessaryKeyCheck;
impl AlwaysFixableViolation for UnnecessaryKeyCheck {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Unnecessary key check before dictionary access".to_string()
    }
    fn fix_title(&self) -> String {
        "Replace with `dict.get`".to_string()
    }
}
/// RUF019: rewrite `key in dct and dct[key]` to `dct.get(key)`.
pub(crate) fn unnecessary_key_check(checker: &Checker, expr: &Expr) {
    // Only consider expressions used as a boolean test (e.g. `if`/`while`
    // conditions), where `dct.get(key)`'s `None` fallback is equivalent.
    if !checker.semantic().in_boolean_test() {
        return;
    }
    // The expression must be an `and` with exactly two operands.
    let Expr::BoolOp(ast::ExprBoolOp {
        op: BoolOp::And,
        values,
        ..
    }) = expr
    else {
        return;
    };
    let [left, right] = values.as_slice() else {
        return;
    };
    // Left should be, e.g., `key in dct`.
    let Expr::Compare(ast::ExprCompare {
        left: key_left,
        ops,
        comparators,
        ..
    }) = left
    else {
        return;
    };
    if !matches!(&**ops, [CmpOp::In]) {
        return;
    }
    let [obj_left] = &**comparators else {
        return;
    };
    // Right should be, e.g., `dct[key]`.
    let Expr::Subscript(ast::ExprSubscript {
        value: obj_right,
        slice: key_right,
        ..
    }) = right
    else {
        return;
    };
    // Both sides must reference the same object and the same key.
    if ComparableExpr::from(obj_left) != ComparableExpr::from(obj_right)
        || ComparableExpr::from(key_left) != ComparableExpr::from(key_right)
    {
        return;
    }
    // Skip if evaluating the object or key could have side effects, since the
    // rewrite changes how many times each is evaluated.
    if contains_effect(obj_left, |id| checker.semantic().has_builtin_binding(id))
        || contains_effect(key_left, |id| checker.semantic().has_builtin_binding(id))
    {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(UnnecessaryKeyCheck, expr.range());
    // Preserve any explicit parentheses around the object/key in the rewrite.
    diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
        format!(
            "{}.get({})",
            checker.locator().slice(
                parenthesized_range(obj_right.into(), right.into(), checker.tokens(),)
                    .unwrap_or(obj_right.range())
            ),
            checker.locator().slice(
                parenthesized_range(key_right.into(), right.into(), checker.tokens(),)
                    .unwrap_or(key_right.range())
            ),
        ),
        expr.range(),
    )));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/invalid_pyproject_toml.rs | crates/ruff_linter/src/rules/ruff/rules/invalid_pyproject_toml.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::{FixAvailability, Violation};
/// ## What it does
/// Checks for any pyproject.toml that does not conform to the schema from the relevant PEPs.
///
/// ## Why is this bad?
/// Your project may contain invalid metadata or configuration without you noticing.
///
/// ## Example
/// ```toml
/// [project]
/// name = "crab"
/// version = "1.0.0"
/// authors = ["Ferris the Crab <ferris@example.org>"]
/// ```
///
/// Use instead:
/// ```toml
/// [project]
/// name = "crab"
/// version = "1.0.0"
/// authors = [
///   { name = "Ferris the Crab", email = "ferris@example.org" }
/// ]
/// ```
///
/// ## References
/// - [Specification of `[project]` in pyproject.toml](https://packaging.python.org/en/latest/specifications/declaring-project-metadata/)
/// - [Specification of `[build-system]` in pyproject.toml](https://peps.python.org/pep-0518/)
/// - [Draft but implemented license declaration extensions](https://peps.python.org/pep-0639)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.271")]
pub(crate) struct InvalidPyprojectToml {
    /// The parse/validation error message to surface in the diagnostic.
    pub message: String,
}
impl Violation for InvalidPyprojectToml {
    // Schema violations cannot be auto-fixed.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::None;
    #[derive_message_formats]
    fn message(&self) -> String {
        let InvalidPyprojectToml { message } = self;
        format!("Failed to parse pyproject.toml: {message}")
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/ruff/rules/parenthesize_chained_operators.rs | crates/ruff_linter/src/rules/ruff/rules/parenthesize_chained_operators.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_ast::token::parenthesized_range;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for chained operators where adding parentheses could improve the
/// clarity of the code.
///
/// ## Why is this bad?
/// `and` always binds more tightly than `or` when chaining the two together,
/// but this can be hard to remember (and sometimes surprising).
/// Adding parentheses in these situations can greatly improve code readability,
/// with no change to semantics or performance.
///
/// For example:
/// ```python
/// a, b, c = 1, 0, 2
/// x = a or b and c
///
/// d, e, f = 0, 1, 2
/// y = d and e or f
/// ```
///
/// Use instead:
/// ```python
/// a, b, c = 1, 0, 2
/// x = a or (b and c)
///
/// d, e, f = 0, 1, 2
/// y = (d and e) or f
/// ```
///
/// ## Fix safety
/// The fix only wraps the `and` subexpression in parentheses, mirroring the
/// existing precedence, and is therefore always safe.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.8.0")]
pub(crate) struct ParenthesizeChainedOperators;
impl AlwaysFixableViolation for ParenthesizeChainedOperators {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Parenthesize `a and b` expressions when chaining `and` and `or` together, to make the precedence clear".to_string()
    }
    fn fix_title(&self) -> String {
        "Parenthesize the `and` subexpression".to_string()
    }
}
/// RUF021
pub(crate) fn parenthesize_chained_logical_operators(checker: &Checker, expr: &ast::ExprBoolOp) {
    // Only `and` subexpressions nested inside an `or` are of interest:
    // - `a or b and c` => `BoolOp(values=[Name("a"), BoolOp(values=[Name("b"), Name("c")], op=And)], op=Or)`
    //
    // An `Or` node can only appear *inside* an `And` node if the author
    // already parenthesized the `or` subexpression (since `and` binds more
    // tightly), e.g. `a and (b or c)`. Parenthesized subexpressions are
    // exactly what we want, so `And` parents need no inspection at all.
    if expr.op.is_and() {
        return;
    }
    for condition in &expr.values {
        // Skip anything that isn't an unparenthesized `and` subexpression.
        let ast::Expr::BoolOp(bool_op) = condition else {
            continue;
        };
        if !bool_op.op.is_and() {
            continue;
        }
        if parenthesized_range(bool_op.into(), expr.into(), checker.tokens()).is_some() {
            continue;
        }
        let source_range = bool_op.range();
        let parenthesized = format!("({})", checker.locator().slice(source_range));
        checker
            .report_diagnostic(ParenthesizeChainedOperators, source_range)
            .set_fix(Fix::safe_edit(Edit::range_replacement(
                parenthesized,
                source_range,
            )));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_implicit_str_concat/settings.rs | crates/ruff_linter/src/rules/flake8_implicit_str_concat/settings.rs | //! Settings for the `flake8-implicit-str-concat` plugin.
use crate::display_settings;
use ruff_macros::CacheKey;
use std::fmt::{Display, Formatter};
#[derive(Debug, Clone, CacheKey)]
pub struct Settings {
    /// Whether implicit string concatenations that span multiple lines are
    /// permitted (defaults to `true`; see ISC002).
    pub allow_multiline: bool,
}
impl Default for Settings {
    fn default() -> Self {
        // Multi-line implicit concatenation is allowed unless explicitly
        // disabled in the user's configuration.
        Self {
            allow_multiline: true,
        }
    }
}
impl Display for Settings {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Render the settings for `ruff check --show-settings`-style output.
        display_settings! {
            formatter = f,
            namespace = "linter.flake8_implicit_str_concat",
            fields = [
                self.allow_multiline
            ]
        }
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_implicit_str_concat/mod.rs | crates/ruff_linter/src/rules/flake8_implicit_str_concat/mod.rs | //! Rules from [flake8-implicit-str-concat](https://pypi.org/project/flake8-implicit-str-concat/).
pub(crate) mod rules;
pub mod settings;
#[cfg(test)]
mod tests {
    use std::path::Path;
    use anyhow::Result;
    use test_case::test_case;
    use crate::registry::Rule;
    use crate::test::test_path;
    use crate::{assert_diagnostics, settings};
    /// Snapshot-test each ISC rule against its fixture with default settings.
    #[test_case(Rule::SingleLineImplicitStringConcatenation, Path::new("ISC.py"))]
    #[test_case(Rule::MultiLineImplicitStringConcatenation, Path::new("ISC.py"))]
    #[test_case(
        Rule::SingleLineImplicitStringConcatenation,
        Path::new("ISC_syntax_error.py")
    )]
    #[test_case(
        Rule::MultiLineImplicitStringConcatenation,
        Path::new("ISC_syntax_error.py")
    )]
    #[test_case(
        Rule::SingleLineImplicitStringConcatenation,
        Path::new("ISC_syntax_error_2.py")
    )]
    #[test_case(
        Rule::MultiLineImplicitStringConcatenation,
        Path::new("ISC_syntax_error_2.py")
    )]
    #[test_case(Rule::ExplicitStringConcatenation, Path::new("ISC.py"))]
    #[test_case(
        Rule::ImplicitStringConcatenationInCollectionLiteral,
        Path::new("ISC004.py")
    )]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("flake8_implicit_str_concat").join(path).as_path(),
            &settings::LinterSettings::for_rule(rule_code),
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
    /// Snapshot-test the rules with `allow-multiline = false`, which changes
    /// which concatenations are flagged.
    #[test_case(Rule::SingleLineImplicitStringConcatenation, Path::new("ISC.py"))]
    #[test_case(Rule::MultiLineImplicitStringConcatenation, Path::new("ISC.py"))]
    #[test_case(Rule::ExplicitStringConcatenation, Path::new("ISC.py"))]
    fn multiline(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!(
            "multiline_{}_{}",
            rule_code.noqa_code(),
            path.to_string_lossy()
        );
        let diagnostics = test_path(
            Path::new("flake8_implicit_str_concat").join(path).as_path(),
            &settings::LinterSettings {
                flake8_implicit_str_concat: super::settings::Settings {
                    allow_multiline: false,
                },
                ..settings::LinterSettings::for_rule(rule_code)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_implicit_str_concat/rules/collection_literal.rs | crates/ruff_linter/src/rules/flake8_implicit_str_concat/rules/collection_literal.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::parenthesized_range;
use ruff_python_ast::{Expr, StringLike};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for implicitly concatenated strings inside list, tuple, and set literals.
///
/// ## Why is this bad?
/// In collection literals, implicit string concatenation is often the result of
/// a missing comma between elements, which can silently merge items together.
///
/// ## Example
/// ```python
/// facts = (
///     "Lobsters have blue blood.",
///     "The liver is the only human organ that can fully regenerate itself.",
///     "Clarinets are made almost entirely out of wood from the mpingo tree."
///     "In 1971, astronaut Alan Shepard played golf on the moon.",
/// )
/// ```
///
/// Instead, you likely intended:
/// ```python
/// facts = (
///     "Lobsters have blue blood.",
///     "The liver is the only human organ that can fully regenerate itself.",
///     "Clarinets are made almost entirely out of wood from the mpingo tree.",
///     "In 1971, astronaut Alan Shepard played golf on the moon.",
/// )
/// ```
///
/// If the concatenation is intentional, wrap it in parentheses to make it
/// explicit:
/// ```python
/// facts = (
///     "Lobsters have blue blood.",
///     "The liver is the only human organ that can fully regenerate itself.",
///     (
///         "Clarinets are made almost entirely out of wood from the mpingo tree."
///         "In 1971, astronaut Alan Shepard played golf on the moon."
///     ),
/// )
/// ```
///
/// ## Fix safety
/// The fix is safe in that it does not change the semantics of your code.
/// However, the issue is that you may often want to change semantics
/// by adding a missing comma.
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.14.10")]
pub(crate) struct ImplicitStringConcatenationInCollectionLiteral;
impl Violation for ImplicitStringConcatenationInCollectionLiteral {
    // The parenthesizing fix is always available.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Always;
    #[derive_message_formats]
    fn message(&self) -> String {
        "Unparenthesized implicit string concatenation in collection".to_string()
    }
    fn fix_title(&self) -> Option<String> {
        Some("Wrap implicitly concatenated strings in parentheses".to_string())
    }
}
/// ISC004
pub(crate) fn implicit_string_concatenation_in_collection_literal(
    checker: &Checker,
    expr: &Expr,
    elements: &[Expr],
) {
    for element in elements {
        // Only implicitly-concatenated string-like elements are of interest.
        let string_like = match StringLike::try_from(element) {
            Ok(string_like) if string_like.is_implicit_concatenated() => string_like,
            _ => continue,
        };
        // Already parenthesized: the concatenation is explicit and intentional.
        if parenthesized_range(
            string_like.as_expression_ref(),
            expr.into(),
            checker.tokens(),
        )
        .is_some()
        {
            continue;
        }
        let range = string_like.range();
        let mut diagnostic =
            checker.report_diagnostic(ImplicitStringConcatenationInCollectionLiteral, range);
        diagnostic.help("Did you forget a comma?");
        // Wrap the whole concatenation in parentheses to make it explicit.
        diagnostic.set_fix(Fix::unsafe_edits(
            Edit::insertion("(".to_string(), range.start()),
            [Edit::insertion(")".to_string(), range.end())],
        ));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_implicit_str_concat/rules/explicit.rs | crates/ruff_linter/src/rules/flake8_implicit_str_concat/rules/explicit.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::parenthesized_range;
use ruff_python_ast::{self as ast, Expr, Operator};
use ruff_python_trivia::is_python_whitespace;
use ruff_source_file::LineRanges;
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for string literals that are explicitly concatenated (using the
/// `+` operator).
///
/// ## Why is this bad?
/// For string literals that wrap across multiple lines, implicit string
/// concatenation within parentheses is preferred over explicit
/// concatenation using the `+` operator, as the former is more readable.
///
/// ## Example
/// ```python
/// z = (
///     "The quick brown fox jumps over the lazy "
///     + "dog"
/// )
/// ```
///
/// Use instead:
/// ```python
/// z = (
///     "The quick brown fox jumps over the lazy "
///     "dog"
/// )
/// ```
///
/// ## Options
///
/// Setting `lint.flake8-implicit-str-concat.allow-multiline = false` will disable this rule because
/// it would leave no allowed way to write a multi-line string.
///
/// - `lint.flake8-implicit-str-concat.allow-multiline`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.201")]
pub(crate) struct ExplicitStringConcatenation;
impl Violation for ExplicitStringConcatenation {
    // No fix is offered when either operand is parenthesized.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        "Explicitly concatenated string should be implicitly concatenated".to_string()
    }
    fn fix_title(&self) -> Option<String> {
        Some("Remove redundant '+' operator to implicitly concatenate".to_string())
    }
}
/// ISC003
pub(crate) fn explicit(checker: &Checker, expr: &Expr) {
    // With `allow-multiline = false`, implicit concatenation across lines is
    // itself disallowed, so explicit `+` concatenation is the only way left to
    // spell a multi-line string; honor that setting by skipping this rule.
    if !checker
        .settings()
        .flake8_implicit_str_concat
        .allow_multiline
    {
        return;
    }
    // Only `left + right` binary expressions are candidates.
    let Expr::BinOp(bin_op) = expr else {
        return;
    };
    if !matches!(bin_op.op, Operator::Add) {
        return;
    }
    let (left, right) = (&bin_op.left, &bin_op.right);
    // Both operands must be literals of a compatible kind for the `+` to be
    // replaceable by implicit concatenation.
    let concatable = matches!(
        (left.as_ref(), right.as_ref()),
        (
            Expr::StringLiteral(_) | Expr::FString(_),
            Expr::StringLiteral(_) | Expr::FString(_)
        ) | (Expr::BytesLiteral(_), Expr::BytesLiteral(_))
            | (Expr::TString(_), Expr::TString(_))
    );
    if !concatable {
        return;
    }
    // Only flag concatenations that actually span multiple lines.
    if !checker
        .locator()
        .contains_line_break(TextRange::new(left.end(), right.start()))
    {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(ExplicitStringConcatenation, expr.range());
    let is_parenthesized =
        |expr: &Expr| parenthesized_range(expr.into(), bin_op.into(), checker.tokens()).is_some();
    // If either `left` or `right` is parenthesized, generating a fix would be
    // too involved, and attempting it would currently produce invalid code;
    // report the diagnostic without a fix. See: #19757.
    if is_parenthesized(left) || is_parenthesized(right) {
        return;
    }
    diagnostic.set_fix(generate_fix(checker, bin_op));
}
/// Build the ISC003 fix: delete the `+` operator between the two operands,
/// turning the explicit concatenation into an implicit one.
fn generate_fix(checker: &Checker, expr_bin_op: &ast::ExprBinOp) -> Fix {
    let ast::ExprBinOp { left, right, .. } = expr_bin_op;
    let gap = TextRange::new(left.end(), right.start());
    let gap_text = checker.locator().slice(gap);
    let (head, tail) = gap_text.split_once('+').unwrap();
    // A line break before the `+` means the operator starts its own line:
    // keep `head` untouched so the continuation's indentation survives.
    // Otherwise the `+` trailed the first operand, so also strip the
    // whitespace that preceded it.
    let head = if head.contains_line_break(TextRange::at(TextSize::new(0), head.text_len())) {
        head
    } else {
        head.trim_end_matches(is_python_whitespace)
    };
    Fix::safe_edit(Edit::range_replacement(format!("{head}{tail}"), gap))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_implicit_str_concat/rules/implicit.rs | crates/ruff_linter/src/rules/flake8_implicit_str_concat/rules/implicit.rs | use std::borrow::Cow;
use itertools::Itertools;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::StringFlags;
use ruff_python_ast::token::{Token, TokenKind, Tokens};
use ruff_python_index::Indexer;
use ruff_source_file::LineRanges;
use ruff_text_size::{Ranged, TextLen, TextRange};
use crate::Locator;
use crate::checkers::ast::LintContext;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for implicitly concatenated strings on a single line.
///
/// ## Why is this bad?
/// While it is valid Python syntax to concatenate multiple string or byte
/// literals implicitly (via whitespace delimiters), it is unnecessary and
/// negatively affects code readability.
///
/// In some cases, the implicit concatenation may also be unintentional, as
/// code formatters are capable of introducing single-line implicit
/// concatenations when collapsing long lines.
///
/// ## Example
/// ```python
/// z = "The quick " "brown fox."
/// ```
///
/// Use instead:
/// ```python
/// z = "The quick brown fox."
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.201")]
pub(crate) struct SingleLineImplicitStringConcatenation;
impl Violation for SingleLineImplicitStringConcatenation {
    // A merging fix is offered only when the two literals share a compatible
    // prefix and quoting style.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        "Implicitly concatenated string literals on one line".to_string()
    }
    fn fix_title(&self) -> Option<String> {
        Some("Combine string literals".to_string())
    }
}
/// ## What it does
/// Checks for implicitly concatenated strings that span multiple lines.
///
/// ## Why is this bad?
/// For string literals that wrap across multiple lines, [PEP 8] recommends
/// the use of implicit string concatenation within parentheses instead of
/// using a backslash for line continuation, as the former is more readable
/// than the latter.
///
/// By default, this rule will only trigger if the string literal is
/// concatenated via a backslash. To disallow implicit string concatenation
/// altogether, set the [`lint.flake8-implicit-str-concat.allow-multiline`] option
/// to `false`.
///
/// ## Example
/// ```python
/// z = "The quick brown fox jumps over the lazy "\
///     "dog."
/// ```
///
/// Use instead:
/// ```python
/// z = (
///     "The quick brown fox jumps over the lazy "
///     "dog."
/// )
/// ```
///
/// ## Options
/// - `lint.flake8-implicit-str-concat.allow-multiline`
///
/// ## Formatter compatibility
/// Using this rule with `allow-multiline = false` can be incompatible with the
/// formatter because the [formatter] can introduce new multi-line implicitly
/// concatenated strings. We recommend to either:
///
/// * Enable `ISC001` to disallow all implicit concatenated strings
/// * Set `allow-multiline = true`
///
/// [PEP 8]: https://peps.python.org/pep-0008/#maximum-line-length
/// [formatter]: https://docs.astral.sh/ruff/formatter/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.201")]
pub(crate) struct MultiLineImplicitStringConcatenation;
impl Violation for MultiLineImplicitStringConcatenation {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Implicitly concatenated string literals over multiple lines".to_string()
    }
}
/// ISC001, ISC002: detect adjacent string tokens that form an implicit
/// concatenation, on one line (ISC001) or across lines (ISC002).
pub(crate) fn implicit(
    context: &LintContext,
    tokens: &Tokens,
    locator: &Locator,
    indexer: &Indexer,
) {
    // Walk adjacent pairs of tokens, skipping comments so that two string
    // tokens separated only by a comment still count as adjacent. When
    // `allow-multiline` is disabled, newline tokens are skipped as well, so a
    // pair split across lines also becomes adjacent (and thus reportable).
    for (a_token, b_token) in tokens
        .iter()
        .filter(|token| {
            token.kind() != TokenKind::Comment
                && (context
                    .settings()
                    .flake8_implicit_str_concat
                    .allow_multiline
                    || token.kind() != TokenKind::NonLogicalNewline)
        })
        .tuple_windows()
    {
        // Resolve each side of the pair to the full range of its string:
        // plain string tokens carry their own range, while f-/t-string
        // start/end tokens are mapped to the range of the enclosing
        // interpolated string via the indexer.
        let (a_range, b_range) = match (a_token.kind(), b_token.kind()) {
            (TokenKind::String, TokenKind::String) => (a_token.range(), b_token.range()),
            (TokenKind::String, TokenKind::FStringStart) => {
                match indexer
                    .interpolated_string_ranges()
                    .innermost(b_token.start())
                {
                    Some(b_range) => (a_token.range(), b_range),
                    None => continue,
                }
            }
            (TokenKind::FStringEnd, TokenKind::String) => {
                match indexer
                    .interpolated_string_ranges()
                    .innermost(a_token.start())
                {
                    Some(a_range) => (a_range, b_token.range()),
                    None => continue,
                }
            }
            (TokenKind::FStringEnd, TokenKind::FStringStart)
            | (TokenKind::TStringEnd, TokenKind::TStringStart) => {
                match (
                    indexer
                        .interpolated_string_ranges()
                        .innermost(a_token.start()),
                    indexer
                        .interpolated_string_ranges()
                        .innermost(b_token.start()),
                ) {
                    (Some(a_range), Some(b_range)) => (a_range, b_range),
                    _ => continue,
                }
            }
            _ => continue,
        };
        // A line break between the strings means ISC002; otherwise ISC001,
        // with a fix that merges the two literals when possible.
        if locator.contains_line_break(TextRange::new(a_range.end(), b_range.start())) {
            context.report_diagnostic_if_enabled(
                MultiLineImplicitStringConcatenation,
                TextRange::new(a_range.start(), b_range.end()),
            );
        } else {
            if let Some(mut diagnostic) = context.report_diagnostic_if_enabled(
                SingleLineImplicitStringConcatenation,
                TextRange::new(a_range.start(), b_range.end()),
            ) {
                if let Some(fix) = concatenate_strings(a_token, b_token, a_range, b_range, locator)
                {
                    diagnostic.set_fix(fix);
                }
            }
        }
    }
}
/// Concatenates two strings into a single literal, returning `None` if they
/// cannot be safely merged.
///
/// The `a_string_range` and `b_string_range` are the range of the entire string,
/// not just of the string token itself (important for interpolated strings where
/// the start token doesn't span the entire token).
fn concatenate_strings(
    a_token: &Token,
    b_token: &Token,
    a_string_range: TextRange,
    b_string_range: TextRange,
    locator: &Locator,
) -> Option<Fix> {
    // Resolve the string flags once (previously computed twice per token) and
    // bail out for non-string tokens.
    let a_string_flags = a_token.string_flags()?;
    let b_string_flags = b_token.string_flags()?;
    // An unclosed literal has no well-formed body to splice together.
    if a_string_flags.is_unclosed() || b_string_flags.is_unclosed() {
        return None;
    }
    let a_prefix = a_string_flags.prefix();
    let b_prefix = b_string_flags.prefix();
    // Require, for now, that the strings have the same prefix,
    // quote style, and number of quotes
    if a_prefix != b_prefix
        || a_string_flags.quote_style() != b_string_flags.quote_style()
        || a_string_flags.is_triple_quoted() != b_string_flags.is_triple_quoted()
    {
        return None;
    }
    let a_text = locator.slice(a_string_range);
    let b_text = locator.slice(b_string_range);
    let quotes = a_string_flags.quote_str();
    // Since the prefixes and quoting match, `a`'s opener/closer lengths apply
    // to `b`'s text as well.
    let opener_len = a_string_flags.opener_len();
    let closer_len = a_string_flags.closer_len();
    let mut a_body =
        Cow::Borrowed(&a_text[TextRange::new(opener_len, a_text.text_len() - closer_len)]);
    let b_body = &b_text[TextRange::new(opener_len, b_text.text_len() - closer_len)];
    // If `a` ends in a short octal escape and `b` starts with an octal digit,
    // pad the escape to three digits so the digits don't merge into a
    // different escape (e.g. `"\1" "7"` must become `"\0017"`, not `"\17"`).
    if !a_string_flags.is_raw_string() && matches!(b_body.bytes().next(), Some(b'0'..=b'7')) {
        normalize_ending_octal(&mut a_body);
    }
    let concatenation = format!("{a_prefix}{quotes}{a_body}{b_body}{quotes}");
    let range = TextRange::new(a_string_range.start(), b_string_range.end());
    Some(Fix::safe_edit(Edit::range_replacement(
        concatenation,
        range,
    )))
}
/// Pads an octal escape at the end of the string out to three digits,
/// if necessary (e.g. `\1` -> `\001`, `\12` -> `\012`), so that appending
/// further digit characters cannot change its meaning.
fn normalize_ending_octal(text: &mut Cow<'_, str>) {
    // Anything shorter than `\d` cannot end in an octal escape.
    if text.len() < 2 {
        return;
    }
    let mut rev = text.bytes().rev();
    let Some(last @ b'0'..=b'7') = rev.next() else {
        return;
    };
    if has_odd_consecutive_backslashes(rev.clone()) {
        // "\y" -> "\00y"
        let head = &text[..text.len() - 2];
        *text = Cow::Owned(format!("{head}\\00{}", last as char));
    } else if let Some(mid @ b'0'..=b'7') = rev.next() {
        // "\xy" -> "\0xy"
        if has_odd_consecutive_backslashes(rev.clone()) {
            let head = &text[..text.len() - 3];
            *text = Cow::Owned(format!(
                "{head}\\0{}{}",
                mid as char, last as char
            ));
        }
    }
}
/// Returns `true` if the iterator starts with an odd-length run of
/// backslash bytes (i.e. the character that follows the run, reading the
/// original string forward, is escaped).
fn has_odd_consecutive_backslashes(itr: impl Iterator<Item = u8>) -> bool {
    // The parity of the leading `\` run decides the answer.
    itr.take_while(|byte| *byte == b'\\').count() % 2 == 1
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_implicit_str_concat/rules/mod.rs | crates/ruff_linter/src/rules/flake8_implicit_str_concat/rules/mod.rs | pub(crate) use collection_literal::*;
pub(crate) use explicit::*;
pub(crate) use implicit::*;
mod collection_literal;
mod explicit;
mod implicit;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_return/branch.rs | crates/ruff_linter/src/rules/flake8_return/branch.rs | use std::fmt;
/// The kind of `if`-chain clause a superfluous-else diagnostic points at.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub(crate) enum Branch {
    /// An `elif` clause (an `else` that carries a test).
    Elif,
    /// A plain `else` clause.
    Else,
}
impl fmt::Display for Branch {
    /// Renders the branch as the Python keyword it corresponds to.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let keyword = match self {
            Branch::Elif => "elif",
            Branch::Else => "else",
        };
        fmt.write_str(keyword)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_return/visitor.rs | crates/ruff_linter/src/rules/flake8_return/visitor.rs | use ruff_python_ast::{self as ast, ElifElseClause, Expr, Identifier, Stmt};
use rustc_hash::FxHashSet;
use ruff_python_ast::visitor;
use ruff_python_ast::visitor::Visitor;
use ruff_python_semantic::SemanticModel;
/// Everything the `flake8-return` rules need to know about a single function
/// body, collected in one traversal by `ReturnVisitor`.
#[derive(Default)]
pub(super) struct Stack<'data> {
    /// The `return` statements in the current function.
    pub(super) returns: Vec<&'data ast::StmtReturn>,
    /// The `elif` or `else` statements in the current function.
    pub(super) elifs_elses: Vec<(&'data [Stmt], &'data ElifElseClause)>,
    /// The non-local variables in the current function.
    pub(super) non_locals: FxHashSet<&'data str>,
    /// The annotated variables in the current function.
    ///
    /// For example, consider:
    /// ```python
    /// x: int
    ///
    /// if True:
    ///     x = foo()
    /// return x
    /// ```
    ///
    /// In this case, the annotation on `x` is used to cast the return value
    /// of `foo()` to an `int`. Removing the `x = foo()` statement would
    /// change the return type of the function.
    pub(super) annotations: FxHashSet<&'data str>,
    /// Whether the current function is a generator.
    pub(super) is_generator: bool,
    /// The `assignment`-to-`return` statement pairs in the current function.
    /// TODO(charlie): Remove the extra [`Stmt`] here, which is necessary to support statement
    /// removal for the `return` statement.
    pub(super) assignment_return:
        Vec<(&'data ast::StmtAssign, &'data ast::StmtReturn, &'data Stmt)>,
}
/// AST visitor that populates a [`Stack`] for one function body. Nested
/// function and class bodies are deliberately not traversed (see
/// `visit_stmt`), so the collected data describes only the current function.
pub(super) struct ReturnVisitor<'semantic, 'data> {
    /// The semantic model of the current file.
    semantic: &'semantic SemanticModel<'data>,
    /// The current stack of nodes.
    pub(super) stack: Stack<'data>,
    /// The preceding sibling of the current node.
    sibling: Option<&'data Stmt>,
    /// The parent nodes of the current node.
    parents: Vec<&'data Stmt>,
}
impl<'semantic, 'data> ReturnVisitor<'semantic, 'data> {
    /// Creates a visitor with an empty [`Stack`] and no recorded
    /// sibling/parent statements.
    pub(super) fn new(semantic: &'semantic SemanticModel<'data>) -> Self {
        Self {
            semantic,
            stack: Stack::default(),
            sibling: None,
            parents: Vec::new(),
        }
    }
}
impl<'a> Visitor<'a> for ReturnVisitor<'_, 'a> {
    fn visit_stmt(&mut self, stmt: &'a Stmt) {
        match stmt {
            Stmt::ClassDef(ast::StmtClassDef { decorator_list, .. }) => {
                // Visit the decorators, etc.
                self.sibling = Some(stmt);
                self.parents.push(stmt);
                for decorator in decorator_list {
                    visitor::walk_decorator(self, decorator);
                }
                self.parents.pop();
                // But don't recurse into the body.
                return;
            }
            Stmt::FunctionDef(ast::StmtFunctionDef {
                parameters,
                decorator_list,
                returns,
                ..
            }) => {
                // Visit the decorators, etc.
                self.sibling = Some(stmt);
                self.parents.push(stmt);
                for decorator in decorator_list {
                    visitor::walk_decorator(self, decorator);
                }
                if let Some(returns) = returns {
                    visitor::walk_expr(self, returns);
                }
                visitor::walk_parameters(self, parameters);
                self.parents.pop();
                // But don't recurse into the body.
                return;
            }
            Stmt::Global(ast::StmtGlobal {
                names,
                range: _,
                node_index: _,
            })
            | Stmt::Nonlocal(ast::StmtNonlocal {
                names,
                range: _,
                node_index: _,
            }) => {
                // Record `global`/`nonlocal` names; assignments to them are
                // later exempted from the unnecessary-assignment check.
                self.stack
                    .non_locals
                    .extend(names.iter().map(Identifier::as_str));
            }
            Stmt::AnnAssign(ast::StmtAnnAssign { target, value, .. }) => {
                // Ex) `x: int`
                if value.is_none() {
                    if let Expr::Name(name) = target.as_ref() {
                        self.stack.annotations.insert(name.id.as_str());
                    }
                }
            }
            Stmt::Return(stmt_return) => {
                // If the `return` statement is preceded by an `assignment` statement, then the
                // `assignment` statement may be redundant.
                if let Some(sibling) = self.sibling {
                    match sibling {
                        // Example:
                        // ```python
                        // def foo():
                        //     x = 1
                        //     return x
                        // ```
                        Stmt::Assign(stmt_assign) => {
                            self.stack
                                .assignment_return
                                .push((stmt_assign, stmt_return, stmt));
                        }
                        // Example:
                        // ```python
                        // def foo():
                        //     with open("foo.txt", "r") as f:
                        //         x = f.read()
                        //     return x
                        // ```
                        Stmt::With(with) => {
                            if let Some(stmt_assign) =
                                with.body.last().and_then(Stmt::as_assign_stmt)
                            {
                                if !has_conditional_body(with, self.semantic) {
                                    self.stack.assignment_return.push((
                                        stmt_assign,
                                        stmt_return,
                                        stmt,
                                    ));
                                }
                            }
                        }
                        _ => {}
                    }
                }
                self.stack.returns.push(stmt_return);
            }
            Stmt::If(ast::StmtIf {
                body,
                elif_else_clauses,
                ..
            }) => {
                // Only the first `elif`/`else` clause is recorded; later
                // clauses are reached as the traversal recurses.
                if let Some(first) = elif_else_clauses.first() {
                    self.stack.elifs_elses.push((body, first));
                }
            }
            _ => {}
        }
        // Track this statement as the preceding sibling/parent before
        // recursing into its children.
        self.sibling = Some(stmt);
        self.parents.push(stmt);
        visitor::walk_stmt(self, stmt);
        self.parents.pop();
    }
    fn visit_expr(&mut self, expr: &'a Expr) {
        match expr {
            // Any `yield`/`yield from` makes the function a generator; its
            // children aren't walked further.
            Expr::YieldFrom(_) | Expr::Yield(_) => {
                self.stack.is_generator = true;
            }
            _ => visitor::walk_expr(self, expr),
        }
    }
    fn visit_body(&mut self, body: &'a [Stmt]) {
        // A nested body starts with no preceding sibling; restore the outer
        // sibling once the body has been walked.
        let sibling = self.sibling;
        self.sibling = None;
        visitor::walk_body(self, body);
        self.sibling = sibling;
    }
}
/// Returns `true` if the [`With`] statement is known to have a conditional body. In other words:
/// if the [`With`] statement's body may or may not run to completion.
///
/// For example, in the following, it's unsafe to inline the `return` into the `with`, since if
/// `data.decode()` fails, the behavior of the program will differ. (As-is, the function will return
/// the input `data`; if we inline the `return`, the function will return `None`.)
///
/// ```python
/// def func(data):
///     with suppress(JSONDecoderError):
///         data = data.decode()
///     return data
/// ```
fn has_conditional_body(with: &ast::StmtWith, semantic: &SemanticModel) -> bool {
    with.items.iter().any(|item| {
        // Only a call expression used as the context manager can be
        // `contextlib.suppress(...)`.
        let Expr::Call(ast::ExprCall { func, .. }) = &item.context_expr else {
            return false;
        };
        semantic
            .resolve_qualified_name(func)
            .is_some_and(|qualified_name| {
                qualified_name.segments() == ["contextlib", "suppress"]
            })
    })
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_return/helpers.rs | crates/ruff_linter/src/rules/flake8_return/helpers.rs | use ruff_python_ast as ast;
use ruff_python_ast::Stmt;
use ruff_source_file::UniversalNewlines;
use ruff_text_size::{Ranged, TextSize};
use crate::Locator;
/// Returns `true` if any of the function's `return` statements carries a
/// value other than a bare `None`.
pub(super) fn result_exists(returns: &[&ast::StmtReturn]) -> bool {
    for stmt in returns {
        if let Some(value) = stmt.value.as_deref() {
            if !value.is_none_literal_expr() {
                return true;
            }
        }
    }
    false
}
/// Given a statement, find its "logical end".
///
/// For example: the statement could be followed by a trailing semicolon, by an end-of-line
/// comment, or by any number of continuation lines (and then by a comment, and so on).
///
/// This method assumes that the statement is the last statement in its body; specifically, that
/// the statement isn't followed by a semicolon, followed by a multi-line statement.
pub(super) fn end_of_last_statement(stmt: &Stmt, locator: &Locator) -> TextSize {
    // Scan forward line by line: the first line not ending in a `\`
    // continuation marks where the statement logically ends.
    locator
        .after(stmt.end())
        .universal_newlines()
        .find(|line| !line.ends_with('\\'))
        .map_or_else(|| locator.text_len(), |line| stmt.end() + line.end())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_return/mod.rs | crates/ruff_linter/src/rules/flake8_return/mod.rs | //! Rules from [flake8-return](https://pypi.org/project/flake8-return/).
mod branch;
mod helpers;
pub(crate) mod rules;
mod visitor;
#[cfg(test)]
mod tests {
    use std::path::Path;

    use anyhow::Result;
    use test_case::test_case;

    use crate::assert_diagnostics;
    use crate::registry::Rule;
    use crate::settings::LinterSettings;
    use crate::test::test_path;

    // Each case runs one RET rule against its dedicated fixture file and
    // compares the produced diagnostics against the stored snapshot.
    #[test_case(Rule::UnnecessaryReturnNone, Path::new("RET501.py"))]
    #[test_case(Rule::ImplicitReturnValue, Path::new("RET502.py"))]
    #[test_case(Rule::ImplicitReturn, Path::new("RET503.py"))]
    #[test_case(Rule::UnnecessaryAssign, Path::new("RET504.py"))]
    #[test_case(Rule::SuperfluousElseReturn, Path::new("RET505.py"))]
    #[test_case(Rule::SuperfluousElseRaise, Path::new("RET506.py"))]
    #[test_case(Rule::SuperfluousElseContinue, Path::new("RET507.py"))]
    #[test_case(Rule::SuperfluousElseBreak, Path::new("RET508.py"))]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        // Snapshot name, e.g. `RET501_RET501.py`.
        let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("flake8_return").join(path).as_path(),
            &LinterSettings::for_rule(rule_code),
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_return/rules/function.rs | crates/ruff_linter/src/rules/flake8_return/rules/function.rs | use anyhow::Result;
use ruff_diagnostics::Applicability;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::{is_const_false, is_const_true};
use ruff_python_ast::stmt_if::elif_else_range;
use ruff_python_ast::token::TokenKind;
use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::whitespace::indentation;
use ruff_python_ast::{self as ast, Decorator, ElifElseClause, Expr, Stmt};
use ruff_python_semantic::SemanticModel;
use ruff_python_semantic::analyze::visibility::is_property;
use ruff_python_trivia::{SimpleTokenKind, SimpleTokenizer, is_python_whitespace};
use ruff_source_file::LineRanges;
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::checkers::ast::Checker;
use crate::fix::edits;
use crate::fix::edits::adjust_indentation;
use crate::registry::Rule;
use crate::rules::flake8_return::helpers::end_of_last_statement;
use crate::{AlwaysFixableViolation, FixAvailability, Violation};
use crate::{Edit, Fix};
use crate::rules::flake8_return::branch::Branch;
use crate::rules::flake8_return::helpers::result_exists;
use crate::rules::flake8_return::visitor::{ReturnVisitor, Stack};
/// ## What it does
/// Checks for the presence of a `return None` statement when `None` is the only
/// possible return value.
///
/// ## Why is this bad?
/// Python implicitly assumes `return None` if an explicit `return` value is
/// omitted. Therefore, explicitly returning `None` is redundant and should be
/// avoided when it is the only possible `return` value across all code paths
/// in a given function.
///
/// ## Example
/// ```python
/// def foo(bar):
///     if not bar:
///         return
///     return None
/// ```
///
/// Use instead:
/// ```python
/// def foo(bar):
///     if not bar:
///         return
///     return
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe for cases in which comments would be
/// dropped from the `return` statement.
///
/// ## Options
///
/// This rule ignores functions marked as properties.
///
/// - `lint.pydocstyle.property-decorators`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.154")]
pub(crate) struct UnnecessaryReturnNone;

// User-facing diagnostic message and fix title for RET501.
impl AlwaysFixableViolation for UnnecessaryReturnNone {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Do not explicitly `return None` in function if it is the only possible return value"
            .to_string()
    }
    fn fix_title(&self) -> String {
        "Remove explicit `return None`".to_string()
    }
}
/// ## What it does
/// Checks for the presence of a `return` statement with no explicit value,
/// for functions that return non-`None` values elsewhere.
///
/// ## Why is this bad?
/// Including a `return` statement with no explicit value can cause confusion
/// when other `return` statements in the function return non-`None` values.
/// Python implicitly assumes return `None` if no other return value is present.
/// Adding an explicit `return None` can make the code more readable by clarifying
/// intent.
///
/// ## Example
/// ```python
/// def foo(bar):
///     if not bar:
///         return
///     return 1
/// ```
///
/// Use instead:
/// ```python
/// def foo(bar):
///     if not bar:
///         return None
///     return 1
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.154")]
pub(crate) struct ImplicitReturnValue;

// User-facing diagnostic message and fix title for RET502.
impl AlwaysFixableViolation for ImplicitReturnValue {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Do not implicitly `return None` in function able to return non-`None` value".to_string()
    }
    fn fix_title(&self) -> String {
        "Add explicit `None` return value".to_string()
    }
}
/// ## What it does
/// Checks for missing explicit `return` statements at the end of functions
/// that can return non-`None` values.
///
/// ## Why is this bad?
/// The lack of an explicit `return` statement at the end of a function that
/// can return non-`None` values can cause confusion. Python implicitly returns
/// `None` if no other return value is present. Adding an explicit
/// `return None` can make the code more readable by clarifying intent.
///
/// ## Example
/// ```python
/// def foo(bar):
///     if not bar:
///         return 1
/// ```
///
/// Use instead:
/// ```python
/// def foo(bar):
///     if not bar:
///         return 1
///     return None
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.154")]
pub(crate) struct ImplicitReturn;

// User-facing diagnostic message and fix title for RET503.
impl AlwaysFixableViolation for ImplicitReturn {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing explicit `return` at the end of function able to return non-`None` value"
            .to_string()
    }
    fn fix_title(&self) -> String {
        "Add explicit `return` statement".to_string()
    }
}
/// ## What it does
/// Checks for variable assignments that immediately precede a `return` of the
/// assigned variable.
///
/// ## Why is this bad?
/// The variable assignment is not necessary, as the value can be returned
/// directly.
///
/// ## Example
/// ```python
/// def foo():
///     bar = 1
///     return bar
/// ```
///
/// Use instead:
/// ```python
/// def foo():
///     return 1
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.154")]
pub(crate) struct UnnecessaryAssign {
    /// Name of the needlessly-assigned variable, interpolated into the message.
    name: String,
}

// User-facing diagnostic message and fix title for RET504.
impl AlwaysFixableViolation for UnnecessaryAssign {
    #[derive_message_formats]
    fn message(&self) -> String {
        let UnnecessaryAssign { name } = self;
        format!("Unnecessary assignment to `{name}` before `return` statement")
    }
    fn fix_title(&self) -> String {
        "Remove unnecessary assignment".to_string()
    }
}
/// ## What it does
/// Checks for `else` statements with a `return` statement in the preceding
/// `if` block.
///
/// ## Why is this bad?
/// The `else` statement is not needed as the `return` statement will always
/// break out of the enclosing function. Removing the `else` will reduce
/// nesting and make the code more readable.
///
/// ## Example
/// ```python
/// def foo(bar, baz):
///     if bar:
///         return 1
///     else:
///         return baz
/// ```
///
/// Use instead:
/// ```python
/// def foo(bar, baz):
///     if bar:
///         return 1
///     return baz
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.154")]
pub(crate) struct SuperfluousElseReturn {
    /// Whether the flagged clause is an `elif` or an `else`.
    branch: Branch,
}

// User-facing diagnostic text for RET505; the fix is only sometimes available.
impl Violation for SuperfluousElseReturn {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        let SuperfluousElseReturn { branch } = self;
        format!("Unnecessary `{branch}` after `return` statement")
    }
    fn fix_title(&self) -> Option<String> {
        let SuperfluousElseReturn { branch } = self;
        Some(format!("Remove unnecessary `{branch}`"))
    }
}
/// ## What it does
/// Checks for `else` statements with a `raise` statement in the preceding `if`
/// block.
///
/// ## Why is this bad?
/// The `else` statement is not needed as the `raise` statement will always
/// break out of the current scope. Removing the `else` will reduce nesting
/// and make the code more readable.
///
/// ## Example
/// ```python
/// def foo(bar, baz):
///     if bar == "Specific Error":
///         raise Exception(bar)
///     else:
///         raise Exception(baz)
/// ```
///
/// Use instead:
/// ```python
/// def foo(bar, baz):
///     if bar == "Specific Error":
///         raise Exception(bar)
///     raise Exception(baz)
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.154")]
pub(crate) struct SuperfluousElseRaise {
    /// Whether the flagged clause is an `elif` or an `else`.
    branch: Branch,
}

// User-facing diagnostic text for RET506; the fix is only sometimes available.
impl Violation for SuperfluousElseRaise {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        let SuperfluousElseRaise { branch } = self;
        format!("Unnecessary `{branch}` after `raise` statement")
    }
    fn fix_title(&self) -> Option<String> {
        let SuperfluousElseRaise { branch } = self;
        Some(format!("Remove unnecessary `{branch}`"))
    }
}
/// ## What it does
/// Checks for `else` statements with a `continue` statement in the preceding
/// `if` block.
///
/// ## Why is this bad?
/// The `else` statement is not needed, as the `continue` statement will always
/// continue onto the next iteration of a loop. Removing the `else` will reduce
/// nesting and make the code more readable.
///
/// ## Example
/// ```python
/// def foo(bar, baz):
///     for i in bar:
///         if i < baz:
///             continue
///         else:
///             x = 0
/// ```
///
/// Use instead:
/// ```python
/// def foo(bar, baz):
///     for i in bar:
///         if i < baz:
///             continue
///         x = 0
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.154")]
pub(crate) struct SuperfluousElseContinue {
    /// Whether the flagged clause is an `elif` or an `else`.
    branch: Branch,
}

// User-facing diagnostic text for RET507; the fix is only sometimes available.
impl Violation for SuperfluousElseContinue {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        let SuperfluousElseContinue { branch } = self;
        format!("Unnecessary `{branch}` after `continue` statement")
    }
    fn fix_title(&self) -> Option<String> {
        let SuperfluousElseContinue { branch } = self;
        Some(format!("Remove unnecessary `{branch}`"))
    }
}
/// ## What it does
/// Checks for `else` statements with a `break` statement in the preceding `if`
/// block.
///
/// ## Why is this bad?
/// The `else` statement is not needed, as the `break` statement will always
/// break out of the loop. Removing the `else` will reduce nesting and make the
/// code more readable.
///
/// ## Example
/// ```python
/// def foo(bar, baz):
///     for i in bar:
///         if i > baz:
///             break
///         else:
///             x = 0
/// ```
///
/// Use instead:
/// ```python
/// def foo(bar, baz):
///     for i in bar:
///         if i > baz:
///             break
///         x = 0
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.154")]
pub(crate) struct SuperfluousElseBreak {
    /// Whether the flagged clause is an `elif` or an `else`.
    branch: Branch,
}

// User-facing diagnostic text for RET508; the fix is only sometimes available.
impl Violation for SuperfluousElseBreak {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        let SuperfluousElseBreak { branch } = self;
        format!("Unnecessary `{branch}` after `break` statement")
    }
    fn fix_title(&self) -> Option<String> {
        let SuperfluousElseBreak { branch } = self;
        Some(format!("Remove unnecessary `{branch}`"))
    }
}
/// RET501
///
/// Reports every `return None` when `None` is the only possible return value.
/// Property functions are exempt entirely.
fn unnecessary_return_none(checker: &Checker, decorator_list: &[Decorator], stack: &Stack) {
    // The property check is loop-invariant, so hoist it out of the
    // per-`return` loop instead of re-evaluating it for each statement.
    if is_property(
        decorator_list,
        checker.settings().pydocstyle.property_decorators(),
        checker.semantic(),
    ) {
        return;
    }
    for stmt in &stack.returns {
        let Some(expr) = stmt.value.as_deref() else {
            continue;
        };
        if !expr.is_none_literal_expr() {
            continue;
        }
        let mut diagnostic = checker.report_diagnostic(UnnecessaryReturnNone, stmt.range());
        let edit = Edit::range_replacement("return".to_string(), stmt.range());
        diagnostic.set_fix(Fix::applicable_edit(
            edit,
            // Rewriting the statement would drop any comment inside its
            // range, so only mark the fix safe when there is none.
            if checker.comment_ranges().intersects(stmt.range()) {
                Applicability::Unsafe
            } else {
                Applicability::Safe
            },
        ));
    }
}
/// RET502
///
/// Flags every bare `return` (no value) and offers `return None` instead.
fn implicit_return_value(checker: &Checker, stack: &Stack) {
    for stmt in stack.returns.iter().filter(|stmt| stmt.value.is_none()) {
        checker
            .report_diagnostic(ImplicitReturnValue, stmt.range())
            .set_fix(Fix::safe_edit(Edit::range_replacement(
                "return None".to_string(),
                stmt.range(),
            )));
    }
}
/// Return `true` if the `func` appears to be non-returning, either because it
/// is a well-known exiting function or because its definition is annotated
/// with `NoReturn`/`Never`.
fn is_noreturn_func(func: &Expr, semantic: &SemanticModel) -> bool {
    // First, look for known functions that never return from the standard library and popular
    // libraries.
    if semantic
        .resolve_qualified_name(func)
        .is_some_and(|qualified_name| {
            matches!(
                qualified_name.segments(),
                ["" | "builtins" | "sys" | "_thread" | "pytest", "exit"]
                    | ["" | "builtins", "quit"]
                    | ["os" | "posix", "_exit" | "abort"]
                    | ["_winapi", "ExitProcess"]
                    | ["pytest", "fail" | "skip" | "xfail"]
            ) || semantic.match_typing_qualified_name(&qualified_name, "assert_never")
        })
    {
        return true;
    }
    // Second, look for `NoReturn` annotations on the return type.
    // Resolve the call target back to its `def` so we can inspect the
    // declared return annotation; bail out at any unresolved step.
    let Some(func_binding) = semantic.lookup_attribute(func) else {
        return false;
    };
    let Some(node_id) = semantic.binding(func_binding).source else {
        return false;
    };
    let Stmt::FunctionDef(ast::StmtFunctionDef { returns, .. }) = semantic.statement(node_id)
    else {
        return false;
    };
    let Some(returns) = returns.as_ref() else {
        return false;
    };
    let Some(qualified_name) = semantic.resolve_qualified_name(returns) else {
        return false;
    };
    semantic.match_typing_qualified_name(&qualified_name, "NoReturn")
        || semantic.match_typing_qualified_name(&qualified_name, "Never")
}
/// Reports RET503 at `range` and, when the statement's indentation can be
/// determined, attaches a fix that appends `return None` after `stmt`.
fn add_return_none(checker: &Checker, stmt: &Stmt, range: TextRange) {
    let mut diagnostic = checker.report_diagnostic(ImplicitReturn, range);
    // Without a known indentation level there is no way to place the new
    // statement, so report the diagnostic without a fix.
    let Some(indent) = indentation(checker.source(), stmt) else {
        return;
    };
    let content = format!(
        "{}{indent}return None",
        checker.stylist().line_ending().as_str()
    );
    diagnostic.set_fix(Fix::unsafe_edit(Edit::insertion(
        content,
        end_of_last_statement(stmt, checker.locator()),
    )));
}
/// Returns `true` if executing `stmt` as the final statement of a function can
/// fall through without hitting an explicit `return` (i.e., the function may
/// implicitly return `None`).
fn has_implicit_return(checker: &Checker, stmt: &Stmt) -> bool {
    match stmt {
        Stmt::If(ast::StmtIf {
            body,
            elif_else_clauses,
            ..
        }) => {
            // Any branch whose last statement can fall through makes the
            // whole `if` fall through.
            if body
                .last()
                .is_some_and(|last| has_implicit_return(checker, last))
            {
                return true;
            }
            if elif_else_clauses.iter().any(|clause| {
                clause
                    .body
                    .last()
                    .is_some_and(|last| has_implicit_return(checker, last))
            }) {
                return true;
            }
            // Check if we don't have an else clause
            matches!(
                elif_else_clauses.last(),
                None | Some(ast::ElifElseClause { test: Some(_), .. })
            )
        }
        // `assert False` always raises, so control never falls through.
        Stmt::Assert(ast::StmtAssert { test, .. }) if is_const_false(test) => false,
        // `while True:` is treated as never terminating normally.
        Stmt::While(ast::StmtWhile { test, .. }) if is_const_true(test) => false,
        Stmt::For(ast::StmtFor { orelse, .. }) | Stmt::While(ast::StmtWhile { orelse, .. }) => {
            // The loop body may not execute at all; only the `else` clause
            // is inspected.
            if let Some(last_stmt) = orelse.last() {
                has_implicit_return(checker, last_stmt)
            } else {
                true
            }
        }
        Stmt::Match(ast::StmtMatch { cases, .. }) => cases.iter().any(|case| {
            case.body
                .last()
                .is_some_and(|last| has_implicit_return(checker, last))
        }),
        Stmt::With(ast::StmtWith { body, .. }) => body
            .last()
            .is_some_and(|last_stmt| has_implicit_return(checker, last_stmt)),
        Stmt::Return(_) | Stmt::Raise(_) | Stmt::Try(_) => false,
        // A trailing call to a known non-returning function (e.g. `sys.exit()`)
        // cannot fall through.
        Stmt::Expr(ast::StmtExpr { value, .. })
            if matches!(
                value.as_ref(),
                Expr::Call(ast::ExprCall { func, .. })
                    if is_noreturn_func(func, checker.semantic())
            ) =>
        {
            false
        }
        _ => true,
    }
}
/// RET503
///
/// Reports the function when its final statement can fall through without an
/// explicit `return`.
fn implicit_return(checker: &Checker, function_def: &ast::StmtFunctionDef, stmt: &Stmt) {
    if !has_implicit_return(checker, stmt) {
        return;
    }
    add_return_none(checker, stmt, function_def.range());
}
/// RET504
///
/// Flags assignments to a local variable that is immediately returned. Runs
/// over `function_stmt` after the semantic model is fully built, since it
/// needs complete binding/reference information for the assigned name.
pub(crate) fn unnecessary_assign(checker: &Checker, function_stmt: &Stmt) {
    let Stmt::FunctionDef(function_def) = function_stmt else {
        return;
    };
    let Some(stack) = create_stack(checker, function_def) else {
        return;
    };
    // Only functions with at least one non-`None` return are considered.
    if !result_exists(&stack.returns) {
        return;
    }
    let Some(function_scope) = checker.semantic().function_scope(function_def) else {
        return;
    };
    for (assign, return_, stmt) in &stack.assignment_return {
        // Identify, e.g., `return x`.
        let Some(value) = return_.value.as_ref() else {
            continue;
        };
        let Expr::Name(ast::ExprName {
            id: returned_id, ..
        }) = value.as_ref()
        else {
            continue;
        };
        // Identify, e.g., `x = 1`.
        if assign.targets.len() > 1 {
            continue;
        }
        let Some(target) = assign.targets.first() else {
            continue;
        };
        let Expr::Name(ast::ExprName {
            id: assigned_id, ..
        }) = target
        else {
            continue;
        };
        // The assignment must bind exactly the name being returned.
        if returned_id != assigned_id {
            continue;
        }
        // Ignore variables that have an annotation defined elsewhere.
        if stack.annotations.contains(assigned_id.as_str()) {
            continue;
        }
        // Ignore `nonlocal` and `global` variables.
        if stack.non_locals.contains(assigned_id.as_str()) {
            continue;
        }
        let Some(assigned_binding) = function_scope
            .get(assigned_id)
            .map(|binding_id| checker.semantic().binding(binding_id))
        else {
            continue;
        };
        // Check if there's any reference made to `assigned_binding` in another scope, e.g, nested
        // functions. If there is, ignore them.
        if assigned_binding
            .references()
            .map(|reference_id| checker.semantic().reference(reference_id))
            .any(|reference| reference.scope_id() != assigned_binding.scope)
        {
            continue;
        }
        let mut diagnostic = checker.report_diagnostic(
            UnnecessaryAssign {
                name: assigned_id.to_string(),
            },
            value.range(),
        );
        diagnostic.try_set_fix(|| {
            // Delete the `return` statement. There's no need to treat this as an isolated
            // edit, since we're editing the preceding statement, so no conflicting edit would
            // be allowed to remove that preceding statement.
            let delete_return =
                edits::delete_stmt(stmt, None, checker.locator(), checker.indexer());
            // Locate the `=` of the assignment; an assignment statement always
            // has one before its value.
            let eq_token = checker
                .tokens()
                .before(assign.value.start())
                .iter()
                .rfind(|token| token.kind() == TokenKind::Equal)
                .unwrap();
            let content = checker.source();
            // Replace the `x = 1` statement with `return 1`.
            let replace_assign = Edit::range_replacement(
                // Keep the whitespace that already follows the `=`; only add a
                // space when the value immediately follows it.
                if content[eq_token.end().to_usize()..]
                    .chars()
                    .next()
                    .is_some_and(is_python_whitespace)
                {
                    "return".to_string()
                } else {
                    "return ".to_string()
                },
                // Replace from the start of the assignment statement to the end of the equals
                // sign.
                TextRange::new(assign.start(), eq_token.range().end()),
            );
            Ok(Fix::unsafe_edits(replace_assign, [delete_return]))
        });
    }
}
/// RET505, RET506, RET507, RET508
///
/// Reports the `elif`/`else` clause as superfluous when the preceding body
/// contains a `return`, `break`, `raise`, or `continue` statement. Returns
/// `true` if such a statement was found.
fn superfluous_else_node(
    checker: &Checker,
    if_elif_body: &[Stmt],
    elif_else: &ElifElseClause,
) -> bool {
    let branch = if elif_else.test.is_some() {
        Branch::Elif
    } else {
        Branch::Else
    };
    // Point at the `elif`/`else` keyword when it can be located; otherwise
    // fall back to the whole clause.
    let range = elif_else_range(elif_else, checker.locator().contents())
        .unwrap_or_else(|| elif_else.range());
    let Some(child) = if_elif_body.iter().find(|child| {
        child.is_return_stmt()
            || child.is_break_stmt()
            || child.is_raise_stmt()
            || child.is_continue_stmt()
    }) else {
        return false;
    };
    let diagnostic = if child.is_return_stmt() {
        checker.report_diagnostic_if_enabled(SuperfluousElseReturn { branch }, range)
    } else if child.is_break_stmt() {
        checker.report_diagnostic_if_enabled(SuperfluousElseBreak { branch }, range)
    } else if child.is_raise_stmt() {
        checker.report_diagnostic_if_enabled(SuperfluousElseRaise { branch }, range)
    } else {
        checker.report_diagnostic_if_enabled(SuperfluousElseContinue { branch }, range)
    };
    if let Some(mut diagnostic) = diagnostic {
        diagnostic.try_set_fix(|| remove_else(checker, elif_else));
    }
    true
}
/// RET505, RET506, RET507, RET508
///
/// Runs the superfluous-else check over every recorded (`if` body,
/// `elif`/`else` clause) pair.
fn superfluous_elif_else(checker: &Checker, stack: &Stack) {
    stack
        .elifs_elses
        .iter()
        .for_each(|(if_elif_body, elif_else)| {
            superfluous_else_node(checker, if_elif_body, elif_else);
        });
}
/// Collects the [`Stack`] for `function_def`, or `None` when the function is
/// empty, consists solely of a single `return`, or is a generator.
fn create_stack<'a>(
    checker: &'a Checker,
    function_def: &'a ast::StmtFunctionDef,
) -> Option<Stack<'a>> {
    let body = &function_def.body;
    // Skip empty functions.
    let last_stmt = body.last()?;
    // Skip functions that consist of a single return statement.
    if body.len() == 1 && last_stmt.is_return_stmt() {
        return None;
    }
    // Traverse the function body, collecting the stack as we go.
    let mut visitor = ReturnVisitor::new(checker.semantic());
    for stmt in body {
        visitor.visit_stmt(stmt);
    }
    // Avoid false positives for generators.
    if visitor.stack.is_generator {
        return None;
    }
    Some(visitor.stack)
}
/// Run all checks from the `flake8-return` plugin except `RET504`, which runs
/// after the semantic model is fully built.
pub(crate) fn function(checker: &Checker, function_def: &ast::StmtFunctionDef) {
    let ast::StmtFunctionDef {
        decorator_list,
        returns,
        body,
        ..
    } = function_def;
    let Some(stack) = create_stack(checker, function_def) else {
        return;
    };
    let Some(last_stmt) = body.last() else {
        return;
    };
    // The superfluous-else rules apply even to functions with no `return`s.
    if checker.any_rule_enabled(&[
        Rule::SuperfluousElseReturn,
        Rule::SuperfluousElseRaise,
        Rule::SuperfluousElseContinue,
        Rule::SuperfluousElseBreak,
    ]) {
        superfluous_elif_else(checker, &stack);
    }
    // Skip any functions without return statements.
    if stack.returns.is_empty() {
        return;
    }
    // If we have at least one non-`None` return...
    if result_exists(&stack.returns) {
        if checker.is_rule_enabled(Rule::ImplicitReturnValue) {
            implicit_return_value(checker, &stack);
        }
        if checker.is_rule_enabled(Rule::ImplicitReturn) {
            implicit_return(checker, function_def, last_stmt);
        }
    } else {
        if checker.is_rule_enabled(Rule::UnnecessaryReturnNone) {
            // Skip functions that have a return annotation that is not `None`.
            if returns.as_deref().is_none_or(Expr::is_none_literal_expr) {
                unnecessary_return_none(checker, decorator_list, &stack);
            }
        }
    }
}
/// Generate a [`Fix`] to remove an `else` or `elif` clause.
fn remove_else(checker: &Checker, elif_else: &ElifElseClause) -> Result<Fix> {
    let locator = checker.locator();
    let indexer = checker.indexer();
    let stylist = checker.stylist();
    if elif_else.test.is_some() {
        // Ex) `elif` -> `if`: delete the two-character `el` prefix.
        Ok(Fix::safe_edit(Edit::deletion(
            elif_else.start(),
            elif_else.start() + TextSize::from(2),
        )))
    } else {
        // The start of the line where the `else` is.
        let else_line_start = locator.line_start(elif_else.start());
        // Making a tokenizer to find the colon for the `else` — it is not
        // always on the same line!
        let mut else_line_tokenizer =
            SimpleTokenizer::starts_at(elif_else.start(), locator.contents());
        // Find the colon for the `else`.
        let Some(else_colon) =
            else_line_tokenizer.find(|token| token.kind == SimpleTokenKind::Colon)
        else {
            return Err(anyhow::anyhow!("Cannot find `:` in `else` statement"));
        };
        // Get the indentation of the `else`, since that is the indent level we want to end with.
        let Some(desired_indentation) = indentation(locator.contents(), elif_else) else {
            return Err(anyhow::anyhow!("Compound statement cannot be inlined"));
        };
        // If the statement is on the same line as the `else`, just remove the `else: `.
        // Ex) `else: return True` -> `return True`
        if let Some(first) = elif_else.body.first() {
            if indexer.preceded_by_multi_statement_line(first, locator.contents()) {
                return Ok(Fix::safe_edit(Edit::deletion(
                    elif_else.start(),
                    first.start(),
                )));
            }
        }
        // We're deleting the `else`, and its colon, and the rest of the line(s) they're on,
        // so here we get the last position of the line the colon is on.
        let else_colon_end = locator.full_line_end(else_colon.end());
        // If there is a comment on the same line as the colon, keep it
        // and give it the proper indentation once we unindent it.
        let else_comment_after_colon = else_line_tokenizer
            .find(|token| token.kind.is_comment())
            .and_then(|token| {
                if token.kind == SimpleTokenKind::Comment && token.start() < else_colon_end {
                    return Some(format!(
                        "{desired_indentation}{}{}",
                        locator.slice(token),
                        stylist.line_ending().as_str(),
                    ));
                }
                None
            })
            .unwrap_or(String::new());
        // Re-indent the `else` body to the `else`'s own indentation level, then
        // replace everything from the start of the `else` line through the end
        // of the clause with the preserved comment (if any) plus that body.
        let indented = adjust_indentation(
            TextRange::new(else_colon_end, elif_else.end()),
            desired_indentation,
            locator,
            indexer,
            stylist,
        )?;
        Ok(Fix::safe_edit(Edit::replacement(
            format!("{else_comment_after_colon}{indented}"),
            else_line_start,
            elif_else.end(),
        )))
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_return/rules/mod.rs | crates/ruff_linter/src/rules/flake8_return/rules/mod.rs | pub(crate) use function::*;
mod function;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/perflint/helpers.rs | crates/ruff_linter/src/rules/perflint/helpers.rs | use ruff_python_trivia::{
BackwardsTokenizer, PythonWhitespace, SimpleToken, SimpleTokenKind, SimpleTokenizer,
};
use ruff_source_file::LineRanges;
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
/// Collect the text of all comments within `range`, skipping any comment that
/// falls inside one of `ranges_to_ignore`. Leading whitespace is trimmed from
/// each returned comment string.
pub(super) fn comment_strings_in_range<'a>(
    checker: &'a Checker,
    range: TextRange,
    ranges_to_ignore: &[TextRange],
) -> Vec<&'a str> {
    let mut comments = Vec::new();
    for comment in checker.comment_ranges().comments_in_range(range).iter() {
        // Comments inside of the append or iterator are preserved as-is, so
        // they don't need to be collected here.
        let ignored = ranges_to_ignore
            .iter()
            .any(|to_ignore| to_ignore.contains_range(*comment));
        if ignored {
            continue;
        }
        comments.push(checker.locator().slice(comment).trim_whitespace_start());
    }
    comments
}
/// Locate the semicolons immediately before and after `statement`, if any.
///
/// Returns `(semicolon_before, token_after_semicolon)`: the first element is
/// the `;` token that directly precedes the statement; the second is the token
/// *following* a trailing `;` (i.e., the start of the next statement on the
/// same line), or `None` if no statement follows.
fn semicolon_before_and_after(
    checker: &Checker,
    statement: TextRange,
) -> (Option<SimpleToken>, Option<SimpleToken>) {
    // Determine whether there's a semicolon either before or after the binding statement.
    // Since it's a binding statement, we can just check whether there's a semicolon immediately
    // after the whitespace in front of or behind it.
    let mut after_tokenizer =
        SimpleTokenizer::starts_at(statement.end(), checker.locator().contents()).skip_trivia();
    // If the first non-trivia token after the statement is `;`, yield the token
    // after it; otherwise there is nothing following on the same line.
    let after_semicolon = if after_tokenizer
        .next()
        .is_some_and(|token| token.kind() == SimpleTokenKind::Semi)
    {
        after_tokenizer.next()
    } else {
        None
    };
    // Walk backwards from the start of the statement; the nearest non-trivia
    // token is a `;` only when another statement precedes it on the same line.
    let semicolon_before = BackwardsTokenizer::up_to(
        statement.start(),
        checker.locator().contents(),
        checker.comment_ranges(),
    )
    .skip_trivia()
    .next()
    .filter(|token| token.kind() == SimpleTokenKind::Semi);
    (semicolon_before, after_semicolon)
}
/// Finds the range necessary to delete a statement (including any semicolons around it).
/// Returns the range and whether there were multiple statements on its line.
pub(super) fn statement_deletion_range(
    checker: &Checker,
    statement_range: TextRange,
) -> (TextRange, bool) {
    let locator = checker.locator();
    // If the binding has multiple statements on its line, the fix would be substantially more complicated.
    let (semicolon_before, after_semicolon) = semicolon_before_and_after(checker, statement_range);
    // If there are multiple binding statements in one line, we don't want to accidentally delete them.
    // Instead, we just delete the binding statement and leave any comments where they are.
    match (semicolon_before, after_semicolon) {
        // Sole statement on its line(s): delete the full line(s).
        // ```python
        // a = []
        // ```
        (None, None) => (locator.full_lines_range(statement_range), false),
        // A statement precedes it on the line: delete from the preceding `;`.
        // ```python
        // a = 1; b = []
        //      ^^^^^^^^
        // a = 1; b = []; c = 3
        //      ^^^^^^^^
        // ```
        (Some(semicolon_before), Some(_) | None) => (
            TextRange::new(semicolon_before.start(), statement_range.end()),
            true,
        ),
        // A statement follows it on the line: delete up to that statement's start.
        // ```python
        // a = []; b = 3
        // ^^^^^^^
        // ```
        (None, Some(after_semicolon)) => (
            TextRange::new(statement_range.start(), after_semicolon.start()),
            true,
        ),
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/perflint/mod.rs | crates/ruff_linter/src/rules/perflint/mod.rs | //! Rules from [perflint](https://pypi.org/project/perflint/).
mod helpers;
pub(crate) mod rules;
#[cfg(test)]
mod tests {
    use std::path::Path;
    use anyhow::Result;
    use ruff_python_ast::PythonVersion;
    use test_case::test_case;
    use crate::assert_diagnostics;
    use crate::registry::Rule;
    use crate::settings::LinterSettings;
    use crate::settings::types::PreviewMode;
    use crate::test::test_path;
    // Snapshot tests: each case lints the given fixture under the `perflint`
    // fixture directory with a single rule enabled (targeting Python 3.10)
    // and compares the resulting diagnostics against a stored snapshot.
    #[test_case(Rule::UnnecessaryListCast, Path::new("PERF101.py"))]
    #[test_case(Rule::IncorrectDictIterator, Path::new("PERF102.py"))]
    #[test_case(Rule::TryExceptInLoop, Path::new("PERF203.py"))]
    #[test_case(Rule::ManualListComprehension, Path::new("PERF401.py"))]
    #[test_case(Rule::ManualListCopy, Path::new("PERF402.py"))]
    #[test_case(Rule::ManualDictComprehension, Path::new("PERF403.py"))]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("perflint").join(path).as_path(),
            &LinterSettings::for_rule(rule_code).with_target_version(PythonVersion::PY310),
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
    // Same as `rules`, but with preview mode enabled (and a `preview__` prefix
    // on the snapshot name) to exercise the preview-only fixes.
    // TODO: remove this test case when the fixes for `perf401` and `perf403` are stabilized
    #[test_case(Rule::ManualDictComprehension, Path::new("PERF403.py"))]
    #[test_case(Rule::ManualListComprehension, Path::new("PERF401.py"))]
    fn preview_rules(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!(
            "preview__{}_{}",
            rule_code.noqa_code(),
            path.to_string_lossy()
        );
        let diagnostics = test_path(
            Path::new("perflint").join(path).as_path(),
            &LinterSettings {
                preview: PreviewMode::Enabled,
                unresolved_target_version: PythonVersion::PY310.into(),
                ..LinterSettings::for_rule(rule_code)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/perflint/rules/manual_list_copy.rs | crates/ruff_linter/src/rules/perflint/rules/manual_list_copy.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::any_over_expr;
use ruff_python_ast::{self as ast, Arguments, Expr, Stmt};
use ruff_python_semantic::analyze::typing::is_list;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for `for` loops that can be replaced by making a copy of a list.
///
/// ## Why is this bad?
/// When creating a copy of an existing list using a for-loop, prefer
/// `list` or `list.copy` instead. Making a direct copy is more readable and
/// more performant.
///
/// Using the below as an example, the `list`-based copy is ~2x faster on
/// Python 3.11.
///
/// Note that, as with all `perflint` rules, this is only intended as a
/// micro-optimization, and will have a negligible impact on performance in
/// most cases.
///
/// ## Example
/// ```python
/// original = list(range(10000))
/// filtered = []
/// for i in original:
///     filtered.append(i)
/// ```
///
/// Use instead:
/// ```python
/// original = list(range(10000))
/// filtered = list(original)
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.276")]
pub(crate) struct ManualListCopy;

// Message-only violation: no data is carried and no fix is attached.
impl Violation for ManualListCopy {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Use `list` or `list.copy` to create a copy of a list".to_string()
    }
}
/// PERF402
pub(crate) fn manual_list_copy(checker: &Checker, for_stmt: &ast::StmtFor) {
    // `async for` loops can't be replaced by a plain `list(...)` call.
    if for_stmt.is_async {
        return;
    }
    // The loop target must be a bare name (`for x in ...`).
    let Expr::Name(ast::ExprName { id, .. }) = &*for_stmt.target else {
        return;
    };
    // The body must be a single expression statement...
    let [stmt] = &*for_stmt.body else {
        return;
    };
    let Stmt::Expr(ast::StmtExpr { value, .. }) = stmt else {
        return;
    };
    // ...that is a call...
    let Expr::Call(ast::ExprCall {
        func,
        arguments:
            Arguments {
                args,
                keywords,
                range: _,
                node_index: _,
            },
        range,
        node_index: _,
    }) = value.as_ref()
    else {
        return;
    };
    // ...with exactly one positional argument and no keyword arguments...
    if !keywords.is_empty() {
        return;
    }
    let [arg] = &**args else {
        return;
    };
    // ...and the call is a method named `append` or `insert`.
    let Expr::Attribute(ast::ExprAttribute { attr, value, .. }) = func.as_ref() else {
        return;
    };
    if !matches!(attr.as_str(), "append" | "insert") {
        return;
    }
    // Only flag direct list copies (e.g., `for x in y: filtered.append(x)`).
    if arg.as_name_expr().is_none_or(|arg| arg.id != *id) {
        return;
    }
    // Avoid, e.g., `for x in y: filtered[x].append(x)`.
    if any_over_expr(value, &|expr| {
        expr.as_name_expr().is_some_and(|expr| expr.id == *id)
    }) {
        return;
    }
    // Avoid non-list values.
    let Some(name) = value.as_name_expr() else {
        return;
    };
    let Some(binding) = checker
        .semantic()
        .only_binding(name)
        .map(|id| checker.semantic().binding(id))
    else {
        return;
    };
    if !is_list(binding, checker.semantic()) {
        return;
    }
    checker.report_diagnostic(ManualListCopy, *range);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/perflint/rules/incorrect_dict_iterator.rs | crates/ruff_linter/src/rules/perflint/rules/incorrect_dict_iterator.rs | use std::fmt;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_ast::{Arguments, Expr};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix::edits::pad;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for uses of `dict.items()` that discard either the key or the value
/// when iterating over the dictionary.
///
/// ## Why is this bad?
/// If you only need the keys or values of a dictionary, you should use
/// `dict.keys()` or `dict.values()` respectively, instead of `dict.items()`.
/// These specialized methods are more efficient than `dict.items()`, as they
/// avoid allocating tuples for every item in the dictionary. They also
/// communicate the intent of the code more clearly.
///
/// Note that, as with all `perflint` rules, this is only intended as a
/// micro-optimization, and will have a negligible impact on performance in
/// most cases.
///
/// ## Example
/// ```python
/// obj = {"a": 1, "b": 2}
/// for key, value in obj.items():
///     print(value)
/// ```
///
/// Use instead:
/// ```python
/// obj = {"a": 1, "b": 2}
/// for value in obj.values():
///     print(value)
/// ```
///
/// ## Fix safety
/// The fix does not perform any type analysis and, as such, may suggest an
/// incorrect fix if the object in question does not duck-type as a mapping
/// (e.g., if it is missing a `.keys()` or `.values()` method, or if those
/// methods behave differently than they do on standard mapping types).
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.273")]
pub(crate) struct IncorrectDictIterator {
    // Which of `keys`/`values` the loop should call instead of `items`.
    subset: DictSubset,
}

impl AlwaysFixableViolation for IncorrectDictIterator {
    #[derive_message_formats]
    fn message(&self) -> String {
        let IncorrectDictIterator { subset } = self;
        format!("When using only the {subset} of a dict use the `{subset}()` method")
    }
    fn fix_title(&self) -> String {
        let IncorrectDictIterator { subset } = self;
        format!("Replace `.items()` with `.{subset}()`")
    }
}
/// PERF102
pub(crate) fn incorrect_dict_iterator(checker: &Checker, stmt_for: &ast::StmtFor) {
let Expr::Tuple(ast::ExprTuple { elts, .. }) = stmt_for.target.as_ref() else {
return;
};
let [key, value] = elts.as_slice() else {
return;
};
let Expr::Call(ast::ExprCall {
func,
arguments: Arguments { args, .. },
..
}) = stmt_for.iter.as_ref()
else {
return;
};
if !args.is_empty() {
return;
}
let Expr::Attribute(ast::ExprAttribute { attr, .. }) = func.as_ref() else {
return;
};
if attr != "items" {
return;
}
match (
checker.semantic().is_unused(key),
checker.semantic().is_unused(value),
) {
(true, true) => {
// Both the key and the value are unused.
}
(false, false) => {
// Neither the key nor the value are unused.
}
(true, false) => {
// The key is unused, so replace with `dict.values()`.
let mut diagnostic = checker.report_diagnostic(
IncorrectDictIterator {
subset: DictSubset::Values,
},
func.range(),
);
let replace_attribute = Edit::range_replacement("values".to_string(), attr.range());
let replace_target = Edit::range_replacement(
pad(
checker.locator().slice(value).to_string(),
stmt_for.target.range(),
checker.locator(),
),
stmt_for.target.range(),
);
diagnostic.set_fix(Fix::unsafe_edits(replace_attribute, [replace_target]));
}
(false, true) => {
// The value is unused, so replace with `dict.keys()`.
let mut diagnostic = checker.report_diagnostic(
IncorrectDictIterator {
subset: DictSubset::Keys,
},
func.range(),
);
let replace_attribute = Edit::range_replacement("keys".to_string(), attr.range());
let replace_target = Edit::range_replacement(
pad(
checker.locator().slice(key).to_string(),
stmt_for.target.range(),
checker.locator(),
),
stmt_for.target.range(),
);
diagnostic.set_fix(Fix::unsafe_edits(replace_attribute, [replace_target]));
}
}
}
/// Which projection of the dictionary the loop actually consumes.
#[derive(Debug, PartialEq, Eq)]
enum DictSubset {
    Keys,
    Values,
}

impl fmt::Display for DictSubset {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // The rendered name doubles as the `dict` method name in messages/fixes.
        let name = match self {
            DictSubset::Keys => "keys",
            DictSubset::Values => "values",
        };
        fmt.write_str(name)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/perflint/rules/manual_dict_comprehension.rs | crates/ruff_linter/src/rules/perflint/rules/manual_dict_comprehension.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{
self as ast, Expr, Stmt, comparable::ComparableExpr, helpers::any_over_expr,
};
use ruff_python_semantic::{Binding, analyze::typing::is_dict};
use ruff_source_file::LineRanges;
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::checkers::ast::Checker;
use crate::preview::is_fix_manual_dict_comprehension_enabled;
use crate::rules::perflint::helpers::{comment_strings_in_range, statement_deletion_range};
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for `for` loops that can be replaced by a dictionary comprehension.
///
/// ## Why is this bad?
/// When creating or extending a dictionary in a for-loop, prefer a dictionary
/// comprehension. Comprehensions are more readable and more performant.
///
/// For example, when comparing `{x: x for x in list(range(1000))}` to the `for`
/// loop version, the comprehension is ~10% faster on Python 3.11.
///
/// Note that, as with all `perflint` rules, this is only intended as a
/// micro-optimization, and will have a negligible impact on performance in
/// most cases.
///
/// ## Example
/// ```python
/// pairs = (("a", 1), ("b", 2))
/// result = {}
/// for x, y in pairs:
///     if y % 2:
///         result[x] = y
/// ```
///
/// Use instead:
/// ```python
/// pairs = (("a", 1), ("b", 2))
/// result = {x: y for x, y in pairs if y % 2}
/// ```
///
/// If you're appending to an existing dictionary, use the `update` method instead:
/// ```python
/// pairs = (("a", 1), ("b", 2))
/// result.update({x: y for x, y in pairs if y % 2})
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.5.0")]
pub(crate) struct ManualDictComprehension {
    // How the fix rewrites the loop (comprehension vs. `dict.update`).
    fix_type: DictComprehensionType,
    // Whether the original loop was `async for` (affects message wording).
    is_async: bool,
}

impl Violation for ManualDictComprehension {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        let modifier = if self.is_async { "an async" } else { "a" };
        match self.fix_type {
            DictComprehensionType::Comprehension => {
                format!("Use a dictionary comprehension instead of {modifier} for-loop")
            }
            DictComprehensionType::Update => {
                format!("Use `dict.update` instead of {modifier} for-loop")
            }
        }
    }
    fn fix_title(&self) -> Option<String> {
        let modifier = if self.is_async { "async " } else { "" };
        match self.fix_type {
            DictComprehensionType::Comprehension => Some(format!(
                "Replace {modifier}for loop with dict comprehension"
            )),
            DictComprehensionType::Update => {
                Some(format!("Replace {modifier}for loop with `dict.update`"))
            }
        }
    }
}
/// PERF403
pub(crate) fn manual_dict_comprehension(checker: &Checker, for_stmt: &ast::StmtFor) {
    let ast::StmtFor { body, target, .. } = for_stmt;
    let body = body.as_slice();
    let target = target.as_ref();
    // Accept either a bare assignment body, or a single `if` wrapping one
    // (which becomes the comprehension's filter).
    let (stmt, if_test) = match body {
        // ```python
        // for idx, name in enumerate(names):
        //     if idx % 2 == 0:
        //         result[name] = idx
        // ```
        [
            Stmt::If(ast::StmtIf {
                body,
                elif_else_clauses,
                test,
                ..
            }),
        ] => {
            // TODO(charlie): If there's an `else` clause, verify that the `else` has the
            // same structure.
            if !elif_else_clauses.is_empty() {
                return;
            }
            let [stmt] = body.as_slice() else {
                return;
            };
            (stmt, Some(test))
        }
        // ```python
        // for idx, name in enumerate(names):
        //     result[name] = idx
        // ```
        [stmt] => (stmt, None),
        _ => return,
    };
    // The (inner) statement must be a subscript assignment: `result[key] = value`.
    let Stmt::Assign(ast::StmtAssign {
        targets,
        value,
        range,
        node_index: _,
    }) = stmt
    else {
        return;
    };
    let [
        Expr::Subscript(ast::ExprSubscript {
            value: subscript_value,
            slice: key,
            ..
        }),
    ] = targets.as_slice()
    else {
        return;
    };
    // If any references to a target variable are after the loop,
    // then removing the loop would cause a NameError. Make sure none
    // of the variables are used outside the for loop.
    if has_post_loop_references(checker, target, for_stmt.end()) {
        return;
    }
    // The subscript key and the assigned value must both come straight from
    // the loop target (either the bare name, or elements of a tuple target).
    match target {
        Expr::Tuple(tuple) => {
            if !tuple
                .iter()
                .any(|element| ComparableExpr::from(key) == ComparableExpr::from(element))
            {
                return;
            }
            if !tuple
                .iter()
                .any(|element| ComparableExpr::from(value) == ComparableExpr::from(element))
            {
                return;
            }
        }
        Expr::Name(_) => {
            if ComparableExpr::from(key) != ComparableExpr::from(target) {
                return;
            }
            if ComparableExpr::from(value) != ComparableExpr::from(target) {
                return;
            }
        }
        _ => return,
    }
    // Exclude non-dictionary values.
    let Expr::Name(name) = &**subscript_value else {
        return;
    };
    let Some(binding) = checker
        .semantic()
        .only_binding(name)
        .map(|id| checker.semantic().binding(id))
    else {
        return;
    };
    if !is_dict(binding, checker.semantic()) {
        return;
    }
    // Avoid if the value is used in the conditional test, e.g.,
    //
    // ```python
    // for x in y:
    //     if x in filtered:
    //         filtered[x] = y
    // ```
    //
    // Converting this to a dictionary comprehension would raise a `NameError` as
    // `filtered` is not defined yet:
    //
    // ```python
    // filtered = {x: y for x in y if x in filtered}
    // ```
    if if_test.is_some_and(|test| {
        any_over_expr(test, &|expr| {
            ComparableExpr::from(expr) == ComparableExpr::from(name)
        })
    }) {
        return;
    }
    if is_fix_manual_dict_comprehension_enabled(checker.settings()) {
        let binding_stmt = binding.statement(checker.semantic());
        // The expression assigned to the result variable, if its binding is a
        // plain or annotated assignment.
        let binding_value = binding_stmt.and_then(|binding_stmt| match binding_stmt {
            ast::Stmt::AnnAssign(assign) => assign.value.as_deref(),
            ast::Stmt::Assign(assign) => Some(&assign.value),
            _ => None,
        });
        // If the variable is an empty dict literal, then we might be able to replace it with a full dict comprehension;
        // otherwise, it has to be replaced with a `dict.update`.
        let binding_is_empty_dict =
            binding_value.is_some_and(|binding_value| match binding_value {
                // value = {}
                Expr::Dict(dict_expr) => dict_expr.is_empty(),
                // value = dict()
                Expr::Call(call) => {
                    checker
                        .semantic()
                        .resolve_builtin_symbol(&call.func)
                        .is_some_and(|name| name == "dict")
                        && call.arguments.is_empty()
                }
                _ => false,
            });
        // The binding and the for loop must be siblings (same parent statement)
        // for the comprehension rewrite to be able to merge them.
        let assignment_in_same_statement = binding.source.is_some_and(|binding_source| {
            let for_loop_parent = checker.semantic().current_statement_parent_id();
            let binding_parent = checker.semantic().parent_statement_id(binding_source);
            for_loop_parent == binding_parent
        });
        // If the binding is not a single name expression, it could be replaced with a dict comprehension,
        // but not necessarily, so this needs to be manually fixed. This does not apply when using an update.
        let binding_has_one_target = binding_stmt.is_some_and(|binding_stmt| match binding_stmt {
            ast::Stmt::AnnAssign(_) => true,
            ast::Stmt::Assign(assign) => assign.targets.len() == 1,
            _ => false,
        });
        // If the binding gets used in between the assignment and the for loop, a comprehension is no longer safe.
        // If the binding is after the for loop, then it can't be fixed, and this check would panic,
        // so we check that they are in the same statement first.
        let binding_unused_between = assignment_in_same_statement
            && binding_stmt.is_some_and(|binding_stmt| {
                let from_assign_to_loop = TextRange::new(binding_stmt.end(), for_stmt.start());
                // Test if there's any reference to the result dictionary between its definition and the for loop.
                // If there's at least one, then it's been accessed in the middle somewhere, so it's not safe to change into a comprehension.
                !binding
                    .references()
                    .map(|ref_id| checker.semantic().reference(ref_id).range())
                    .any(|text_range| from_assign_to_loop.contains_range(text_range))
            });
        // A dict update works in every context, while a dict comprehension only works when all the criteria are true.
        let fix_type = if binding_is_empty_dict
            && assignment_in_same_statement
            && binding_has_one_target
            && binding_unused_between
        {
            DictComprehensionType::Comprehension
        } else {
            DictComprehensionType::Update
        };
        let mut diagnostic = checker.report_diagnostic(
            ManualDictComprehension {
                fix_type,
                is_async: for_stmt.is_async,
            },
            *range,
        );
        diagnostic.try_set_optional_fix(|| {
            Ok(convert_to_dict_comprehension(
                fix_type,
                binding,
                for_stmt,
                if_test.map(std::convert::AsRef::as_ref),
                key.as_ref(),
                value.as_ref(),
                checker,
            ))
        });
    } else {
        // Outside preview mode, report the diagnostic without attempting a fix.
        checker.report_diagnostic(
            ManualDictComprehension {
                fix_type: DictComprehensionType::Comprehension,
                is_async: for_stmt.is_async,
            },
            *range,
        );
    }
}
/// Build the `PERF403` fix: either wrap the generated comprehension in
/// `variable.update(...)`, or delete the (empty-dict) binding statement and
/// assign the comprehension directly. Returns `None` when no fix can be built
/// (e.g., a subscript key tuple that isn't a 1-tuple, or a missing binding
/// statement).
fn convert_to_dict_comprehension(
    fix_type: DictComprehensionType,
    binding: &Binding,
    for_stmt: &ast::StmtFor,
    if_test: Option<&ast::Expr>,
    key: &Expr,
    value: &Expr,
    checker: &Checker,
) -> Option<Fix> {
    let locator = checker.locator();
    let if_str = match if_test {
        Some(test) => {
            // If the test is an assignment expression,
            // we must parenthesize it when it appears
            // inside the comprehension to avoid a syntax error.
            //
            // Notice that we do not need `any_over_expr` here,
            // since if the assignment expression appears
            // internally (e.g. as an operand in a boolean
            // operation) then it will already be parenthesized.
            match test {
                Expr::Named(_) | Expr::If(_) | Expr::Lambda(_) => {
                    format!(" if ({})", locator.slice(test.range()))
                }
                _ => format!(" if {}", locator.slice(test.range())),
            }
        }
        None => String::new(),
    };
    // If the loop target was an implicit tuple, add parentheses around it:
    // ```python
    // for i in a, b:
    //     ...
    // ```
    // becomes
    // `{... for i in (a, b)}`
    let iter_str = if let Expr::Tuple(ast::ExprTuple {
        parenthesized: false,
        ..
    }) = &*for_stmt.iter
    {
        format!("({})", locator.slice(for_stmt.iter.range()))
    } else {
        locator.slice(for_stmt.iter.range()).to_string()
    };
    let target_str = locator.slice(for_stmt.target.range());
    let for_type = if for_stmt.is_async {
        "async for"
    } else {
        "for"
    };
    // Handles the case where `key` has a trailing comma, e.g., `dict[x,] = y`.
    let key_str = if let Expr::Tuple(ast::ExprTuple {
        elts,
        parenthesized,
        ..
    }) = key
    {
        if elts.len() != 1 {
            return None;
        }
        if *parenthesized {
            locator.slice(key).to_string()
        } else {
            format!("({})", locator.slice(key))
        }
    } else {
        locator.slice(key).to_string()
    };
    // If the value is a tuple without parentheses, add them.
    let value_str = if let Expr::Tuple(ast::ExprTuple {
        parenthesized: false,
        ..
    }) = value
    {
        format!("({})", locator.slice(value))
    } else {
        locator.slice(value).to_string()
    };
    let comprehension_str =
        format!("{{{key_str}: {value_str} {for_type} {target_str} in {iter_str}{if_str}}}");
    // Comments inside the loop (outside the preserved key/value/iter ranges)
    // would otherwise be deleted; collect them so they can be re-inserted.
    let for_loop_inline_comments = comment_strings_in_range(
        checker,
        for_stmt.range,
        &[key.range(), value.range(), for_stmt.iter.range()],
    );
    let newline = checker.stylist().line_ending().as_str();
    // Leading whitespace of the line the `for` starts on.
    let indent = locator.slice(TextRange::new(
        locator.line_start(for_stmt.range.start()),
        for_stmt.range.start(),
    ));
    let variable_name = locator.slice(binding);
    match fix_type {
        DictComprehensionType::Update => {
            let indentation = if for_loop_inline_comments.is_empty() {
                String::new()
            } else {
                format!("{newline}{indent}")
            };
            let comprehension_body = format!("{variable_name}.update({comprehension_str})");
            let text_to_replace = format!(
                "{}{indentation}{comprehension_body}",
                for_loop_inline_comments.join(&indentation)
            );
            Some(Fix::unsafe_edit(Edit::range_replacement(
                text_to_replace,
                for_stmt.range,
            )))
        }
        DictComprehensionType::Comprehension => {
            let binding_stmt = binding.statement(checker.semantic());
            debug_assert!(
                binding_stmt.is_some(),
                "must be passed a binding with a statement"
            );
            let binding_stmt = binding_stmt?;
            let binding_stmt_range = binding_stmt.range();
            // Preserve any annotation from an `x: dict[...] = {}` binding.
            let annotations = match binding_stmt.as_ann_assign_stmt() {
                Some(assign) => format!(": {}", locator.slice(assign.annotation.range())),
                None => String::new(),
            };
            // If there are multiple binding statements in one line, we don't want to accidentally delete them.
            // Instead, we just delete the binding statement and leave any comments where they are.
            let (binding_stmt_deletion_range, binding_is_multiple_stmts) =
                statement_deletion_range(checker, binding_stmt_range);
            let comments_to_move = if binding_is_multiple_stmts {
                for_loop_inline_comments
            } else {
                let mut new_comments =
                    comment_strings_in_range(checker, binding_stmt_deletion_range, &[]);
                new_comments.extend(for_loop_inline_comments);
                new_comments
            };
            let indentation = if comments_to_move.is_empty() {
                String::new()
            } else {
                format!("{newline}{indent}")
            };
            let leading_comments = format!("{}{indentation}", comments_to_move.join(&indentation));
            let comprehension_body =
                format!("{leading_comments}{variable_name}{annotations} = {comprehension_str}");
            Some(Fix::unsafe_edits(
                Edit::range_deletion(binding_stmt_deletion_range),
                [Edit::range_replacement(comprehension_body, for_stmt.range)],
            ))
        }
    }
}
/// The rewrite strategy for a flagged loop: merge into an existing dictionary
/// via `dict.update`, or replace the empty-dict binding with a comprehension.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum DictComprehensionType {
    Update,
    Comprehension,
}
/// Returns `true` if any name bound by `expr` (a loop target, possibly a
/// nested tuple) is referenced after `loop_end` — or if a target's binding
/// can't be resolved, in which case we err on the safe side.
fn has_post_loop_references(checker: &Checker, expr: &Expr, loop_end: TextSize) -> bool {
    any_over_expr(expr, &|inner| {
        if let Expr::Tuple(ast::ExprTuple { elts, .. }) = inner {
            // Recurse into each tuple element.
            return elts
                .iter()
                .any(|element| has_post_loop_references(checker, element, loop_end));
        }
        let Expr::Name(name) = inner else {
            return false;
        };
        let target_binding = checker
            .semantic()
            .bindings
            .iter()
            .find(|binding| binding.range == name.range());
        match target_binding {
            // No binding in the for statement => err on the safe side and make
            // the checker skip, e.g., `for foo[0] in bar:` or `for foo.bar in baz:`.
            None => true,
            Some(binding) => binding
                .references()
                .map(|reference| checker.semantic().reference(reference))
                .any(|other_reference| other_reference.start() > loop_end),
        }
    })
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/perflint/rules/try_except_in_loop.rs | crates/ruff_linter/src/rules/perflint/rules/try_except_in_loop.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::statement_visitor::{StatementVisitor, walk_stmt};
use ruff_python_ast::{self as ast, PythonVersion, Stmt};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for uses of except handling via `try`-`except` within `for` and
/// `while` loops.
///
/// ## Why is this bad?
/// Exception handling via `try`-`except` blocks incurs some performance
/// overhead, regardless of whether an exception is raised.
///
/// To optimize your code, two techniques are possible:
/// 1. Refactor your code to put the entire loop into the `try`-`except` block,
///    rather than wrapping each iteration in a separate `try`-`except` block.
/// 2. Use "Look Before You Leap" idioms that attempt to avoid exceptions
///    being raised in the first place, avoiding the need to use `try`-`except`
///    blocks in the first place.
///
/// This rule is only enforced for Python versions prior to 3.11, which
/// introduced "zero-cost" exception handling. However, note that even on
/// Python 3.11 and newer, refactoring your code to avoid exception handling in
/// tight loops can provide a significant speedup in some cases, as zero-cost
/// exception handling is only zero-cost in the "happy path" where no exception
/// is raised in the `try`-`except` block.
///
/// As with all `perflint` rules, this is only intended as a
/// micro-optimization. In many cases, it will have a negligible impact on
/// performance.
///
/// ## Example
/// ```python
/// string_numbers: list[str] = ["1", "2", "three", "4", "5"]
///
/// # `try`/`except` that could be moved out of the loop:
/// int_numbers: list[int] = []
/// for num in string_numbers:
///     try:
///         int_numbers.append(int(num))
///     except ValueError as e:
///         print(f"Couldn't convert to integer: {e}")
///         break
///
/// # `try`/`except` used when "look before you leap" idioms could be used:
/// number_names: dict[int, str] = {1: "one", 3: "three", 4: "four"}
/// for number in range(5):
///     try:
///         name = number_names[number]
///     except KeyError:
///         continue
///     else:
///         print(f"The name of {number} is {name}")
/// ```
///
/// Use instead:
/// ```python
/// string_numbers: list[str] = ["1", "2", "three", "4", "5"]
///
/// int_numbers: list[int] = []
/// try:
///     for num in string_numbers:
///         int_numbers.append(int(num))
/// except ValueError as e:
///     print(f"Couldn't convert to integer: {e}")
///
/// number_names: dict[int, str] = {1: "one", 3: "three", 4: "four"}
/// for number in range(5):
///     name = number_names.get(number)
///     if name is not None:
///         print(f"The name of {number} is {name}")
/// ```
///
/// ## Options
/// - `target-version`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.276")]
pub(crate) struct TryExceptInLoop;

// Message-only violation: no data is carried and no fix is attached.
impl Violation for TryExceptInLoop {
    #[derive_message_formats]
    fn message(&self) -> String {
        "`try`-`except` within a loop incurs performance overhead".to_string()
    }
}
/// PERF203
pub(crate) fn try_except_in_loop(checker: &Checker, body: &[Stmt]) {
    // Python 3.11 introduced "zero-cost" exception handling; this rule only
    // applies to earlier target versions.
    if checker.target_version() >= PythonVersion::PY311 {
        return;
    }
    // The loop body must consist of a single `try` statement.
    if let [Stmt::Try(ast::StmtTry { handlers, body, .. })] = body {
        if let Some(handler) = handlers.first() {
            // Avoid flagging `try`-`except` blocks that contain `break` or
            // `continue`, which rely on the exception handling mechanism.
            if !has_break_or_continue(body) {
                checker.report_diagnostic(TryExceptInLoop, handler.range());
            }
        }
    }
}
/// Returns `true` if a `break` or `continue` statement is present in `body`.
fn has_break_or_continue(body: &[Stmt]) -> bool {
    let mut finder = LoopControlFlowVisitor::default();
    finder.visit_body(body);
    finder.has_break_or_continue
}
/// Visitor that scans a statement body for `break`/`continue`, without
/// descending into nested function or class definitions.
#[derive(Debug, Default)]
struct LoopControlFlowVisitor {
    has_break_or_continue: bool,
}
impl StatementVisitor<'_> for LoopControlFlowVisitor {
    fn visit_stmt(&mut self, stmt: &Stmt) {
        if matches!(stmt, Stmt::Break(_) | Stmt::Continue(_)) {
            self.has_break_or_continue = true;
        } else if !matches!(stmt, Stmt::FunctionDef(_) | Stmt::ClassDef(_)) {
            // Recurse into every other compound statement; function and class
            // bodies are deliberately skipped (not this loop's control flow).
            walk_stmt(self, stmt);
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/perflint/rules/mod.rs | crates/ruff_linter/src/rules/perflint/rules/mod.rs | pub(crate) use incorrect_dict_iterator::*;
pub(crate) use manual_dict_comprehension::*;
pub(crate) use manual_list_comprehension::*;
pub(crate) use manual_list_copy::*;
pub(crate) use try_except_in_loop::*;
pub(crate) use unnecessary_list_cast::*;
mod incorrect_dict_iterator;
mod manual_dict_comprehension;
mod manual_list_comprehension;
mod manual_list_copy;
mod try_except_in_loop;
mod unnecessary_list_cast;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/perflint/rules/manual_list_comprehension.rs | crates/ruff_linter/src/rules/perflint/rules/manual_list_comprehension.rs | use ruff_python_ast::{self as ast, Arguments, Expr};
use crate::{Edit, Fix, FixAvailability, Violation};
use crate::{
checkers::ast::Checker, preview::is_fix_manual_list_comprehension_enabled,
rules::perflint::helpers::statement_deletion_range,
};
use anyhow::{Result, anyhow};
use crate::rules::perflint::helpers::comment_strings_in_range;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::any_over_expr;
use ruff_python_semantic::{Binding, analyze::typing::is_list};
use ruff_source_file::LineRanges;
use ruff_text_size::{Ranged, TextRange};
/// ## What it does
/// Checks for `for` loops that can be replaced by a list comprehension.
///
/// ## Why is this bad?
/// When creating a transformed list from an existing list using a for-loop,
/// prefer a list comprehension. List comprehensions are more readable and
/// more performant.
///
/// Using the below as an example, the list comprehension is ~10% faster on
/// Python 3.11, and ~25% faster on Python 3.10.
///
/// Note that, as with all `perflint` rules, this is only intended as a
/// micro-optimization, and will have a negligible impact on performance in
/// most cases.
///
/// ## Example
/// ```python
/// original = list(range(10000))
/// filtered = []
/// for i in original:
/// if i % 2:
/// filtered.append(i)
/// ```
///
/// Use instead:
/// ```python
/// original = list(range(10000))
/// filtered = [x for x in original if x % 2]
/// ```
///
/// If you're appending to an existing list, use the `extend` method instead:
/// ```python
/// original = list(range(10000))
/// filtered.extend(x for x in original if x % 2)
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.276")]
pub(crate) struct ManualListComprehension {
    /// Whether the offending loop is an `async for` loop.
    is_async: bool,
    /// The planned rewrite (list comprehension vs. `list.extend`); `None` when unknown.
    comprehension_type: Option<ComprehensionType>,
}
impl Violation for ManualListComprehension {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        // Phrase the suggestion based on the planned fix and whether the loop
        // is an `async for` (which requires an async comprehension).
        let suggestion = match (&self.comprehension_type, self.is_async) {
            (Some(ComprehensionType::Extend), true) => {
                "`list.extend` with an async comprehension"
            }
            (Some(ComprehensionType::Extend), false) => "`list.extend`",
            (Some(ComprehensionType::ListComprehension) | None, true) => {
                "an async list comprehension"
            }
            (Some(ComprehensionType::ListComprehension) | None, false) => "a list comprehension",
        };
        format!("Use {suggestion} to create a transformed list")
    }
    fn fix_title(&self) -> Option<String> {
        match self.comprehension_type? {
            ComprehensionType::Extend => Some("Replace for loop with list.extend".to_string()),
            ComprehensionType::ListComprehension => {
                Some("Replace for loop with list comprehension".to_string())
            }
        }
    }
}
/// PERF401
pub(crate) fn manual_list_comprehension(checker: &Checker, for_stmt: &ast::StmtFor) {
    // The loop target must be a simple name for the rewrite to be expressible.
    let Expr::Name(ast::ExprName {
        id: for_stmt_target_id,
        ..
    }) = &*for_stmt.target
    else {
        return;
    };
    // The loop body must be a single `append` call, optionally guarded by a
    // single `if` (which becomes the comprehension's filter).
    let (stmt, if_test) = match &*for_stmt.body {
        // ```python
        // for x in y:
        //     if z:
        //         filtered.append(x)
        // ```
        [
            ast::Stmt::If(ast::StmtIf {
                body,
                elif_else_clauses,
                test,
                ..
            }),
        ] => {
            if !elif_else_clauses.is_empty() {
                return;
            }
            let [stmt] = body.as_slice() else {
                return;
            };
            (stmt, Some(test))
        }
        // ```python
        // for x in y:
        //     filtered.append(f(x))
        // ```
        [stmt] => (stmt, None),
        _ => return,
    };
    // The sole statement must be an expression statement wrapping a call.
    let ast::Stmt::Expr(ast::StmtExpr { value, .. }) = stmt else {
        return;
    };
    let Expr::Call(ast::ExprCall {
        func,
        arguments:
            Arguments {
                args,
                keywords,
                range: _,
                node_index: _,
            },
        range,
        node_index: _,
    }) = value.as_ref()
    else {
        return;
    };
    // `list.append` takes exactly one positional argument.
    if !keywords.is_empty() {
        return;
    }
    let [arg] = &**args else {
        return;
    };
    let Expr::Attribute(ast::ExprAttribute { attr, value, .. }) = &**func else {
        return;
    };
    if attr.as_str() != "append" {
        return;
    }
    // Avoid non-list values.
    let Some(list_name) = value.as_name_expr() else {
        return;
    };
    // Ignore direct list copies (e.g., `for x in y: filtered.append(x)`), unless it's async, which
    // `manual-list-copy` doesn't cover.
    if !for_stmt.is_async {
        if if_test.is_none() {
            if arg
                .as_name_expr()
                .is_some_and(|arg| arg.id == *for_stmt_target_id)
            {
                return;
            }
        }
    }
    // Avoid, e.g., `for x in y: filtered.append(filtered[-1] * 2)`.
    if any_over_expr(arg, &|expr| {
        expr.as_name_expr()
            .is_some_and(|expr| expr.id == list_name.id)
    }) {
        return;
    }
    // The list must have exactly one binding for the analysis below to be sound.
    let Some(list_binding) = checker
        .semantic()
        .only_binding(list_name)
        .map(|id| checker.semantic().binding(id))
    else {
        return;
    };
    if !is_list(list_binding, checker.semantic()) {
        return;
    }
    // Avoid if the list is used in the conditional test, e.g.,
    //
    // ```python
    // for x in y:
    //     if x in filtered:
    //         filtered.append(x)
    // ```
    //
    // Converting this to a list comprehension would raise a `NameError` as
    // `filtered` is not defined yet:
    //
    // ```python
    // filtered = [x for x in y if x in filtered]
    // ```
    if if_test.is_some_and(|test| {
        any_over_expr(test, &|expr| {
            expr.as_name_expr()
                .is_some_and(|expr| expr.id == list_name.id)
        })
    }) {
        return;
    }
    // Avoid if the for-loop target is used outside the for loop, e.g.,
    //
    // ```python
    // for x in y:
    //     filtered.append(x)
    // print(x)
    // ```
    //
    // If this were a comprehension, x would no longer have the correct scope:
    //
    // ```python
    // filtered = [x for x in y]
    // print(x)
    // ```
    // The target is a name (checked above), so a binding for it must exist.
    let target_binding = checker
        .semantic()
        .bindings
        .iter()
        .find(|binding| for_stmt.target.range() == binding.range)
        .unwrap();
    // If the target variable is global (e.g., `global INDEX`) or nonlocal (e.g., `nonlocal INDEX`),
    // then it is intended to be used elsewhere outside the for loop.
    if target_binding.is_global() || target_binding.is_nonlocal() {
        return;
    }
    // If any references to the loop target variable are after the loop,
    // then converting it into a comprehension would cause a NameError
    if target_binding
        .references()
        .map(|reference| checker.semantic().reference(reference))
        .any(|other_reference| for_stmt.end() < other_reference.start())
    {
        return;
    }
    let list_binding_stmt = list_binding.statement(checker.semantic());
    let list_binding_value = list_binding_stmt.and_then(|binding_stmt| match binding_stmt {
        ast::Stmt::AnnAssign(assign) => assign.value.as_deref(),
        ast::Stmt::Assign(assign) => Some(&assign.value),
        _ => None,
    });
    // If the variable is an empty list literal, then we might be able to replace it with a full list comprehension
    // otherwise, it has to be replaced with a `list.extend`.
    let binding_is_empty_list =
        list_binding_value.is_some_and(|binding_value| match binding_value {
            // `value = []`
            Expr::List(list_expr) => list_expr.is_empty(),
            // `value = list()`
            // This might be linted against, but turning it into a list comprehension will also remove it
            Expr::Call(call) => {
                checker
                    .semantic()
                    .resolve_builtin_symbol(&call.func)
                    .is_some_and(|name| name == "list")
                    && call.arguments.is_empty()
            }
            _ => false,
        });
    // If the for loop does not have the same parent element as the binding, then it cannot always be
    // deleted and replaced with a list comprehension. This does not apply when using `extend`.
    let assignment_in_same_statement = {
        list_binding.source.is_some_and(|binding_source| {
            let for_loop_parent = checker.semantic().current_statement_parent_id();
            let binding_parent = checker.semantic().parent_statement_id(binding_source);
            for_loop_parent == binding_parent
        })
    };
    // If the binding is not a single name expression, it could be replaced with a list comprehension,
    // but not necessarily, so this needs to be manually fixed. This does not apply when using an extend.
    let binding_has_one_target = list_binding_stmt.is_some_and(|binding_stmt| match binding_stmt {
        ast::Stmt::AnnAssign(_) => true,
        ast::Stmt::Assign(assign) => assign.targets.len() == 1,
        _ => false,
    });
    // If the binding gets used in between the assignment and the for loop, a list comprehension is no longer safe
    // If the binding is after the for loop, then it can't be fixed, and this check would panic,
    // so we check that they are in the same statement first
    let binding_unused_between = assignment_in_same_statement
        && list_binding_stmt.is_some_and(|binding_stmt| {
            let from_assign_to_loop = TextRange::new(binding_stmt.end(), for_stmt.start());
            // Test if there's any reference to the list symbol between its definition and the for loop.
            // if there's at least one, then it's been accessed in the middle somewhere, so it's not safe to change into a list comprehension
            !list_binding
                .references()
                .map(|ref_id| checker.semantic().reference(ref_id).range())
                .any(|text_range| from_assign_to_loop.contains_range(text_range))
        });
    // A list extend works in every context, while a list comprehension only works when all the criteria are true
    let comprehension_type = if binding_is_empty_list
        && assignment_in_same_statement
        && binding_has_one_target
        && binding_unused_between
    {
        ComprehensionType::ListComprehension
    } else {
        ComprehensionType::Extend
    };
    let mut diagnostic = checker.report_diagnostic(
        ManualListComprehension {
            is_async: for_stmt.is_async,
            comprehension_type: Some(comprehension_type),
        },
        *range,
    );
    // TODO: once this fix is stabilized, change the rule to always fixable
    if is_fix_manual_list_comprehension_enabled(checker.settings()) {
        diagnostic.try_set_fix(|| {
            convert_to_list_extend(
                comprehension_type,
                list_binding,
                for_stmt,
                if_test.map(std::convert::AsRef::as_ref),
                arg,
                checker,
            )
        });
    }
}
/// Build the fix that replaces the `for` loop with either a `list.extend`
/// call or a full list comprehension, preserving inline comments.
///
/// Returns an error when a `ListComprehension` fix is requested but the list
/// binding has no assignment statement to delete.
fn convert_to_list_extend(
    fix_type: ComprehensionType,
    binding: &Binding,
    for_stmt: &ast::StmtFor,
    if_test: Option<&Expr>,
    to_append: &Expr,
    checker: &Checker,
) -> Result<Fix> {
    let semantic = checker.semantic();
    let locator = checker.locator();
    let if_str = match if_test {
        Some(test) => {
            // If the test is an assignment expression,
            // we must parenthesize it when it appears
            // inside the comprehension to avoid a syntax error.
            //
            // Notice that we do not need `any_over_expr` here,
            // since if the assignment expression appears
            // internally (e.g. as an operand in a boolean
            // operation) then it will already be parenthesized.
            match test {
                Expr::Named(_) | Expr::If(_) | Expr::Lambda(_) => {
                    format!(" if ({})", locator.slice(test.range()))
                }
                _ => format!(" if {}", locator.slice(test.range())),
            }
        }
        None => String::new(),
    };
    // if the loop target was an implicit tuple, add parentheses around it
    // ```python
    // for i in a, b:
    //     ...
    // ```
    // becomes
    // [... for i in (a, b)]
    let for_iter_str = if for_stmt
        .iter
        .as_ref()
        .as_tuple_expr()
        .is_some_and(|expr| !expr.parenthesized)
    {
        format!("({})", locator.slice(for_stmt.iter.range()))
    } else {
        locator.slice(for_stmt.iter.range()).to_string()
    };
    let for_type = if for_stmt.is_async {
        "async for"
    } else {
        "for"
    };
    let target_str = locator.slice(for_stmt.target.range());
    let elt_str = locator.slice(to_append);
    // Parenthesize an unparenthesized generator element to keep the result valid.
    let generator_str = if to_append
        .as_generator_expr()
        .is_some_and(|generator| !generator.parenthesized)
    {
        format!("({elt_str}) {for_type} {target_str} in {for_iter_str}{if_str}")
    } else {
        format!("{elt_str} {for_type} {target_str} in {for_iter_str}{if_str}")
    };
    let variable_name = locator.slice(binding);
    // Comments inside the loop (but outside the element/iterable) must be
    // re-emitted before the rewritten statement so they aren't lost.
    let for_loop_inline_comments = comment_strings_in_range(
        checker,
        for_stmt.range,
        &[to_append.range(), for_stmt.iter.range()],
    );
    let newline = checker.stylist().line_ending().as_str();
    let indent = locator.slice(TextRange::new(
        locator.line_start(for_stmt.range.start()),
        for_stmt.range.start(),
    ));
    match fix_type {
        ComprehensionType::Extend => {
            let generator_str = if for_stmt.is_async {
                // generators do not implement __iter__, so `async for` requires the generator to be a list
                format!("[{generator_str}]")
            } else {
                generator_str
            };
            let comprehension_body = format!("{variable_name}.extend({generator_str})");
            let indentation = if for_loop_inline_comments.is_empty() {
                String::new()
            } else {
                format!("{newline}{indent}")
            };
            let text_to_replace = format!(
                "{}{indentation}{comprehension_body}",
                for_loop_inline_comments.join(&indentation)
            );
            Ok(Fix::unsafe_edit(Edit::range_replacement(
                text_to_replace,
                for_stmt.range,
            )))
        }
        ComprehensionType::ListComprehension => {
            let binding_stmt = binding.statement(semantic);
            let binding_stmt_range = binding_stmt
                .and_then(|stmt| match stmt {
                    ast::Stmt::AnnAssign(assign) => Some(assign.range),
                    ast::Stmt::Assign(assign) => Some(assign.range),
                    _ => None,
                })
                .ok_or(anyhow!(
                    "Binding must have a statement to convert into a list comprehension"
                ))?;
            // If there are multiple binding statements in one line, we don't want to accidentally delete them
            // Instead, we just delete the binding statement and leave any comments where they are
            let (binding_stmt_deletion_range, binding_is_multiple_stmts) =
                statement_deletion_range(checker, binding_stmt_range);
            // Preserve any annotation from an `x: list[int] = []` binding.
            let annotations = match binding_stmt.and_then(|stmt| stmt.as_ann_assign_stmt()) {
                Some(assign) => format!(": {}", locator.slice(assign.annotation.range())),
                None => String::new(),
            };
            let comments_to_move = if binding_is_multiple_stmts {
                for_loop_inline_comments
            } else {
                let mut new_comments =
                    comment_strings_in_range(checker, binding_stmt_deletion_range, &[]);
                new_comments.extend(for_loop_inline_comments);
                new_comments
            };
            let indentation = if comments_to_move.is_empty() {
                String::new()
            } else {
                format!("{newline}{indent}")
            };
            let leading_comments = format!("{}{indentation}", comments_to_move.join(&indentation));
            let comprehension_body =
                format!("{leading_comments}{variable_name}{annotations} = [{generator_str}]");
            Ok(Fix::unsafe_edits(
                Edit::range_deletion(binding_stmt_deletion_range),
                [Edit::range_replacement(comprehension_body, for_stmt.range)],
            ))
        }
    }
}
/// The rewrite strategy chosen for a flagged loop.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ComprehensionType {
    /// Rewrite as `name.extend(...)`; applicable in every context.
    Extend,
    /// Rewrite the binding itself as `name = [...]`; only chosen when the
    /// binding is a fresh, empty, single-target list that is unused before the loop.
    ListComprehension,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/perflint/rules/unnecessary_list_cast.rs | crates/ruff_linter/src/rules/perflint/rules/unnecessary_list_cast.rs | use ruff_diagnostics::Applicability;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::statement_visitor::{StatementVisitor, walk_stmt};
use ruff_python_ast::{self as ast, Arguments, Expr, Stmt};
use ruff_python_semantic::analyze::typing::find_assigned_value;
use ruff_text_size::TextRange;
use crate::checkers::ast::Checker;
use crate::fix::edits;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for explicit casts to `list` on for-loop iterables.
///
/// ## Why is this bad?
/// Using a `list()` call to eagerly iterate over an already-iterable type
/// (like a tuple, list, or set) is inefficient, as it forces Python to create
/// a new list unnecessarily.
///
/// Removing the `list()` call will not change the behavior of the code, but
/// may improve performance.
///
/// Note that, as with all `perflint` rules, this is only intended as a
/// micro-optimization, and will have a negligible impact on performance in
/// most cases.
///
/// ## Example
/// ```python
/// items = (1, 2, 3)
/// for i in list(items):
/// print(i)
/// ```
///
/// Use instead:
/// ```python
/// items = (1, 2, 3)
/// for i in items:
/// print(i)
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe if there's comments in the
/// `list()` call, as comments may be removed.
///
/// For example, the fix would be marked as unsafe in the following case:
/// ```python
/// items = (1, 2, 3)
/// for i in list( # comment
/// items
/// ):
/// print(i)
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.276")]
pub(crate) struct UnnecessaryListCast;
impl AlwaysFixableViolation for UnnecessaryListCast {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Do not cast an iterable to `list` before iterating over it".to_string()
    }
    fn fix_title(&self) -> String {
        // The fix always removes the `list()` wrapper (see `remove_cast`).
        "Remove `list()` cast".to_string()
    }
}
/// PERF101
pub(crate) fn unnecessary_list_cast(checker: &Checker, iter: &Expr, body: &[Stmt]) {
    // The iterable must be a call with a single positional argument.
    let Expr::Call(ast::ExprCall {
        func,
        arguments:
            Arguments {
                args,
                keywords: _,
                range: _,
                node_index: _,
            },
        range: list_range,
        node_index: _,
    }) = iter
    else {
        return;
    };
    let [arg] = &**args else {
        return;
    };
    // ...and specifically a call to the `list` builtin.
    if !checker.semantic().match_builtin_expr(func, "list") {
        return;
    }
    match arg {
        // Literal tuples/lists/sets are always safe to iterate directly.
        Expr::Tuple(ast::ExprTuple {
            range: iterable_range,
            ..
        })
        | Expr::List(ast::ExprList {
            range: iterable_range,
            ..
        })
        | Expr::Set(ast::ExprSet {
            range: iterable_range,
            ..
        }) => {
            let mut diagnostic = checker.report_diagnostic(UnnecessaryListCast, *list_range);
            diagnostic.set_fix(remove_cast(checker, *list_range, *iterable_range));
        }
        // For a name, only flag if it was assigned a tuple/list/set literal
        // and the loop body doesn't mutate it.
        Expr::Name(ast::ExprName {
            id,
            range: iterable_range,
            ..
        }) => {
            let Some(value) = find_assigned_value(id, checker.semantic()) else {
                return;
            };
            if matches!(value, Expr::Tuple(_) | Expr::List(_) | Expr::Set(_)) {
                // If the variable is being modified to, don't suggest removing the cast:
                //
                // ```python
                // items = ["foo", "bar"]
                // for item in list(items):
                //     items.append("baz")
                // ```
                //
                // Here, removing the `list()` cast would change the behavior of the code.
                let mut visitor = MutationVisitor::new(id);
                visitor.visit_body(body);
                if visitor.is_mutated {
                    return;
                }
                let mut diagnostic = checker.report_diagnostic(UnnecessaryListCast, *list_range);
                diagnostic.set_fix(remove_cast(checker, *list_range, *iterable_range));
            }
        }
        _ => {}
    }
}
/// Generate a [`Fix`] to remove a `list` cast from an expression.
fn remove_cast(checker: &Checker, list_range: TextRange, iterable_range: TextRange) -> Fix {
    // If any comment overlaps the `list(...)` call, removing the call would
    // drop that comment, so the fix is only offered as unsafe.
    let applicability = if checker.comment_ranges().intersects(list_range) {
        Applicability::Unsafe
    } else {
        Applicability::Safe
    };
    // Replace the whole `list(...)` call with its (padded) inner iterable.
    let content = edits::pad(
        checker.locator().slice(iterable_range).to_string(),
        list_range,
        checker.locator(),
    );
    Fix::applicable_edit(Edit::range_replacement(content, list_range), applicability)
}
/// A [`StatementVisitor`] that (conservatively) identifies mutations to a variable.
#[derive(Default)]
pub(crate) struct MutationVisitor<'a> {
    /// The identifier of the variable being tracked.
    pub(crate) target: &'a str,
    /// Whether a (probable) mutation of `target` has been observed.
    pub(crate) is_mutated: bool,
}
impl<'a> MutationVisitor<'a> {
    /// Create a visitor that tracks mutations of the variable named `target`.
    pub(crate) fn new(target: &'a str) -> Self {
        Self {
            target,
            is_mutated: false,
        }
    }
}
impl<'a> StatementVisitor<'a> for MutationVisitor<'a> {
    fn visit_stmt(&mut self, stmt: &'a Stmt) {
        // Once a statement mutates the target, there's no need to descend
        // into it; the flag is sticky.
        if match_mutation(stmt, self.target) {
            self.is_mutated = true;
            return;
        }
        walk_stmt(self, stmt);
    }
}
/// Check if a statement is (probably) a modification to the list assigned to the given identifier.
///
/// For example, `foo.append(bar)` would return `true` if `id` is `foo`.
fn match_mutation(stmt: &Stmt, id: &str) -> bool {
match stmt {
// Ex) `foo.append(bar)`
Stmt::Expr(ast::StmtExpr { value, .. }) => {
let Some(ast::ExprCall { func, .. }) = value.as_call_expr() else {
return false;
};
let Some(ast::ExprAttribute { value, attr, .. }) = func.as_attribute_expr() else {
return false;
};
if !matches!(
attr.as_str(),
"append" | "insert" | "extend" | "remove" | "pop" | "clear" | "reverse" | "sort"
) {
return false;
}
let Some(ast::ExprName { id: target_id, .. }) = value.as_name_expr() else {
return false;
};
target_id == id
}
// Ex) `foo[0] = bar`
Stmt::Assign(ast::StmtAssign { targets, .. }) => targets.iter().any(|target| {
if let Some(ast::ExprSubscript { value: target, .. }) = target.as_subscript_expr() {
if let Some(ast::ExprName { id: target_id, .. }) = target.as_name_expr() {
return target_id == id;
}
}
false
}),
// Ex) `foo += bar`
Stmt::AugAssign(ast::StmtAugAssign { target, .. }) => {
if let Some(ast::ExprName { id: target_id, .. }) = target.as_name_expr() {
target_id == id
} else {
false
}
}
// Ex) `foo[0]: int = bar`
Stmt::AnnAssign(ast::StmtAnnAssign { target, .. }) => {
if let Some(ast::ExprSubscript { value: target, .. }) = target.as_subscript_expr() {
if let Some(ast::ExprName { id: target_id, .. }) = target.as_name_expr() {
return target_id == id;
}
}
false
}
// Ex) `del foo[0]`
Stmt::Delete(ast::StmtDelete { targets, .. }) => targets.iter().any(|target| {
if let Some(ast::ExprSubscript { value: target, .. }) = target.as_subscript_expr() {
if let Some(ast::ExprName { id: target_id, .. }) = target.as_name_expr() {
return target_id == id;
}
}
false
}),
_ => false,
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/numpy/helpers.rs | crates/ruff_linter/src/rules/numpy/helpers.rs | use ruff_python_ast::Expr;
use ruff_python_ast::name::QualifiedName;
use ruff_python_ast::statement_visitor::StatementVisitor;
use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::visitor::{walk_expr, walk_stmt};
use ruff_python_ast::{Alias, Stmt, StmtImportFrom, statement_visitor};
use ruff_python_semantic::SemanticModel;
/// AST visitor that searches an AST tree for [`ast::StmtImportFrom`] nodes
/// that match a certain [`QualifiedName`].
pub(crate) struct ImportSearcher<'a> {
    /// The `from` module to match against.
    module: &'a str,
    /// The imported member name to match against.
    name: &'a str,
    /// Whether a matching `from <module> import <name>` has been found.
    pub found_import: bool,
}
impl<'a> ImportSearcher<'a> {
    /// Create a searcher for `from <module> import <name>`.
    pub(crate) fn new(module: &'a str, name: &'a str) -> Self {
        Self {
            module,
            name,
            found_import: false,
        }
    }
}
impl StatementVisitor<'_> for ImportSearcher<'_> {
    fn visit_stmt(&mut self, stmt: &Stmt) {
        // Stop descending once a match has already been found.
        if self.found_import {
            return;
        }
        if let Stmt::ImportFrom(StmtImportFrom { module, names, .. }) = stmt {
            if module.as_ref().is_some_and(|module| module == self.module)
                && names.iter().any(|Alias { name, .. }| name == self.name)
            {
                self.found_import = true;
                return;
            }
        }
        statement_visitor::walk_stmt(self, stmt);
    }
    fn visit_body(&mut self, body: &[ruff_python_ast::Stmt]) {
        // Overridden so the walk can short-circuit as soon as the import is found.
        for stmt in body {
            self.visit_stmt(stmt);
            if self.found_import {
                return;
            }
        }
    }
}
/// AST visitor that searches an AST tree for [`ast::ExprAttribute`] nodes
/// that match a certain [`QualifiedName`].
pub(crate) struct AttributeSearcher<'a> {
    /// The fully-qualified name to look for.
    attribute_to_find: QualifiedName<'a>,
    /// Semantic model used to resolve expressions to qualified names.
    semantic: &'a SemanticModel<'a>,
    /// Whether a matching attribute access has been found.
    pub found_attribute: bool,
}
impl<'a> AttributeSearcher<'a> {
    /// Create a searcher for the given fully-qualified attribute.
    pub(crate) fn new(
        attribute_to_find: QualifiedName<'a>,
        semantic: &'a SemanticModel<'a>,
    ) -> Self {
        Self {
            attribute_to_find,
            semantic,
            found_attribute: false,
        }
    }
}
impl Visitor<'_> for AttributeSearcher<'_> {
    fn visit_expr(&mut self, expr: &'_ Expr) {
        // Stop descending once a match has already been found.
        if self.found_attribute {
            return;
        }
        if expr.is_attribute_expr()
            && self
                .semantic
                .resolve_qualified_name(expr)
                .is_some_and(|qualified_name| qualified_name == self.attribute_to_find)
        {
            self.found_attribute = true;
            return;
        }
        walk_expr(self, expr);
    }
    fn visit_stmt(&mut self, stmt: &ruff_python_ast::Stmt) {
        // Only recurse into statements while the attribute remains unfound.
        if !self.found_attribute {
            walk_stmt(self, stmt);
        }
    }
    fn visit_body(&mut self, body: &[ruff_python_ast::Stmt]) {
        // Overridden so the walk can short-circuit as soon as a match is found.
        for stmt in body {
            self.visit_stmt(stmt);
            if self.found_attribute {
                return;
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/numpy/mod.rs | crates/ruff_linter/src/rules/numpy/mod.rs | //! NumPy-specific rules.
pub(crate) mod helpers;
pub(crate) mod rules;
#[cfg(test)]
mod tests {
    use std::path::Path;
    use anyhow::Result;
    use test_case::test_case;
    use crate::registry::Rule;
    use crate::test::test_path;
    use crate::{assert_diagnostics, settings};
    #[test_case(Rule::NumpyDeprecatedTypeAlias, Path::new("NPY001.py"))]
    #[test_case(Rule::NumpyLegacyRandom, Path::new("NPY002.py"))]
    #[test_case(Rule::NumpyDeprecatedFunction, Path::new("NPY003.py"))]
    // The NPY201 tests are split into multiple files because they get fixed one by one and too many diagnostic exceed the max-iterations limit.
    #[test_case(Rule::Numpy2Deprecation, Path::new("NPY201.py"))]
    #[test_case(Rule::Numpy2Deprecation, Path::new("NPY201_2.py"))]
    #[test_case(Rule::Numpy2Deprecation, Path::new("NPY201_3.py"))]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        // Lint each fixture with only the rule under test enabled, then
        // compare the diagnostics against the stored snapshot.
        let snapshot = format!("{}_{}", rule_code.name(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("numpy").join(path).as_path(),
            &settings::LinterSettings::for_rule(rule_code),
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/numpy/rules/deprecated_type_alias.rs | crates/ruff_linter/src/rules/numpy/rules/deprecated_type_alias.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::Expr;
use ruff_python_semantic::Modules;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for deprecated NumPy type aliases.
///
/// ## Why is this bad?
/// NumPy's `np.int` has long been an alias of the builtin `int`; the same
/// is true of `np.float` and others. These aliases exist primarily
/// for historic reasons, and have been a cause of frequent confusion
/// for newcomers.
///
/// These aliases were deprecated in 1.20, and removed in 1.24.
/// Note, however, that `np.bool` and `np.long` were reintroduced in 2.0 with
/// different semantics, and are thus omitted from this rule.
///
/// ## Example
/// ```python
/// import numpy as np
///
/// np.int
/// ```
///
/// Use instead:
/// ```python
/// int
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.247")]
pub(crate) struct NumpyDeprecatedTypeAlias {
    /// The alias member name, rendered as `np.{type_name}` in the message.
    type_name: String,
}
impl Violation for NumpyDeprecatedTypeAlias {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        format!(
            "Type alias `np.{}` is deprecated, replace with builtin type",
            self.type_name
        )
    }
    fn fix_title(&self) -> Option<String> {
        Some(format!("Replace `np.{}` with builtin type", self.type_name))
    }
}
/// NPY001
pub(crate) fn deprecated_type_alias(checker: &Checker, expr: &Expr) {
    // Bail out early unless `numpy` was imported somewhere in this module.
    if !checker.semantic().seen_module(Modules::NUMPY) {
        return;
    }
    if let Some(type_name) =
        checker
            .semantic()
            .resolve_qualified_name(expr)
            .and_then(|qualified_name| {
                if matches!(
                    qualified_name.segments(),
                    [
                        "numpy",
                        "int" | "float" | "complex" | "object" | "str" | "unicode"
                    ]
                ) {
                    Some(qualified_name.segments()[1])
                } else {
                    None
                }
            })
    {
        let mut diagnostic = checker.report_diagnostic(
            NumpyDeprecatedTypeAlias {
                type_name: type_name.to_string(),
            },
            expr.range(),
        );
        diagnostic.add_primary_tag(ruff_db::diagnostic::DiagnosticTag::Deprecated);
        // `np.unicode` maps to the builtin `str`; every other alias shares
        // its builtin's name.
        let type_name = match type_name {
            "unicode" => "str",
            _ => type_name,
        };
        diagnostic.try_set_fix(|| {
            // Replace the `np.<alias>` expression with the builtin symbol,
            // adding an import edit when one is required.
            let (import_edit, binding) = checker.importer().get_or_import_builtin_symbol(
                type_name,
                expr.start(),
                checker.semantic(),
            )?;
            let binding_edit = Edit::range_replacement(binding, expr.range());
            Ok(Fix::safe_edits(binding_edit, import_edit))
        });
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/numpy/rules/numpy_2_0_deprecation.rs | crates/ruff_linter/src/rules/numpy/rules/numpy_2_0_deprecation.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::name::QualifiedNameBuilder;
use ruff_python_ast::statement_visitor::StatementVisitor;
use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::{self as ast, Expr};
use ruff_python_semantic::{Exceptions, Modules, SemanticModel};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::importer::ImportRequest;
use crate::rules::numpy::helpers::{AttributeSearcher, ImportSearcher};
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for uses of NumPy functions and constants that were removed from
/// the main namespace in NumPy 2.0.
///
/// ## Why is this bad?
/// NumPy 2.0 includes an overhaul of NumPy's Python API, intended to remove
/// redundant aliases and routines, and establish unambiguous mechanisms for
/// accessing constants, dtypes, and functions.
///
/// As part of this overhaul, a variety of deprecated NumPy functions and
/// constants were removed from the main namespace.
///
/// The majority of these functions and constants can be automatically replaced
/// by other members of the NumPy API or by equivalents from the Python
/// standard library. With the exception of renaming `numpy.byte_bounds` to
/// `numpy.lib.array_utils.byte_bounds`, all such replacements are backwards
/// compatible with earlier versions of NumPy.
///
/// This rule flags all uses of removed members, along with automatic fixes for
/// any backwards-compatible replacements.
///
/// ## Example
/// ```python
/// import numpy as np
///
/// arr1 = [np.Infinity, np.NaN, np.nan, np.PINF, np.inf]
/// arr2 = [np.float_(1.5), np.float64(5.1)]
/// np.round_(arr2)
/// ```
///
/// Use instead:
/// ```python
/// import numpy as np
///
/// arr1 = [np.inf, np.nan, np.nan, np.inf, np.inf]
/// arr2 = [np.float64(1.5), np.float64(5.1)]
/// np.round(arr2)
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.2.0")]
pub(crate) struct Numpy2Deprecation {
    /// The removed member, rendered as `np.{existing}` in the message.
    existing: String,
    /// Human-readable migration advice; `None` when there is no replacement.
    migration_guide: Option<String>,
    /// Title for the fix action, when an automatic replacement exists.
    code_action: Option<String>,
}
impl Violation for Numpy2Deprecation {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        // Append the migration guidance when one exists for this member.
        match &self.migration_guide {
            Some(guide) => format!(
                "`np.{}` will be removed in NumPy 2.0. {guide}",
                self.existing
            ),
            None => format!(
                "`np.{}` will be removed without replacement in NumPy 2.0",
                self.existing
            ),
        }
    }
    fn fix_title(&self) -> Option<String> {
        self.code_action.clone()
    }
}
/// A removed NumPy member together with its replacement strategy.
#[derive(Debug)]
struct Replacement<'a> {
    /// The name of the removed member in NumPy's main namespace.
    existing: &'a str,
    /// How (if at all) the member can be replaced.
    details: Details<'a>,
}
/// The replacement strategy for a removed member.
#[derive(Debug)]
enum Details<'a> {
    /// The deprecated member can be replaced by another member in the NumPy API.
    AutoImport {
        path: &'a str,
        name: &'a str,
        compatibility: Compatibility,
    },
    /// The deprecated member can be replaced by a member of the Python standard library.
    AutoPurePython { python_expr: &'a str },
    /// The deprecated member can be replaced by a manual migration.
    Manual { guideline: Option<&'a str> },
}
impl Details<'_> {
    /// The migration advice appended to the diagnostic message, if any.
    fn guideline(&self) -> Option<String> {
        match self {
            Details::AutoImport {
                path,
                name,
                compatibility,
            } => Some(match compatibility {
                Compatibility::BackwardsCompatible => format!("Use `{path}.{name}` instead."),
                Compatibility::Breaking => format!(
                    "Use `{path}.{name}` on NumPy 2.0, or ignore this warning on earlier versions."
                ),
            }),
            Details::AutoPurePython { python_expr } => {
                Some(format!("Use `{python_expr}` instead."))
            }
            Details::Manual { guideline } => guideline.map(ToString::to_string),
        }
    }
    /// The fix title shown for the automatic fix, if one exists.
    fn code_action(&self) -> Option<String> {
        match self {
            Details::AutoImport {
                path,
                name,
                compatibility,
            } => Some(match compatibility {
                Compatibility::BackwardsCompatible => format!("Replace with `{path}.{name}`"),
                Compatibility::Breaking => format!(
                    "Replace with `{path}.{name}` (requires NumPy 2.0 or greater)"
                ),
            }),
            Details::AutoPurePython { python_expr } => {
                Some(format!("Replace with `{python_expr}`"))
            }
            Details::Manual { .. } => None,
        }
    }
}
#[derive(Debug)]
enum Compatibility {
    /// The change is backwards compatible with earlier versions of NumPy.
    BackwardsCompatible,
    /// The change is breaking in NumPy 2.0.
    Breaking,
}
/// NPY201
///
/// Flags references to `numpy` members removed in NumPy 2.0, attaching an
/// automatic fix wherever a drop-in replacement exists.
pub(crate) fn numpy_2_0_deprecation(checker: &Checker, expr: &Expr) {
    let semantic = checker.semantic();
    if !semantic.seen_module(Modules::NUMPY) {
        return;
    }
    let Some(qualified_name) = semantic.resolve_qualified_name(expr) else {
        return;
    };
    // Map each removed member to its migration strategy.
    let replacement = match qualified_name.segments() {
        // NumPy's main namespace np.* members removed in 2.0
        ["numpy", "add_docstring"] => Replacement {
            existing: "add_docstring",
            details: Details::AutoImport {
                path: "numpy.lib",
                name: "add_docstring",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "add_newdoc"] => Replacement {
            existing: "add_newdoc",
            details: Details::AutoImport {
                path: "numpy.lib",
                name: "add_newdoc",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "add_newdoc_ufunc"] => Replacement {
            existing: "add_newdoc_ufunc",
            details: Details::Manual {
                guideline: Some("`add_newdoc_ufunc` is an internal function."),
            },
        },
        ["numpy", "alltrue"] => Replacement {
            existing: "alltrue",
            details: Details::AutoImport {
                path: "numpy",
                name: "all",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "asfarray"] => Replacement {
            existing: "asfarray",
            details: Details::Manual {
                guideline: Some("Use `np.asarray` with a `float` dtype instead."),
            },
        },
        ["numpy", "byte_bounds"] => Replacement {
            existing: "byte_bounds",
            details: Details::AutoImport {
                path: "numpy.lib.array_utils",
                name: "byte_bounds",
                compatibility: Compatibility::Breaking,
            },
        },
        ["numpy", "cast"] => Replacement {
            existing: "cast",
            details: Details::Manual {
                guideline: Some("Use `np.asarray(arr, dtype=dtype)` instead."),
            },
        },
        ["numpy", "cfloat"] => Replacement {
            existing: "cfloat",
            details: Details::AutoImport {
                path: "numpy",
                name: "complex128",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "clongfloat"] => Replacement {
            existing: "clongfloat",
            details: Details::AutoImport {
                path: "numpy",
                name: "clongdouble",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "compat"] => Replacement {
            existing: "compat",
            details: Details::Manual {
                guideline: Some("Python 2 is no longer supported."),
            },
        },
        ["numpy", "complex_"] => Replacement {
            existing: "complex_",
            details: Details::AutoImport {
                path: "numpy",
                name: "complex128",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "cumproduct"] => Replacement {
            existing: "cumproduct",
            details: Details::AutoImport {
                path: "numpy",
                name: "cumprod",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "DataSource"] => Replacement {
            existing: "DataSource",
            details: Details::AutoImport {
                path: "numpy.lib.npyio",
                name: "DataSource",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "deprecate"] => Replacement {
            existing: "deprecate",
            details: Details::Manual {
                guideline: Some(
                    "Emit `DeprecationWarning` with `warnings.warn` directly, or use `typing.deprecated`.",
                ),
            },
        },
        ["numpy", "deprecate_with_doc"] => Replacement {
            existing: "deprecate_with_doc",
            details: Details::Manual {
                guideline: Some(
                    "Emit `DeprecationWarning` with `warnings.warn` directly, or use `typing.deprecated`.",
                ),
            },
        },
        ["numpy", "disp"] => Replacement {
            existing: "disp",
            details: Details::Manual {
                guideline: Some("Use a dedicated print function instead."),
            },
        },
        ["numpy", "fastCopyAndTranspose"] => Replacement {
            existing: "fastCopyAndTranspose",
            details: Details::Manual {
                guideline: Some("Use `arr.T.copy()` instead."),
            },
        },
        ["numpy", "find_common_type"] => Replacement {
            existing: "find_common_type",
            details: Details::Manual {
                guideline: Some(
                    "Use `numpy.promote_types` or `numpy.result_type` instead. To achieve semantics for the `scalar_types` argument, use `numpy.result_type` and pass the Python values `0`, `0.0`, or `0j`.",
                ),
            },
        },
        ["numpy", "get_array_wrap"] => Replacement {
            existing: "get_array_wrap",
            details: Details::Manual { guideline: None },
        },
        ["numpy", "float_"] => Replacement {
            existing: "float_",
            details: Details::AutoImport {
                path: "numpy",
                name: "float64",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "geterrobj"] => Replacement {
            existing: "geterrobj",
            details: Details::Manual {
                guideline: Some("Use the `np.errstate` context manager instead."),
            },
        },
        ["numpy", "in1d"] => Replacement {
            existing: "in1d",
            details: Details::AutoImport {
                path: "numpy",
                name: "isin",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "INF"] => Replacement {
            existing: "INF",
            details: Details::AutoImport {
                path: "numpy",
                name: "inf",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "Inf"] => Replacement {
            existing: "Inf",
            details: Details::AutoImport {
                path: "numpy",
                name: "inf",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "Infinity"] => Replacement {
            existing: "Infinity",
            details: Details::AutoImport {
                path: "numpy",
                name: "inf",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "infty"] => Replacement {
            existing: "infty",
            details: Details::AutoImport {
                path: "numpy",
                name: "inf",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "issctype"] => Replacement {
            existing: "issctype",
            details: Details::Manual { guideline: None },
        },
        ["numpy", "issubclass_"] => Replacement {
            existing: "issubclass_",
            details: Details::AutoPurePython {
                python_expr: "issubclass",
            },
        },
        ["numpy", "issubsctype"] => Replacement {
            existing: "issubsctype",
            details: Details::AutoImport {
                path: "numpy",
                name: "issubdtype",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "mat"] => Replacement {
            existing: "mat",
            details: Details::AutoImport {
                path: "numpy",
                name: "asmatrix",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "maximum_sctype"] => Replacement {
            existing: "maximum_sctype",
            details: Details::Manual { guideline: None },
        },
        ["numpy", existing @ ("NaN" | "NAN")] => Replacement {
            existing,
            details: Details::AutoImport {
                path: "numpy",
                name: "nan",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "nbytes"] => Replacement {
            existing: "nbytes",
            details: Details::Manual {
                guideline: Some("Use `np.dtype(<dtype>).itemsize` instead."),
            },
        },
        ["numpy", "NINF"] => Replacement {
            existing: "NINF",
            details: Details::AutoPurePython {
                python_expr: "-np.inf",
            },
        },
        ["numpy", "NZERO"] => Replacement {
            existing: "NZERO",
            details: Details::AutoPurePython {
                python_expr: "-0.0",
            },
        },
        ["numpy", "longcomplex"] => Replacement {
            existing: "longcomplex",
            details: Details::AutoImport {
                path: "numpy",
                name: "clongdouble",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "longfloat"] => Replacement {
            existing: "longfloat",
            details: Details::AutoImport {
                path: "numpy",
                name: "longdouble",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "lookfor"] => Replacement {
            existing: "lookfor",
            details: Details::Manual {
                // Fixed mojibake: the apostrophe in this user-facing message was
                // previously garbled ("NumPyβs").
                guideline: Some("Search NumPy's documentation directly."),
            },
        },
        ["numpy", "obj2sctype"] => Replacement {
            existing: "obj2sctype",
            details: Details::Manual { guideline: None },
        },
        ["numpy", "PINF"] => Replacement {
            existing: "PINF",
            details: Details::AutoImport {
                path: "numpy",
                name: "inf",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "product"] => Replacement {
            existing: "product",
            details: Details::AutoImport {
                path: "numpy",
                name: "prod",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "PZERO"] => Replacement {
            existing: "PZERO",
            details: Details::AutoPurePython { python_expr: "0.0" },
        },
        ["numpy", "recfromcsv"] => Replacement {
            existing: "recfromcsv",
            details: Details::Manual {
                guideline: Some("Use `np.genfromtxt` with comma delimiter instead."),
            },
        },
        ["numpy", "recfromtxt"] => Replacement {
            existing: "recfromtxt",
            details: Details::Manual {
                guideline: Some("Use `np.genfromtxt` instead."),
            },
        },
        ["numpy", "round_"] => Replacement {
            existing: "round_",
            details: Details::AutoImport {
                path: "numpy",
                name: "round",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "safe_eval"] => Replacement {
            existing: "safe_eval",
            details: Details::AutoImport {
                path: "ast",
                name: "literal_eval",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "sctype2char"] => Replacement {
            existing: "sctype2char",
            details: Details::Manual { guideline: None },
        },
        ["numpy", "sctypes"] => Replacement {
            existing: "sctypes",
            details: Details::Manual { guideline: None },
        },
        ["numpy", "seterrobj"] => Replacement {
            existing: "seterrobj",
            details: Details::Manual {
                guideline: Some("Use the `np.errstate` context manager instead."),
            },
        },
        ["numpy", "set_string_function"] => Replacement {
            existing: "set_string_function",
            details: Details::Manual {
                guideline: Some("Use `np.set_printoptions` for custom printing of NumPy objects."),
            },
        },
        ["numpy", "singlecomplex"] => Replacement {
            existing: "singlecomplex",
            details: Details::AutoImport {
                path: "numpy",
                name: "complex64",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "string_"] => Replacement {
            existing: "string_",
            details: Details::AutoImport {
                path: "numpy",
                name: "bytes_",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "sometrue"] => Replacement {
            existing: "sometrue",
            details: Details::AutoImport {
                path: "numpy",
                name: "any",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "source"] => Replacement {
            existing: "source",
            details: Details::AutoImport {
                path: "inspect",
                name: "getsource",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "tracemalloc_domain"] => Replacement {
            existing: "tracemalloc_domain",
            details: Details::AutoImport {
                path: "numpy.lib",
                name: "tracemalloc_domain",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "trapz"] => Replacement {
            existing: "trapz",
            details: Details::AutoImport {
                path: "numpy",
                name: "trapezoid",
                compatibility: Compatibility::Breaking,
            },
        },
        ["numpy", "unicode_"] => Replacement {
            existing: "unicode_",
            details: Details::AutoImport {
                path: "numpy",
                name: "str_",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "who"] => Replacement {
            existing: "who",
            details: Details::Manual {
                guideline: Some("Use an IDE variable explorer or `locals()` instead."),
            },
        },
        ["numpy", "row_stack"] => Replacement {
            existing: "row_stack",
            details: Details::AutoImport {
                path: "numpy",
                name: "vstack",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "AxisError"] => Replacement {
            existing: "AxisError",
            details: Details::AutoImport {
                path: "numpy.exceptions",
                name: "AxisError",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "ComplexWarning"] => Replacement {
            existing: "ComplexWarning",
            details: Details::AutoImport {
                path: "numpy.exceptions",
                name: "ComplexWarning",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "DTypePromotionError"] => Replacement {
            existing: "DTypePromotionError",
            details: Details::AutoImport {
                path: "numpy.exceptions",
                name: "DTypePromotionError",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "ModuleDeprecationWarning"] => Replacement {
            existing: "ModuleDeprecationWarning",
            details: Details::AutoImport {
                path: "numpy.exceptions",
                name: "ModuleDeprecationWarning",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "RankWarning"] => Replacement {
            existing: "RankWarning",
            details: Details::AutoImport {
                path: "numpy.exceptions",
                name: "RankWarning",
                compatibility: Compatibility::Breaking,
            },
        },
        ["numpy", "TooHardError"] => Replacement {
            existing: "TooHardError",
            details: Details::AutoImport {
                path: "numpy.exceptions",
                name: "TooHardError",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "VisibleDeprecationWarning"] => Replacement {
            existing: "VisibleDeprecationWarning",
            details: Details::AutoImport {
                path: "numpy.exceptions",
                name: "VisibleDeprecationWarning",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "compare_chararrays"] => Replacement {
            existing: "compare_chararrays",
            details: Details::AutoImport {
                path: "numpy.char",
                name: "compare_chararrays",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "chararray"] => Replacement {
            existing: "chararray",
            details: Details::AutoImport {
                path: "numpy.char",
                name: "chararray",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        ["numpy", "format_parser"] => Replacement {
            existing: "format_parser",
            details: Details::AutoImport {
                path: "numpy.rec",
                name: "format_parser",
                compatibility: Compatibility::BackwardsCompatible,
            },
        },
        _ => return,
    };
    // Respect `try`/`except` blocks that already provide version compatibility.
    if is_guarded_by_try_except(expr, &replacement, semantic) {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(
        Numpy2Deprecation {
            existing: replacement.existing.to_string(),
            migration_guide: replacement.details.guideline(),
            code_action: replacement.details.code_action(),
        },
        expr.range(),
    );
    // Attach an automatic fix where a drop-in replacement exists; breaking
    // replacements are marked unsafe since they require NumPy >= 2.0.
    match replacement.details {
        Details::AutoImport {
            path,
            name,
            compatibility,
        } => {
            diagnostic.try_set_fix(|| {
                let (import_edit, binding) = checker.importer().get_or_import_symbol(
                    &ImportRequest::import_from(path, name),
                    expr.start(),
                    checker.semantic(),
                )?;
                let replacement_edit = Edit::range_replacement(binding, expr.range());
                Ok(match compatibility {
                    Compatibility::BackwardsCompatible => {
                        Fix::safe_edits(import_edit, [replacement_edit])
                    }
                    Compatibility::Breaking => Fix::unsafe_edits(import_edit, [replacement_edit]),
                })
            });
        }
        Details::AutoPurePython { python_expr } => diagnostic.set_fix(Fix::safe_edit(
            Edit::range_replacement(python_expr.to_string(), expr.range()),
        )),
        Details::Manual { guideline: _ } => {}
    }
}
/// Returns `true` if this use of a deprecated `numpy` member is wrapped in a
/// `try`/`except` construct that already provides compatibility with older
/// NumPy versions, in which case the violation should be suppressed.
///
/// For attribute accesses (e.g. `np.ComplexWarning`), the violation is only
/// suppressed when the access occurs inside an `except AttributeError` handler
/// and the associated `try` block accesses the member via its non-deprecated
/// name.
///
/// For bare names, we inspect how the `numpy` member was bound. If it came
/// from a `from numpy import foo` statement inside an `except ImportError`
/// (or `except ModuleNotFoundError`) handler, and the associated `try` block
/// imports the member from its non-deprecated location, the violation is
/// likewise suppressed.
///
/// Examples:
///
/// ```py
/// import numpy as np
///
/// try:
///     np.all([True, True])
/// except AttributeError:
///     np.alltrue([True, True])  # Okay
///
/// try:
///     from numpy.exceptions import ComplexWarning
/// except ImportError:
///     from numpy import ComplexWarning
///
/// x = ComplexWarning()  # Okay
/// ```
fn is_guarded_by_try_except(
    expr: &Expr,
    replacement: &Replacement,
    semantic: &SemanticModel,
) -> bool {
    match expr {
        Expr::Attribute(_) => {
            if !semantic.in_exception_handler() {
                return false;
            }
            let Some(enclosing_try) = semantic
                .current_statements()
                .find_map(|stmt| stmt.as_try_stmt())
            else {
                return false;
            };
            Exceptions::from_try_stmt(enclosing_try, semantic)
                .contains(Exceptions::ATTRIBUTE_ERROR)
                && try_block_contains_undeprecated_attribute(
                    enclosing_try,
                    &replacement.details,
                    semantic,
                )
        }
        Expr::Name(ast::ExprName { id, .. }) => {
            let Some(symbol_id) = semantic.lookup_symbol(id.as_str()) else {
                return false;
            };
            let binding = semantic.binding(symbol_id);
            // The binding must be an import made inside an exception handler.
            if !(binding.is_external() && binding.in_exception_handler()) {
                return false;
            }
            let Some(enclosing_try) = binding.source.and_then(|import_id| {
                semantic
                    .statements(import_id)
                    .find_map(|stmt| stmt.as_try_stmt())
            }) else {
                return false;
            };
            Exceptions::from_try_stmt(enclosing_try, semantic)
                .intersects(Exceptions::IMPORT_ERROR | Exceptions::MODULE_NOT_FOUND_ERROR)
                && try_block_contains_undeprecated_import(enclosing_try, &replacement.details)
        }
        _ => false,
    }
}
/// Returns `true` if the `try` branch of the given [`ast::StmtTry`] node
/// accesses the numpy member via an attribute expression at its
/// non-deprecated location.
fn try_block_contains_undeprecated_attribute(
    try_node: &ast::StmtTry,
    replacement_details: &Details,
    semantic: &SemanticModel,
) -> bool {
    // Only `AutoImport` replacements have a non-deprecated location to search for.
    let Details::AutoImport { path, name, .. } = replacement_details else {
        return false;
    };
    // Assemble the qualified name of the non-deprecated location,
    // e.g. `numpy.exceptions.ComplexWarning`.
    let mut builder = QualifiedNameBuilder::default();
    for segment in path.split('.') {
        builder.push(segment);
    }
    builder.push(name);
    let mut searcher = AttributeSearcher::new(builder.build(), semantic);
    searcher.visit_body(&try_node.body);
    searcher.found_attribute
}
/// Returns `true` if the `try` branch of the given [`ast::StmtTry`] node
/// imports the numpy member from its non-deprecated location.
fn try_block_contains_undeprecated_import(
    try_node: &ast::StmtTry,
    replacement_details: &Details,
) -> bool {
    // Only `AutoImport` replacements have a non-deprecated location to search for.
    let Details::AutoImport { path, name, .. } = replacement_details else {
        return false;
    };
    let mut searcher = ImportSearcher::new(path, name);
    searcher.visit_body(&try_node.body);
    searcher.found_import
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/numpy/rules/mod.rs | crates/ruff_linter/src/rules/numpy/rules/mod.rs | pub(crate) use deprecated_function::*;
pub(crate) use deprecated_type_alias::*;
pub(crate) use legacy_random::*;
pub(crate) use numpy_2_0_deprecation::*;
mod deprecated_function;
mod deprecated_type_alias;
mod legacy_random;
mod numpy_2_0_deprecation;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/numpy/rules/deprecated_function.rs | crates/ruff_linter/src/rules/numpy/rules/deprecated_function.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::Expr;
use ruff_python_semantic::Modules;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::importer::ImportRequest;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for uses of deprecated NumPy functions.
///
/// ## Why is this bad?
/// When NumPy functions are deprecated, they are usually replaced with
/// newer, more efficient versions, or with functions that are more
/// consistent with the rest of the NumPy API.
///
/// Prefer newer APIs over deprecated ones.
///
/// ## Example
/// ```python
/// import numpy as np
///
/// np.alltrue([True, False])
/// ```
///
/// Use instead:
/// ```python
/// import numpy as np
///
/// np.all([True, False])
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.276")]
pub(crate) struct NumpyDeprecatedFunction {
    /// The deprecated function's name, e.g. `"alltrue"`.
    existing: String,
    /// The recommended replacement's name, e.g. `"all"`.
    replacement: String,
}
impl Violation for NumpyDeprecatedFunction {
    // A fix is offered only when the replacement symbol can be imported.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        let NumpyDeprecatedFunction {
            existing,
            replacement,
        } = self;
        format!("`np.{existing}` is deprecated; use `np.{replacement}` instead")
    }
    fn fix_title(&self) -> Option<String> {
        let NumpyDeprecatedFunction { replacement, .. } = self;
        Some(format!("Replace with `np.{replacement}`"))
    }
}
/// NPY003
pub(crate) fn deprecated_function(checker: &Checker, expr: &Expr) {
    let semantic = checker.semantic();
    if !semantic.seen_module(Modules::NUMPY) {
        return;
    }
    let Some(qualified_name) = semantic.resolve_qualified_name(expr) else {
        return;
    };
    // Map each deprecated alias to its modern spelling.
    let (existing, replacement) = match qualified_name.segments() {
        ["numpy", "round_"] => ("round_", "round"),
        ["numpy", "product"] => ("product", "prod"),
        ["numpy", "cumproduct"] => ("cumproduct", "cumprod"),
        ["numpy", "sometrue"] => ("sometrue", "any"),
        ["numpy", "alltrue"] => ("alltrue", "all"),
        _ => return,
    };
    let mut diagnostic = checker.report_diagnostic(
        NumpyDeprecatedFunction {
            existing: existing.to_string(),
            replacement: replacement.to_string(),
        },
        expr.range(),
    );
    diagnostic.add_primary_tag(ruff_db::diagnostic::DiagnosticTag::Deprecated);
    // Rewrite the reference to the modern name, importing it if necessary.
    diagnostic.try_set_fix(|| {
        let (import_edit, binding) = checker.importer().get_or_import_symbol(
            &ImportRequest::import_from("numpy", replacement),
            expr.start(),
            checker.semantic(),
        )?;
        let replacement_edit = Edit::range_replacement(binding, expr.range());
        Ok(Fix::safe_edits(import_edit, [replacement_edit]))
    });
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/numpy/rules/legacy_random.rs | crates/ruff_linter/src/rules/numpy/rules/legacy_random.rs | use ruff_python_ast::Expr;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::Modules;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for the use of legacy `np.random` function calls.
///
/// ## Why is this bad?
/// According to the NumPy documentation's [Legacy Random Generation]:
///
/// > The `RandomState` provides access to legacy generators... This class
/// > should only be used if it is essential to have randoms that are
/// > identical to what would have been produced by previous versions of
/// > NumPy.
///
/// The members exposed directly on the `random` module are convenience
/// functions that alias to methods on a global singleton `RandomState`
/// instance. NumPy recommends using a dedicated `Generator` instance
/// rather than the random variate generation methods exposed directly on
/// the `random` module, as the new `Generator` is both faster and has
/// better statistical properties.
///
/// See the documentation on [Random Sampling] and [NEP 19] for further
/// details.
///
/// ## Example
/// ```python
/// import numpy as np
///
/// np.random.seed(1337)
/// np.random.normal()
/// ```
///
/// Use instead:
/// ```python
/// rng = np.random.default_rng(1337)
/// rng.normal()
/// ```
///
/// [Legacy Random Generation]: https://numpy.org/doc/stable/reference/random/legacy.html#legacy
/// [Random Sampling]: https://numpy.org/doc/stable/reference/random/index.html#random-quick-start
/// [NEP 19]: https://numpy.org/neps/nep-0019-rng-policy.html
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.248")]
pub(crate) struct NumpyLegacyRandom {
    /// The legacy `np.random` method that was called, e.g. `"seed"`.
    method_name: String,
}
impl Violation for NumpyLegacyRandom {
    // No automatic fix: migrating to `np.random.Generator` requires restructuring.
    #[derive_message_formats]
    fn message(&self) -> String {
        let NumpyLegacyRandom { method_name } = self;
        format!("Replace legacy `np.random.{method_name}` call with `np.random.Generator`")
    }
}
/// NPY002
pub(crate) fn legacy_random(checker: &Checker, expr: &Expr) {
    if !checker.semantic().seen_module(Modules::NUMPY) {
        return;
    }
    let Some(qualified_name) = checker.semantic().resolve_qualified_name(expr) else {
        return;
    };
    // Only direct members of `numpy.random` are legacy aliases.
    let ["numpy", "random", method_name] = qualified_name.segments() else {
        return;
    };
    // The convenience functions on the module, all of which delegate to the
    // global legacy `RandomState` singleton.
    if !matches!(
        *method_name,
        // Seeds
        "seed"
            | "get_state"
            | "set_state"
            // Simple random data
            | "rand"
            | "ranf"
            | "sample"
            | "randn"
            | "randint"
            | "random"
            | "random_integers"
            | "random_sample"
            | "choice"
            | "bytes"
            // Permutations
            | "shuffle"
            | "permutation"
            // Distributions
            | "beta"
            | "binomial"
            | "chisquare"
            | "dirichlet"
            | "exponential"
            | "f"
            | "gamma"
            | "geometric"
            | "gumbel"
            | "hypergeometric"
            | "laplace"
            | "logistic"
            | "lognormal"
            | "logseries"
            | "multinomial"
            | "multivariate_normal"
            | "negative_binomial"
            | "noncentral_chisquare"
            | "noncentral_f"
            | "normal"
            | "pareto"
            | "poisson"
            | "power"
            | "rayleigh"
            | "standard_cauchy"
            | "standard_exponential"
            | "standard_gamma"
            | "standard_normal"
            | "standard_t"
            | "triangular"
            | "uniform"
            | "vonmises"
            | "wald"
            | "weibull"
            | "zipf"
    ) {
        return;
    }
    checker.report_diagnostic(
        NumpyLegacyRandom {
            method_name: (*method_name).to_string(),
        },
        expr.range(),
    );
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/settings.rs | crates/ruff_linter/src/rules/pyflakes/settings.rs | //! Settings for the `Pyflakes` plugin.
use crate::display_settings;
use ruff_macros::CacheKey;
use std::fmt;
#[derive(Debug, Clone, Default, CacheKey)]
/// User-configurable settings for the Pyflakes rules.
pub struct Settings {
    // Values of the `lint.pyflakes.extend-generics` setting.
    pub extend_generics: Vec<String>,
    // Values of the `lint.pyflakes.allowed-unused-imports` setting.
    pub allowed_unused_imports: Vec<String>,
}
impl fmt::Display for Settings {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the shared `display_settings!` macro so this section is
        // rendered consistently under the `linter.pyflakes` namespace.
        display_settings! {
            formatter = f,
            namespace = "linter.pyflakes",
            fields = [
                self.extend_generics | debug,
                self.allowed_unused_imports | debug
            ]
        }
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/cformat.rs | crates/ruff_linter/src/rules/pyflakes/cformat.rs | //! Implements helper functions for using vendored/cformat.rs
use std::convert::TryFrom;
use std::str::FromStr;
use ruff_python_literal::cformat::{
CFormatError, CFormatPart, CFormatPrecision, CFormatQuantity, CFormatSpec, CFormatString,
};
use rustc_hash::FxHashSet;
/// Summary of the conversion specifiers found in a %-style format string.
pub(crate) struct CFormatSummary {
    /// Whether the format string uses `*` for a width or precision
    /// (each `*` consumes an extra positional value).
    pub(crate) starred: bool,
    /// The number of positional values the format string consumes.
    pub(crate) num_positional: usize,
    /// The mapping keys of named specifiers, e.g. `"foo"` for `%(foo)s`.
    pub(crate) keywords: FxHashSet<String>,
}
impl From<&CFormatString> for CFormatSummary {
fn from(format_string: &CFormatString) -> Self {
let mut starred = false;
let mut num_positional = 0;
let mut keywords = FxHashSet::default();
for format_part in format_string.iter() {
let CFormatPart::Spec(CFormatSpec {
ref mapping_key,
ref min_field_width,
ref precision,
..
}) = format_part.1
else {
continue;
};
match mapping_key {
Some(k) => {
keywords.insert(k.clone());
}
None => {
num_positional += 1;
}
}
if min_field_width == &Some(CFormatQuantity::FromValuesTuple) {
num_positional += 1;
starred = true;
}
if precision == &Some(CFormatPrecision::Quantity(CFormatQuantity::FromValuesTuple)) {
num_positional += 1;
starred = true;
}
}
Self {
starred,
num_positional,
keywords,
}
}
}
impl TryFrom<&str> for CFormatSummary {
    type Error = CFormatError;
    // Parse the literal as a %-style format string, then summarize its specifiers.
    fn try_from(literal: &str) -> Result<Self, Self::Error> {
        let format_string = CFormatString::from_str(literal)?;
        Ok(Self::from(&format_string))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Mixed named and positional specifiers are counted separately.
    #[test]
    fn test_cformat_summary() {
        let literal = "%(foo)s %s %d %(bar)x";
        let expected_positional = 2;
        let expected_keywords = ["foo", "bar"].into_iter().map(String::from).collect();
        let format_summary = CFormatSummary::try_from(literal).unwrap();
        assert!(!format_summary.starred);
        assert_eq!(format_summary.num_positional, expected_positional);
        assert_eq!(format_summary.keywords, expected_keywords);
    }
    // Each `*` width/precision consumes an extra positional value and sets `starred`.
    #[test]
    fn test_cformat_summary_starred() {
        let format_summary1 = CFormatSummary::try_from("%*s %*d").unwrap();
        assert!(format_summary1.starred);
        assert_eq!(format_summary1.num_positional, 4);
        let format_summary2 = CFormatSummary::try_from("%s %.*d").unwrap();
        assert!(format_summary2.starred);
        assert_eq!(format_summary2.num_positional, 3);
        let format_summary3 = CFormatSummary::try_from("%s %*.*d").unwrap();
        assert!(format_summary3.starred);
        assert_eq!(format_summary3.num_positional, 4);
        let format_summary4 = CFormatSummary::try_from("%s %1d").unwrap();
        assert!(!format_summary4.starred);
    }
    // Malformed format strings surface the parser's error.
    #[test]
    fn test_cformat_summary_invalid() {
        assert!(CFormatSummary::try_from("%").is_err());
        assert!(CFormatSummary::try_from("%(foo).").is_err());
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/fixes.rs | crates/ruff_linter/src/rules/pyflakes/fixes.rs | use anyhow::{Context, Ok, Result};
use ruff_python_ast as ast;
use ruff_python_ast::Expr;
use ruff_python_codegen::Stylist;
use ruff_python_semantic::Binding;
use ruff_python_trivia::{BackwardsTokenizer, SimpleTokenKind, SimpleTokenizer};
use ruff_text_size::Ranged;
use crate::Edit;
use crate::Locator;
use crate::cst::matchers::{match_call_mut, match_dict, transform_expression};
/// Generate a [`Edit`] that removes the dictionary entries at the positions
/// listed in `unused_arguments`.
pub(super) fn remove_unused_format_arguments_from_dict(
    unused_arguments: &[usize],
    dict: &ast::ExprDict,
    locator: &Locator,
    stylist: &Stylist,
) -> Result<Edit> {
    let source_code = locator.slice(dict);
    transform_expression(source_code, stylist, |mut expression| {
        let dict = match_dict(&mut expression)?;
        // Drop every element whose position appears in `unused_arguments`.
        let mut position = 0;
        dict.elements.retain(|_| {
            let keep = !unused_arguments.contains(&position);
            position += 1;
            keep
        });
        Ok(expression)
    })
    .map(|output| Edit::range_replacement(output, dict.range()))
}
/// Generate a [`Edit`] that removes the keyword arguments at the positions
/// listed in `unused_arguments` from a `format` call.
pub(super) fn remove_unused_keyword_arguments_from_format_call(
    unused_arguments: &[usize],
    call: &ast::ExprCall,
    locator: &Locator,
    stylist: &Stylist,
) -> Result<Edit> {
    let source_code = locator.slice(call);
    transform_expression(source_code, stylist, |mut expression| {
        let call = match_call_mut(&mut expression)?;
        // Drop unused keyword arguments; `position` counts keyword arguments only.
        let mut position = 0;
        call.args.retain(|arg| {
            if arg.keyword.is_none() {
                return true;
            }
            let keep = !unused_arguments.contains(&position);
            position += 1;
            keep
        });
        Ok(expression)
    })
    .map(|output| Edit::range_replacement(output, call.range()))
}
/// Generate a [`Edit`] that removes the positional arguments at the positions
/// listed in `unused_arguments` from a `format` call.
pub(crate) fn remove_unused_positional_arguments_from_format_call(
    unused_arguments: &[usize],
    call: &ast::ExprCall,
    locator: &Locator,
    stylist: &Stylist,
) -> Result<Edit> {
    // When every argument is unused, drop the entire call instead of leaving
    // an empty `.format()` behind:
    // `"Hello".format(", world!")` -> `"Hello"`, not `"Hello".format()`.
    if unused_arguments.len() == call.arguments.len() {
        if let Expr::Attribute(attribute) = &*call.func {
            return Ok(Edit::range_replacement(
                locator.slice(&*attribute.value).to_string(),
                call.range(),
            ));
        }
    }
    let source_code = locator.slice(call);
    transform_expression(source_code, stylist, |mut expression| {
        let call = match_call_mut(&mut expression)?;
        // Drop every argument whose position appears in `unused_arguments`.
        let mut position = 0;
        call.args.retain(|_| {
            let keep = !unused_arguments.contains(&position);
            position += 1;
            keep
        });
        if call.args.is_empty() {
            // No arguments remain: strip the trailing parentheses entirely.
            Ok((*call.func).clone())
        } else {
            Ok(expression)
        }
    })
    .map(|output| Edit::range_replacement(output, call.range()))
}
/// Generate an [`Edit`] to remove the binding from an exception handler.
///
/// Rewrites `except ZeroDivisionError as err:` to `except ZeroDivisionError:`
/// by deleting everything between the end of the exception expression and the
/// start of the trailing colon (i.e. the ` as err` segment).
pub(crate) fn remove_exception_handler_assignment(
    bound_exception: &Binding,
    locator: &Locator,
) -> Result<Edit> {
    // Find the position just after the exception name. This is a late pass so we only have the
    // binding and can't look its parent in the AST up anymore.
    // ```
    // except ZeroDivisionError as err:
    //                             ^^^ This is the bound_exception range
    //                         ^^^^    lex this range (backwards)
    //                         ^       preceding_end (we want to remove from here)
    // ```
    // There can't be any comments in that range.
    let mut tokenizer =
        BackwardsTokenizer::up_to(bound_exception.start(), locator.contents(), &[]).skip_trivia();

    // Eat the `as` token.
    let preceding = tokenizer
        .next()
        .context("expected the exception name to be preceded by `as`")?;
    debug_assert!(matches!(preceding.kind, SimpleTokenKind::As));

    // Lex to the end of the preceding token, which should be the exception value.
    let preceding = tokenizer
        .next()
        .context("expected the exception name to be preceded by a token")?;

    // Lex forwards, to the `:` token.
    let following = SimpleTokenizer::starts_at(bound_exception.end(), locator.contents())
        .skip_trivia()
        .next()
        .context("expected the exception name to be followed by a colon")?;
    debug_assert!(matches!(following.kind, SimpleTokenKind::Colon));

    // Delete the span between the exception expression and the colon, which
    // removes ` as err` while preserving both neighbors.
    Ok(Edit::deletion(preceding.end(), following.start()))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/mod.rs | crates/ruff_linter/src/rules/pyflakes/mod.rs | //! Rules from [Pyflakes](https://pypi.org/project/pyflakes/).
pub(crate) mod cformat;
pub(crate) mod fixes;
pub(crate) mod format;
pub(crate) mod rules;
pub mod settings;
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use regex::Regex;
use ruff_db::diagnostic::Diagnostic;
use ruff_python_parser::ParseOptions;
use rustc_hash::FxHashMap;
use test_case::test_case;
use ruff_python_ast::PySourceType;
use ruff_python_codegen::Stylist;
use ruff_python_index::Indexer;
use ruff_python_trivia::textwrap::dedent;
use crate::linter::check_path;
use crate::registry::{Linter, Rule};
use crate::rules::isort;
use crate::rules::pyflakes;
use crate::settings::types::PreviewMode;
use crate::settings::{LinterSettings, flags};
use crate::source_kind::SourceKind;
use crate::suppression::Suppressions;
use crate::test::{test_contents, test_path, test_snippet};
use crate::{Locator, assert_diagnostics, assert_diagnostics_diff, directives};
#[test_case(Rule::UnusedImport, Path::new("F401_0.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_1.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_2.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_3.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_4.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_5.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_6.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_7.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_8.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_9.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_10.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_11.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_12.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_13.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_14.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_15.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_16.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_17.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_18.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_19.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_20.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_21.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_22.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_23.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_32.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_34.py"))]
#[test_case(Rule::ImportShadowedByLoopVar, Path::new("F402.py"))]
#[test_case(Rule::ImportShadowedByLoopVar, Path::new("F402.ipynb"))]
#[test_case(Rule::UndefinedLocalWithImportStar, Path::new("F403.py"))]
#[test_case(Rule::LateFutureImport, Path::new("F404_0.py"))]
#[test_case(Rule::LateFutureImport, Path::new("F404_1.py"))]
#[test_case(Rule::UndefinedLocalWithImportStarUsage, Path::new("F405.py"))]
#[test_case(Rule::UndefinedLocalWithNestedImportStarUsage, Path::new("F406.py"))]
#[test_case(Rule::FutureFeatureNotDefined, Path::new("F407.py"))]
#[test_case(Rule::PercentFormatInvalidFormat, Path::new("F50x.py"))]
#[test_case(Rule::PercentFormatExpectedMapping, Path::new("F502.py"))]
#[test_case(Rule::PercentFormatExpectedMapping, Path::new("F50x.py"))]
#[test_case(Rule::PercentFormatExpectedSequence, Path::new("F503.py"))]
#[test_case(Rule::PercentFormatExpectedSequence, Path::new("F50x.py"))]
#[test_case(Rule::PercentFormatExtraNamedArguments, Path::new("F504.py"))]
#[test_case(Rule::PercentFormatExtraNamedArguments, Path::new("F50x.py"))]
#[test_case(Rule::PercentFormatMissingArgument, Path::new("F504.py"))]
#[test_case(Rule::PercentFormatMissingArgument, Path::new("F50x.py"))]
#[test_case(Rule::PercentFormatMixedPositionalAndNamed, Path::new("F50x.py"))]
#[test_case(Rule::PercentFormatPositionalCountMismatch, Path::new("F50x.py"))]
#[test_case(Rule::PercentFormatStarRequiresSequence, Path::new("F50x.py"))]
#[test_case(Rule::PercentFormatUnsupportedFormatCharacter, Path::new("F50x.py"))]
#[test_case(Rule::StringDotFormatInvalidFormat, Path::new("F521.py"))]
#[test_case(Rule::StringDotFormatExtraNamedArguments, Path::new("F522.py"))]
#[test_case(Rule::StringDotFormatExtraPositionalArguments, Path::new("F523.py"))]
#[test_case(Rule::StringDotFormatMissingArguments, Path::new("F524.py"))]
#[test_case(Rule::StringDotFormatMixingAutomatic, Path::new("F525.py"))]
#[test_case(Rule::FStringMissingPlaceholders, Path::new("F541.py"))]
#[test_case(Rule::MultiValueRepeatedKeyLiteral, Path::new("F601.py"))]
#[test_case(Rule::MultiValueRepeatedKeyVariable, Path::new("F602.py"))]
#[test_case(Rule::MultipleStarredExpressions, Path::new("F622.py"))]
#[test_case(Rule::AssertTuple, Path::new("F631.py"))]
#[test_case(Rule::IsLiteral, Path::new("F632.py"))]
#[test_case(Rule::InvalidPrintSyntax, Path::new("F633.py"))]
#[test_case(Rule::IfTuple, Path::new("F634.py"))]
#[test_case(Rule::BreakOutsideLoop, Path::new("F701.py"))]
#[test_case(Rule::ContinueOutsideLoop, Path::new("F702.py"))]
#[test_case(Rule::YieldOutsideFunction, Path::new("F704.py"))]
#[test_case(Rule::ReturnOutsideFunction, Path::new("F706.py"))]
#[test_case(Rule::DefaultExceptNotLast, Path::new("F707.py"))]
#[test_case(Rule::ForwardAnnotationSyntaxError, Path::new("F722.py"))]
#[test_case(Rule::ForwardAnnotationSyntaxError, Path::new("F722_1.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_0.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_1.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_2.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_3.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_4.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_5.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_6.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_7.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_8.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_9.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_10.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_11.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_12.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_13.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_14.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_15.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_16.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_17.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_18.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_19.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_20.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_21.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_22.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_23.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_24.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_25.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_26.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_27.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_28.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_29.pyi"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_30.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_31.py"))]
#[test_case(Rule::RedefinedWhileUnused, Path::new("F811_32.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_0.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_1.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_2.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_3.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_4.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_5.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_5.pyi"))]
#[test_case(Rule::UndefinedName, Path::new("F821_6.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_7.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_8.pyi"))]
#[test_case(Rule::UndefinedName, Path::new("F821_9.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_10.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_11.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_11.pyi"))]
#[test_case(Rule::UndefinedName, Path::new("F821_12.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_13.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_14.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_15.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_16.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_17.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_18.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_19.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_20.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_21.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_22.ipynb"))]
#[test_case(Rule::UndefinedName, Path::new("F821_23.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_24.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_25.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_26.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_26.pyi"))]
#[test_case(Rule::UndefinedName, Path::new("F821_27.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_28.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_30.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_31.py"))]
#[test_case(Rule::UndefinedName, Path::new("F821_32.pyi"))]
#[test_case(Rule::UndefinedName, Path::new("F821_33.py"))]
#[test_case(Rule::UndefinedExport, Path::new("F822_0.py"))]
#[test_case(Rule::UndefinedExport, Path::new("F822_0.pyi"))]
#[test_case(Rule::UndefinedExport, Path::new("F822_1.py"))]
#[test_case(Rule::UndefinedExport, Path::new("F822_1b.py"))]
#[test_case(Rule::UndefinedExport, Path::new("F822_2.py"))]
#[test_case(Rule::UndefinedExport, Path::new("F822_3.py"))]
#[test_case(Rule::UndefinedLocal, Path::new("F823.py"))]
#[test_case(Rule::UnusedVariable, Path::new("F841_0.py"))]
#[test_case(Rule::UnusedVariable, Path::new("F841_1.py"))]
#[test_case(Rule::UnusedVariable, Path::new("F841_2.py"))]
#[test_case(Rule::UnusedVariable, Path::new("F841_3.py"))]
#[test_case(Rule::UnusedAnnotation, Path::new("F842.py"))]
#[test_case(Rule::RaiseNotImplemented, Path::new("F901.py"))]
/// Run a single pyflakes rule against a fixture file and snapshot the
/// resulting diagnostics under `<code>_<filename>`.
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
    let fixture = Path::new("pyflakes").join(path);
    let settings = LinterSettings::for_rule(rule_code);
    let diagnostics = test_path(fixture.as_path(), &settings)?;
    let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
    assert_diagnostics!(snapshot, diagnostics);
    Ok(())
}
/// Like `rules`, but with `flake8-type-checking`'s
/// `runtime-required-base-classes` configured, so the rule under test sees
/// those base classes as runtime-required.
#[test_case(Rule::UndefinedName, Path::new("F821_29.py"))]
fn rules_with_flake8_type_checking_settings_enabled(
    rule_code: Rule,
    path: &Path,
) -> Result<()> {
    let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
    let diagnostics = test_path(
        Path::new("pyflakes").join(path).as_path(),
        &LinterSettings {
            flake8_type_checking: crate::rules::flake8_type_checking::settings::Settings {
                runtime_required_base_classes: vec![
                    "pydantic.BaseModel".to_string(),
                    "sqlalchemy.orm.DeclarativeBase".to_string(),
                ],
                ..Default::default()
            },
            ..LinterSettings::for_rule(rule_code)
        },
    )?;
    assert_diagnostics!(snapshot, diagnostics);
    Ok(())
}
/// F821 on a snippet referencing a builtin that only exists on a newer Python
/// than the configured target version (3.12 here).
#[test]
fn f821_with_builtin_added_on_new_py_version_but_old_target_version_specified() {
    let diagnostics = test_snippet(
        "PythonFinalizationError",
        &LinterSettings {
            unresolved_target_version: ruff_python_ast::PythonVersion::PY312.into(),
            ..LinterSettings::for_rule(Rule::UndefinedName)
        },
    );
    assert_diagnostics!(diagnostics);
}
/// Same shape as `rules`, but with preview mode enabled; snapshots are
/// prefixed with `preview__` to keep them distinct from the stable ones.
#[test_case(Rule::UnusedImport, Path::new("__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_24/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_25__all_nonempty/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_26__all_empty/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_27__all_mistyped/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_28__all_multiple/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_29__all_conditional/__init__.py"))]
#[test_case(Rule::UndefinedExport, Path::new("__init__.py"))]
fn preview_rules(rule_code: Rule, path: &Path) -> Result<()> {
    let snapshot = format!(
        "preview__{}_{}",
        rule_code.noqa_code(),
        path.to_string_lossy()
    );
    let diagnostics = test_path(
        Path::new("pyflakes").join(path).as_path(),
        &LinterSettings {
            preview: PreviewMode::Enabled,
            ..LinterSettings::for_rule(rule_code)
        },
    )?;
    assert_diagnostics!(snapshot, diagnostics);
    Ok(())
}
/// F401 (preview) where the unused import is a first-party submodule import;
/// the isort `known_modules` setting forces `submodule` to be classified as
/// first-party so the scenario is actually exercised.
#[test_case(
    r"import submodule.a",
    "f401_preview_first_party_submodule_no_dunder_all"
)]
#[test_case(
    r"
import submodule.a
__all__ = ['FOO']
FOO = 42",
    "f401_preview_first_party_submodule_dunder_all"
)]
fn f401_preview_first_party_submodule(contents: &str, snapshot: &str) {
    let diagnostics = test_contents(
        &SourceKind::Python(dedent(contents).to_string()),
        Path::new("f401_preview_first_party_submodule/__init__.py"),
        &LinterSettings {
            preview: PreviewMode::Enabled,
            isort: isort::settings::Settings {
                // This case specifically tests the scenario where
                // the unused import is a first-party submodule import;
                // use the isort settings to ensure that the `submodule.a` import
                // is recognised as first-party in the test:
                known_modules: isort::categorize::KnownModules::new(
                    vec!["submodule".parse().unwrap()],
                    vec![],
                    vec![],
                    vec![],
                    FxHashMap::default(),
                ),
                ..isort::settings::Settings::default()
            },
            ..LinterSettings::for_rule(Rule::UnusedImport)
        },
    )
    .0;
    assert_diagnostics!(snapshot, diagnostics);
}
// Regression test for https://github.com/astral-sh/ruff/issues/12897
#[test_case(Rule::UnusedImport, Path::new("F401_33/__init__.py"))]
fn f401_preview_local_init_import(rule_code: Rule, path: &Path) -> Result<()> {
    let snapshot = format!(
        "preview__{}_{}",
        rule_code.noqa_code(),
        path.to_string_lossy()
    );
    let settings = LinterSettings {
        preview: PreviewMode::Enabled,
        isort: isort::settings::Settings {
            // Like `f401_preview_first_party_submodule`, this test requires the input module to
            // be first-party
            known_modules: isort::categorize::KnownModules::new(
                vec!["F401_*".parse()?],
                vec![],
                vec![],
                vec![],
                FxHashMap::default(),
            ),
            ..isort::settings::Settings::default()
        },
        ..LinterSettings::for_rule(rule_code)
    };
    let diagnostics = test_path(Path::new("pyflakes").join(path).as_path(), &settings)?;
    assert_diagnostics!(snapshot, diagnostics);
    Ok(())
}
/// The stable (non-preview) counterpart of the `__init__.py` F401 preview
/// cases; snapshots are suffixed with `_stable_` to keep both sets.
#[test_case(Rule::UnusedImport, Path::new("F401_24/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_25__all_nonempty/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_26__all_empty/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_27__all_mistyped/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_28__all_multiple/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_29__all_conditional/__init__.py"))]
fn f401_stable(rule_code: Rule, path: &Path) -> Result<()> {
    let snapshot = format!(
        "{}_stable_{}",
        rule_code.noqa_code(),
        path.to_string_lossy()
    );
    let diagnostics = test_path(
        Path::new("pyflakes").join(path).as_path(),
        &LinterSettings::for_rule(rule_code),
    )?;
    assert_diagnostics!(snapshot, diagnostics);
    Ok(())
}
/// F401 with the deprecated `ignore_init_module_imports: false` option set
/// explicitly, to pin the behavior while the option still exists.
#[test_case(Rule::UnusedImport, Path::new("F401_24/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_25__all_nonempty/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_26__all_empty/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_27__all_mistyped/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_28__all_multiple/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_29__all_conditional/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_30.py"))]
fn f401_deprecated_option(rule_code: Rule, path: &Path) -> Result<()> {
    let snapshot = format!(
        "{}_deprecated_option_{}",
        rule_code.noqa_code(),
        path.to_string_lossy()
    );
    let diagnostics = test_path(
        Path::new("pyflakes").join(path).as_path(),
        &LinterSettings {
            ignore_init_module_imports: false,
            ..LinterSettings::for_rule(rule_code)
        },
    )?;
    assert_diagnostics!(snapshot, diagnostics);
    Ok(())
}
/// F401 with `allowed-unused-imports` set to a dotted submodule path.
#[test_case(Rule::UnusedImport, Path::new("F401_31.py"))]
fn f401_allowed_unused_imports_option(rule_code: Rule, path: &Path) -> Result<()> {
    let diagnostics = test_path(
        Path::new("pyflakes").join(path).as_path(),
        &LinterSettings {
            pyflakes: pyflakes::settings::Settings {
                allowed_unused_imports: vec!["hvplot.pandas".to_string()],
                ..pyflakes::settings::Settings::default()
            },
            ..LinterSettings::for_rule(rule_code)
        },
    )?;
    assert_diagnostics!(diagnostics);
    Ok(())
}
/// F401 with `allowed-unused-imports` set to a top-level module name only.
#[test_case(Rule::UnusedImport, Path::new("F401_35.py"))]
fn f401_allowed_unused_imports_top_level_module(rule_code: Rule, path: &Path) -> Result<()> {
    let diagnostics = test_path(
        Path::new("pyflakes").join(path).as_path(),
        &LinterSettings {
            pyflakes: pyflakes::settings::Settings {
                allowed_unused_imports: vec!["hvplot".to_string()],
                ..pyflakes::settings::Settings::default()
            },
            ..LinterSettings::for_rule(rule_code)
        },
    )?;
    assert_diagnostics!(diagnostics);
    Ok(())
}
#[test_case(Rule::UnusedImport, Path::new("F401_0.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_1.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_2.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_3.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_4.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_5.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_6.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_7.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_8.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_9.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_10.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_11.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_12.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_13.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_14.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_15.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_16.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_17.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_18.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_19.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_20.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_21.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_22.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_23.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_32.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_34.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_35.py"))]
/// Snapshot the *difference* in F401 diagnostics between stable settings and
/// preview settings for each fixture (see the `#[test_case]` list above).
fn f401_preview_refined_submodule_handling_diffs(rule_code: Rule, path: &Path) -> Result<()> {
    let snapshot = format!("preview_diff__{}", path.to_string_lossy());
    assert_diagnostics_diff!(
        snapshot,
        Path::new("pyflakes").join(path).as_path(),
        &LinterSettings::for_rule(rule_code),
        &LinterSettings {
            preview: PreviewMode::Enabled,
            ..LinterSettings::for_rule(rule_code)
        }
    );
    Ok(())
}
#[test_case(
r"
import a
import a.b
import a.c",
"f401_multiple_unused_submodules"
)]
#[test_case(
r"
import a
import a.b
a.foo()",
"f401_use_top_member"
)]
#[test_case(
r"
import a
import a.b
a.foo()
a.bar()",
"f401_use_top_member_twice"
)]
#[test_case(
r"
# reverts to stable behavior - used between imports
import a
a.foo()
import a.b",
"f401_use_top_member_before_second_import"
)]
#[test_case(
r"
# reverts to stable behavior - used between imports
import a
a.foo()
a = 1
import a.b",
"f401_use_top_member_and_redefine_before_second_import"
)]
#[test_case(
r"
# reverts to stable behavior - used between imports
import a
a.foo()
import a.b
a = 1",
"f401_use_top_member_then_import_then_redefine"
)]
#[test_case(
r#"
import a
import a.b
__all__ = ["a"]"#,
"f401_use_in_dunder_all"
)]
#[test_case(
r"
import a.c
import a.b
a.foo()",
"f401_import_submodules_but_use_top_level"
)]
#[test_case(
r"
import a.c
import a.b.d
a.foo()",
"f401_import_submodules_different_lengths_but_use_top_level"
)]
#[test_case(
r"
# refined logic only applied _within_ scope
import a
def foo():
import a.b
a.foo()",
"f401_import_submodules_in_function_scope"
)]
#[test_case(
r"
# reverts to stable behavior - used between bindings
import a
a.b
import a.b",
"f401_use_in_between_imports"
)]
#[test_case(
r"
# reverts to stable behavior - used between bindings
import a.b
a
import a",
"f401_use_in_between_imports"
)]
#[test_case(
r"
if cond:
import a
import a.b
a.foo()
",
"f401_same_branch"
)]
#[test_case(
r"
try:
import a.b.c
except ImportError:
import argparse
import a
a.b = argparse.Namespace()
",
"f401_different_branch"
)]
#[test_case(
r"
import mlflow.pyfunc.loaders.chat_agent
import mlflow.pyfunc.loaders.chat_model
import mlflow.pyfunc.loaders.code_model
from mlflow.utils.pydantic_utils import IS_PYDANTIC_V2_OR_NEWER
if IS_PYDANTIC_V2_OR_NEWER:
import mlflow.pyfunc.loaders.responses_agent
",
"f401_type_checking"
)]
/// Run each inline snippet (see the `#[test_case]` list above) through F401
/// with preview mode enabled and snapshot the diagnostics.
fn f401_preview_refined_submodule_handling(contents: &str, snapshot: &str) {
    let diagnostics = test_contents(
        &SourceKind::Python(dedent(contents).to_string()),
        Path::new("f401_preview_submodule.py"),
        &LinterSettings {
            preview: PreviewMode::Enabled,
            ..LinterSettings::for_rule(Rule::UnusedImport)
        },
    )
    .0;
    assert_diagnostics!(snapshot, diagnostics);
}
/// F841 with a custom `dummy-variable-rgx` (`^z$`), overriding the default
/// underscore convention.
#[test]
fn f841_dummy_variable_rgx() -> Result<()> {
    let diagnostics = test_path(
        Path::new("pyflakes/F841_0.py"),
        &LinterSettings {
            dummy_variable_rgx: Regex::new(r"^z$").unwrap(),
            ..LinterSettings::for_rule(Rule::UnusedVariable)
        },
    )?;
    assert_diagnostics!(diagnostics);
    Ok(())
}
/// F821/F822/F401 together on the `pyflakes/__init__.py` fixture.
#[test]
fn init() -> Result<()> {
    let diagnostics = test_path(
        Path::new("pyflakes/__init__.py"),
        &LinterSettings::for_rules(vec![
            Rule::UndefinedName,
            Rule::UndefinedExport,
            Rule::UnusedImport,
        ]),
    )?;
    assert_diagnostics!(diagnostics);
    Ok(())
}

/// F821 on `pyflakes/builtins.py` with the default builtins list.
#[test]
fn default_builtins() -> Result<()> {
    let diagnostics = test_path(
        Path::new("pyflakes/builtins.py"),
        &LinterSettings::for_rules(vec![Rule::UndefinedName]),
    )?;
    assert_diagnostics!(diagnostics);
    Ok(())
}

/// Same fixture, with `_` registered as an extra builtin.
#[test]
fn extra_builtins() -> Result<()> {
    let diagnostics = test_path(
        Path::new("pyflakes/builtins.py"),
        &LinterSettings {
            builtins: vec!["_".to_string()],
            ..LinterSettings::for_rules(vec![Rule::UndefinedName])
        },
    )?;
    assert_diagnostics!(diagnostics);
    Ok(())
}
/// F821 on `pyflakes/typing_modules.py` with default `typing-modules`.
#[test]
fn default_typing_modules() -> Result<()> {
    let diagnostics = test_path(
        Path::new("pyflakes/typing_modules.py"),
        &LinterSettings::for_rules(vec![Rule::UndefinedName]),
    )?;
    assert_diagnostics!(diagnostics);
    Ok(())
}

/// Same fixture, with `airflow.typing_compat` added to `typing-modules`.
#[test]
fn extra_typing_modules() -> Result<()> {
    let diagnostics = test_path(
        Path::new("pyflakes/typing_modules.py"),
        &LinterSettings {
            typing_modules: vec!["airflow.typing_compat".to_string()],
            ..LinterSettings::for_rules(vec![Rule::UndefinedName])
        },
    )?;
    assert_diagnostics!(diagnostics);
    Ok(())
}

/// F401 + F821 on a fixture using `from __future__ import annotations`.
#[test]
fn future_annotations() -> Result<()> {
    let diagnostics = test_path(
        Path::new("pyflakes/future_annotations.py"),
        &LinterSettings::for_rules(vec![Rule::UnusedImport, Rule::UndefinedName]),
    )?;
    assert_diagnostics!(diagnostics);
    Ok(())
}

/// F401 on a fixture whose lines contain multiple statements.
#[test]
fn multi_statement_lines() -> Result<()> {
    let diagnostics = test_path(
        Path::new("pyflakes/multi_statement_lines.py"),
        &LinterSettings::for_rule(Rule::UnusedImport),
    )?;
    assert_diagnostics!(diagnostics);
    Ok(())
}
/// F821 where `typing-modules` names a module relative to the project
/// (`foo.typical`), resolved from `pyflakes/project/foo/bar.py`.
#[test]
fn relative_typing_module() -> Result<()> {
    let diagnostics = test_path(
        Path::new("pyflakes/project/foo/bar.py"),
        &LinterSettings {
            typing_modules: vec!["foo.typical".to_string()],
            ..LinterSettings::for_rule(Rule::UndefinedName)
        },
    )?;
    assert_diagnostics!(diagnostics);
    Ok(())
}

/// Same as `relative_typing_module`, but from a more deeply nested file.
#[test]
fn nested_relative_typing_module() -> Result<()> {
    let diagnostics = test_path(
        Path::new("pyflakes/project/foo/bop/baz.py"),
        &LinterSettings {
            typing_modules: vec!["foo.typical".to_string()],
            ..LinterSettings::for_rule(Rule::UndefinedName)
        },
    )?;
    assert_diagnostics!(diagnostics);
    Ok(())
}
/// F401 with `extend-generics` configured so `django.db.models.ForeignKey`
/// is treated as a generic.
#[test]
fn extend_generics() -> Result<()> {
    // NOTE(review): the snapshot name `extend_immutable_calls` doesn't match this
    // test (`extend_generics`) — presumably a copy-paste leftover. Renaming it
    // would require moving the stored snapshot file, so it's flagged, not changed.
    let snapshot = "extend_immutable_calls".to_string();
    let diagnostics = test_path(
        Path::new("pyflakes/F401_15.py"),
        &LinterSettings {
            pyflakes: pyflakes::settings::Settings {
                extend_generics: vec!["django.db.models.ForeignKey".to_string()],
                ..pyflakes::settings::Settings::default()
            },
            ..LinterSettings::for_rule(Rule::UnusedImport)
        },
    )?;
    assert_diagnostics!(snapshot, diagnostics);
    Ok(())
}
#[test_case(
r"
import os
def f():
import os
# Despite this `del`, `import os` in `f` should still be flagged as shadowing an unused
# import.
del os
",
"del_shadowed_global_import_in_local_scope"
)]
#[test_case(
r"
import os
def f():
import os
# Despite this `del`, `import os` in `f` should still be flagged as shadowing an unused
# import. (This is a false negative, but is consistent with Pyflakes.)
del os
",
"del_shadowed_global_import_in_global_scope"
)]
#[test_case(
r"
def f():
import os
import os
# Despite this `del`, `import os` should still be flagged as shadowing an unused
# import.
del os
",
"del_shadowed_local_import_in_local_scope"
)]
#[test_case(
r"
import os
def f():
os = 1
print(os)
del os
def g():
# `import os` doesn't need to be flagged as shadowing an import.
os = 1
print(os)
",
"del_shadowed_import_shadow_in_local_scope"
)]
#[test_case(
r"
x = 1
def foo():
x = 2
del x
# Flake8 treats this as an F823 error, because it removes the binding
# entirely after the `del` statement. However, it should be an F821
# error, because the name is defined in the scope, but unbound.
x += 1
",
"augmented_assignment_after_del"
)]
#[test_case(
r"
def f():
x = 1
try:
1 / 0
except Exception as x:
pass
# No error here, though it should arguably be an F821 error. `x` will
# be unbound after the `except` block (assuming an exception is raised
# and caught).
print(x)
",
"print_in_body_after_shadowing_except"
)]
#[test_case(
r"
def f():
x = 1
try:
1 / 0
except ValueError as x:
pass
except ImportError as x:
pass
# No error here, though it should arguably be an F821 error. `x` will
# be unbound after the `except` block (assuming an exception is raised
# and caught).
print(x)
",
"print_in_body_after_double_shadowing_except"
)]
#[test_case(
r"
def f():
try:
x = 3
except ImportError as x:
print(x)
else:
print(x)
",
"print_in_try_else_after_shadowing_except"
)]
#[test_case(
r"
def f():
list = [1, 2, 3]
for e in list:
if e % 2 == 0:
try:
pass
except Exception as e:
print(e)
else:
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/format.rs | crates/ruff_linter/src/rules/pyflakes/format.rs | //! Implements helper functions for using vendored/format.rs
use ruff_python_ast::name::Name;
use ruff_python_literal::format::{
FieldName, FieldType, FormatParseError, FormatPart, FormatString, FromTemplate,
};
use std::convert::TryFrom;
/// Render a [`FormatParseError`] as a human-readable message.
pub(crate) fn error_to_string(err: &FormatParseError) -> String {
    // Unknown/unhandled variants fall through to a generic message.
    let message = match err {
        FormatParseError::EmptyAttribute => "Empty attribute in format string",
        FormatParseError::InvalidCharacterAfterRightBracket => {
            "Only '.' or '[' may follow ']' in format field specifier"
        }
        FormatParseError::PlaceholderRecursionExceeded => {
            "Max format placeholder recursion exceeded"
        }
        FormatParseError::MissingStartBracket => "Single '}' encountered in format string",
        FormatParseError::MissingRightBracket => "Expected '}' before end of string",
        FormatParseError::UnmatchedBracket => "Single '{' encountered in format string",
        _ => "Unexpected error parsing format string",
    };
    String::from(message)
}
/// A summary of the replacement fields found in a `str.format` template:
/// which automatic (`{}`), positional (`{0}`), and keyword (`{name}`)
/// placeholders it uses, and whether any format spec contains nested fields.
#[derive(Debug)]
pub(crate) struct FormatSummary {
    // Auto-numbered placeholders (`{}`), recorded as 0, 1, 2, ... in order.
    pub(crate) autos: Vec<usize>,
    // Explicit positional indices (`{0}`, `{2}`), in order of appearance.
    pub(crate) indices: Vec<usize>,
    // Keyword placeholders (`{name}`), in order of appearance.
    pub(crate) keywords: Vec<Name>,
    // Whether any format spec itself contains replacement fields (e.g. `{0:{width}}`).
    pub(crate) has_nested_parts: bool,
}
impl TryFrom<&str> for FormatSummary {
type Error = FormatParseError;
fn try_from(literal: &str) -> Result<Self, Self::Error> {
let format_string = FormatString::from_str(literal)?;
let mut autos = Vec::new();
let mut indices = Vec::new();
let mut keywords = Vec::new();
let mut has_nested_parts = false;
for format_part in &format_string.format_parts {
let FormatPart::Field {
field_name,
format_spec,
..
} = format_part
else {
continue;
};
let parsed = FieldName::parse(field_name)?;
match parsed.field_type {
FieldType::Auto => autos.push(autos.len()),
FieldType::Index(i) => indices.push(i),
FieldType::Keyword(k) => keywords.push(Name::from(k)),
}
let nested = FormatString::from_str(format_spec)?;
for nested_part in nested.format_parts {
let FormatPart::Field { field_name, .. } = nested_part else {
continue;
};
let parsed = FieldName::parse(&field_name)?;
match parsed.field_type {
FieldType::Auto => autos.push(autos.len()),
FieldType::Index(i) => indices.push(i),
FieldType::Keyword(k) => keywords.push(Name::from(k)),
}
has_nested_parts = true;
}
}
Ok(FormatSummary {
autos,
indices,
keywords,
has_nested_parts,
})
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Flat template: autos, indices, and keywords are collected in order.
    #[test]
    fn test_format_summary() {
        let literal = "foo{foo}a{}b{2}c{2}d{1}{}{}e{bar}{foo}f{spam}";

        let summary = FormatSummary::try_from(literal).unwrap();

        let expected_keywords: Vec<String> = ["foo", "bar", "foo", "spam"]
            .iter()
            .map(|s| (*s).to_string())
            .collect();
        assert_eq!(summary.autos, vec![0usize, 1, 2]);
        assert_eq!(summary.indices, vec![2usize, 2, 1]);
        assert_eq!(summary.keywords, expected_keywords);
        assert!(!summary.has_nested_parts);
    }

    /// Nested fields inside format specs are recorded and flagged.
    #[test]
    fn test_format_summary_nested() {
        let literal = "foo{foo}a{:{}{}}b{2:{3}{4}}c{2}d{1}{}e{bar:{spam}{eggs}}";

        let summary = FormatSummary::try_from(literal).unwrap();

        let expected_keywords: Vec<String> = ["foo", "bar", "spam", "eggs"]
            .iter()
            .map(|s| (*s).to_string())
            .collect();
        assert_eq!(summary.autos, vec![0usize, 1, 2, 3]);
        assert_eq!(summary.indices, vec![2usize, 3, 4, 2, 1]);
        assert_eq!(summary.keywords, expected_keywords);
        assert!(summary.has_nested_parts);
    }

    /// Templates that parse as format strings can still have invalid field
    /// names, which `FormatSummary` rejects.
    #[test]
    fn test_format_summary_invalid() {
        assert!(FormatSummary::try_from("{").is_err());

        let literal = "{foo}a{}b{bar..}";
        assert!(FormatString::from_str(literal).is_ok());
        assert!(FormatSummary::try_from(literal).is_err());

        let literal_nested = "{foo}a{}b{bar:{spam..}}";
        assert!(FormatString::from_str(literal_nested).is_ok());
        assert!(FormatSummary::try_from(literal_nested).is_err());
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/unused_annotation.rs | crates/ruff_linter/src/rules/pyflakes/rules/unused_annotation.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::Scope;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for local variables that are annotated but never used.
///
/// ## Why is this bad?
/// Annotations are used to provide type hints to static type checkers. If a
/// variable is annotated but never used, the annotation is unnecessary.
///
/// ## Example
/// ```python
/// def foo():
///     bar: int
/// ```
///
/// ## Options
///
/// This rule ignores dummy variables, as determined by:
///
/// - `lint.dummy-variable-rgx`
///
/// ## References
/// - [PEP 484 - Type Hints](https://peps.python.org/pep-0484/)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.172")]
pub(crate) struct UnusedAnnotation {
    // Name of the annotated-but-unused local variable (interpolated into the message).
    name: String,
}

impl Violation for UnusedAnnotation {
    #[derive_message_formats]
    fn message(&self) -> String {
        let UnusedAnnotation { name } = self;
        format!("Local variable `{name}` is annotated but never used")
    }
}
/// F842
pub(crate) fn unused_annotation(checker: &Checker, scope: &Scope) {
    for (name, binding_id) in scope.bindings() {
        let binding = checker.semantic().binding(binding_id);
        // Only flag bindings created by a bare annotation that are never read
        // and whose name doesn't match the configured dummy-variable pattern.
        if !binding.kind.is_annotation()
            || !binding.is_unused()
            || checker.settings().dummy_variable_rgx.is_match(name)
        {
            continue;
        }
        checker.report_diagnostic(
            UnusedAnnotation {
                name: name.to_string(),
            },
            binding.range(),
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/raise_not_implemented.rs | crates/ruff_linter/src/rules/pyflakes/rules/raise_not_implemented.rs | use ruff_python_ast::{self as ast, Expr};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for `raise` statements that raise `NotImplemented`.
///
/// ## Why is this bad?
/// `NotImplemented` is a special value returned by binary special methods to
/// indicate that an operation is not implemented with respect to a particular
/// type; it is not an exception.
///
/// `NotImplemented` should not be raised directly. Instead, raise
/// `NotImplementedError`, which is used to indicate that the method is
/// abstract or not implemented in the derived class.
///
/// ## Example
/// ```python
/// class Foo:
///     def bar(self):
///         raise NotImplemented
/// ```
///
/// Use instead:
/// ```python
/// class Foo:
///     def bar(self):
///         raise NotImplementedError
/// ```
///
/// ## References
/// - [Python documentation: `NotImplemented`](https://docs.python.org/3/library/constants.html#NotImplemented)
/// - [Python documentation: `NotImplementedError`](https://docs.python.org/3/library/exceptions.html#NotImplementedError)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.18")]
pub(crate) struct RaiseNotImplemented;

impl Violation for RaiseNotImplemented {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "`raise NotImplemented` should be `raise NotImplementedError`".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        Some("Use `raise NotImplementedError`".to_string())
    }
}
/// Return the `NotImplemented` name expression when `expr` is either a bare
/// reference to `NotImplemented` or a call whose callee is `NotImplemented`;
/// otherwise return `None`.
fn match_not_implemented(expr: &Expr) -> Option<&Expr> {
    match expr {
        // `raise NotImplemented(...)` — report the callee.
        Expr::Call(ast::ExprCall { func, .. }) => match func.as_ref() {
            Expr::Name(ast::ExprName { id, .. }) if id == "NotImplemented" => Some(func),
            _ => None,
        },
        // `raise NotImplemented` — report the name itself.
        Expr::Name(ast::ExprName { id, .. }) if id == "NotImplemented" => Some(expr),
        _ => None,
    }
}
/// F901
pub(crate) fn raise_not_implemented(checker: &Checker, expr: &Expr) {
    // Only fire when the raised expression is `NotImplemented`, bare or called.
    let Some(expr) = match_not_implemented(expr) else {
        return;
    };
    let mut diagnostic = checker.report_diagnostic(RaiseNotImplemented, expr.range());
    // The fix obtains a binding for the builtin `NotImplementedError`
    // (adding an import edit if needed) and substitutes it for `NotImplemented`.
    diagnostic.try_set_fix(|| {
        let (import_edit, binding) = checker.importer().get_or_import_builtin_symbol(
            "NotImplementedError",
            expr.start(),
            checker.semantic(),
        )?;
        Ok(Fix::safe_edits(
            Edit::range_replacement(binding, expr.range()),
            import_edit,
        ))
    });
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/imports.rs | crates/ruff_linter/src/rules/pyflakes/rules/imports.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::{BindingKind, Scope, ScopeId};
use ruff_source_file::SourceRow;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for import bindings that are shadowed by loop variables.
///
/// ## Why is this bad?
/// Shadowing an import with loop variables makes the code harder to read and
/// reason about, as the identity of the imported binding is no longer clear.
/// It's also often indicative of a mistake, as it's unlikely that the loop
/// variable is intended to be used as the imported binding.
///
/// Consider using a different name for the loop variable.
///
/// ## Example
/// ```python
/// from os import path
///
/// for path in files:
///     print(path)
/// ```
///
/// Use instead:
/// ```python
/// from os import path
///
///
/// for filename in files:
///     print(filename)
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.44")]
pub(crate) struct ImportShadowedByLoopVar {
    // Name of the shadowed import.
    pub(crate) name: String,
    // Source row of the original import statement, shown in the message.
    pub(crate) row: SourceRow,
}

impl Violation for ImportShadowedByLoopVar {
    #[derive_message_formats]
    fn message(&self) -> String {
        let ImportShadowedByLoopVar { name, row } = self;
        format!("Import `{name}` from {row} shadowed by loop variable")
    }
}
/// F402
pub(crate) fn import_shadowed_by_loop_var(checker: &Checker, scope_id: ScopeId, scope: &Scope) {
    for (name, binding_id) in scope.bindings() {
        for shadow in checker.semantic().shadowed_bindings(scope_id, binding_id) {
            // If the shadowing binding isn't a loop variable, abort.
            let binding = &checker.semantic().bindings[shadow.binding_id()];
            if !binding.kind.is_loop_var() {
                continue;
            }

            // If the shadowed binding isn't an import, abort.
            let shadowed = &checker.semantic().bindings[shadow.shadowed_id()];
            if !matches!(
                shadowed.kind,
                BindingKind::Import(..)
                    | BindingKind::FromImport(..)
                    | BindingKind::SubmoduleImport(..)
                    | BindingKind::FutureImport
            ) {
                continue;
            }

            // If the bindings are in different forks (mutually exclusive
            // branches), abort: both can't be live at once.
            if shadowed.source.is_none_or(|left| {
                binding
                    .source
                    .is_none_or(|right| !checker.semantic().same_branch(left, right))
            }) {
                continue;
            }

            checker.report_diagnostic(
                ImportShadowedByLoopVar {
                    name: name.to_string(),
                    row: checker.compute_source_row(shadowed.start()),
                },
                binding.range(),
            );
        }
    }
}
/// ## What it does
/// Checks for the use of wildcard imports.
///
/// ## Why is this bad?
/// Wildcard imports (e.g., `from module import *`) make it hard to determine
/// which symbols are available in the current namespace, and from which module
/// they were imported. They're also discouraged by [PEP 8].
///
/// ## Example
/// ```python
/// from math import *
///
///
/// def area(radius):
///     return pi * radius**2
/// ```
///
/// Use instead:
/// ```python
/// from math import pi
///
///
/// def area(radius):
///     return pi * radius**2
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#imports
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.18")]
pub(crate) struct UndefinedLocalWithImportStar {
    // Module named in the `from <name> import *` statement.
    pub(crate) name: String,
}

impl Violation for UndefinedLocalWithImportStar {
    #[derive_message_formats]
    fn message(&self) -> String {
        let UndefinedLocalWithImportStar { name } = self;
        format!("`from {name} import *` used; unable to detect undefined names")
    }
}
/// ## What it does
/// Checks for `__future__` imports that are not located at the beginning of a
/// file.
///
/// ## Why is this bad?
/// Imports from `__future__` must be placed at the beginning of the file,
/// before any other statements (apart from docstrings). The use of
/// `__future__` imports elsewhere is invalid and will result in a
/// `SyntaxError`.
///
/// ## Example
/// ```python
/// from pathlib import Path
///
/// from __future__ import annotations
/// ```
///
/// Use instead:
/// ```python
/// from __future__ import annotations
///
/// from pathlib import Path
/// ```
///
/// ## References
/// - [Python documentation: Future statements](https://docs.python.org/3/reference/simple_stmts.html#future)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.34")]
pub(crate) struct LateFutureImport;

impl Violation for LateFutureImport {
    #[derive_message_formats]
    fn message(&self) -> String {
        "`from __future__` imports must occur at the beginning of the file".to_string()
    }
}
/// ## What it does
/// Checks for names that might be undefined, but may also be defined in a
/// wildcard import.
///
/// ## Why is this bad?
/// Wildcard imports (e.g., `from module import *`) make it hard to determine
/// which symbols are available in the current namespace. If a module contains
/// a wildcard import, and a name in the current namespace has not been
/// explicitly defined or imported, then it's unclear whether the name is
/// undefined or was imported by the wildcard import.
///
/// If the name _is_ defined via a wildcard import, that member should be
/// imported explicitly to avoid confusion.
///
/// If the name is _not_ defined in a wildcard import, it should be defined or
/// imported.
///
/// ## Example
/// ```python
/// from math import *
///
///
/// def area(radius):
///     return pi * radius**2
/// ```
///
/// Use instead:
/// ```python
/// from math import pi
///
///
/// def area(radius):
///     return pi * radius**2
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.44")]
pub(crate) struct UndefinedLocalWithImportStarUsage {
    // The possibly-undefined name.
    pub(crate) name: String,
}

impl Violation for UndefinedLocalWithImportStarUsage {
    #[derive_message_formats]
    fn message(&self) -> String {
        let UndefinedLocalWithImportStarUsage { name } = self;
        format!("`{name}` may be undefined, or defined from star imports")
    }
}
/// ## What it does
/// Check for the use of wildcard imports outside of the module namespace.
///
/// ## Why is this bad?
/// The use of wildcard imports outside of the module namespace (e.g., within
/// functions) can lead to confusion, as the import can shadow local variables.
///
/// Though wildcard imports are discouraged by [PEP 8], when necessary, they
/// should be placed in the module namespace (i.e., at the top-level of a
/// module).
///
/// ## Example
///
/// ```python
/// def foo():
///     from math import *
/// ```
///
/// Use instead:
///
/// ```python
/// from math import *
///
///
/// def foo(): ...
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#imports
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.37")]
pub(crate) struct UndefinedLocalWithNestedImportStarUsage {
    // Module named in the nested `from <name> import *` statement.
    pub(crate) name: String,
}

impl Violation for UndefinedLocalWithNestedImportStarUsage {
    #[derive_message_formats]
    fn message(&self) -> String {
        let UndefinedLocalWithNestedImportStarUsage { name } = self;
        format!("`from {name} import *` only allowed at module level")
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/undefined_local.rs | crates/ruff_linter/src/rules/pyflakes/rules/undefined_local.rs | use std::string::ToString;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::{Scope, ScopeId};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for undefined local variables.
///
/// ## Why is this bad?
/// Referencing a local variable before it has been assigned will raise
/// an `UnboundLocalError` at runtime.
///
/// ## Example
/// ```python
/// x = 1
///
///
/// def foo():
///     x += 1
/// ```
///
/// Use instead:
/// ```python
/// x = 1
///
///
/// def foo():
///     global x
///     x += 1
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.24")]
pub(crate) struct UndefinedLocal {
    // Name of the variable referenced before assignment.
    name: String,
}

impl Violation for UndefinedLocal {
    #[derive_message_formats]
    fn message(&self) -> String {
        let UndefinedLocal { name } = self;
        format!("Local variable `{name}` referenced before assignment")
    }
}
/// F823
pub(crate) fn undefined_local(checker: &Checker, scope_id: ScopeId, scope: &Scope) {
    // Only function scopes have locals that can shadow outer bindings.
    if !scope.kind.is_function() {
        return;
    }
    for (name, binding_id) in scope.bindings() {
        // The local must shadow a binding from a parent scope...
        let Some(shadowed_id) = checker.semantic().shadowed_binding(binding_id) else {
            continue;
        };
        let shadowed = checker.semantic().binding(shadowed_id);
        // ...and that outer binding must have been read from *this* scope,
        // which means the read resolved to the outer name while the local was
        // still unassigned — probably an error.
        let use_in_scope = shadowed.references().find_map(|reference_id| {
            let reference = checker.semantic().reference(reference_id);
            (reference.scope_id() == scope_id).then(|| reference.range())
        });
        if let Some(range) = use_in_scope {
            checker.report_diagnostic(
                UndefinedLocal {
                    name: name.to_string(),
                },
                range,
            );
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/assert_tuple.rs | crates/ruff_linter/src/rules/pyflakes/rules/assert_tuple.rs | use ruff_python_ast::{Expr, Stmt};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for `assert` statements that use non-empty tuples as test
/// conditions.
///
/// ## Why is this bad?
/// Non-empty tuples are always `True`, so an `assert` statement with a
/// non-empty tuple as its test condition will always pass. This is likely a
/// mistake.
///
/// ## Example
/// ```python
/// assert (some_condition,)
/// ```
///
/// Use instead:
/// ```python
/// assert some_condition
/// ```
///
/// ## References
/// - [Python documentation: The `assert` statement](https://docs.python.org/3/reference/simple_stmts.html#the-assert-statement)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.28")]
pub(crate) struct AssertTuple;

impl Violation for AssertTuple {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Assert test is a non-empty tuple, which is always `True`".to_string()
    }
}
/// F631
pub(crate) fn assert_tuple(checker: &Checker, stmt: &Stmt, test: &Expr) {
    // Only a *non-empty* tuple is unconditionally truthy; `assert ()` fails.
    let Expr::Tuple(tuple) = test else {
        return;
    };
    if !tuple.is_empty() {
        checker.report_diagnostic(AssertTuple, stmt.range());
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/default_except_not_last.rs | crates/ruff_linter/src/rules/pyflakes/rules/default_except_not_last.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::identifier::except;
use ruff_python_ast::{self as ast, ExceptHandler};
use crate::Locator;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for `except` blocks that handle all exceptions, but are not the last
/// `except` block in a `try` statement.
///
/// ## Why is this bad?
/// When an exception is raised within a `try` block, the `except` blocks are
/// evaluated in order, and the first matching block is executed. If an `except`
/// block handles all exceptions, but isn't the last block, Python will raise a
/// `SyntaxError`, as the following blocks would never be executed.
///
/// ## Example
/// ```python
/// def reciprocal(n):
///     try:
///         reciprocal = 1 / n
///     except:
///         print("An exception occurred.")
///     except ZeroDivisionError:
///         print("Cannot divide by zero.")
///     else:
///         return reciprocal
/// ```
///
/// Use instead:
/// ```python
/// def reciprocal(n):
///     try:
///         reciprocal = 1 / n
///     except ZeroDivisionError:
///         print("Cannot divide by zero.")
///     except:
///         print("An exception occurred.")
///     else:
///         return reciprocal
/// ```
///
/// ## References
/// - [Python documentation: `except` clause](https://docs.python.org/3/reference/compound_stmts.html#except-clause)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.28")]
pub(crate) struct DefaultExceptNotLast;

impl Violation for DefaultExceptNotLast {
    #[derive_message_formats]
    fn message(&self) -> String {
        "An `except` block as not the last exception handler".to_string()
    }
}
/// F707
pub(crate) fn default_except_not_last(
    checker: &Checker,
    handlers: &[ExceptHandler],
    locator: &Locator,
) {
    // A bare `except:` is only legal as the final handler, so flag any
    // type-less handler that appears before the last one.
    let Some((_last, leading)) = handlers.split_last() else {
        return;
    };
    for handler in leading {
        let ExceptHandler::ExceptHandler(ast::ExceptHandlerExceptHandler { type_, .. }) = handler;
        if type_.is_none() {
            checker.report_diagnostic(DefaultExceptNotLast, except(handler, locator.contents()));
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/redefined_while_unused.rs | crates/ruff_linter/src/rules/pyflakes/rules/redefined_while_unused.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::analyze::visibility;
use ruff_python_semantic::{BindingKind, Imported, Scope, ScopeId};
use ruff_source_file::SourceRow;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix::edits;
use crate::{Fix, FixAvailability, Violation};
use rustc_hash::FxHashMap;
/// ## What it does
/// Checks for variable definitions that redefine (or "shadow") unused
/// variables.
///
/// ## Why is this bad?
/// Redefinitions of unused names are unnecessary and often indicative of a
/// mistake.
///
/// ## Example
/// ```python
/// import foo
/// import bar
/// import foo  # Redefinition of unused `foo` from line 1
/// ```
///
/// Use instead:
/// ```python
/// import foo
/// import bar
/// ```
///
/// ## Options
///
/// This rule ignores dummy variables, as determined by:
///
/// - `lint.dummy-variable-rgx`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.171")]
pub(crate) struct RedefinedWhileUnused {
    // Name that was redefined.
    pub name: String,
    // Source row of the earlier (shadowed) definition.
    pub row: SourceRow,
}

impl Violation for RedefinedWhileUnused {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let RedefinedWhileUnused { name, row } = self;
        format!("Redefinition of unused `{name}` from {row}")
    }

    fn fix_title(&self) -> Option<String> {
        let RedefinedWhileUnused { name, .. } = self;
        Some(format!("Remove definition: `{name}`"))
    }
}
/// F811
pub(crate) fn redefined_while_unused(checker: &Checker, scope_id: ScopeId, scope: &Scope) {
    // Index the redefined bindings by statement.
    let mut redefinitions = FxHashMap::default();

    for (name, binding_id) in scope.bindings() {
        for shadow in checker.semantic().shadowed_bindings(scope_id, binding_id) {
            // If the shadowing binding is a loop variable, abort, to avoid overlap
            // with F402.
            let binding = &checker.semantic().bindings[shadow.binding_id()];
            if binding.kind.is_loop_var() {
                continue;
            }

            // If the shadowed binding is used, abort.
            let shadowed = &checker.semantic().bindings[shadow.shadowed_id()];
            if shadowed.is_used() {
                continue;
            }

            // If the shadowing binding isn't considered a "redefinition" of the
            // shadowed binding, abort.
            if !binding.redefines(shadowed) {
                continue;
            }

            if shadow.same_scope() {
                // If the symbol is a dummy variable, abort, unless the shadowed
                // binding is an import.
                if !matches!(
                    shadowed.kind,
                    BindingKind::Import(..)
                        | BindingKind::FromImport(..)
                        | BindingKind::SubmoduleImport(..)
                        | BindingKind::FutureImport
                ) && checker.settings().dummy_variable_rgx.is_match(name)
                {
                    continue;
                }

                let Some(node_id) = shadowed.source else {
                    continue;
                };

                // If this is an overloaded function, abort.
                if shadowed.kind.is_function_definition() {
                    if checker
                        .semantic()
                        .statement(node_id)
                        .as_function_def_stmt()
                        .is_some_and(|function| {
                            visibility::is_overload(&function.decorator_list, checker.semantic())
                        })
                    {
                        continue;
                    }
                }
            } else {
                // Only enforce cross-scope shadowing for imports.
                if !matches!(
                    shadowed.kind,
                    BindingKind::Import(..)
                        | BindingKind::FromImport(..)
                        | BindingKind::SubmoduleImport(..)
                        | BindingKind::FutureImport
                ) {
                    continue;
                }
            }

            // If the bindings are in different forks, abort.
            if shadowed.source.is_none_or(|left| {
                binding
                    .source
                    .is_none_or(|right| !checker.semantic().same_branch(left, right))
            }) {
                continue;
            }

            // Group by the statement that created the shadowing binding so a
            // single fix can be built per statement below.
            redefinitions
                .entry(binding.source)
                .or_insert_with(Vec::new)
                .push((shadowed, binding));
        }
    }

    // Create a fix for each source statement.
    let mut fixes = FxHashMap::default();
    for (source, entries) in &redefinitions {
        let Some(source) = source else {
            continue;
        };
        // A fix is only offered when the redefinition imports the same
        // qualified name as the shadowed binding; that repeated member can
        // then be removed from the import statement.
        let member_names = entries
            .iter()
            .filter_map(|(shadowed, binding)| {
                if let Some(shadowed_import) = shadowed.as_any_import() {
                    if let Some(import) = binding.as_any_import() {
                        if shadowed_import.qualified_name() == import.qualified_name() {
                            return Some(import.member_name());
                        }
                    }
                }
                None
            })
            .collect::<Vec<_>>();

        if !member_names.is_empty() {
            let statement = checker.semantic().statement(*source);
            let parent = checker.semantic().parent_statement(*source);
            let Ok(edit) = edits::remove_unused_imports(
                member_names.iter().map(std::convert::AsRef::as_ref),
                statement,
                parent,
                checker.locator(),
                checker.stylist(),
                checker.indexer(),
            ) else {
                continue;
            };
            fixes.insert(
                *source,
                Fix::safe_edit(edit).isolate(Checker::isolation(
                    checker.semantic().parent_statement_id(*source),
                )),
            );
        }
    }

    // Create diagnostics for each statement.
    for (source, entries) in &redefinitions {
        for (shadowed, binding) in entries {
            let name = binding.name(checker.source());
            let mut diagnostic = checker.report_diagnostic(
                RedefinedWhileUnused {
                    name: name.to_string(),
                    row: checker.compute_source_row(shadowed.start()),
                },
                binding.range(),
            );
            diagnostic.add_primary_tag(ruff_db::diagnostic::DiagnosticTag::Unnecessary);
            diagnostic.secondary_annotation(
                format_args!("previous definition of `{name}` here"),
                shadowed,
            );
            diagnostic.set_primary_message(format_args!("`{name}` redefined here"));
            if let Some(range) = binding.parent_range(checker.semantic()) {
                diagnostic.set_parent(range.start());
            }
            if let Some(fix) = source.as_ref().and_then(|source| fixes.get(source)) {
                diagnostic.set_fix(fix.clone());
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/return_outside_function.rs | crates/ruff_linter/src/rules/pyflakes/rules/return_outside_function.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::Violation;
/// ## What it does
/// Checks for `return` statements outside of functions.
///
/// ## Why is this bad?
/// The use of a `return` statement outside of a function will raise a
/// `SyntaxError`.
///
/// ## Example
/// ```python
/// class Foo:
///     return 1
/// ```
///
/// ## References
/// - [Python documentation: `return`](https://docs.python.org/3/reference/simple_stmts.html#the-return-statement)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.18")]
pub(crate) struct ReturnOutsideFunction;

impl Violation for ReturnOutsideFunction {
    #[derive_message_formats]
    fn message(&self) -> String {
        "`return` statement outside of a function/method".to_string()
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/repeated_keys.rs | crates/ruff_linter/src/rules/pyflakes/rules/repeated_keys.rs | use rustc_hash::{FxBuildHasher, FxHashMap, FxHashSet};
use std::collections::hash_map::Entry;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::comparable::{ComparableExpr, HashableExpr};
use ruff_python_ast::token::parenthesized_range;
use ruff_python_ast::{self as ast, Expr};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix::snippet::SourceCodeSnippet;
use crate::registry::Rule;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for dictionary literals that associate multiple values with the
/// same key.
///
/// ## Why is this bad?
/// Dictionary keys should be unique. If a key is associated with multiple values,
/// the earlier values will be overwritten. Including multiple values for the
/// same key in a dictionary literal is likely a mistake.
///
/// ## Example
/// ```python
/// foo = {
///     "bar": 1,
///     "baz": 2,
///     "baz": 3,
/// }
/// foo["baz"]  # 3
/// ```
///
/// Use instead:
/// ```python
/// foo = {
///     "bar": 1,
///     "baz": 2,
/// }
/// foo["baz"]  # 2
/// ```
///
/// ## Fix safety
///
/// This rule's fix is marked as unsafe because removing a repeated dictionary key
/// may delete comments that are attached to the removed key-value pair. This can also change
/// the program's behavior if the value expressions have side effects.
///
/// ## References
/// - [Python documentation: Dictionaries](https://docs.python.org/3/tutorial/datastructures.html#dictionaries)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.30")]
pub(crate) struct MultiValueRepeatedKeyLiteral {
    // Source snippet of the repeated key.
    name: SourceCodeSnippet,
    // Source snippet of the earlier key it collides with (may differ
    // textually while hashing equal, e.g. `1` and `True`).
    existing: SourceCodeSnippet,
}

impl Violation for MultiValueRepeatedKeyLiteral {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        match (self.name.full_display(), self.existing.full_display()) {
            (Some(name), None) => {
                format!("Dictionary key literal `{name}` repeated")
            }
            (Some(name), Some(existing)) => {
                if name == existing {
                    format!("Dictionary key literal `{name}` repeated")
                } else {
                    format!(
                        "Dictionary key literal `{name}` repeated (`{name}` hashes to the same value as `{existing}`)"
                    )
                }
            }
            _ => "Dictionary key literal repeated".to_string(),
        }
    }

    fn fix_title(&self) -> Option<String> {
        let title = match self.name.full_display() {
            Some(name) => format!("Remove repeated key literal `{name}`"),
            None => "Remove repeated key literal".to_string(),
        };
        Some(title)
    }
}
/// ## What it does
/// Checks for dictionary keys that are repeated with different values.
///
/// ## Why is this bad?
/// Dictionary keys should be unique. If a key is repeated with a different
/// value, the first values will be overwritten and the key will correspond to
/// the last value. This is likely a mistake.
///
/// ## Example
/// ```python
/// foo = {
///     bar: 1,
///     baz: 2,
///     baz: 3,
/// }
/// foo[baz]  # 3
/// ```
///
/// Use instead:
/// ```python
/// foo = {
///     bar: 1,
///     baz: 2,
/// }
/// foo[baz]  # 2
/// ```
///
/// ## Fix safety
///
/// This rule's fix is marked as unsafe because removing a repeated dictionary key
/// may delete comments that are attached to the removed key-value pair. This can also change
/// the program's behavior if the value expressions have side effects.
///
/// ## References
/// - [Python documentation: Dictionaries](https://docs.python.org/3/tutorial/datastructures.html#dictionaries)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.30")]
pub(crate) struct MultiValueRepeatedKeyVariable {
    // Source snippet of the repeated variable key.
    name: SourceCodeSnippet,
}

impl Violation for MultiValueRepeatedKeyVariable {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        if let Some(name) = self.name.full_display() {
            format!("Dictionary key `{name}` repeated")
        } else {
            "Dictionary key repeated".to_string()
        }
    }

    fn fix_title(&self) -> Option<String> {
        let title = match self.name.full_display() {
            Some(name) => format!("Remove repeated key `{name}`"),
            None => "Remove repeated key".to_string(),
        };
        Some(title)
    }
}
/// F601, F602
pub(crate) fn repeated_keys(checker: &Checker, dict: &ast::ExprDict) {
    // Map each key to the first `Expr` it appeared as, plus the set of values
    // seen for that key so far (used to decide whether a fix is offered).
    let mut seen: FxHashMap<HashableExpr, (&Expr, FxHashSet<ComparableExpr>)> =
        FxHashMap::with_capacity_and_hasher(dict.len(), FxBuildHasher);

    // Detect duplicate keys.
    for (i, ast::DictItem { key, value }) in dict.iter().enumerate() {
        // `key` is `None` for `**` unpackings, which can't collide here.
        let Some(key) = key else {
            continue;
        };
        let comparable_key = HashableExpr::from(key);
        let comparable_value = ComparableExpr::from(value);

        match seen.entry(comparable_key) {
            Entry::Vacant(entry) => {
                entry.insert((key, FxHashSet::from_iter([comparable_value])));
            }
            Entry::Occupied(mut entry) => {
                let (seen_key, seen_values) = entry.get_mut();
                match key {
                    // Literal keys -> F601.
                    Expr::StringLiteral(_)
                    | Expr::BytesLiteral(_)
                    | Expr::NumberLiteral(_)
                    | Expr::BooleanLiteral(_)
                    | Expr::NoneLiteral(_)
                    | Expr::EllipsisLiteral(_)
                    | Expr::Tuple(_)
                    | Expr::FString(_) => {
                        if checker.is_rule_enabled(Rule::MultiValueRepeatedKeyLiteral) {
                            let mut diagnostic = checker.report_diagnostic(
                                MultiValueRepeatedKeyLiteral {
                                    name: SourceCodeSnippet::from_str(checker.locator().slice(key)),
                                    existing: SourceCodeSnippet::from_str(
                                        checker.locator().slice(*seen_key),
                                    ),
                                },
                                key.range(),
                            );
                            // Only offer a fix when the *value* repeats too; the
                            // edit deletes the i-th pair by removing everything
                            // from the end of the previous value to the end of
                            // this one.
                            if !seen_values.insert(comparable_value) {
                                diagnostic.set_fix(Fix::unsafe_edit(Edit::deletion(
                                    parenthesized_range(
                                        dict.value(i - 1).into(),
                                        dict.into(),
                                        checker.tokens(),
                                    )
                                    .unwrap_or_else(|| dict.value(i - 1).range())
                                    .end(),
                                    parenthesized_range(
                                        dict.value(i).into(),
                                        dict.into(),
                                        checker.tokens(),
                                    )
                                    .unwrap_or_else(|| dict.value(i).range())
                                    .end(),
                                )));
                            }
                        }
                    }
                    // Variable keys -> F602.
                    Expr::Name(_) => {
                        if checker.is_rule_enabled(Rule::MultiValueRepeatedKeyVariable) {
                            let mut diagnostic = checker.report_diagnostic(
                                MultiValueRepeatedKeyVariable {
                                    name: SourceCodeSnippet::from_str(checker.locator().slice(key)),
                                },
                                key.range(),
                            );
                            let comparable_value: ComparableExpr = dict.value(i).into();
                            if !seen_values.insert(comparable_value) {
                                diagnostic.set_fix(Fix::unsafe_edit(Edit::deletion(
                                    parenthesized_range(
                                        dict.value(i - 1).into(),
                                        dict.into(),
                                        checker.tokens(),
                                    )
                                    .unwrap_or_else(|| dict.value(i - 1).range())
                                    .end(),
                                    parenthesized_range(
                                        dict.value(i).into(),
                                        dict.into(),
                                        checker.tokens(),
                                    )
                                    .unwrap_or_else(|| dict.value(i).range())
                                    .end(),
                                )));
                            }
                        }
                    }
                    _ => {}
                }
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/if_tuple.rs | crates/ruff_linter/src/rules/pyflakes/rules/if_tuple.rs | use ruff_python_ast::{Expr, StmtIf};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::stmt_if::if_elif_branches;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for `if` statements that use non-empty tuples as test conditions.
///
/// ## Why is this bad?
/// Non-empty tuples are always `True`, so an `if` statement with a non-empty
/// tuple as its test condition will always pass. This is likely a mistake.
///
/// ## Example
/// ```python
/// if (False,):
/// print("This will always run")
/// ```
///
/// Use instead:
/// ```python
/// if False:
/// print("This will never run")
/// ```
///
/// ## References
/// - [Python documentation: The `if` statement](https://docs.python.org/3/reference/compound_stmts.html#the-if-statement)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.18")]
pub(crate) struct IfTuple;

impl Violation for IfTuple {
    #[derive_message_formats]
    fn message(&self) -> String {
        // User-facing diagnostic text; no dynamic parts needed.
        "If test is a tuple, which is always `True`".to_string()
    }
}
/// F634: flag `if`/`elif` branches whose test is a non-empty tuple literal,
/// since such a test is always truthy.
pub(crate) fn if_tuple(checker: &Checker, stmt_if: &StmtIf) {
    for branch in if_elif_branches(stmt_if) {
        if let Expr::Tuple(tuple) = &branch.test {
            // An empty tuple is falsy, so only non-empty tuples are suspicious.
            if !tuple.is_empty() {
                checker.report_diagnostic(IfTuple, branch.test.range());
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/f_string_missing_placeholders.rs | crates/ruff_linter/src/rules/pyflakes/rules/f_string_missing_placeholders.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::Locator;
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for f-strings that do not contain any placeholder expressions.
///
/// ## Why is this bad?
/// f-strings are a convenient way to format strings, but they are not
/// necessary if there are no placeholder expressions to format. In this
/// case, a regular string should be used instead, as an f-string without
/// placeholders can be confusing for readers, who may expect such a
/// placeholder to be present.
///
/// An f-string without any placeholders could also indicate that the
/// author forgot to add a placeholder expression.
///
/// ## Example
/// ```python
/// f"Hello, world!"
/// ```
///
/// Use instead:
/// ```python
/// "Hello, world!"
/// ```
///
/// **Note:** to maintain compatibility with PyFlakes, this rule only flags
/// f-strings that are part of an implicit concatenation if _none_ of the
/// f-string segments contain placeholder expressions.
///
/// For example:
///
/// ```python
/// # Will not be flagged.
/// (
/// f"Hello,"
/// f" {name}!"
/// )
///
/// # Will be flagged.
/// (
/// f"Hello,"
/// f" World!"
/// )
/// ```
///
/// See [#10885](https://github.com/astral-sh/ruff/issues/10885) for more.
///
/// ## References
/// - [PEP 498 β Literal String Interpolation](https://peps.python.org/pep-0498/)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.18")]
pub(crate) struct FStringMissingPlaceholders;

impl AlwaysFixableViolation for FStringMissingPlaceholders {
    #[derive_message_formats]
    fn message(&self) -> String {
        // User-facing diagnostic text for F541.
        "f-string without any placeholders".to_string()
    }

    fn fix_title(&self) -> String {
        // The fix drops the `f` prefix, turning the f-string into a plain string.
        "Remove extraneous `f` prefix".to_string()
    }
}
/// F541: report f-strings that contain no interpolations, with a fix that
/// removes the `f` prefix.
pub(crate) fn f_string_missing_placeholders(checker: &Checker, expr: &ast::ExprFString) {
    // To match PyFlakes, skip the whole (possibly implicitly-concatenated)
    // expression if any of its f-string segments has an interpolation.
    let has_placeholders = expr.value.f_strings().any(|f_string| {
        f_string
            .elements
            .iter()
            .any(ast::InterpolatedStringElement::is_interpolation)
    });
    if has_placeholders {
        return;
    }

    for f_string in expr.value.f_strings() {
        // The `f`/`F` is either the first character of the prefix
        // (`f"..."`, `fr"..."`) or the second (`rf"..."`).
        let first_char = checker
            .locator()
            .slice(TextRange::at(f_string.start(), TextSize::new(1)));
        let offset: u32 = if matches!(first_char, "f" | "F") { 0 } else { 1 };
        let prefix_range = TextRange::at(
            f_string.start() + TextSize::new(offset),
            TextSize::new(1),
        );

        let mut diagnostic =
            checker.report_diagnostic(FStringMissingPlaceholders, f_string.range());
        diagnostic.set_fix(convert_f_string_to_regular_string(
            prefix_range,
            f_string.range(),
            checker.locator(),
        ));
    }
}
/// Unescape an f-string body by collapsing doubled curly braces.
///
/// Inside an f-string, literal `{` and `}` must be written as `{{` and `}}`.
/// When rewriting an f-string to a regular string, those escapes must be
/// undone, e.g. `{{Hello, world!}}` becomes `{Hello, world!}`.
fn unescape_f_string(content: &str) -> String {
    let opened = content.replace("{{", "{");
    opened.replace("}}", "}")
}
/// Generate a [`Fix`] that rewrites an f-string as a regular string by
/// deleting the `f` prefix and un-escaping doubled curly braces.
fn convert_f_string_to_regular_string(
    prefix_range: TextRange,
    node_range: TextRange,
    locator: &Locator,
) -> Fix {
    // Everything after the prefix is the string body (quotes included).
    let body = locator.slice(TextRange::new(prefix_range.end(), node_range.end()));
    let mut content = unescape_f_string(body);

    // Avoid fusing quote runs: removing the `f` in `""f""` must produce
    // `"" ""`, not `""""`. If the character just before the prefix equals the
    // new first character, separate them with a space.
    let preceding = locator
        .slice(TextRange::up_to(prefix_range.start()))
        .chars()
        .last();
    if let Some(ch) = preceding {
        if content.starts_with(ch) {
            content.insert(0, ' ');
        }
    }

    Fix::safe_edit(Edit::replacement(
        content,
        prefix_range.start(),
        node_range.end(),
    ))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/forward_annotation_syntax_error.rs | crates/ruff_linter/src/rules/pyflakes/rules/forward_annotation_syntax_error.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::Violation;
/// ## What it does
/// Checks for forward annotations that include invalid syntax.
///
///
/// ## Why is this bad?
/// In Python, type annotations can be quoted as strings literals to enable
/// references to types that have not yet been defined, known as "forward
/// references".
///
/// However, these quoted annotations must be valid Python expressions. The use
/// of invalid syntax in a quoted annotation won't raise a `SyntaxError`, but
/// will instead raise an error when type checking is performed.
///
/// ## Example
///
/// ```python
/// def foo() -> "/": ...
/// ```
///
/// ## References
/// - [PEP 563 β Postponed Evaluation of Annotations](https://peps.python.org/pep-0563/)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.39")]
pub(crate) struct ForwardAnnotationSyntaxError {
    // The parser's error message for the invalid quoted annotation.
    pub parse_error: String,
}

impl Violation for ForwardAnnotationSyntaxError {
    #[derive_message_formats]
    fn message(&self) -> String {
        let ForwardAnnotationSyntaxError { parse_error } = self;
        format!("Syntax error in forward annotation: {parse_error}")
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/undefined_name.rs | crates/ruff_linter/src/rules/pyflakes/rules/undefined_name.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::Violation;
/// ## What it does
/// Checks for uses of undefined names.
///
/// ## Why is this bad?
/// An undefined name is likely to raise `NameError` at runtime.
///
/// ## Example
/// ```python
/// def double():
/// return n * 2 # raises `NameError` if `n` is undefined when `double` is called
/// ```
///
/// Use instead:
/// ```python
/// def double(n):
/// return n * 2
/// ```
///
/// ## Options
/// - [`target-version`]: Can be used to configure which symbols Ruff will understand
/// as being available in the `builtins` namespace.
///
/// ## References
/// - [Python documentation: Naming and binding](https://docs.python.org/3/reference/executionmodel.html#naming-and-binding)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.20")]
pub(crate) struct UndefinedName {
    // The undefined symbol as written in the source.
    pub(crate) name: String,
    // If the name is a builtin introduced in a later Python 3.x release, the
    // minor version in which it was added; used to suggest bumping
    // `target-version`/`requires-python`.
    pub(crate) minor_version_builtin_added: Option<u8>,
}

impl Violation for UndefinedName {
    #[derive_message_formats]
    fn message(&self) -> String {
        let UndefinedName {
            name,
            minor_version_builtin_added,
        } = self;
        // When the name exists as a builtin in newer Pythons, append a tip
        // pointing at the relevant configuration knobs.
        let tip = minor_version_builtin_added.map(|version_added| {
            format!(
                r#"Consider specifying `requires-python = ">= 3.{version_added}"` or `tool.ruff.target-version = "py3{version_added}"` in your `pyproject.toml` file."#
            )
        });
        if let Some(tip) = tip {
            format!("Undefined name `{name}`. {tip}")
        } else {
            format!("Undefined name `{name}`")
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/yield_outside_function.rs | crates/ruff_linter/src/rules/pyflakes/rules/yield_outside_function.rs | use std::fmt;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_parser::semantic_errors::YieldOutsideFunctionKind;
use crate::Violation;
/// The deferral construct that appeared outside a function body.
#[derive(Debug, PartialEq, Eq)]
pub(crate) enum DeferralKeyword {
    Yield,
    YieldFrom,
    Await,
}

impl fmt::Display for DeferralKeyword {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Render the keyword exactly as it appears in Python source.
        let keyword = match self {
            DeferralKeyword::Yield => "yield",
            DeferralKeyword::YieldFrom => "yield from",
            DeferralKeyword::Await => "await",
        };
        fmt.write_str(keyword)
    }
}
impl From<YieldOutsideFunctionKind> for DeferralKeyword {
    /// Map the parser's semantic-error kind onto the displayable keyword.
    fn from(value: YieldOutsideFunctionKind) -> Self {
        match value {
            YieldOutsideFunctionKind::Yield => Self::Yield,
            YieldOutsideFunctionKind::YieldFrom => Self::YieldFrom,
            YieldOutsideFunctionKind::Await => Self::Await,
        }
    }
}
/// ## What it does
/// Checks for `yield`, `yield from`, and `await` usages outside of functions.
///
/// ## Why is this bad?
/// The use of `yield`, `yield from`, or `await` outside of a function will
/// raise a `SyntaxError`.
///
/// ## Example
/// ```python
/// class Foo:
/// yield 1
/// ```
///
/// ## Notebook behavior
/// As an exception, `await` is allowed at the top level of a Jupyter notebook
/// (see: [autoawait]).
///
/// ## References
/// - [Python documentation: `yield`](https://docs.python.org/3/reference/simple_stmts.html#the-yield-statement)
///
/// [autoawait]: https://ipython.readthedocs.io/en/stable/interactive/autoawait.html
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.22")]
pub(crate) struct YieldOutsideFunction {
    // Which construct (`yield`, `yield from`, or `await`) was found outside a
    // function body.
    keyword: DeferralKeyword,
}

impl YieldOutsideFunction {
    /// Build the violation from anything convertible into a
    /// [`DeferralKeyword`] (e.g. the parser's `YieldOutsideFunctionKind`).
    pub(crate) fn new(keyword: impl Into<DeferralKeyword>) -> Self {
        Self {
            keyword: keyword.into(),
        }
    }
}

impl Violation for YieldOutsideFunction {
    #[derive_message_formats]
    fn message(&self) -> String {
        let YieldOutsideFunction { keyword } = self;
        format!("`{keyword}` statement outside of a function")
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/break_outside_loop.rs | crates/ruff_linter/src/rules/pyflakes/rules/break_outside_loop.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::Violation;
/// ## What it does
/// Checks for `break` statements outside of loops.
///
/// ## Why is this bad?
/// The use of a `break` statement outside of a `for` or `while` loop will
/// raise a `SyntaxError`.
///
/// ## Example
/// ```python
/// def foo():
/// break
/// ```
///
/// ## References
/// - [Python documentation: `break`](https://docs.python.org/3/reference/simple_stmts.html#the-break-statement)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.36")]
pub(crate) struct BreakOutsideLoop;

impl Violation for BreakOutsideLoop {
    #[derive_message_formats]
    fn message(&self) -> String {
        // User-facing diagnostic text; no dynamic parts needed.
        "`break` outside loop".to_string()
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/undefined_export.rs | crates/ruff_linter/src/rules/pyflakes/rules/undefined_export.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::Violation;
/// ## What it does
/// Checks for undefined names in `__all__`.
///
/// ## Why is this bad?
/// In Python, the `__all__` variable is used to define the names that are
/// exported when a module is imported as a wildcard (e.g.,
/// `from foo import *`). The names in `__all__` must be defined in the module,
/// but are included as strings.
///
/// Including an undefined name in `__all__` is likely to raise `NameError` at
/// runtime, when the module is imported.
///
/// In [preview], this rule will flag undefined names in `__init__.py` file,
/// even if those names implicitly refer to other modules in the package. Users
/// that rely on implicit exports should disable this rule in `__init__.py`
/// files via [`lint.per-file-ignores`].
///
/// ## Example
/// ```python
/// from foo import bar
///
///
/// __all__ = ["bar", "baz"] # undefined name `baz` in `__all__`
/// ```
///
/// Use instead:
/// ```python
/// from foo import bar, baz
///
///
/// __all__ = ["bar", "baz"]
/// ```
///
/// ## References
/// - [Python documentation: `__all__`](https://docs.python.org/3/tutorial/modules.html#importing-from-a-package)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.25")]
pub(crate) struct UndefinedExport {
    // The name listed in `__all__` that is not defined in the module.
    pub name: String,
}

impl Violation for UndefinedExport {
    #[derive_message_formats]
    fn message(&self) -> String {
        let UndefinedExport { name } = self;
        format!("Undefined name `{name}` in `__all__`")
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/starred_expressions.rs | crates/ruff_linter/src/rules/pyflakes/rules/starred_expressions.rs | use ruff_python_ast::Expr;
use ruff_text_size::TextRange;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::{Violation, checkers::ast::Checker};
/// ## What it does
/// Checks for the use of too many expressions in starred assignment statements.
///
/// ## Why is this bad?
/// In assignment statements, starred expressions can be used to unpack iterables.
///
/// In Python 3, no more than `1 << 8` assignments are allowed before a starred
/// expression, and no more than `1 << 24` expressions are allowed after a starred
/// expression.
///
/// ## References
/// - [PEP 3132 β Extended Iterable Unpacking](https://peps.python.org/pep-3132/)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.32")]
pub(crate) struct ExpressionsInStarAssignment;

impl Violation for ExpressionsInStarAssignment {
    #[derive_message_formats]
    fn message(&self) -> String {
        // User-facing diagnostic text; no dynamic parts needed.
        "Too many expressions in star-unpacking assignment".to_string()
    }
}
/// ## What it does
/// Checks for the use of multiple starred expressions in assignment statements.
///
/// ## Why is this bad?
/// In assignment statements, starred expressions can be used to unpack iterables.
/// Including more than one starred expression on the left-hand-side of an
/// assignment will cause a `SyntaxError`, as it is unclear which expression
/// should receive the remaining values.
///
/// ## Example
/// ```python
/// *foo, *bar, baz = (1, 2, 3)
/// ```
///
/// ## References
/// - [PEP 3132 β Extended Iterable Unpacking](https://peps.python.org/pep-3132/)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.32")]
pub(crate) struct MultipleStarredExpressions;

impl Violation for MultipleStarredExpressions {
    #[derive_message_formats]
    fn message(&self) -> String {
        // User-facing diagnostic text; no dynamic parts needed.
        "Two starred expressions in assignment".to_string()
    }
}
/// F621
pub(crate) fn starred_expressions(
checker: &Checker,
elts: &[Expr],
check_too_many_expressions: bool,
location: TextRange,
) {
let starred_index: Option<usize> = None;
if check_too_many_expressions {
if let Some(starred_index) = starred_index {
if starred_index >= 1 << 8 || elts.len() - starred_index > 1 << 24 {
checker.report_diagnostic(ExpressionsInStarAssignment, location);
}
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/mod.rs | crates/ruff_linter/src/rules/pyflakes/rules/mod.rs | pub(crate) use assert_tuple::*;
pub(crate) use break_outside_loop::*;
pub(crate) use continue_outside_loop::*;
pub(crate) use default_except_not_last::*;
pub(crate) use f_string_missing_placeholders::*;
pub(crate) use forward_annotation_syntax_error::*;
pub(crate) use future_feature_not_defined::*;
pub(crate) use if_tuple::*;
pub(crate) use imports::*;
pub(crate) use invalid_literal_comparisons::*;
pub(crate) use invalid_print_syntax::*;
pub(crate) use raise_not_implemented::*;
pub(crate) use redefined_while_unused::*;
pub(crate) use repeated_keys::*;
pub(crate) use return_outside_function::*;
pub(crate) use starred_expressions::*;
pub(crate) use strings::*;
pub(crate) use undefined_export::*;
pub(crate) use undefined_local::*;
pub(crate) use undefined_name::*;
pub(crate) use unused_annotation::*;
pub(crate) use unused_import::*;
pub(crate) use unused_variable::*;
pub(crate) use yield_outside_function::*;
mod assert_tuple;
mod break_outside_loop;
mod continue_outside_loop;
mod default_except_not_last;
mod f_string_missing_placeholders;
mod forward_annotation_syntax_error;
mod future_feature_not_defined;
mod if_tuple;
mod imports;
mod invalid_literal_comparisons;
mod invalid_print_syntax;
mod raise_not_implemented;
mod redefined_while_unused;
mod repeated_keys;
mod return_outside_function;
mod starred_expressions;
mod strings;
mod undefined_export;
mod undefined_local;
mod undefined_name;
mod unused_annotation;
mod unused_import;
mod unused_variable;
mod yield_outside_function;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/invalid_print_syntax.rs | crates/ruff_linter/src/rules/pyflakes/rules/invalid_print_syntax.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::Expr;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for `print` statements that use the `>>` syntax.
///
/// ## Why is this bad?
/// In Python 2, the `print` statement can be used with the `>>` syntax to
/// print to a file-like object. This `print >> sys.stderr` syntax no
/// longer exists in Python 3, where `print` is only a function, not a
/// statement.
///
/// Instead, use the `file` keyword argument to the `print` function, the
/// `sys.stderr.write` function, or the `logging` module.
///
/// ## Example
/// ```python
/// from __future__ import print_function
/// import sys
///
/// print >> sys.stderr, "Hello, world!"
/// ```
///
/// Use instead:
/// ```python
/// print("Hello, world!", file=sys.stderr)
/// ```
///
/// Or:
/// ```python
/// import sys
///
/// sys.stderr.write("Hello, world!\n")
/// ```
///
/// Or:
/// ```python
/// import logging
///
/// logging.error("Hello, world!")
/// ```
///
/// ## References
/// - [Python documentation: `print`](https://docs.python.org/3/library/functions.html#print)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.39")]
pub(crate) struct InvalidPrintSyntax;

impl Violation for InvalidPrintSyntax {
    #[derive_message_formats]
    fn message(&self) -> String {
        // User-facing diagnostic text; no dynamic parts needed.
        "Use of `>>` is invalid with `print` function".to_string()
    }
}
/// F633: the caller hands us the left operand of a `>>` expression; flag it
/// when that operand is the `print` builtin (Python 2 `print >> f` syntax).
pub(crate) fn invalid_print_syntax(checker: &Checker, left: &Expr) {
    if !checker.semantic().match_builtin_expr(left, "print") {
        return;
    }
    checker.report_diagnostic(InvalidPrintSyntax, left.range());
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/unused_import.rs | crates/ruff_linter/src/rules/pyflakes/rules/unused_import.rs | use std::borrow::Cow;
use std::iter;
use anyhow::{Result, anyhow, bail};
use std::collections::BTreeMap;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::name::{QualifiedName, QualifiedNameBuilder};
use ruff_python_ast::{self as ast, Stmt};
use ruff_python_semantic::{
AnyImport, Binding, BindingFlags, BindingId, BindingKind, Exceptions, Imported, NodeId, Scope,
ScopeId, SemanticModel, SubmoduleImport,
};
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::fix;
use crate::preview::{
is_dunder_init_fix_unused_import_enabled, is_refined_submodule_import_match_enabled,
};
use crate::registry::Rule;
use crate::rules::{isort, isort::ImportSection, isort::ImportType};
use crate::settings::LinterSettings;
use crate::{Applicability, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for unused imports.
///
/// ## Why is this bad?
/// Unused imports add a performance overhead at runtime, and risk creating
/// import cycles. They also increase the cognitive load of reading the code.
///
/// If an import statement is used to check for the availability or existence
/// of a module, consider using `importlib.util.find_spec` instead.
///
/// If an import statement is used to re-export a symbol as part of a module's
/// public interface, consider using a "redundant" import alias, which
/// instructs Ruff (and other tools) to respect the re-export, and avoid
/// marking it as unused, as in:
///
/// ```python
/// from module import member as member
/// ```
///
/// Alternatively, you can use `__all__` to declare a symbol as part of the module's
/// interface, as in:
///
/// ```python
/// # __init__.py
/// import some_module
///
/// __all__ = ["some_module"]
/// ```
///
/// ## Preview
/// When [preview] is enabled (and certain simplifying assumptions
/// are met), we analyze all import statements for a given module
/// when determining whether an import is used, rather than simply
/// the last of these statements. This can result in both different and
/// more import statements being marked as unused.
///
/// For example, if a module consists of
///
/// ```python
/// import a
/// import a.b
/// ```
///
/// then both statements are marked as unused under [preview], whereas
/// only the second is marked as unused under stable behavior.
///
/// As another example, if a module consists of
///
/// ```python
/// import a.b
/// import a
///
/// a.b.foo()
/// ```
///
/// then a diagnostic will only be emitted for the first line under [preview],
/// whereas a diagnostic would only be emitted for the second line under
/// stable behavior.
///
/// Note that this behavior is somewhat subjective and is designed
/// to conform to the developer's intuition rather than Python's actual
/// execution. To wit, the statement `import a.b` automatically executes
/// `import a`, so in some sense `import a` is _always_ redundant
/// in the presence of `import a.b`.
///
///
/// ## Fix safety
///
/// Fixes to remove unused imports are safe, except in `__init__.py` files.
///
/// Applying fixes to `__init__.py` files is currently in preview. The fix offered depends on the
/// type of the unused import. Ruff will suggest a safe fix to export first-party imports with
/// either a redundant alias or, if already present in the file, an `__all__` entry. If multiple
/// `__all__` declarations are present, Ruff will not offer a fix. Ruff will suggest an unsafe fix
/// to remove third-party and standard library imports -- the fix is unsafe because the module's
/// interface changes.
///
/// See [this FAQ section](https://docs.astral.sh/ruff/faq/#how-does-ruff-determine-which-of-my-imports-are-first-party-third-party-etc)
/// for more details on how Ruff
/// determines whether an import is first or third-party.
///
/// ## Example
///
/// ```python
/// import numpy as np # unused import
///
///
/// def area(radius):
/// return 3.14 * radius**2
/// ```
///
/// Use instead:
///
/// ```python
/// def area(radius):
/// return 3.14 * radius**2
/// ```
///
/// To check the availability of a module, use `importlib.util.find_spec`:
///
/// ```python
/// from importlib.util import find_spec
///
/// if find_spec("numpy") is not None:
/// print("numpy is installed")
/// else:
/// print("numpy is not installed")
/// ```
///
/// ## Options
/// - `lint.ignore-init-module-imports`
/// - `lint.pyflakes.allowed-unused-imports`
///
/// ## References
/// - [Python documentation: `import`](https://docs.python.org/3/reference/simple_stmts.html#the-import-statement)
/// - [Python documentation: `importlib.util.find_spec`](https://docs.python.org/3/library/importlib.html#importlib.util.find_spec)
/// - [Typing documentation: interface conventions](https://typing.python.org/en/latest/spec/distributing.html#library-interface-public-and-private-symbols)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.18")]
pub(crate) struct UnusedImport {
    /// Qualified name of the import
    name: String,
    /// Unqualified name of the import
    module: String,
    /// Name of the import binding
    binding: String,
    /// Where the unused import occurred (except handler, first-party in
    /// `__init__.py`, or elsewhere); drives both the message and the fix.
    context: UnusedImportContext,
    /// Whether the statement bound multiple unused names (affects fix title).
    multiple: bool,
    /// Whether `__init__.py` imports are exempt from removal (mirrors the
    /// `ignore-init-module-imports` setting).
    ignore_init_module_imports: bool,
}
impl Violation for UnusedImport {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let UnusedImport { name, context, .. } = self;
        match context {
            // Imports guarded by `except ImportError`/`ModuleNotFoundError`
            // are often availability probes; suggest `find_spec` instead.
            UnusedImportContext::ExceptHandler => {
                format!(
                    "`{name}` imported but unused; consider using `importlib.util.find_spec` to test for availability"
                )
            }
            // First-party imports in `__init__.py` may be intentional
            // re-exports, so offer re-export alternatives to removal.
            UnusedImportContext::DunderInitFirstParty { .. } => {
                format!(
                    "`{name}` imported but unused; consider removing, adding to `__all__`, or using a redundant alias"
                )
            }
            UnusedImportContext::Other => format!("`{name}` imported but unused"),
        }
    }

    fn fix_title(&self) -> Option<String> {
        let UnusedImport {
            name,
            module,
            binding,
            multiple,
            ignore_init_module_imports,
            context,
        } = self;
        // When `__init__.py` imports are exempt from removal, first-party
        // imports there are fixed by re-exporting instead; the wording depends
        // on how many `__all__` declarations exist and whether the binding is
        // a submodule import (`import a.b`).
        if *ignore_init_module_imports {
            match context {
                // No `__all__`: suggest a redundant alias (`x as x`).
                UnusedImportContext::DunderInitFirstParty {
                    dunder_all_count: DunderAllCount::Zero,
                    submodule_import: false,
                } => return Some(format!("Use an explicit re-export: `{module} as {module}`")),
                UnusedImportContext::DunderInitFirstParty {
                    dunder_all_count: DunderAllCount::Zero,
                    submodule_import: true,
                } => {
                    // For `import a.b`, the re-export targets the top-level
                    // package `a`.
                    return Some(format!(
                        "Use an explicit re-export: `import {parent} as {parent}; import {binding}`",
                        parent = binding
                            .split('.')
                            .next()
                            .expect("Expected all submodule imports to contain a '.'")
                    ));
                }
                // Exactly one `__all__`: suggest appending to it.
                UnusedImportContext::DunderInitFirstParty {
                    dunder_all_count: DunderAllCount::One,
                    submodule_import: false,
                } => return Some(format!("Add unused import `{binding}` to __all__")),
                UnusedImportContext::DunderInitFirstParty {
                    dunder_all_count: DunderAllCount::One,
                    submodule_import: true,
                } => {
                    return Some(format!(
                        "Add `{}` to __all__",
                        binding
                            .split('.')
                            .next()
                            .expect("Expected all submodule imports to contain a '.'")
                    ));
                }
                // Multiple `__all__` declarations are ambiguous; fall through
                // to the plain removal title (as do the remaining contexts).
                UnusedImportContext::DunderInitFirstParty {
                    dunder_all_count: DunderAllCount::Many,
                    submodule_import: _,
                }
                | UnusedImportContext::ExceptHandler
                | UnusedImportContext::Other => {}
            }
        }
        Some(if *multiple {
            "Remove unused import".to_string()
        } else {
            format!("Remove unused import: `{name}`")
        })
    }
}
/// Enumeration providing three possible answers to the question:
/// "How many `__all__` definitions are there in this file?"
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum DunderAllCount {
    Zero,
    One,
    Many,
}

impl From<usize> for DunderAllCount {
    /// Collapse a raw count into the zero/one/many buckets.
    fn from(value: usize) -> Self {
        if value == 0 {
            Self::Zero
        } else if value == 1 {
            Self::One
        } else {
            Self::Many
        }
    }
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, is_macro::Is)]
enum UnusedImportContext {
    /// The unused import occurs inside an except handler
    ExceptHandler,
    /// The unused import is a first-party import in an `__init__.py` file
    DunderInitFirstParty {
        /// Number of top-level `__all__` declarations in the file.
        dunder_all_count: DunderAllCount,
        /// Whether the binding comes from a submodule import (`import a.b`).
        submodule_import: bool,
    },
    /// The unused import is something else
    Other,
}
/// Returns `true` if the import falls into the first-party (or local-folder)
/// isort section under the project's settings.
fn is_first_party(import: &AnyImport, checker: &Checker) -> bool {
    let settings = checker.settings();
    let source_name = import.source_name().join(".");
    // Delegate the actual classification to isort's categorizer so that the
    // answer is consistent with import-sorting behavior.
    let category = isort::categorize(
        &source_name,
        import.qualified_name().is_unresolved_import(),
        &settings.src,
        checker.package(),
        settings.isort.detect_same_package,
        &settings.isort.known_modules,
        checker.target_version(),
        settings.isort.no_sections,
        &settings.isort.section_order,
        &settings.isort.default_section,
    );
    match category {
        ImportSection::Known(ImportType::FirstParty | ImportType::LocalFolder) => true,
        _ => false,
    }
}
/// Find the `Expr` for top-level `__all__` bindings.
fn find_dunder_all_exprs<'a>(semantic: &'a SemanticModel) -> Vec<&'a ast::Expr> {
    let mut exprs = Vec::new();
    for binding_id in semantic.global_scope().get_all("__all__") {
        let binding = semantic.binding(binding_id);
        // Only export-style bindings of `__all__` are of interest.
        if !matches!(binding.kind, BindingKind::Export(_)) {
            continue;
        }
        let Some(stmt) = binding.statement(semantic) else {
            continue;
        };
        // Pull the assigned value out of plain, annotated, or augmented
        // assignments; any other statement form contributes nothing.
        let value = match stmt {
            Stmt::Assign(ast::StmtAssign { value, .. }) => Some(&**value),
            Stmt::AnnAssign(ast::StmtAnnAssign { value, .. }) => value.as_deref(),
            Stmt::AugAssign(ast::StmtAugAssign { value, .. }) => Some(&**value),
            _ => None,
        };
        if let Some(value) = value {
            exprs.push(value);
        }
    }
    exprs
}
/// F401
/// For some unused binding in an import statement...
///
/// __init__.py β§ 1stpty β safe, if one __all__, add to __all__
/// safe, if no __all__, convert to redundant-alias
/// n/a, if multiple __all__, offer no fix
/// __init__.py β§ stdlib β unsafe, remove
/// __init__.py β§ 3rdpty β unsafe, remove
///
/// Β¬__init__.py β§ 1stpty β safe, remove
/// Β¬__init__.py β§ stdlib β safe, remove
/// Β¬__init__.py β§ 3rdpty β safe, remove
///
pub(crate) fn unused_import(checker: &Checker, scope: &Scope) {
    // Collect all unused imports by statement. Grouping by `(statement, exceptions)`
    // lets all bindings from one import statement share a single fix.
    let mut unused: BTreeMap<(NodeId, Exceptions), Vec<ImportBinding>> = BTreeMap::default();
    let mut ignored: BTreeMap<(NodeId, Exceptions), Vec<ImportBinding>> = BTreeMap::default();

    for binding in unused_imports_in_scope(checker.semantic(), scope, checker.settings()) {
        let Some(import) = binding.as_any_import() else {
            continue;
        };
        let Some(node_id) = binding.source else {
            continue;
        };

        let name = binding.name(checker.source());

        // If an import is marked as required, avoid treating it as unused, regardless of whether
        // it was _actually_ used.
        if checker
            .settings()
            .isort
            .required_imports
            .iter()
            .any(|required_import| required_import.matches(name, &import))
        {
            continue;
        }

        // If an import was marked as allowed, avoid treating it as unused.
        if checker
            .settings()
            .pyflakes
            .allowed_unused_imports
            .iter()
            .any(|allowed_unused_import| {
                let allowed_unused_import = QualifiedName::user_defined(allowed_unused_import);
                import.qualified_name().starts_with(&allowed_unused_import)
            })
        {
            continue;
        }

        let import = ImportBinding {
            name,
            import,
            range: binding.range(),
            parent_range: binding.parent_range(checker.semantic()),
            scope: binding.scope,
        };

        // Bucket by suppression status: suppressed imports still get diagnostics
        // (so their suppression comments aren't flagged as unused) but never a fix.
        if checker.rule_is_ignored(Rule::UnusedImport, import.start())
            || import.parent_range.is_some_and(|parent_range| {
                checker.rule_is_ignored(Rule::UnusedImport, parent_range.start())
            })
        {
            ignored
                .entry((node_id, binding.exceptions))
                .or_default()
                .push(import);
        } else {
            unused
                .entry((node_id, binding.exceptions))
                .or_default()
                .push(import);
        }
    }

    // Module-level context consulted when deciding which fixes to offer below.
    let in_init = checker.in_init_module();
    let fix_init = !checker.settings().ignore_init_module_imports;
    let preview_mode = is_dunder_init_fix_unused_import_enabled(checker.settings());
    let dunder_all_exprs = find_dunder_all_exprs(checker.semantic());

    // Generate a diagnostic for every import, but share fixes across all imports within the same
    // statement (excluding those that are ignored).
    for ((import_statement, exceptions), bindings) in unused {
        // Imports guarded by `except ImportError`/`ModuleNotFoundError` are a
        // deliberate pattern; they get diagnostics but never a fix.
        let in_except_handler =
            exceptions.intersects(Exceptions::MODULE_NOT_FOUND_ERROR | Exceptions::IMPORT_ERROR);
        let multiple = bindings.len() > 1;

        // pair each binding with context; divide them by how we want to fix them
        let (to_reexport, to_remove): (Vec<_>, Vec<_>) = bindings
            .into_iter()
            .map(|binding| {
                let context = if in_except_handler {
                    UnusedImportContext::ExceptHandler
                } else if in_init
                    && binding.scope.is_global()
                    && is_first_party(&binding.import, checker)
                {
                    UnusedImportContext::DunderInitFirstParty {
                        dunder_all_count: DunderAllCount::from(dunder_all_exprs.len()),
                        submodule_import: binding.import.is_submodule_import(),
                    }
                } else {
                    UnusedImportContext::Other
                };
                (binding, context)
            })
            // Only first-party `__init__.py` imports (under preview) are re-exported;
            // everything else is removed.
            .partition(|(_, context)| context.is_dunder_init_first_party() && preview_mode);

        // generate fixes that are shared across bindings in the statement
        let (fix_remove, fix_reexport) =
            if (!in_init || fix_init || preview_mode) && !in_except_handler {
                (
                    fix_by_removing_imports(
                        checker,
                        import_statement,
                        to_remove.iter().map(|(binding, _)| binding),
                        in_init,
                    )
                    .ok(),
                    fix_by_reexporting(
                        checker,
                        import_statement,
                        to_reexport.iter().map(|(b, _)| b),
                        &dunder_all_exprs,
                    )
                    .ok(),
                )
            } else {
                (None, None)
            };

        // Each binding in the statement carries a clone of its group's shared fix.
        for ((binding, context), fix) in iter::Iterator::chain(
            iter::zip(to_remove, iter::repeat(fix_remove)),
            iter::zip(to_reexport, iter::repeat(fix_reexport)),
        ) {
            let mut diagnostic = checker.report_diagnostic(
                UnusedImport {
                    name: binding.import.qualified_name().to_string(),
                    module: binding.import.member_name().to_string(),
                    binding: binding.name.to_string(),
                    context,
                    multiple,
                    ignore_init_module_imports: !fix_init,
                },
                binding.range,
            );
            if let Some(range) = binding.parent_range {
                diagnostic.set_parent(range.start());
            }
            if !in_except_handler {
                if let Some(fix) = fix.as_ref() {
                    diagnostic.set_fix(fix.clone());
                }
            }
            diagnostic.add_primary_tag(ruff_db::diagnostic::DiagnosticTag::Unnecessary);
        }
    }

    // Separately, generate a diagnostic for every _ignored_ import, to ensure that the
    // suppression comments aren't marked as unused.
    for binding in ignored.into_values().flatten() {
        let mut diagnostic = checker.report_diagnostic(
            UnusedImport {
                name: binding.import.qualified_name().to_string(),
                module: binding.import.member_name().to_string(),
                binding: binding.name.to_string(),
                context: UnusedImportContext::Other,
                multiple: false,
                ignore_init_module_imports: !fix_init,
            },
            binding.range,
        );
        if let Some(range) = binding.parent_range {
            diagnostic.set_parent(range.start());
        }
        diagnostic.add_primary_tag(ruff_db::diagnostic::DiagnosticTag::Unnecessary);
    }
}
/// An unused import with its surrounding context.
#[derive(Debug)]
struct ImportBinding<'a> {
    /// Name of the binding, which for renamed imports will differ from the qualified name.
    name: &'a str,
    /// The import itself, which carries its qualified name (e.g., `typing.List` for
    /// `from typing import List`).
    import: AnyImport<'a, 'a>,
    /// The trimmed range of the import (e.g., `List` in `from typing import List`).
    range: TextRange,
    /// The range of the import's parent statement.
    parent_range: Option<TextRange>,
    /// The [`ScopeId`] of the scope in which the [`ImportBinding`] was defined.
    scope: ScopeId,
}
impl ImportBinding<'_> {
    /// The symbol that is stored in the outer scope as a result of this import.
    ///
    /// For example:
    /// - `import foo` => `foo` symbol stored in outer scope
    /// - `import foo as bar` => `bar` symbol stored in outer scope
    /// - `from foo import bar` => `bar` symbol stored in outer scope
    /// - `from foo import bar as baz` => `baz` symbol stored in outer scope
    /// - `import foo.bar` => `foo` symbol stored in outer scope
    fn symbol_stored_in_outer_scope(&self) -> &str {
        match &self.import {
            // For plain and `from` imports, the binding name already names the outer symbol.
            AnyImport::FromImport(_) => self.name,
            AnyImport::Import(_) => self.name,
            // For `import foo.bar`, only the root segment (`foo`) is bound in the outer scope.
            AnyImport::SubmoduleImport(SubmoduleImport { qualified_name }) => {
                qualified_name.segments().first().unwrap_or_else(|| {
                    panic!(
                        "Expected an import binding to have a non-empty qualified name;
                        got {qualified_name}"
                    )
                })
            }
        }
    }
}
impl Ranged for ImportBinding<'_> {
    /// The trimmed range of the import binding itself (not its parent statement).
    fn range(&self) -> TextRange {
        self.range
    }
}
/// Generate a [`Fix`] to remove unused imports from a statement.
///
/// Fails (via `bail!`) when `imports` is empty, or when the underlying edit
/// helper cannot produce a removal edit.
fn fix_by_removing_imports<'a>(
    checker: &Checker,
    node_id: NodeId,
    imports: impl Iterator<Item = &'a ImportBinding<'a>>,
    in_init: bool,
) -> Result<Fix> {
    let statement = checker.semantic().statement(node_id);
    let parent = checker.semantic().parent_statement(node_id);

    // The member names to strip from the statement.
    let member_names: Vec<Cow<'_, str>> = imports
        .map(|ImportBinding { import, .. }| import)
        .map(Imported::member_name)
        .collect();
    if member_names.is_empty() {
        bail!("Expected import bindings");
    }

    let edit = fix::edits::remove_unused_imports(
        member_names.iter().map(AsRef::as_ref),
        statement,
        parent,
        checker.locator(),
        checker.stylist(),
        checker.indexer(),
    )?;

    // It's unsafe to remove things from `__init__.py` because it can break public interfaces.
    let applicability = if in_init {
        Applicability::Unsafe
    } else {
        Applicability::Safe
    };

    // Isolate the edit to the parent statement so overlapping fixes don't conflict.
    Ok(
        Fix::applicable_edit(edit, applicability).isolate(Checker::isolation(
            checker.semantic().parent_statement_id(node_id),
        )),
    )
}
/// Generate a [`Fix`] to make bindings in a statement explicit, either by adding them to `__all__`
/// or by changing them from `import a` to `import a as a`.
///
/// Fails (via `bail!`) when `imports` is empty, when multiple `__all__`
/// definitions exist, or when no edits result.
fn fix_by_reexporting<'a>(
    checker: &Checker,
    node_id: NodeId,
    imports: impl IntoIterator<Item = &'a ImportBinding<'a>>,
    dunder_all_exprs: &[&ast::Expr],
) -> Result<Fix> {
    let statement = checker.semantic().statement(node_id);

    // Collect the outer-scope symbol names and sort them so the fix is deterministic.
    let imports = {
        let mut imports: Vec<&str> = imports
            .into_iter()
            .map(ImportBinding::symbol_stored_in_outer_scope)
            .collect();
        if imports.is_empty() {
            bail!("Expected import bindings");
        }
        imports.sort_unstable();
        imports
    };

    let edits = match dunder_all_exprs {
        // No `__all__`: make each import a redundant alias (`import a as a`).
        [] => fix::edits::make_redundant_alias(imports.into_iter(), statement),
        // Exactly one `__all__`: append the symbols to it.
        [dunder_all] => {
            fix::edits::add_to_dunder_all(imports.into_iter(), dunder_all, checker.stylist())
        }
        _ => bail!("Cannot offer a fix when there are multiple __all__ definitions"),
    };

    // Only emit a fix if there are edits. `ok_or_else` defers construction of the
    // `anyhow!` error so it isn't built (and allocated) on the success path.
    let mut tail = edits.into_iter();
    let head = tail.next().ok_or_else(|| anyhow!("No edits to make"))?;

    let isolation = Checker::isolation(checker.semantic().parent_statement_id(node_id));
    Ok(Fix::safe_edits(head, tail).isolate(isolation))
}
/// Returns an iterator over bindings to import statements that appear unused.
///
/// The stable behavior is to return those bindings to imports
/// satisfying the following properties:
///
/// - they are not shadowed
/// - they are not `global`, not `nonlocal`, and not explicit exports (i.e. `import foo as foo`)
/// - they have no references, according to the semantic model
///
/// Under preview, there is a more refined analysis performed
/// in the case where all bindings shadowed by a given import
/// binding (including the binding itself) are of a simple form:
/// they are required to be un-aliased imports or submodule imports.
///
/// This alternative analysis is described in the documentation for
/// [`unused_imports_from_binding`].
fn unused_imports_in_scope<'a, 'b>(
    semantic: &'a SemanticModel<'b>,
    scope: &'a Scope,
    settings: &'a LinterSettings,
) -> impl Iterator<Item = &'a Binding<'b>> {
    scope
        .binding_ids()
        .map(|id| (id, semantic.binding(id)))
        // Only import bindings are candidates.
        .filter(|(_, bdg)| {
            matches!(
                bdg.kind,
                BindingKind::Import(_)
                    | BindingKind::FromImport(_)
                    | BindingKind::SubmoduleImport(_)
            )
        })
        // `global`/`nonlocal` declarations and explicit re-exports
        // (`import foo as foo`) are deliberate and never reported.
        .filter(|(_, bdg)| !bdg.is_global() && !bdg.is_nonlocal() && !bdg.is_explicit_export())
        .flat_map(|(id, bdg)| {
            if is_refined_submodule_import_match_enabled(settings)
                // No need to apply refined logic if there is only a single binding
                && scope.shadowed_bindings(id).nth(1).is_some()
                // Only apply the new logic in certain situations to avoid
                // complexity, false positives, and intersection with
                // `redefined-while-unused` (`F811`).
                && has_simple_shadowed_bindings(scope, id, semantic)
            {
                // Refined (preview) path: may report several shadowed bindings at once.
                unused_imports_from_binding(semantic, id, scope)
            } else if bdg.is_used() {
                vec![]
            } else {
                vec![bdg]
            }
        })
}
/// Returns a `Vec` of bindings to unused import statements that
/// are shadowed by a given binding.
///
/// This is best explained by example. So suppose we have:
///
/// ```python
/// import a
/// import a.b
/// import a.b.c
///
/// __all__ = ["a"]
///
/// a.b.foo()
/// ```
///
/// As of 2025-09-25, Ruff's semantic model, upon visiting
/// the whole module, will have a single live binding for
/// the symbol `a` that points to the line `import a.b.c`,
/// and the remaining two import bindings are considered shadowed
/// by the last.
///
/// This function expects to receive the `id`
/// for the live binding and will begin by collecting
/// all bindings shadowed by the given one - i.e. all
/// the different import statements binding the symbol `a`.
/// We iterate over references to this
/// module and decide (somewhat subjectively) which
/// import statement the user "intends" to reference. To that end,
/// to each reference we attempt to build a [`QualifiedName`]
/// corresponding to an iterated attribute access (e.g. `a.b.foo`).
/// We then determine the closest matching import statement to that
/// qualified name, and mark it as used.
///
/// In the present example, the qualified name associated to the
/// reference from the dunder all export is `"a"` and the qualified
/// name associated to the reference in the last line is `"a.b.foo"`.
/// The closest matches are `import a` and `import a.b`, respectively,
/// leaving `import a.b.c` unused.
///
/// For a precise definition of "closest match" see [`best_match`]
/// and [`rank_matches`].
///
/// Note: if any reference comes from something other than
/// a `Name` or a dunder all expression, then we return just
/// the original binding, thus reverting the stable behavior.
fn unused_imports_from_binding<'a, 'b>(
    semantic: &'a SemanticModel<'b>,
    id: BindingId,
    scope: &'a Scope,
) -> Vec<&'a Binding<'b>> {
    // Start with every shadowed binding marked unused; each reference below
    // flips the closest-matching binding(s) to "used".
    let mut marked = MarkedBindings::from_binding_id(semantic, id, scope);
    let binding = semantic.binding(id);

    // ensure we only do this once
    let mut marked_dunder_all = false;
    for ref_id in binding.references() {
        let resolved_reference = semantic.reference(ref_id);
        if !marked_dunder_all && resolved_reference.in_dunder_all_definition() {
            // An `__all__` entry exports the bare module name, so match against
            // the first segment of the qualified name only.
            let first = *binding
                .as_any_import()
                .expect("binding to be import binding since current function called after restricting to these in `unused_imports_in_scope`")
                .qualified_name()
                .segments().first().expect("import binding to have nonempty qualified name");
            mark_uses_of_qualified_name(&mut marked, &QualifiedName::user_defined(first));
            marked_dunder_all = true;
            continue;
        }
        let Some(expr_id) = resolved_reference.expression_id() else {
            // If there is some other kind of reference, abandon
            // the refined approach for the usual one
            return vec![binding];
        };
        let Some(prototype) = expand_to_qualified_name_attribute(semantic, expr_id) else {
            // Non-`Name` reference (see function docs): revert to stable behavior.
            return vec![binding];
        };
        mark_uses_of_qualified_name(&mut marked, &prototype);
    }
    marked.into_unused()
}
/// A set of shadowed import bindings, each paired with a "used" flag.
#[derive(Debug)]
struct MarkedBindings<'a, 'b> {
    /// The bindings under consideration, in shadowing order.
    bindings: Vec<&'a Binding<'b>>,
    /// `used[i]` records whether `bindings[i]` has been matched by a reference.
    used: Vec<bool>,
}
impl<'a, 'b> MarkedBindings<'a, 'b> {
    /// Collects the binding identified by `id` together with every binding it
    /// shadows, all initially marked as unused.
    fn from_binding_id(semantic: &'a SemanticModel<'b>, id: BindingId, scope: &'a Scope) -> Self {
        let bindings: Vec<&'a Binding<'b>> = scope
            .shadowed_bindings(id)
            .map(|shadowed_id| semantic.binding(shadowed_id))
            .collect();
        let used = vec![false; bindings.len()];
        Self { bindings, used }
    }

    /// Consumes the collection, returning only the bindings never marked used.
    fn into_unused(self) -> Vec<&'a Binding<'b>> {
        let mut unused = Vec::new();
        for (binding, is_used) in self.bindings.into_iter().zip(self.used) {
            if !is_used {
                unused.push(binding);
            }
        }
        unused
    }

    /// Iterates over `(binding, used-flag)` pairs, with the flag mutable.
    fn iter_mut(&mut self) -> impl Iterator<Item = (&'a Binding<'b>, &mut bool)> {
        self.bindings.iter().copied().zip(self.used.iter_mut())
    }
}
/// Builds `Some` [`QualifiedName`] describing the longest attribute-access
/// chain (an [`ExprName`] followed by zero or more enclosing [`ExprAttribute`]s)
/// that contains the expression identified by `expr_id`, or `None` when that
/// expression is not a name.
///
/// For example, if `expr_id` points at `a` in `a.b.c.foo()`, the resulting
/// qualified name has segments `["a", "b", "c", "foo"]`.
fn expand_to_qualified_name_attribute<'b>(
    semantic: &SemanticModel<'b>,
    expr_id: NodeId,
) -> Option<QualifiedName<'b>> {
    let name = semantic.expression(expr_id)?.as_name_expr()?;

    let mut builder = QualifiedNameBuilder::with_capacity(16);
    builder.push(&name.id);

    // Climb through enclosing attribute expressions, appending each attribute
    // segment, until the parent is absent or is not an attribute access.
    let mut current = expr_id;
    loop {
        let Some(parent_id) = semantic.parent_expression_id(current) else {
            break;
        };
        let Some(attribute) = semantic
            .expression(parent_id)
            .and_then(|expr| expr.as_attribute_expr())
        else {
            break;
        };
        builder.push(attribute.attr.as_str());
        current = parent_id;
    }

    Some(builder.build())
}
/// Marks as used every binding whose import exactly matches the closest
/// candidate for the referenced path `prototype`.
fn mark_uses_of_qualified_name(marked: &mut MarkedBindings, prototype: &QualifiedName) {
    let Some(closest) = best_match(&marked.bindings, prototype) else {
        return;
    };
    let Some(closest_import) = closest.as_any_import() else {
        return;
    };
    let closest_name = closest_import.qualified_name();

    // Mark *every* binding whose qualified name equals the winner, not just the
    // first. With, e.g.,
    //
    // ```python
    // import a
    // import a
    //
    // a.foo()
    // ```
    //
    // both statements should count as used here; flagging the repetition is the
    // job of `redefined-while-unused` (`F811`).
    for (binding, is_used) in marked.iter_mut() {
        if *is_used {
            continue;
        }
        let is_same_import = binding
            .as_any_import()
            .is_some_and(|import| import.qualified_name() == closest_name);
        if is_same_import {
            *is_used = true;
        }
    }
}
/// Scores how well an import binding matches the referenced path `prototype`.
///
/// The score is a pair `(shared_prefix_len, Reverse(import_path_len))`: a
/// longer shared prefix ranks higher, and among equal prefixes a *shorter*
/// import path ranks higher (hence the [`std::cmp::Reverse`]).
///
/// For example, for the binding `import a.b.c` and the prototype `a.b.foo`,
/// the result is `(2, std::cmp::Reverse(3))`.
fn rank_matches(binding: &Binding, prototype: &QualifiedName) -> (usize, std::cmp::Reverse<usize>) {
    let Some(import) = binding.as_any_import() else {
        unreachable!()
    };
    let qualified = import.qualified_name();
    let shared_prefix = qualified
        .segments()
        .iter()
        .zip(prototype.segments())
        .take_while(|(ours, theirs)| ours == theirs)
        .count();
    (shared_prefix, std::cmp::Reverse(qualified.segments().len()))
}
/// Returns the import binding that shares the longest prefix
/// with the `prototype` and is of minimal length amongst these.
///
/// See also [`rank_matches`].
fn best_match<'a, 'b>(
    // Take a slice rather than `&Vec<_>` (Clippy `ptr_arg`): strictly more
    // general, and existing `&Vec` callers coerce via auto-deref.
    bindings: &[&'a Binding<'b>],
    prototype: &QualifiedName,
) -> Option<&'a Binding<'b>> {
    bindings
        .iter()
        .copied()
        .max_by_key(|binding| rank_matches(binding, prototype))
}
#[inline]
fn has_simple_shadowed_bindings(scope: &Scope, id: BindingId, semantic: &SemanticModel) -> bool {
let Some(binding_node) = semantic.binding(id).source else {
return false;
};
scope.shadowed_bindings(id).enumerate().all(|(i, shadow)| {
let shadowed_binding = semantic.binding(shadow);
// Bail if one of the shadowed bindings is
// used before the last live binding. This is
// to avoid situations like this:
//
// ```
// import a
// a.b
// import a.b
// ```
if i > 0 && shadowed_binding.is_used() {
return false;
}
// We want to allow a situation like this:
//
// ```python
// import a.b
// if TYPE_CHECKING:
// import a.b.c
// ```
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/unused_variable.rs | crates/ruff_linter/src/rules/pyflakes/rules/unused_variable.rs | use itertools::Itertools;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::contains_effect;
use ruff_python_ast::token::parenthesized_range;
use ruff_python_ast::token::{TokenKind, Tokens};
use ruff_python_ast::{self as ast, Stmt};
use ruff_python_semantic::Binding;
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::checkers::ast::Checker;
use crate::fix::edits::delete_stmt;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for the presence of unused variables in function scopes.
///
/// ## Why is this bad?
/// A variable that is defined but not used is likely a mistake, and should
/// be removed to avoid confusion.
///
/// If a variable is intentionally defined-but-not-used, it should be
/// prefixed with an underscore, or some other value that adheres to the
/// [`lint.dummy-variable-rgx`] pattern.
///
/// ## Example
/// ```python
/// def foo():
/// x = 1
/// y = 2
/// return x
/// ```
///
/// Use instead:
/// ```python
/// def foo():
/// x = 1
/// return x
/// ```
///
/// ## Fix safety
///
/// This rule's fix is marked as unsafe because removing an unused variable assignment may
/// delete comments that are attached to the assignment.
///
/// ## See also
///
/// This rule does not apply to bindings in unpacked assignments (e.g. `x, y = 1, 2`). See
/// [`unused-unpacked-variable`][RUF059] for this case.
///
/// ## Options
/// - `lint.dummy-variable-rgx`
///
/// [RUF059]: https://docs.astral.sh/ruff/rules/unused-unpacked-variable/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.22")]
pub(crate) struct UnusedVariable {
    /// The name of the unused local variable, as bound in the function scope.
    pub name: String,
}
impl Violation for UnusedVariable {
    // A fix is only *sometimes* available: `remove_unused_variable` returns
    // `Option<Fix>` and fails for assignment shapes it cannot rewrite.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let UnusedVariable { name } = self;
        format!("Local variable `{name}` is assigned to but never used")
    }

    fn fix_title(&self) -> Option<String> {
        let UnusedVariable { name } = self;
        Some(format!("Remove assignment to unused variable `{name}`"))
    }
}
/// Return the [`TextRange`] of the token immediately preceding the first token
/// after `location` for which the predicate holds, or `None` if no token matches.
fn match_token_before<F>(tokens: &Tokens, location: TextSize, f: F) -> Option<TextRange>
where
    F: Fn(TokenKind) -> bool,
{
    tokens
        .after(location)
        .iter()
        .tuple_windows()
        .find_map(|(previous, current)| f(current.kind()).then(|| previous.range()))
}
/// Return the [`TextRange`] of the token following the next top-level match of
/// the predicate, skipping over any bracketed (parenthesized/indexed/braced)
/// expressions.
fn match_token_after<F>(tokens: &Tokens, location: TextSize, f: F) -> Option<TextRange>
where
    F: Fn(TokenKind) -> bool,
{
    // Bracket-nesting depth; the predicate is only consulted at depth zero.
    let mut depth = 0u32;
    for (current, next) in tokens.after(location).iter().tuple_windows() {
        let kind = current.kind();
        match kind {
            TokenKind::Lpar | TokenKind::Lsqb | TokenKind::Lbrace => {
                depth = depth.saturating_add(1);
            }
            TokenKind::Rpar | TokenKind::Rsqb | TokenKind::Rbrace => {
                depth = depth.saturating_sub(1);
            }
            _ => {}
        }
        // Test the predicate only outside any brackets (including immediately
        // after a bracket closes back to the top level).
        if depth == 0 && f(kind) {
            return Some(next.range());
        }
    }
    None
}
/// Return the [`TextRange`] of the first top-level token matching the predicate,
/// or of the first unmatched closing bracket, skipping any bracketed expressions.
fn match_token_or_closing_brace<F>(tokens: &Tokens, location: TextSize, f: F) -> Option<TextRange>
where
    F: Fn(TokenKind) -> bool,
{
    // Bracket-nesting depth; the predicate is only consulted at depth zero.
    let mut depth = 0u32;
    for token in tokens.after(location) {
        match token.kind() {
            TokenKind::Lpar | TokenKind::Lsqb | TokenKind::Lbrace => {
                depth = depth.saturating_add(1);
            }
            TokenKind::Rpar | TokenKind::Rsqb | TokenKind::Rbrace => {
                // An unmatched closing bracket terminates the search.
                if depth == 0 {
                    return Some(token.range());
                }
                depth = depth.saturating_sub(1);
            }
            _ => {}
        }
        // Test the predicate only outside any brackets (including immediately
        // after a bracket closes back to the top level).
        if depth == 0 && f(token.kind()) {
            return Some(token.range());
        }
    }
    None
}
/// Generate a [`Edit`] to remove an unused variable assignment to a [`Binding`].
fn remove_unused_variable(binding: &Binding, checker: &Checker) -> Option<Fix> {
let node_id = binding.source?;
let statement = checker.semantic().statement(node_id);
let parent = checker.semantic().parent_statement(node_id);
let isolation = Checker::isolation(checker.semantic().parent_statement_id(node_id));
// First case: simple assignment (`x = 1`)
if let Stmt::Assign(ast::StmtAssign { targets, value, .. }) = statement {
if let Some(target) = targets
.iter()
.find(|target| binding.range() == target.range())
{
if target.is_name_expr() {
return if targets.len() > 1
|| contains_effect(value, |id| checker.semantic().has_builtin_binding(id))
{
// If the expression is complex (`x = foo()`), remove the assignment,
// but preserve the right-hand side.
let start =
parenthesized_range(target.into(), statement.into(), checker.tokens())
.unwrap_or(target.range())
.start();
let end = match_token_after(checker.tokens(), target.end(), |token| {
token == TokenKind::Equal
})?
.start();
let edit = Edit::deletion(start, end);
Some(Fix::unsafe_edit(edit))
} else {
// If (e.g.) assigning to a constant (`x = 1`), delete the entire statement.
let edit = delete_stmt(statement, parent, checker.locator(), checker.indexer());
Some(Fix::unsafe_edit(edit).isolate(isolation))
};
}
} else {
let name = binding.name(checker.source());
let renamed = format!("_{name}");
if checker.settings().dummy_variable_rgx.is_match(&renamed) {
let edit = Edit::range_replacement(renamed, binding.range());
return Some(Fix::unsafe_edit(edit).isolate(isolation));
}
}
}
// Second case: simple annotated assignment (`x: int = 1`)
if let Stmt::AnnAssign(ast::StmtAnnAssign {
target,
value: Some(value),
..
}) = statement
{
if target.is_name_expr() {
return if contains_effect(value, |id| checker.semantic().has_builtin_binding(id)) {
// If the expression is complex (`x = foo()`), remove the assignment,
// but preserve the right-hand side.
let start = statement.start();
let end =
match_token_after(checker.tokens(), start, |token| token == TokenKind::Equal)?
.start();
let edit = Edit::deletion(start, end);
Some(Fix::unsafe_edit(edit))
} else {
// If (e.g.) assigning to a constant (`x = 1`), delete the entire statement.
let edit = delete_stmt(statement, parent, checker.locator(), checker.indexer());
Some(Fix::unsafe_edit(edit).isolate(isolation))
};
}
}
// Third case: with_item (`with foo() as x:`)
if let Stmt::With(ast::StmtWith { items, .. }) = statement {
// Find the binding that matches the given `Range`.
// TODO(charlie): Store the `WithItem` in the `Binding`.
for item in items {
if let Some(optional_vars) = &item.optional_vars {
if optional_vars.range() == binding.range() {
// Find the first token before the `as` keyword.
let start =
match_token_before(checker.tokens(), item.context_expr.start(), |token| {
token == TokenKind::As
})?
.end();
// Find the first colon, comma, or closing bracket after the `as` keyword.
let end = match_token_or_closing_brace(checker.tokens(), start, |token| {
token == TokenKind::Colon || token == TokenKind::Comma
})?
.start();
let edit = Edit::deletion(start, end);
return Some(Fix::unsafe_edit(edit));
}
}
}
}
None
}
/// F841
pub(crate) fn unused_variable(checker: &Checker, name: &str, binding: &Binding) {
    // Bindings created by unpacking (`x, y = ...`) are handled by a separate rule.
    if binding.is_unpacked_assignment() {
        return;
    }

    let violation = UnusedVariable {
        name: name.to_string(),
    };
    let mut diagnostic = checker.report_diagnostic(violation, binding.range());

    // Attach a fix when one can be derived from the assignment's shape.
    if let Some(fix) = remove_unused_variable(binding, checker) {
        diagnostic.set_fix(fix);
    }

    // Add Unnecessary tag for unused variables
    diagnostic.add_primary_tag(ruff_db::diagnostic::DiagnosticTag::Unnecessary);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/strings.rs | crates/ruff_linter/src/rules/pyflakes/rules/strings.rs | use std::string::ToString;
use ruff_diagnostics::Applicability;
use ruff_python_ast::helpers::contains_effect;
use rustc_hash::FxHashSet;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::name::Name;
use ruff_python_ast::{self as ast, Expr, Keyword};
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Fix, FixAvailability, Violation};
use crate::rules::pyflakes::cformat::CFormatSummary;
use crate::rules::pyflakes::fixes::{
remove_unused_format_arguments_from_dict, remove_unused_keyword_arguments_from_format_call,
remove_unused_positional_arguments_from_format_call,
};
use crate::rules::pyflakes::format::FormatSummary;
/// ## What it does
/// Checks for invalid `printf`-style format strings.
///
/// ## Why is this bad?
/// Conversion specifiers are required for `printf`-style format strings. These
/// specifiers must contain a `%` character followed by a conversion type.
///
/// ## Example
/// ```python
/// "Hello, %" % "world"
/// ```
///
/// Use instead:
/// ```python
/// "Hello, %s" % "world"
/// ```
///
/// ## References
/// - [Python documentation: `printf`-style String Formatting](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.142")]
pub(crate) struct PercentFormatInvalidFormat {
    /// Description of why the format string failed to parse.
    pub(crate) message: String,
}

impl Violation for PercentFormatInvalidFormat {
    #[derive_message_formats]
    fn message(&self) -> String {
        let PercentFormatInvalidFormat { message } = self;
        format!("`%`-format string has invalid format string: {message}")
    }
}
/// ## What it does
/// Checks for named placeholders in `printf`-style format strings without
/// mapping-type values.
///
/// ## Why is this bad?
/// When using named placeholders in `printf`-style format strings, the values
/// must be a map type (such as a dictionary). Otherwise, the expression will
/// raise a `TypeError`.
///
/// ## Example
/// ```python
/// "%(greeting)s, %(name)s" % ("Hello", "World")
/// ```
///
/// Use instead:
/// ```python
/// "%(greeting)s, %(name)s" % {"greeting": "Hello", "name": "World"}
/// ```
///
/// Or:
/// ```python
/// "%s, %s" % ("Hello", "World")
/// ```
///
/// ## References
/// - [Python documentation: `printf`-style String Formatting](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.142")]
pub(crate) struct PercentFormatExpectedMapping;

impl Violation for PercentFormatExpectedMapping {
    // Message-only violation: no `fix_title` is provided.
    #[derive_message_formats]
    fn message(&self) -> String {
        "`%`-format string expected mapping but got sequence".to_string()
    }
}
/// ## What it does
/// Checks for uses of mapping-type values in `printf`-style format strings
/// without named placeholders.
///
/// ## Why is this bad?
/// When using mapping-type values (such as `dict`) in `printf`-style format
/// strings, the keys must be named. Otherwise, the expression will raise a
/// `TypeError`.
///
/// ## Example
/// ```python
/// "%s, %s" % {"greeting": "Hello", "name": "World"}
/// ```
///
/// Use instead:
/// ```python
/// "%(greeting)s, %(name)s" % {"greeting": "Hello", "name": "World"}
/// ```
///
/// Or:
/// ```python
/// "%s, %s" % ("Hello", "World")
/// ```
///
/// ## References
/// - [Python documentation: `printf`-style String Formatting](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.142")]
pub(crate) struct PercentFormatExpectedSequence;

impl Violation for PercentFormatExpectedSequence {
    // Message-only violation: no `fix_title` is provided.
    #[derive_message_formats]
    fn message(&self) -> String {
        "`%`-format string expected sequence but got mapping".to_string()
    }
}
/// ## What it does
/// Checks for unused mapping keys in `printf`-style format strings.
///
/// ## Why is this bad?
/// Unused named placeholders in `printf`-style format strings are unnecessary,
/// and likely indicative of a mistake. They should be removed.
///
/// ## Example
/// ```python
/// "Hello, %(name)s" % {"greeting": "Hello", "name": "World"}
/// ```
///
/// Use instead:
/// ```python
/// "Hello, %(name)s" % {"name": "World"}
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe for mapping key
/// containing function calls with potential side effects,
/// because removing such arguments could change the behavior of the code.
///
/// For example, the fix would be marked as unsafe in the following case:
/// ```python
/// "Hello, %(name)s" % {"greeting": print(1), "name": "World"}
/// ```
///
/// ## References
/// - [Python documentation: `printf`-style String Formatting](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.142")]
pub(crate) struct PercentFormatExtraNamedArguments {
    // NOTE(review): despite the name, this field holds the *extra* (unused)
    // mapping keys, mirroring the naming in the sibling rules.
    missing: Vec<String>,
}

impl AlwaysFixableViolation for PercentFormatExtraNamedArguments {
    #[derive_message_formats]
    fn message(&self) -> String {
        let PercentFormatExtraNamedArguments { missing } = self;
        let message = missing.join(", ");
        format!("`%`-format string has unused named argument(s): {message}")
    }

    fn fix_title(&self) -> String {
        let PercentFormatExtraNamedArguments { missing } = self;
        let message = missing.join(", ");
        format!("Remove extra named arguments: {message}")
    }
}
/// ## What it does
/// Checks for named placeholders in `printf`-style format strings that are not
/// present in the provided mapping.
///
/// ## Why is this bad?
/// Named placeholders that lack a corresponding value in the provided mapping
/// will raise a `KeyError`.
///
/// ## Example
/// ```python
/// "%(greeting)s, %(name)s" % {"name": "world"}
/// ```
///
/// Use instead:
/// ```python
/// "Hello, %(name)s" % {"name": "world"}
/// ```
///
/// ## References
/// - [Python documentation: `printf`-style String Formatting](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.142")]
pub(crate) struct PercentFormatMissingArgument {
    /// The placeholder names that lack a corresponding value in the mapping.
    missing: Vec<String>,
}

impl Violation for PercentFormatMissingArgument {
    #[derive_message_formats]
    fn message(&self) -> String {
        let PercentFormatMissingArgument { missing } = self;
        let message = missing.join(", ");
        format!("`%`-format string is missing argument(s) for placeholder(s): {message}")
    }
}
/// ## What it does
/// Checks for `printf`-style format strings that have mixed positional and
/// named placeholders.
///
/// ## Why is this bad?
/// Python does not support mixing positional and named placeholders in
/// `printf`-style format strings. The use of mixed placeholders will raise a
/// `TypeError` at runtime.
///
/// ## Example
/// ```python
/// "%s, %(name)s" % ("Hello", {"name": "World"})
/// ```
///
/// Use instead:
/// ```python
/// "%s, %s" % ("Hello", "World")
/// ```
///
/// Or:
/// ```python
/// "%(greeting)s, %(name)s" % {"greeting": "Hello", "name": "World"}
/// ```
///
/// ## References
/// - [Python documentation: `printf`-style String Formatting](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.142")]
pub(crate) struct PercentFormatMixedPositionalAndNamed;

impl Violation for PercentFormatMixedPositionalAndNamed {
    // Message-only violation: no `fix_title` is provided.
    #[derive_message_formats]
    fn message(&self) -> String {
        "`%`-format string has mixed positional and named placeholders".to_string()
    }
}
/// ## What it does
/// Checks for `printf`-style format strings that have a mismatch between the
/// number of positional placeholders and the number of substitution values.
///
/// ## Why is this bad?
/// When a `printf`-style format string is provided with too many or too few
/// substitution values, it will raise a `TypeError` at runtime.
///
/// ## Example
/// ```python
/// "%s, %s" % ("Hello", "world", "!")
/// ```
///
/// Use instead:
/// ```python
/// "%s, %s" % ("Hello", "world")
/// ```
///
/// ## References
/// - [Python documentation: `printf`-style String Formatting](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting)
// Reported as F507 by `percent_format_positional_count_mismatch`.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.142")]
pub(crate) struct PercentFormatPositionalCountMismatch {
    // Number of positional placeholders in the format string.
    wanted: usize,
    // Number of substitution values actually supplied.
    got: usize,
}
impl Violation for PercentFormatPositionalCountMismatch {
    #[derive_message_formats]
    fn message(&self) -> String {
        let PercentFormatPositionalCountMismatch { wanted, got } = self;
        format!("`%`-format string has {wanted} placeholder(s) but {got} substitution(s)")
    }
}
/// ## What it does
/// Checks for `printf`-style format strings that use the `*` specifier with
/// non-tuple values.
///
/// ## Why is this bad?
/// The use of the `*` specifier with non-tuple values will raise a
/// `TypeError` at runtime.
///
/// ## Example
/// ```python
/// from math import pi
///
/// "%(n).*f" % {"n": (2, pi)}
/// ```
///
/// Use instead:
/// ```python
/// from math import pi
///
/// "%.*f" % (2, pi)  # 3.14
/// ```
///
/// ## References
/// - [Python documentation: `printf`-style String Formatting](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting)
// Reported as F508 by `percent_format_star_requires_sequence`.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.142")]
pub(crate) struct PercentFormatStarRequiresSequence;
impl Violation for PercentFormatStarRequiresSequence {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Static message: the violation carries no per-instance data.
        "`%`-format string `*` specifier requires sequence".to_string()
    }
}
/// ## What it does
/// Checks for `printf`-style format strings with invalid format characters.
///
/// ## Why is this bad?
/// In `printf`-style format strings, the `%` character is used to indicate
/// placeholders. If a `%` character is not followed by a valid format
/// character, it will raise a `ValueError` at runtime.
///
/// ## Example
/// ```python
/// "Hello, %S" % "world"
/// ```
///
/// Use instead:
/// ```python
/// "Hello, %s" % "world"
/// ```
///
/// ## References
/// - [Python documentation: `printf`-style String Formatting](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.142")]
pub(crate) struct PercentFormatUnsupportedFormatCharacter {
    // The unsupported conversion character that followed `%` (interpolated
    // into the diagnostic message).
    pub(crate) char: char,
}
impl Violation for PercentFormatUnsupportedFormatCharacter {
    #[derive_message_formats]
    fn message(&self) -> String {
        let PercentFormatUnsupportedFormatCharacter { char } = self;
        format!("`%`-format string has unsupported format character `{char}`")
    }
}
/// ## What it does
/// Checks for `str.format` calls with invalid format strings.
///
/// ## Why is this bad?
/// Invalid format strings will raise a `ValueError`.
///
/// ## Example
/// ```python
/// "{".format(foo)
/// ```
///
/// Use instead:
/// ```python
/// "{}".format(foo)
/// ```
///
/// ## References
/// - [Python documentation: `str.format`](https://docs.python.org/3/library/stdtypes.html#str.format)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.138")]
pub(crate) struct StringDotFormatInvalidFormat {
    // Detail describing why the format string is invalid (interpolated into
    // the diagnostic message).
    pub(crate) message: String,
}
impl Violation for StringDotFormatInvalidFormat {
    #[derive_message_formats]
    fn message(&self) -> String {
        let StringDotFormatInvalidFormat { message } = self;
        format!("`.format` call has invalid format string: {message}")
    }
}
/// ## What it does
/// Checks for `str.format` calls with unused keyword arguments.
///
/// ## Why is this bad?
/// Unused keyword arguments are redundant, and often indicative of a mistake.
/// They should be removed.
///
/// ## Example
/// ```python
/// "Hello, {name}".format(greeting="Hello", name="World")
/// ```
///
/// Use instead:
/// ```python
/// "Hello, {name}".format(name="World")
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe if the unused keyword argument
/// contains a function call with potential side effects,
/// because removing such arguments could change the behavior of the code.
///
/// For example, the fix would be marked as unsafe in the following case:
/// ```python
/// "Hello, {name}".format(greeting=print(1), name="World")
/// ```
///
/// ## References
/// - [Python documentation: `str.format`](https://docs.python.org/3/library/stdtypes.html#str.format)
// Reported as F522 by `string_dot_format_extra_named_arguments`.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.139")]
pub(crate) struct StringDotFormatExtraNamedArguments {
    // NOTE: despite the name, this holds the *extra* (unused) keyword
    // arguments, as reflected in the message below.
    missing: Vec<Name>,
}
impl Violation for StringDotFormatExtraNamedArguments {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        let StringDotFormatExtraNamedArguments { missing } = self;
        let message = missing.join(", ");
        format!("`.format` call has unused named argument(s): {message}")
    }
    fn fix_title(&self) -> Option<String> {
        let StringDotFormatExtraNamedArguments { missing } = self;
        let message = missing.join(", ");
        Some(format!("Remove extra named arguments: {message}"))
    }
}
/// ## What it does
/// Checks for `str.format` calls with unused positional arguments.
///
/// ## Why is this bad?
/// Unused positional arguments are redundant, and often indicative of a mistake.
/// They should be removed.
///
/// ## Example
/// ```python
/// "Hello, {0}".format("world", "!")
/// ```
///
/// Use instead:
/// ```python
/// "Hello, {0}".format("world")
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe if the unused positional argument
/// contains a function call with potential side effects,
/// because removing such arguments could change the behavior of the code.
///
/// For example, the fix would be marked as unsafe in the following case:
/// ```python
/// "Hello, {0}".format("world", print(1))
/// ```
///
/// ## References
/// - [Python documentation: `str.format`](https://docs.python.org/3/library/stdtypes.html#str.format)
// Reported as F523 by `string_dot_format_extra_positional_arguments`.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.139")]
pub(crate) struct StringDotFormatExtraPositionalArguments {
    // NOTE: despite the name, this holds the positions of the *extra* (unused)
    // positional arguments, rendered as strings for the message.
    missing: Vec<String>,
}
impl Violation for StringDotFormatExtraPositionalArguments {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        let StringDotFormatExtraPositionalArguments { missing } = self;
        let message = missing.join(", ");
        format!("`.format` call has unused arguments at position(s): {message}")
    }
    fn fix_title(&self) -> Option<String> {
        let StringDotFormatExtraPositionalArguments { missing } = self;
        let message = missing.join(", ");
        Some(format!(
            "Remove extra positional arguments at position(s): {message}"
        ))
    }
}
/// ## What it does
/// Checks for `str.format` calls with placeholders that are missing arguments.
///
/// ## Why is this bad?
/// In `str.format` calls, omitting arguments for placeholders will raise a
/// `KeyError` at runtime.
///
/// ## Example
/// ```python
/// "{greeting}, {name}".format(name="World")
/// ```
///
/// Use instead:
/// ```python
/// "{greeting}, {name}".format(greeting="Hello", name="World")
/// ```
///
/// ## References
/// - [Python documentation: `str.format`](https://docs.python.org/3/library/stdtypes.html#str.format)
// Reported as F524 by `string_dot_format_missing_argument`.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.139")]
pub(crate) struct StringDotFormatMissingArguments {
    // The placeholders (indices or names) with no corresponding argument.
    missing: Vec<String>,
}
impl Violation for StringDotFormatMissingArguments {
    #[derive_message_formats]
    fn message(&self) -> String {
        let StringDotFormatMissingArguments { missing } = self;
        let message = missing.join(", ");
        format!("`.format` call is missing argument(s) for placeholder(s): {message}")
    }
}
/// ## What it does
/// Checks for `str.format` calls that mix automatic and manual numbering.
///
/// ## Why is this bad?
/// In `str.format` calls, mixing automatic and manual numbering will raise a
/// `ValueError` at runtime.
///
/// ## Example
/// ```python
/// "{0}, {}".format("Hello", "World")
/// ```
///
/// Use instead:
/// ```python
/// "{0}, {1}".format("Hello", "World")
/// ```
///
/// Or:
/// ```python
/// "{}, {}".format("Hello", "World")
/// ```
///
/// ## References
/// - [Python documentation: `str.format`](https://docs.python.org/3/library/stdtypes.html#str.format)
// Reported as F525 by `string_dot_format_mixing_automatic`.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.139")]
pub(crate) struct StringDotFormatMixingAutomatic;
impl Violation for StringDotFormatMixingAutomatic {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Static message: the violation carries no per-instance data.
        "`.format` string mixes automatic and manual numbering".to_string()
    }
}
fn has_star_star_kwargs(keywords: &[Keyword]) -> bool {
keywords
.iter()
.any(|keyword| matches!(keyword, Keyword { arg: None, .. }))
}
fn has_star_args(args: &[Expr]) -> bool {
args.iter().any(Expr::is_starred_expr)
}
/// F502
pub(crate) fn percent_format_expected_mapping(
    checker: &Checker,
    summary: &CFormatSummary,
    right: &Expr,
    location: TextRange,
) {
    // Named placeholders require a mapping on the right-hand side.
    if summary.keywords.is_empty() {
        return;
    }
    // Sequence-like right-hand sides — tuples, lists, sets, and their
    // comprehension/generator forms — cannot satisfy named placeholders.
    if matches!(
        right,
        Expr::List(_)
            | Expr::Tuple(_)
            | Expr::Set(_)
            | Expr::ListComp(_)
            | Expr::SetComp(_)
            | Expr::Generator(_)
    ) {
        checker.report_diagnostic(PercentFormatExpectedMapping, location);
    }
}
/// F503
pub(crate) fn percent_format_expected_sequence(
    checker: &Checker,
    summary: &CFormatSummary,
    right: &Expr,
    location: TextRange,
) {
    // Only formats with more than one positional placeholder require a
    // sequence on the right-hand side.
    if summary.num_positional <= 1 {
        return;
    }
    if matches!(right, Expr::Dict(_) | Expr::DictComp(_)) {
        checker.report_diagnostic(PercentFormatExpectedSequence, location);
    }
}
/// F504
pub(crate) fn percent_format_extra_named_arguments(
    checker: &Checker,
    summary: &CFormatSummary,
    right: &Expr,
    location: TextRange,
) {
    // Positional placeholders imply the right-hand side is not a mapping, so
    // this rule does not apply.
    if summary.num_positional > 0 {
        return;
    }
    let Expr::Dict(dict) = &right else {
        return;
    };
    // If any of the keys are spread, abort.
    if dict.iter_keys().any(|key| key.is_none()) {
        return;
    }
    // Collect `(index, name)` pairs for string keys that no placeholder in the
    // format string references; dynamic (non-string) keys are skipped.
    let missing: Vec<(usize, &str)> = dict
        .iter_keys()
        .enumerate()
        .filter_map(|(index, key)| match key {
            Some(Expr::StringLiteral(ast::ExprStringLiteral { value, .. })) => {
                if summary.keywords.contains(value.to_str()) {
                    None
                } else {
                    Some((index, value.to_str()))
                }
            }
            _ => None,
        })
        .collect();
    if missing.is_empty() {
        return;
    }
    let names: Vec<String> = missing
        .iter()
        .map(|(_, name)| (*name).to_string())
        .collect();
    let mut diagnostic = checker.report_diagnostic(
        PercentFormatExtraNamedArguments { missing: names },
        location,
    );
    // The fix removes the unreferenced keys from the dict literal.
    diagnostic.try_set_fix(|| {
        let indexes: Vec<usize> = missing.iter().map(|(index, _)| *index).collect();
        let edit = remove_unused_format_arguments_from_dict(
            &indexes,
            dict,
            checker.locator(),
            checker.stylist(),
        )?;
        Ok(Fix::applicable_edit(
            edit,
            // Mark fix as unsafe if `dict` contains a call with side effect
            if contains_effect(right, |id| checker.semantic().has_builtin_binding(id)) {
                Applicability::Unsafe
            } else {
                Applicability::Safe
            },
        ))
    });
}
/// F505
pub(crate) fn percent_format_missing_arguments(
    checker: &Checker,
    summary: &CFormatSummary,
    right: &Expr,
    location: TextRange,
) {
    // Positional placeholders imply the right-hand side is not a mapping, so
    // this rule does not apply.
    if summary.num_positional > 0 {
        return;
    }
    let Expr::Dict(dict) = &right else {
        return;
    };
    if dict.iter_keys().any(|key| key.is_none()) {
        return; // contains **x splat
    }
    // Gather all statically-known string keys; bail out on any dynamic key,
    // since we then can't prove a placeholder is unmatched.
    let mut keywords = FxHashSet::default();
    for key in dict.iter_keys().flatten() {
        match key {
            Expr::StringLiteral(ast::ExprStringLiteral { value, .. }) => {
                keywords.insert(value.to_str());
            }
            _ => {
                return; // Dynamic keys present
            }
        }
    }
    // Placeholders with no corresponding key in the dict literal.
    let missing: Vec<&String> = summary
        .keywords
        .iter()
        .filter(|k| !keywords.contains(k.as_str()))
        .collect();
    if !missing.is_empty() {
        checker.report_diagnostic(
            PercentFormatMissingArgument {
                missing: missing.iter().map(|&s| s.clone()).collect(),
            },
            location,
        );
    }
}
/// F506
pub(crate) fn percent_format_mixed_positional_and_named(
    checker: &Checker,
    summary: &CFormatSummary,
    location: TextRange,
) {
    // Mixing occurs when the format string has at least one positional *and*
    // at least one named placeholder.
    if summary.num_positional > 0 && !summary.keywords.is_empty() {
        checker.report_diagnostic(PercentFormatMixedPositionalAndNamed, location);
    }
}
/// F507
pub(crate) fn percent_format_positional_count_mismatch(
    checker: &Checker,
    summary: &CFormatSummary,
    right: &Expr,
    location: TextRange,
) {
    // Only check format strings that use positional placeholders exclusively.
    if !summary.keywords.is_empty() {
        return;
    }
    let Expr::Tuple(tuple) = right else {
        return;
    };
    let mut supplied = 0;
    for element in tuple {
        // A starred element contributes an unknown number of values, so the
        // count cannot be verified statically.
        if element.is_starred_expr() {
            return;
        }
        supplied += 1;
    }
    if supplied == summary.num_positional {
        return;
    }
    checker.report_diagnostic(
        PercentFormatPositionalCountMismatch {
            wanted: summary.num_positional,
            got: supplied,
        },
        location,
    );
}
/// F508
pub(crate) fn percent_format_star_requires_sequence(
    checker: &Checker,
    summary: &CFormatSummary,
    right: &Expr,
    location: TextRange,
) {
    if !summary.starred {
        return;
    }
    // A `*` specifier pulls its value from the argument sequence, so a dict
    // (or dict comprehension) on the right-hand side cannot supply it.
    if matches!(right, Expr::Dict(_) | Expr::DictComp(_)) {
        checker.report_diagnostic(PercentFormatStarRequiresSequence, location);
    }
}
/// F522
pub(crate) fn string_dot_format_extra_named_arguments(
    checker: &Checker,
    call: &ast::ExprCall,
    summary: &FormatSummary,
    keywords: &[Keyword],
) {
    // If there are any **kwargs, abort.
    if has_star_star_kwargs(keywords) {
        return;
    }
    let keyword_names = keywords
        .iter()
        .filter_map(|Keyword { arg, value, .. }| Some((arg.as_ref()?, value)));
    // Track whether any *unused* argument has side effects; if so, removing it
    // would change behavior, and the fix must be marked unsafe.
    let mut side_effects = false;
    let missing: Vec<(usize, &Name)> = keyword_names
        .enumerate()
        .filter_map(|(index, (keyword, value))| {
            if summary.keywords.contains(keyword.id()) {
                None
            } else {
                side_effects |=
                    contains_effect(value, |id| checker.semantic().has_builtin_binding(id));
                Some((index, &keyword.id))
            }
        })
        .collect();
    if missing.is_empty() {
        return;
    }
    let names: Vec<Name> = missing.iter().map(|(_, name)| (*name).clone()).collect();
    let mut diagnostic = checker.report_diagnostic(
        StringDotFormatExtraNamedArguments { missing: names },
        call.range(),
    );
    let indexes: Vec<usize> = missing.into_iter().map(|(index, _)| index).collect();
    diagnostic.try_set_fix(|| {
        let edit = remove_unused_keyword_arguments_from_format_call(
            &indexes,
            call,
            checker.locator(),
            checker.stylist(),
        )?;
        Ok(Fix::applicable_edit(
            edit,
            // Mark fix as unsafe if the `format` call contains an argument with side effect
            if side_effects {
                Applicability::Unsafe
            } else {
                Applicability::Safe
            },
        ))
    });
}
/// F523
pub(crate) fn string_dot_format_extra_positional_arguments(
    checker: &Checker,
    call: &ast::ExprCall,
    summary: &FormatSummary,
    args: &[Expr],
) {
    // We can only fix if the positional arguments we're removing don't require re-indexing
    // the format string itself. For example, we can't fix `"{1}{2}".format(0, 1, 2)"`, since
    // this requires changing the format string to `"{0}{1}"`. But we can fix
    // `"{0}{1}".format(0, 1, 2)`, since this only requires modifying the call arguments.
    //
    // Returns `true` if `indexes` (ascending) form a contiguous run that ends
    // at the last index of `target`.
    fn is_contiguous_from_end<T>(indexes: &[usize], target: &[T]) -> bool {
        if indexes.is_empty() {
            return true;
        }
        let mut expected_index = target.len() - 1;
        for &index in indexes.iter().rev() {
            if index != expected_index {
                return false;
            }
            if expected_index == 0 {
                break;
            }
            expected_index -= 1;
        }
        true
    }
    // Track whether any unused argument has side effects; removing such an
    // argument would change behavior, so the fix must be marked unsafe.
    let mut side_effects = false;
    // Positions referenced neither automatically (`{}`) nor by index (`{0}`);
    // starred arguments are exempt since their expansion size is unknown.
    let missing: Vec<usize> = args
        .iter()
        .enumerate()
        .filter(|(i, arg)| {
            !(arg.is_starred_expr() || summary.autos.contains(i) || summary.indices.contains(i))
        })
        .map(|(i, arg)| {
            side_effects |= contains_effect(arg, |id| checker.semantic().has_builtin_binding(id));
            i
        })
        .collect();
    if missing.is_empty() {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(
        StringDotFormatExtraPositionalArguments {
            missing: missing
                .iter()
                .map(ToString::to_string)
                .collect::<Vec<String>>(),
        },
        call.range(),
    );
    if is_contiguous_from_end(&missing, args) {
        diagnostic.try_set_fix(|| {
            let edit = remove_unused_positional_arguments_from_format_call(
                &missing,
                call,
                checker.locator(),
                checker.stylist(),
            )?;
            Ok(Fix::applicable_edit(
                edit,
                // Mark fix as unsafe if the `format` call contains an argument with side effect
                if side_effects {
                    Applicability::Unsafe
                } else {
                    Applicability::Safe
                },
            ))
        });
    }
}
/// F524
pub(crate) fn string_dot_format_missing_argument(
    checker: &Checker,
    call: &ast::ExprCall,
    summary: &FormatSummary,
    args: &[Expr],
    keywords: &[Keyword],
) {
    // Splats may supply any number of values or keys, so nothing can be
    // proven missing.
    if has_star_args(args) || has_star_star_kwargs(keywords) {
        return;
    }
    let keywords: FxHashSet<_> = keywords
        .iter()
        .filter_map(|k| {
            let Keyword { arg, .. } = &k;
            arg.as_ref().map(ruff_python_ast::Identifier::id)
        })
        .collect();
    // Numeric placeholders beyond the argument count, plus named placeholders
    // with no matching keyword argument.
    let missing: Vec<String> = summary
        .autos
        .iter()
        .chain(&summary.indices)
        .filter(|&&i| i >= args.len())
        .map(ToString::to_string)
        .chain(
            summary
                .keywords
                .iter()
                .filter(|k| !keywords.contains(*k))
                .map(ToString::to_string),
        )
        .collect();
    if !missing.is_empty() {
        checker.report_diagnostic(StringDotFormatMissingArguments { missing }, call.range());
    }
}
/// F525
pub(crate) fn string_dot_format_mixing_automatic(
    checker: &Checker,
    call: &ast::ExprCall,
    summary: &FormatSummary,
) {
    // Automatic (`{}`) and manual (`{0}`) numbering may not be combined.
    if !summary.autos.is_empty() && !summary.indices.is_empty() {
        checker.report_diagnostic(StringDotFormatMixingAutomatic, call.range());
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/continue_outside_loop.rs | crates/ruff_linter/src/rules/pyflakes/rules/continue_outside_loop.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::Violation;
/// ## What it does
/// Checks for `continue` statements outside of loops.
///
/// ## Why is this bad?
/// The use of a `continue` statement outside of a `for` or `while` loop will
/// raise a `SyntaxError`.
///
/// ## Example
/// ```python
/// def foo():
///     continue  # SyntaxError
/// ```
///
/// ## References
/// - [Python documentation: `continue`](https://docs.python.org/3/reference/simple_stmts.html#the-continue-statement)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.36")]
pub(crate) struct ContinueOutsideLoop;
impl Violation for ContinueOutsideLoop {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Static message: the violation carries no per-instance data.
        "`continue` not properly in loop".to_string()
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/future_feature_not_defined.rs | crates/ruff_linter/src/rules/pyflakes/rules/future_feature_not_defined.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::Violation;
/// ## What it does
/// Checks for `__future__` imports that are not defined in the current Python
/// version.
///
/// ## Why is this bad?
/// Importing undefined or unsupported members from the `__future__` module is
/// a `SyntaxError`.
///
/// ## References
/// - [Python documentation: `__future__`](https://docs.python.org/3/library/__future__.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.34")]
pub(crate) struct FutureFeatureNotDefined {
    // The requested `__future__` feature name (interpolated into the message).
    pub name: String,
}
impl Violation for FutureFeatureNotDefined {
    #[derive_message_formats]
    fn message(&self) -> String {
        let FutureFeatureNotDefined { name } = self;
        format!("Future feature `{name}` is not defined")
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pyflakes/rules/invalid_literal_comparisons.rs | crates/ruff_linter/src/rules/pyflakes/rules/invalid_literal_comparisons.rs | use anyhow::{Error, bail};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers;
use ruff_python_ast::token::{TokenKind, Tokens};
use ruff_python_ast::{CmpOp, Expr};
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for `is` and `is not` comparisons against literals, like integers,
/// strings, or lists.
///
/// ## Why is this bad?
/// The `is` and `is not` comparators operate on identity, in that they check
/// whether two objects are the same object. If the objects are not the same
/// object, the comparison will always be `False`. Using `is` and `is not` with
/// constant literals often works "by accident", but are not guaranteed to produce
/// the expected result.
///
/// As of Python 3.8, using `is` and `is not` with constant literals will produce
/// a `SyntaxWarning`.
///
/// This rule will also flag `is` and `is not` comparisons against non-constant
/// literals, like lists, sets, and dictionaries. While such comparisons will
/// not raise a `SyntaxWarning`, they are still likely to be incorrect, as they
/// will compare the identities of the objects instead of their values, which
/// will always evaluate to `False`.
///
/// Instead, use `==` and `!=` to compare literals, which will compare the
/// values of the objects instead of their identities.
///
/// ## Example
/// ```python
/// x = 200
/// if x is 200:
///     print("It's 200!")
/// ```
///
/// Use instead:
/// ```python
/// x = 200
/// if x == 200:
///     print("It's 200!")
/// ```
///
/// ## References
/// - [Python documentation: Identity comparisons](https://docs.python.org/3/reference/expressions.html#is-not)
/// - [Python documentation: Value comparisons](https://docs.python.org/3/reference/expressions.html#value-comparisons)
/// - [_Why does Python log a SyntaxWarning for ‘is’ with literals?_ by Adam Johnson](https://adamj.eu/tech/2020/01/21/why-does-python-3-8-syntaxwarning-for-is-literal/)
// Reported as F632 by `invalid_literal_comparison`.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.39")]
pub(crate) struct IsLiteral {
    // Which identity operator was used; determines both message and fix title.
    cmp_op: IsCmpOp,
}
impl AlwaysFixableViolation for IsLiteral {
    #[derive_message_formats]
    fn message(&self) -> String {
        match self.cmp_op {
            IsCmpOp::Is => "Use `==` to compare constant literals".to_string(),
            IsCmpOp::IsNot => "Use `!=` to compare constant literals".to_string(),
        }
    }
    fn fix_title(&self) -> String {
        let title = match self.cmp_op {
            IsCmpOp::Is => "Replace `is` with `==`",
            IsCmpOp::IsNot => "Replace `is not` with `!=`",
        };
        title.to_string()
    }
}
/// F632
pub(crate) fn invalid_literal_comparison(
checker: &Checker,
left: &Expr,
ops: &[CmpOp],
comparators: &[Expr],
expr: &Expr,
) {
let mut lazy_located = None;
let mut left = left;
for (index, (op, right)) in ops.iter().zip(comparators).enumerate() {
if matches!(op, CmpOp::Is | CmpOp::IsNot)
&& (helpers::is_constant_non_singleton(left)
|| helpers::is_constant_non_singleton(right)
|| helpers::is_mutable_iterable_initializer(left)
|| helpers::is_mutable_iterable_initializer(right))
{
let mut diagnostic =
checker.report_diagnostic(IsLiteral { cmp_op: op.into() }, expr.range());
if lazy_located.is_none() {
lazy_located = Some(locate_cmp_ops(expr, checker.tokens()));
}
diagnostic.try_set_optional_fix(|| {
if let Some(located_op) =
lazy_located.as_ref().and_then(|located| located.get(index))
{
assert_eq!(located_op.op, *op);
if let Ok(content) = match located_op.op {
CmpOp::Is => Ok::<String, Error>("==".to_string()),
CmpOp::IsNot => Ok("!=".to_string()),
node => {
bail!("Failed to fix invalid comparison: {node:?}")
}
} {
Ok(Some(Fix::safe_edit(Edit::range_replacement(
content,
located_op.range,
))))
} else {
Ok(None)
}
} else {
bail!("Failed to fix invalid comparison due to missing op")
}
});
}
left = right;
}
}
// The subset of comparison operators flagged by F632: `is` and `is not`.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
enum IsCmpOp {
    Is,
    IsNot,
}
impl From<&CmpOp> for IsCmpOp {
    fn from(cmp_op: &CmpOp) -> Self {
        match cmp_op {
            CmpOp::Is => IsCmpOp::Is,
            CmpOp::IsNot => IsCmpOp::IsNot,
            // Callers only construct this from `is`/`is not`; anything else
            // indicates a bug at the call site.
            _ => panic!("Expected CmpOp::Is | CmpOp::IsNot"),
        }
    }
}
/// Extract all [`CmpOp`] operators from an expression snippet, with appropriate ranges.
///
/// This method iterates over the token stream and re-identifies [`CmpOp`] nodes, annotating them
/// with valid ranges.
fn locate_cmp_ops(expr: &Expr, tokens: &Tokens) -> Vec<LocatedCmpOp> {
    let mut tok_iter = tokens
        .in_range(expr.range())
        .iter()
        .filter(|token| !token.kind().is_trivia())
        .peekable();
    let mut ops: Vec<LocatedCmpOp> = vec![];
    // Track the nesting level.
    let mut nesting = 0u32;
    loop {
        let Some(token) = tok_iter.next() else {
            break;
        };
        match token.kind() {
            TokenKind::Lpar | TokenKind::Lsqb | TokenKind::Lbrace => {
                nesting = nesting.saturating_add(1);
            }
            TokenKind::Rpar | TokenKind::Rsqb | TokenKind::Rbrace => {
                nesting = nesting.saturating_sub(1);
            }
            _ => {}
        }
        // Operators inside parentheses/brackets/braces belong to nested
        // expressions, not the comparison chain being analyzed; skip them.
        if nesting > 0 {
            continue;
        }
        match token.kind() {
            // `not` only forms a comparison operator when directly followed
            // by `in` (i.e., the two-token operator `not in`).
            TokenKind::Not => {
                if let Some(next_token) = tok_iter.next_if(|token| token.kind() == TokenKind::In) {
                    ops.push(LocatedCmpOp::new(
                        TextRange::new(token.start(), next_token.end()),
                        CmpOp::NotIn,
                    ));
                }
            }
            TokenKind::In => {
                ops.push(LocatedCmpOp::new(token.range(), CmpOp::In));
            }
            // `is` may be the start of the two-token operator `is not`.
            TokenKind::Is => {
                let op = if let Some(next_token) =
                    tok_iter.next_if(|token| token.kind() == TokenKind::Not)
                {
                    LocatedCmpOp::new(
                        TextRange::new(token.start(), next_token.end()),
                        CmpOp::IsNot,
                    )
                } else {
                    LocatedCmpOp::new(token.range(), CmpOp::Is)
                };
                ops.push(op);
            }
            TokenKind::NotEqual => {
                ops.push(LocatedCmpOp::new(token.range(), CmpOp::NotEq));
            }
            TokenKind::EqEqual => {
                ops.push(LocatedCmpOp::new(token.range(), CmpOp::Eq));
            }
            TokenKind::GreaterEqual => {
                ops.push(LocatedCmpOp::new(token.range(), CmpOp::GtE));
            }
            TokenKind::Greater => {
                ops.push(LocatedCmpOp::new(token.range(), CmpOp::Gt));
            }
            TokenKind::LessEqual => {
                ops.push(LocatedCmpOp::new(token.range(), CmpOp::LtE));
            }
            TokenKind::Less => {
                ops.push(LocatedCmpOp::new(token.range(), CmpOp::Lt));
            }
            _ => {}
        }
    }
    ops
}
// A comparison operator together with its source range; the range is used to
// build the `==`/`!=` replacement edit.
#[derive(Debug, Clone, PartialEq, Eq)]
struct LocatedCmpOp {
    range: TextRange,
    op: CmpOp,
}
impl LocatedCmpOp {
    fn new<T: Into<TextRange>>(range: T, op: CmpOp) -> Self {
        Self {
            range: range.into(),
            op,
        }
    }
}
#[cfg(test)]
mod tests {
    use anyhow::Result;
    use ruff_python_ast::CmpOp;
    use ruff_python_parser::parse_expression;
    use ruff_text_size::TextSize;
    use super::{LocatedCmpOp, locate_cmp_ops};
    // Parses `source` as a standalone expression and returns the comparison
    // operators located in its token stream.
    fn extract_cmp_op_locations(source: &str) -> Result<Vec<LocatedCmpOp>> {
        let parsed = parse_expression(source)?;
        Ok(locate_cmp_ops(parsed.expr(), parsed.tokens()))
    }
    #[test]
    fn test_locate_cmp_ops() -> Result<()> {
        let contents = "x == 1";
        assert_eq!(
            extract_cmp_op_locations(contents)?,
            vec![LocatedCmpOp::new(
                TextSize::from(2)..TextSize::from(4),
                CmpOp::Eq
            )]
        );
        let contents = "x != 1";
        assert_eq!(
            extract_cmp_op_locations(contents)?,
            vec![LocatedCmpOp::new(
                TextSize::from(2)..TextSize::from(4),
                CmpOp::NotEq
            )]
        );
        let contents = "x is 1";
        assert_eq!(
            extract_cmp_op_locations(contents)?,
            vec![LocatedCmpOp::new(
                TextSize::from(2)..TextSize::from(4),
                CmpOp::Is
            )]
        );
        let contents = "x is not 1";
        assert_eq!(
            extract_cmp_op_locations(contents)?,
            vec![LocatedCmpOp::new(
                TextSize::from(2)..TextSize::from(8),
                CmpOp::IsNot
            )]
        );
        let contents = "x in 1";
        assert_eq!(
            extract_cmp_op_locations(contents)?,
            vec![LocatedCmpOp::new(
                TextSize::from(2)..TextSize::from(4),
                CmpOp::In
            )]
        );
        let contents = "x not in 1";
        assert_eq!(
            extract_cmp_op_locations(contents)?,
            vec![LocatedCmpOp::new(
                TextSize::from(2)..TextSize::from(8),
                CmpOp::NotIn
            )]
        );
        // Operators inside parentheses are nested and must be ignored.
        let contents = "x != (1 is not 2)";
        assert_eq!(
            extract_cmp_op_locations(contents)?,
            vec![LocatedCmpOp::new(
                TextSize::from(2)..TextSize::from(4),
                CmpOp::NotEq
            )]
        );
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_quotes/settings.rs | crates/ruff_linter/src/rules/flake8_quotes/settings.rs | //! Settings for the `flake8-quotes` plugin.
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Formatter};
use crate::display_settings;
use ruff_macros::CacheKey;
// Serialized in kebab-case as part of the linter configuration, and hashed
// into the cache key.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, CacheKey)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Default)]
pub enum Quote {
    /// Use double quotes.
    #[default]
    Double,
    /// Use single quotes.
    Single,
}
impl From<ruff_python_ast::str::Quote> for Quote {
    /// Converts the AST's quote representation into the plugin's setting type.
    fn from(value: ruff_python_ast::str::Quote) -> Self {
        match value {
            ruff_python_ast::str::Quote::Double => Self::Double,
            ruff_python_ast::str::Quote::Single => Self::Single,
        }
    }
}
#[derive(Debug, Clone, CacheKey)]
pub struct Settings {
    // Preferred quote style for single-line (inline) strings.
    pub inline_quotes: Quote,
    // Preferred quote style for triple-quoted (multiline) strings.
    pub multiline_quotes: Quote,
    // Preferred quote style for docstrings.
    pub docstring_quotes: Quote,
    // Whether switching quote styles to avoid escaped quotes is permitted
    // (mirrors flake8-quotes' `avoid-escape` option).
    pub avoid_escape: bool,
}
impl Default for Settings {
    fn default() -> Self {
        // Double quotes everywhere, with escape avoidance enabled.
        Self {
            inline_quotes: Quote::default(),
            multiline_quotes: Quote::default(),
            docstring_quotes: Quote::default(),
            avoid_escape: true,
        }
    }
}
impl Display for Settings {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Render each field under the `linter.flake8_quotes` namespace.
        display_settings! {
            formatter = f,
            namespace = "linter.flake8_quotes",
            fields = [
                self.inline_quotes,
                self.multiline_quotes,
                self.docstring_quotes,
                self.avoid_escape
            ]
        }
        Ok(())
    }
}
impl Quote {
    /// Returns the opposite quote style.
    #[must_use]
    pub const fn opposite(self) -> Self {
        match self {
            Self::Double => Self::Single,
            Self::Single => Self::Double,
        }
    }
    /// Get the character used to represent this quote.
    pub const fn as_char(self) -> char {
        match self {
            Self::Double => '"',
            Self::Single => '\'',
        }
    }
}
impl Display for Quote {
    /// Renders the quote preference as the lowercase word used in settings
    /// output: "double" or "single".
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            Self::Double => "double",
            Self::Single => "single",
        };
        f.write_str(label)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_quotes/helpers.rs | crates/ruff_linter/src/rules/flake8_quotes/helpers.rs | use ruff_python_ast::{AnyStringFlags, StringFlags};
use ruff_text_size::TextLen;
/// Returns the raw contents of the string given the string's contents and flags.
/// This is a string without the prefix and quotes.
pub(super) fn raw_contents(contents: &str, flags: AnyStringFlags) -> &str {
    // Slice off the opener (prefix plus opening quote) and the closer
    // (closing quote); both lengths come from the string's flags.
    &contents[flags.opener_len().to_usize()..(contents.text_len() - flags.closer_len()).to_usize()]
}
/// Return `true` if the haystack contains an escaped quote.
pub(super) fn contains_escaped_quote(haystack: &str, quote: char) -> bool {
for index in memchr::memchr_iter(quote as u8, haystack.as_bytes()) {
// If the quote is preceded by an even number of backslashes, it's not escaped.
if haystack.as_bytes()[..index]
.iter()
.rev()
.take_while(|&&c| c == b'\\')
.count()
% 2
!= 0
{
return true;
}
}
false
}
/// Return a modified version of the string with all quote escapes removed.
///
/// A backslash is dropped only when it directly escapes `quote` — i.e. the
/// next character is the quote and the backslash is not itself escaped by a
/// preceding odd run of backslashes. All other characters pass through.
pub(super) fn unescape_string(haystack: &str, quote: char) -> String {
    let mut result = String::with_capacity(haystack.len());
    let mut chars = haystack.chars().peekable();
    // Length of the run of consecutive backslashes seen so far.
    let mut run_of_backslashes = 0;
    while let Some(ch) = chars.next() {
        if ch != '\\' {
            result.push(ch);
            run_of_backslashes = 0;
        } else if chars.peek() == Some(&quote) && run_of_backslashes % 2 == 0 {
            // This backslash escapes the quote: drop it, keeping the quote.
            run_of_backslashes = 0;
        } else if chars.peek().is_none() {
            // Trailing backslash at end of input: keep it as-is.
            result.push(ch);
        } else {
            run_of_backslashes += 1;
            result.push(ch);
        }
    }
    result
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_quotes/mod.rs | crates/ruff_linter/src/rules/flake8_quotes/mod.rs | //! Rules from [flake8-quotes](https://pypi.org/project/flake8-quotes/).
mod helpers;
pub(crate) mod rules;
pub mod settings;
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use crate::assert_diagnostics;
use crate::registry::Rule;
use crate::settings::LinterSettings;
use crate::test::test_path;
use ruff_python_ast::PythonVersion;
use super::settings::Quote;
    #[test_case(Path::new("doubles.py"))]
    #[test_case(Path::new("doubles_escaped.py"))]
    #[test_case(Path::new("doubles_escaped_unnecessary.py"))]
    #[test_case(Path::new("doubles_implicit.py"))]
    #[test_case(Path::new("doubles_multiline_string.py"))]
    #[test_case(Path::new("doubles_noqa.py"))]
    #[test_case(Path::new("doubles_wrapped.py"))]
    #[test_case(Path::new("doubles_would_be_triple_quotes.py"))]
    fn require_singles(path: &Path) -> Result<()> {
        // Lint each fixture with single-quote preferences across all
        // flake8-quotes rules; results are snapshot-tested per fixture.
        let snapshot = format!("require_singles_over_{}", path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("flake8_quotes").join(path).as_path(),
            &LinterSettings {
                flake8_quotes: super::settings::Settings {
                    inline_quotes: Quote::Single,
                    multiline_quotes: Quote::Single,
                    docstring_quotes: Quote::Single,
                    avoid_escape: true,
                },
                ..LinterSettings::for_rules(vec![
                    Rule::BadQuotesInlineString,
                    Rule::BadQuotesMultilineString,
                    Rule::BadQuotesDocstring,
                    Rule::AvoidableEscapedQuote,
                    Rule::UnnecessaryEscapedQuote,
                ])
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
#[test]
fn require_singles_over_doubles_escaped_py311() -> Result<()> {
let diagnostics = test_path(
Path::new("flake8_quotes/doubles_escaped.py"),
&LinterSettings {
flake8_quotes: super::settings::Settings {
inline_quotes: Quote::Single,
multiline_quotes: Quote::Single,
docstring_quotes: Quote::Single,
avoid_escape: true,
},
..LinterSettings::for_rule(Rule::AvoidableEscapedQuote)
.with_target_version(PythonVersion::PY311)
},
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test]
fn require_doubles_over_singles_escaped_py311() -> Result<()> {
let diagnostics = test_path(
Path::new("flake8_quotes/singles_escaped.py"),
&LinterSettings {
flake8_quotes: super::settings::Settings {
inline_quotes: Quote::Double,
multiline_quotes: Quote::Double,
docstring_quotes: Quote::Double,
avoid_escape: true,
},
..LinterSettings::for_rule(Rule::AvoidableEscapedQuote)
.with_target_version(PythonVersion::PY311)
},
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test_case(Path::new("singles.py"))]
#[test_case(Path::new("singles_escaped.py"))]
#[test_case(Path::new("singles_escaped_unnecessary.py"))]
#[test_case(Path::new("singles_implicit.py"))]
#[test_case(Path::new("singles_multiline_string.py"))]
#[test_case(Path::new("singles_noqa.py"))]
#[test_case(Path::new("singles_wrapped.py"))]
#[test_case(Path::new("singles_would_be_triple_quotes.py"))]
fn require_doubles(path: &Path) -> Result<()> {
let snapshot = format!("require_doubles_over_{}", path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_quotes").join(path).as_path(),
&LinterSettings {
flake8_quotes: super::settings::Settings {
inline_quotes: Quote::Double,
multiline_quotes: Quote::Double,
docstring_quotes: Quote::Double,
avoid_escape: true,
},
..LinterSettings::for_rules(vec![
Rule::BadQuotesInlineString,
Rule::BadQuotesMultilineString,
Rule::BadQuotesDocstring,
Rule::AvoidableEscapedQuote,
Rule::UnnecessaryEscapedQuote,
])
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Path::new("docstring_doubles.py"))]
#[test_case(Path::new("docstring_doubles_module_multiline.py"))]
#[test_case(Path::new("docstring_doubles_module_singleline.py"))]
#[test_case(Path::new("docstring_doubles_class.py"))]
#[test_case(Path::new("docstring_doubles_function.py"))]
#[test_case(Path::new("docstring_singles.py"))]
#[test_case(Path::new("docstring_singles_module_multiline.py"))]
#[test_case(Path::new("docstring_singles_module_singleline.py"))]
#[test_case(Path::new("docstring_singles_class.py"))]
#[test_case(Path::new("docstring_singles_function.py"))]
#[test_case(Path::new("docstring_singles_mixed_quotes_module_singleline_var_1.py"))]
#[test_case(Path::new("docstring_singles_mixed_quotes_module_singleline_var_2.py"))]
#[test_case(Path::new("docstring_singles_mixed_quotes_class_var_1.py"))]
#[test_case(Path::new("docstring_singles_mixed_quotes_class_var_2.py"))]
fn require_docstring_doubles(path: &Path) -> Result<()> {
let snapshot = format!("require_docstring_doubles_over_{}", path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_quotes").join(path).as_path(),
&LinterSettings {
flake8_quotes: super::settings::Settings {
inline_quotes: Quote::Single,
multiline_quotes: Quote::Single,
docstring_quotes: Quote::Double,
avoid_escape: true,
},
..LinterSettings::for_rules(vec![
Rule::BadQuotesInlineString,
Rule::BadQuotesMultilineString,
Rule::BadQuotesDocstring,
Rule::AvoidableEscapedQuote,
Rule::UnnecessaryEscapedQuote,
])
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Path::new("docstring_doubles.py"))]
#[test_case(Path::new("docstring_doubles_module_multiline.py"))]
#[test_case(Path::new("docstring_doubles_module_singleline.py"))]
#[test_case(Path::new("docstring_doubles_class.py"))]
#[test_case(Path::new("docstring_doubles_function.py"))]
#[test_case(Path::new("docstring_singles.py"))]
#[test_case(Path::new("docstring_singles_module_multiline.py"))]
#[test_case(Path::new("docstring_singles_module_singleline.py"))]
#[test_case(Path::new("docstring_singles_class.py"))]
#[test_case(Path::new("docstring_singles_function.py"))]
#[test_case(Path::new("docstring_doubles_mixed_quotes_module_singleline_var_1.py"))]
#[test_case(Path::new("docstring_doubles_mixed_quotes_module_singleline_var_2.py"))]
#[test_case(Path::new("docstring_doubles_mixed_quotes_class_var_1.py"))]
#[test_case(Path::new("docstring_doubles_mixed_quotes_class_var_2.py"))]
fn require_docstring_singles(path: &Path) -> Result<()> {
let snapshot = format!("require_docstring_singles_over_{}", path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_quotes").join(path).as_path(),
&LinterSettings {
flake8_quotes: super::settings::Settings {
inline_quotes: Quote::Single,
multiline_quotes: Quote::Double,
docstring_quotes: Quote::Single,
avoid_escape: true,
},
..LinterSettings::for_rules(vec![
Rule::BadQuotesInlineString,
Rule::BadQuotesMultilineString,
Rule::BadQuotesDocstring,
Rule::AvoidableEscapedQuote,
Rule::UnnecessaryEscapedQuote,
])
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Path::new("doubles_all.py"))]
fn only_inline(path: &Path) -> Result<()> {
let snapshot = format!("only_inline_{}", path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_quotes").join(path).as_path(),
&LinterSettings {
flake8_quotes: super::settings::Settings {
inline_quotes: Quote::Single,
multiline_quotes: Quote::Single,
docstring_quotes: Quote::Single,
avoid_escape: true,
},
..LinterSettings::for_rules(vec![Rule::BadQuotesInlineString])
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Path::new("doubles_all.py"))]
fn only_multiline(path: &Path) -> Result<()> {
let snapshot = format!("only_multiline_{}", path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_quotes").join(path).as_path(),
&LinterSettings {
flake8_quotes: super::settings::Settings {
inline_quotes: Quote::Single,
multiline_quotes: Quote::Single,
docstring_quotes: Quote::Single,
avoid_escape: true,
},
..LinterSettings::for_rules(vec![Rule::BadQuotesMultilineString])
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Path::new("doubles_all.py"))]
fn only_docstring(path: &Path) -> Result<()> {
let snapshot = format!("only_docstring_{}", path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_quotes").join(path).as_path(),
&LinterSettings {
flake8_quotes: super::settings::Settings {
inline_quotes: Quote::Single,
multiline_quotes: Quote::Single,
docstring_quotes: Quote::Single,
avoid_escape: true,
},
..LinterSettings::for_rules(vec![Rule::BadQuotesDocstring])
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_quotes/rules/check_string_quotes.rs | crates/ruff_linter/src/rules/flake8_quotes/rules/check_string_quotes.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::StringLike;
use ruff_text_size::{Ranged, TextRange};
use crate::Locator;
use crate::checkers::ast::Checker;
use crate::registry::Rule;
use crate::{AlwaysFixableViolation, Edit, Fix, FixAvailability, Violation};
use crate::rules::flake8_quotes::settings::Quote;
/// ## What it does
/// Checks for inline strings that use single quotes or double quotes,
/// depending on the value of the [`lint.flake8-quotes.inline-quotes`] option.
///
/// ## Why is this bad?
/// Consistency is good. Use either single or double quotes for inline
/// strings, but be consistent.
///
/// ## Example
/// ```python
/// foo = 'bar'
/// ```
///
/// Assuming `inline-quotes` is set to `double`, use instead:
/// ```python
/// foo = "bar"
/// ```
///
/// ## Options
/// - `lint.flake8-quotes.inline-quotes`
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent quotes for inline strings, making the rule
/// redundant.
///
/// [formatter]: https://docs.astral.sh/ruff/formatter
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.88")]
pub(crate) struct BadQuotesInlineString {
    /// The quote style configured via `lint.flake8-quotes.inline-quotes`.
    preferred_quote: Quote,
}
impl Violation for BadQuotesInlineString {
    // A fix is offered unless rewriting the quotes would produce a syntax
    // error (see `strings`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        match self.preferred_quote {
            Quote::Double => "Single quotes found but double quotes preferred".to_string(),
            Quote::Single => "Double quotes found but single quotes preferred".to_string(),
        }
    }
    fn fix_title(&self) -> Option<String> {
        let title = match self.preferred_quote {
            Quote::Double => "Replace single quotes with double quotes",
            Quote::Single => "Replace double quotes with single quotes",
        };
        Some(title.to_string())
    }
}
/// ## What it does
/// Checks for multiline strings that use single quotes or double quotes,
/// depending on the value of the [`lint.flake8-quotes.multiline-quotes`]
/// setting.
///
/// ## Why is this bad?
/// Consistency is good. Use either single or double quotes for multiline
/// strings, but be consistent.
///
/// ## Example
/// ```python
/// foo = '''
/// bar
/// '''
/// ```
///
/// Assuming `multiline-quotes` is set to `double`, use instead:
/// ```python
/// foo = """
/// bar
/// """
/// ```
///
/// ## Options
/// - `lint.flake8-quotes.multiline-quotes`
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces double quotes for multiline strings, making the rule
/// redundant.
///
/// [formatter]: https://docs.astral.sh/ruff/formatter
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.88")]
pub(crate) struct BadQuotesMultilineString {
    /// The quote style configured via `lint.flake8-quotes.multiline-quotes`.
    preferred_quote: Quote,
}
impl AlwaysFixableViolation for BadQuotesMultilineString {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Access the field directly, for consistency with the sibling
        // `BadQuotesInlineString`/`BadQuotesDocstring` implementations.
        match self.preferred_quote {
            Quote::Double => "Single quote multiline found but double quotes preferred".to_string(),
            Quote::Single => "Double quote multiline found but single quotes preferred".to_string(),
        }
    }
    fn fix_title(&self) -> String {
        match self.preferred_quote {
            Quote::Double => "Replace single multiline quotes with double quotes".to_string(),
            Quote::Single => "Replace double multiline quotes with single quotes".to_string(),
        }
    }
}
/// ## What it does
/// Checks for docstrings that use single quotes or double quotes, depending
/// on the value of the [`lint.flake8-quotes.docstring-quotes`] setting.
///
/// ## Why is this bad?
/// Consistency is good. Use either single or double quotes for docstring
/// strings, but be consistent.
///
/// ## Example
/// ```python
/// '''
/// bar
/// '''
/// ```
///
/// Assuming `docstring-quotes` is set to `double`, use instead:
/// ```python
/// """
/// bar
/// """
/// ```
///
/// ## Options
/// - `lint.flake8-quotes.docstring-quotes`
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces double quotes for docstrings, making the rule
/// redundant.
///
/// [formatter]: https://docs.astral.sh/ruff/formatter
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.88")]
pub(crate) struct BadQuotesDocstring {
    /// The quote style configured via `lint.flake8-quotes.docstring-quotes`.
    preferred_quote: Quote,
}
impl Violation for BadQuotesDocstring {
    // A fix is offered unless rewriting the quotes would produce a syntax
    // error (see `docstring`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        match self.preferred_quote {
            Quote::Double => "Single quote docstring found but double quotes preferred".to_string(),
            Quote::Single => "Double quote docstring found but single quotes preferred".to_string(),
        }
    }
    fn fix_title(&self) -> Option<String> {
        match self.preferred_quote {
            Quote::Double => Some("Replace single quotes docstring with double quotes".to_string()),
            Quote::Single => Some("Replace double quotes docstring with single quotes".to_string()),
        }
    }
}
/// The triple-quote delimiter matching the preferred quote style.
const fn good_multiline(quote: Quote) -> &'static str {
    match quote {
        Quote::Single => "'''",
        Quote::Double => "\"\"\"",
    }
}
/// A "known good" multiline ending: a closing quote of the preferred style
/// immediately followed by the opposite style's triple-quote delimiter.
const fn good_multiline_ending(quote: Quote) -> &'static str {
    match quote {
        Quote::Single => "'\"\"\"",
        Quote::Double => "\"'''",
    }
}
/// The single quote character matching the preferred quote style.
const fn good_docstring(quote: Quote) -> char {
    match quote {
        Quote::Single => '\'',
        Quote::Double => '"',
    }
}
/// Lexical breakdown of a single string-literal token.
#[derive(Debug)]
struct Trivia<'a> {
    last_quote_char: char,
    prefix: &'a str,
    raw_text: &'a str,
    is_multiline: bool,
}
impl Trivia<'_> {
    /// Whether the literal is an empty string (`""` or `''`).
    fn has_empty_text(&self) -> bool {
        matches!(self.raw_text, "\"\"" | "''")
    }
}
impl<'a> From<&'a str> for Trivia<'a> {
    /// Split a raw string-literal token into its prefix (e.g. the `u` in
    /// `u"foo"`), the quoted text, and whether it is triple-quoted.
    fn from(value: &'a str) -> Self {
        // The token always ends with its closing quote character.
        let last_quote_char = value.chars().last().unwrap();
        // Remove any prefixes (e.g., remove `u` from `u"foo"`): everything
        // before the first occurrence of the quote character is the prefix.
        let first_quote_offset = value.find(last_quote_char).unwrap();
        let prefix = &value[..first_quote_offset];
        let raw_text = &value[first_quote_offset..];
        // Determine if the string is multiline-based (triple-quoted): it must
        // open with three identical quote characters. Matching on the first
        // three `char`s (instead of a byte-length check plus `unwrap()`s)
        // cannot panic on a token whose byte length is >= 3 but whose char
        // count is < 3.
        let mut chars = raw_text.chars();
        let is_multiline = matches!(
            (chars.next(), chars.next(), chars.next()),
            (Some(first), Some(second), Some(third)) if first == second && second == third
        );
        Self {
            last_quote_char,
            prefix,
            raw_text,
            is_multiline,
        }
    }
}
/// Returns `true` if the [`TextRange`] is preceded by two consecutive quotes.
fn text_starts_at_consecutive_quote(locator: &Locator, range: TextRange, quote: Quote) -> bool {
    let expected = good_docstring(quote);
    let mut preceding = locator.up_to(range.start()).chars().rev();
    matches!(
        (preceding.next(), preceding.next()),
        (Some(first), Some(second)) if first == expected && second == expected
    )
}
/// Returns `true` if the [`TextRange`] ends at a quote character.
fn text_ends_at_quote(locator: &Locator, range: TextRange, quote: Quote) -> bool {
    let following = locator.after(range.end());
    following.starts_with(good_docstring(quote))
}
/// Q002
///
/// Report (and, when safe, fix) a docstring whose quote style disagrees with
/// the configured `lint.flake8-quotes.docstring-quotes` preference.
fn docstring(checker: &Checker, range: TextRange) {
    let quotes_settings = &checker.settings().flake8_quotes;
    let locator = checker.locator();
    let text = locator.slice(range);
    let trivia: Trivia = text.into();
    if trivia.has_empty_text()
        && text_ends_at_quote(locator, range, quotes_settings.docstring_quotes)
    {
        // Fixing this would result in a one-sided multi-line docstring, which would
        // introduce a syntax error.
        checker.report_diagnostic(
            BadQuotesDocstring {
                preferred_quote: quotes_settings.docstring_quotes,
            },
            range,
        );
        return;
    }
    // If the body already contains the preferred quote character, switching
    // the outer quotes would require escaping; leave the docstring alone.
    if trivia
        .raw_text
        .contains(good_docstring(quotes_settings.docstring_quotes))
    {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(
        BadQuotesDocstring {
            preferred_quote: quotes_settings.docstring_quotes,
        },
        range,
    );
    let quote_count = if trivia.is_multiline { 3 } else { 1 };
    let string_contents = &trivia.raw_text[quote_count..trivia.raw_text.len() - quote_count];
    let quote = good_docstring(quotes_settings.docstring_quotes)
        .to_string()
        .repeat(quote_count);
    let mut fixed_contents =
        String::with_capacity(trivia.prefix.len() + string_contents.len() + quote.len() * 2);
    fixed_contents.push_str(trivia.prefix);
    // NOTE: this was previously mangled to `push_str("e);` (an unterminated
    // string literal, i.e. an HTML-escaped `&quote`); the intended call
    // appends the replacement quote delimiter.
    fixed_contents.push_str(&quote);
    fixed_contents.push_str(string_contents);
    fixed_contents.push_str(&quote);
    diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
        fixed_contents,
        range,
    )));
}
/// Q000, Q001
///
/// Check a run of (possibly implicitly concatenated) string parts against the
/// inline-quote and multiline-quote preferences, emitting a fix only where
/// swapping the delimiters cannot change program semantics.
fn strings(checker: &Checker, sequence: &[TextRange]) {
    let quotes_settings = &checker.settings().flake8_quotes;
    let locator = checker.locator();
    // Pre-compute the lexical breakdown of every part once.
    let trivia = sequence
        .iter()
        .map(|range| {
            let text = locator.slice(*range);
            let trivia: Trivia = text.into();
            trivia
        })
        .collect::<Vec<_>>();
    // Return `true` if any of the strings are inline strings that contain the quote
    // character in the body.
    let relax_quote = trivia.iter().any(|trivia| {
        if trivia.is_multiline {
            return false;
        }
        if trivia.last_quote_char == quotes_settings.inline_quotes.as_char() {
            return false;
        }
        let string_contents = &trivia.raw_text[1..trivia.raw_text.len() - 1];
        string_contents.contains(quotes_settings.inline_quotes.as_char())
    });
    for (range, trivia) in sequence.iter().zip(trivia) {
        if trivia.is_multiline {
            // If multiline strings aren't enforced, ignore it.
            if !checker.is_rule_enabled(Rule::BadQuotesMultilineString) {
                continue;
            }
            // If our string is or contains a known good string, ignore it.
            if trivia
                .raw_text
                .contains(good_multiline(quotes_settings.multiline_quotes))
            {
                continue;
            }
            // If our string ends with a known good ending, then ignore it.
            if trivia
                .raw_text
                .ends_with(good_multiline_ending(quotes_settings.multiline_quotes))
            {
                continue;
            }
            let mut diagnostic = checker.report_diagnostic(
                BadQuotesMultilineString {
                    preferred_quote: quotes_settings.multiline_quotes,
                },
                *range,
            );
            // Swap the three-character delimiters, keeping prefix and body.
            let string_contents = &trivia.raw_text[3..trivia.raw_text.len() - 3];
            let quote = good_multiline(quotes_settings.multiline_quotes);
            let mut fixed_contents = String::with_capacity(
                trivia.prefix.len() + string_contents.len() + quote.len() * 2,
            );
            fixed_contents.push_str(trivia.prefix);
            fixed_contents.push_str(quote);
            fixed_contents.push_str(string_contents);
            fixed_contents.push_str(quote);
            diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                fixed_contents,
                *range,
            )));
        } else if trivia.last_quote_char != quotes_settings.inline_quotes.as_char()
            // If we're not using the preferred type, only allow use to avoid escapes.
            && !relax_quote
        {
            // If inline strings aren't enforced, ignore it.
            if !checker.is_rule_enabled(Rule::BadQuotesInlineString) {
                continue;
            }
            if trivia.has_empty_text()
                && text_ends_at_quote(locator, *range, quotes_settings.inline_quotes)
            {
                // Fixing this would introduce a syntax error. For example, changing the initial
                // single quotes to double quotes would result in a syntax error:
                // ```python
                // ''"assert" ' SAM macro definitions '''
                // ```
                checker.report_diagnostic(
                    BadQuotesInlineString {
                        preferred_quote: quotes_settings.inline_quotes,
                    },
                    *range,
                );
                continue;
            }
            if text_starts_at_consecutive_quote(locator, *range, quotes_settings.inline_quotes) {
                // Fixing this would introduce a syntax error. For example, changing the double
                // doubles to single quotes would result in a syntax error:
                // ```python
                // ''"assert" ' SAM macro definitions '''
                // ```
                checker.report_diagnostic(
                    BadQuotesInlineString {
                        preferred_quote: quotes_settings.inline_quotes,
                    },
                    *range,
                );
                continue;
            }
            let mut diagnostic = checker.report_diagnostic(
                BadQuotesInlineString {
                    preferred_quote: quotes_settings.inline_quotes,
                },
                *range,
            );
            // Swap the single-character delimiters, keeping prefix and body.
            let quote = quotes_settings.inline_quotes.as_char();
            let string_contents = &trivia.raw_text[1..trivia.raw_text.len() - 1];
            let mut fixed_contents =
                String::with_capacity(trivia.prefix.len() + string_contents.len() + 2);
            fixed_contents.push_str(trivia.prefix);
            fixed_contents.push(quote);
            fixed_contents.push_str(string_contents);
            fixed_contents.push(quote);
            diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                fixed_contents,
                *range,
            )));
        }
    }
}
/// Generate `flake8-quote` diagnostics from a token stream.
///
/// Dispatches every part of the (possibly implicitly concatenated) string to
/// either the docstring check (Q002) or the inline/multiline checks
/// (Q000/Q001).
pub(crate) fn check_string_quotes(checker: &Checker, string_like: StringLike) {
    // Ignore if the string is part of a forward reference. For example,
    // `x: "Literal['foo', 'bar']"`.
    if checker.semantic().in_string_type_definition() {
        return;
    }
    // TODO(dhruvmanila): Support checking for escaped quotes in f-strings.
    if checker
        .semantic()
        .in_interpolated_string_replacement_field()
    {
        return;
    }
    let ranges: Vec<_> = string_like.parts().map(|part| part.range()).collect();
    if checker.semantic().in_pep_257_docstring() {
        if checker.is_rule_enabled(Rule::BadQuotesDocstring) {
            for range in ranges {
                docstring(checker, range);
            }
        }
    } else if checker
        .any_rule_enabled(&[Rule::BadQuotesInlineString, Rule::BadQuotesMultilineString])
    {
        strings(checker, &ranges);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_quotes/rules/avoidable_escaped_quote.rs | crates/ruff_linter/src/rules/flake8_quotes/rules/avoidable_escaped_quote.rs | use flake8_quotes::helpers::{contains_escaped_quote, raw_contents, unescape_string};
use flake8_quotes::settings::Quote;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::visitor::{Visitor, walk_f_string, walk_t_string};
use ruff_python_ast::{self as ast, AnyStringFlags, PythonVersion, StringFlags, StringLike};
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::checkers::ast::Checker;
use crate::rules::flake8_quotes;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for strings that include escaped quotes, and suggests changing
/// the quote style to avoid the need to escape them.
///
/// ## Why is this bad?
/// It's preferable to avoid escaped quotes in strings. By changing the
/// outer quote style, you can avoid escaping inner quotes.
///
/// ## Example
/// ```python
/// foo = "bar\"s"
/// ```
///
/// Use instead:
/// ```python
/// foo = 'bar"s'
/// ```
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter automatically removes unnecessary escapes, making the rule
/// redundant.
///
/// ## Options
///
/// - `lint.flake8-quotes.inline-quotes`
///
/// [formatter]: https://docs.astral.sh/ruff/formatter
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.88")]
pub(crate) struct AvoidableEscapedQuote;
impl AlwaysFixableViolation for AvoidableEscapedQuote {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Change outer quotes to avoid escaping inner quotes".to_string()
    }
    // The fix title intentionally repeats the message: the rewrite is fully
    // described by it.
    fn fix_title(&self) -> String {
        "Change outer quotes to avoid escaping inner quotes".to_string()
    }
}
/// Q003
///
/// Entry point: walks every part of the string-like node with the
/// [`AvoidableEscapedQuoteChecker`] visitor.
pub(crate) fn avoidable_escaped_quote(checker: &Checker, string_like: StringLike) {
    if checker.semantic().in_pep_257_docstring()
        || checker.semantic().in_string_type_definition()
        // This rule has support for strings nested inside another f-strings but they're checked
        // via the outermost f-string. This means that we shouldn't be checking any nested string
        // or f-string.
        || checker.semantic().in_interpolated_string_replacement_field()
    {
        return;
    }
    let mut visitor = AvoidableEscapedQuoteChecker::new(checker, checker.target_version());
    // Dispatch each component of the (possibly implicitly concatenated)
    // string to the matching visitor hook.
    for part in string_like.parts() {
        match part {
            ast::StringLikePart::String(string_literal) => {
                visitor.visit_string_literal(string_literal);
            }
            ast::StringLikePart::Bytes(bytes_literal) => {
                visitor.visit_bytes_literal(bytes_literal);
            }
            ast::StringLikePart::FString(f_string) => visitor.visit_f_string(f_string),
            ast::StringLikePart::TString(t_string) => visitor.visit_t_string(t_string),
        }
    }
}
/// Checks for `Q003` violations using the [`Visitor`] implementation.
struct AvoidableEscapedQuoteChecker<'a, 'b> {
    checker: &'a Checker<'b>,
    // Cached `flake8-quotes` settings, to avoid re-fetching them per node.
    quotes_settings: &'a flake8_quotes::settings::Settings,
    // Whether the target Python version allows reusing the outer quote
    // character inside f-string replacement fields (PEP 701, Python 3.12+).
    supports_pep701: bool,
}
impl<'a, 'b> AvoidableEscapedQuoteChecker<'a, 'b> {
    fn new(checker: &'a Checker<'b>, target_version: PythonVersion) -> Self {
        Self {
            checker,
            quotes_settings: &checker.settings().flake8_quotes,
            supports_pep701: target_version.supports_pep_701(),
        }
    }
}
impl Visitor<'_> for AvoidableEscapedQuoteChecker<'_, '_> {
    /// Check a plain string-literal part.
    fn visit_string_literal(&mut self, string_literal: &ast::StringLiteral) {
        check_string_or_bytes(
            self.checker,
            self.quotes_settings,
            string_literal.range(),
            AnyStringFlags::from(string_literal.flags),
        );
    }
    /// Check a bytes-literal part.
    fn visit_bytes_literal(&mut self, bytes_literal: &ast::BytesLiteral) {
        check_string_or_bytes(
            self.checker,
            self.quotes_settings,
            bytes_literal.range(),
            AnyStringFlags::from(bytes_literal.flags),
        );
    }
    /// Check an f-string part (and recurse into nested f-strings).
    fn visit_f_string(&mut self, f_string: &'_ ast::FString) {
        // If the target version doesn't support PEP 701, skip this entire f-string if it contains
        // any string literal in any of the expression element. For example:
        //
        // ```python
        // f"\"foo\" {'nested'}"
        // ```
        //
        // If we try to fix the above example, the outer and inner quote will be the same which is
        // invalid for any Python version before 3.12:
        //
        // ```python
        // f'"foo" {'nested'}"
        // ```
        //
        // Note that this check needs to be done globally to ignore the entire f-string. It is
        // implicitly global in that we avoid recursing into this f-string if this is the case.
        if !self.supports_pep701 {
            let contains_any_string = {
                let mut visitor = ContainsAnyString::default();
                // We need to use the `walk_f_string` instead of `visit_f_string` to avoid
                // considering the top level f-string.
                walk_f_string(&mut visitor, f_string);
                visitor.result
            };
            if contains_any_string {
                return;
            }
        }
        let opposite_quote_char = self.quotes_settings.inline_quotes.opposite().as_char();
        // If any literal part of this f-string contains the quote character which is opposite to
        // the configured inline quotes, we can't change the quote style for this f-string. For
        // example:
        //
        // ```py
        // f"\"hello\" {x} 'world'"
        // ```
        //
        // If we try to fix the above example, the f-string will end in the middle and "world" will
        // be considered as a variable which is outside this f-string:
        //
        // ```py
        // f'"hello" {x} 'world''
        // #              ^
        // #              f-string ends here now
        // ```
        //
        // The check is local to this f-string and it shouldn't check for any literal parts of any
        // nested f-string. This is correct because by this point, we know that the target version
        // is 3.12 or that this f-string doesn't have any strings nested in it. For example:
        //
        // ```py
        // f'\'normal\' {f'\'nested\' {x} "double quotes"'} normal'
        // ```
        //
        // This contains a nested f-string but if we reached here that means the target version
        // supports PEP 701. The double quotes in the nested f-string shouldn't affect the outer
        // f-string because the following is valid for Python version 3.12 and later:
        //
        // ```py
        // f"'normal' {f'\'nested\' {x} "double quotes"'} normal"
        // ```
        if !f_string
            .elements
            .literals()
            .any(|literal| contains_quote(literal, opposite_quote_char))
        {
            check_interpolated_string(
                self.checker,
                self.quotes_settings,
                AnyStringFlags::from(f_string.flags),
                &f_string.elements,
                f_string.range,
            );
        }
        // Recurse into replacement fields to handle nested f-strings.
        walk_f_string(self, f_string);
    }
    /// Check a t-string part (and recurse into nested t-strings).
    fn visit_t_string(&mut self, t_string: &'_ ast::TString) {
        let opposite_quote_char = self.quotes_settings.inline_quotes.opposite().as_char();
        // If any literal part of this t-string contains the quote character which is opposite to
        // the configured inline quotes, we can't change the quote style for this t-string. For
        // example:
        //
        // ```py
        // t"\"hello\" {x} 'world'"
        // ```
        //
        // If we try to fix the above example, the t-string will end in the middle and "world" will
        // be considered as a variable which is outside this t-string:
        //
        // ```py
        // t'"hello" {x} 'world''
        // #              ^
        // #              t-string ends here now
        // ```
        //
        // The check is local to this t-string and it shouldn't check for any literal parts of any
        // nested t-string.
        if !t_string
            .elements
            .literals()
            .any(|literal| contains_quote(literal, opposite_quote_char))
        {
            check_interpolated_string(
                self.checker,
                self.quotes_settings,
                AnyStringFlags::from(t_string.flags),
                &t_string.elements,
                t_string.range,
            );
        }
        // Recurse into replacement fields to handle nested t-strings.
        walk_t_string(self, t_string);
    }
}
/// Checks for unnecessary escaped quotes in a string or bytes literal.
///
/// # Panics
///
/// If the string kind is an f-string or a t-string.
fn check_string_or_bytes(
    checker: &Checker,
    quotes_settings: &flake8_quotes::settings::Settings,
    range: TextRange,
    flags: AnyStringFlags,
) {
    assert!(!flags.is_interpolated_string());
    let locator = checker.locator();
    // Raw and triple-quoted strings are never rewritten by this rule.
    if flags.is_triple_quoted() || flags.is_raw_string() {
        return;
    }
    // Check if we're using the preferred quotation style.
    if Quote::from(flags.quote_style()) != quotes_settings.inline_quotes {
        return;
    }
    let contents = raw_contents(locator.slice(range), flags);
    // Only fire when there is an escaped preferred quote to remove, and the
    // opposite quote never appears in the body (otherwise swapping the outer
    // quotes would require introducing new escapes).
    if !contains_escaped_quote(contents, quotes_settings.inline_quotes.as_char())
        || contains_quote(contents, quotes_settings.inline_quotes.opposite().as_char())
    {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(AvoidableEscapedQuote, range);
    let fixed_contents = format!(
        "{prefix}{quote}{value}{quote}",
        prefix = flags.prefix(),
        quote = quotes_settings.inline_quotes.opposite().as_char(),
        value = unescape_string(contents, quotes_settings.inline_quotes.as_char())
    );
    diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
        fixed_contents,
        range,
    )));
}
/// Checks for unnecessary escaped quotes in an f-string or t-string.
fn check_interpolated_string(
    checker: &Checker,
    quotes_settings: &flake8_quotes::settings::Settings,
    flags: ast::AnyStringFlags,
    elements: &ast::InterpolatedStringElements,
    range: TextRange,
) {
    // Raw and triple-quoted strings are never rewritten by this rule.
    if flags.is_triple_quoted() || flags.prefix().is_raw() {
        return;
    }
    // Check if we're using the preferred quotation style.
    if Quote::from(flags.quote_style()) != quotes_settings.inline_quotes {
        return;
    }
    let quote_char = quotes_settings.inline_quotes.as_char();
    let opposite_quote_char = quotes_settings.inline_quotes.opposite().as_char();
    // One edit per literal element that actually contains an escaped quote.
    let mut edits = vec![];
    for literal in elements.literals() {
        let content = checker.locator().slice(literal);
        if !contains_escaped_quote(content, quote_char) {
            continue;
        }
        edits.push(Edit::range_replacement(
            unescape_string(content, quote_char),
            literal.range(),
        ));
    }
    if edits.is_empty() {
        return;
    }
    // Replacement for the f/t-string opening quote. We don't perform the check for raw and
    // triple-quoted f-strings, so no need to account for them.
    let start_edit = Edit::range_replacement(
        format!("{}{opposite_quote_char}", flags.prefix()),
        TextRange::at(
            range.start(),
            // Prefix + quote char
            TextSize::new(2),
        ),
    );
    // Replacement for the f/t-string ending quote. We don't perform the check for triple-quoted
    // f-string, so no need to account for them.
    edits.push(Edit::range_replacement(
        opposite_quote_char.to_string(),
        TextRange::at(
            // Offset would either be the end offset of the start edit in case there are no
            // elements in the f/t-string (e.g., `f""`) or the end offset of the last f/t-string
            // element (e.g., `f"hello"`).
            elements
                .last()
                .map_or_else(|| start_edit.end(), Ranged::end),
            // Quote char
            TextSize::new(1),
        ),
    ));
    checker
        .report_diagnostic(AvoidableEscapedQuote, range)
        .set_fix(Fix::safe_edits(start_edit, edits));
}
/// Visitor that records whether any string-like literal occurs anywhere in
/// the visited tree.
#[derive(Debug, Default)]
struct ContainsAnyString {
    result: bool,
}
impl ContainsAnyString {
    /// Record a hit; once set, the flag never reverts to `false`.
    fn mark(&mut self) {
        self.result = true;
    }
}
impl Visitor<'_> for ContainsAnyString {
    fn visit_string_literal(&mut self, _: &'_ ast::StringLiteral) {
        self.mark();
    }
    fn visit_bytes_literal(&mut self, _: &'_ ast::BytesLiteral) {
        self.mark();
    }
    // For f-/t-strings there's no need to recurse further: the answer is
    // already known once the node itself is seen.
    fn visit_f_string(&mut self, _: &'_ ast::FString) {
        self.mark();
    }
    fn visit_t_string(&mut self, _: &'_ ast::TString) {
        self.mark();
    }
}
/// Return `true` if the haystack contains the quote.
///
/// Uses `str::contains` rather than a raw byte search: casting a `char` to
/// `u8` truncates non-ASCII characters, which could falsely match a UTF-8
/// continuation byte. For ASCII quote characters, `str::contains` with a
/// `char` pattern compiles down to an equally fast byte search.
fn contains_quote(haystack: &str, quote: char) -> bool {
    haystack.contains(quote)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_quotes/rules/unnecessary_escaped_quote.rs | crates/ruff_linter/src/rules/flake8_quotes/rules/unnecessary_escaped_quote.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{
self as ast, AnyStringFlags, InterpolatedStringElements, StringFlags, StringLike,
};
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Edit, Fix};
use crate::rules::flake8_quotes::helpers::{contains_escaped_quote, raw_contents, unescape_string};
/// ## What it does
/// Checks for strings that include unnecessarily escaped quotes.
///
/// ## Why is this bad?
/// If a string contains an escaped quote that doesn't match the quote
/// character used for the string, it's unnecessary and can be removed.
///
/// ## Example
/// ```python
/// foo = "bar\'s"
/// ```
///
/// Use instead:
/// ```python
/// foo = "bar's"
/// ```
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter automatically removes unnecessary escapes, making the rule
/// redundant.
///
/// [formatter]: https://docs.astral.sh/ruff/formatter
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.2.0")]
pub(crate) struct UnnecessaryEscapedQuote;
// Q004 always has a safe fix (the superfluous backslash is simply removed),
// hence `AlwaysFixableViolation` rather than plain `Violation`.
impl AlwaysFixableViolation for UnnecessaryEscapedQuote {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Unnecessary escape on inner quote character".to_string()
    }
    fn fix_title(&self) -> String {
        "Remove backslash".to_string()
    }
}
/// Q004
pub(crate) fn unnecessary_escaped_quote(checker: &Checker, string_like: StringLike) {
    // PEP 257 docstrings are exempt from this rule.
    if checker.semantic().in_pep_257_docstring() {
        return;
    }
    // Dispatch each part of the (possibly implicitly concatenated) string to
    // the appropriate checker: plain string/bytes literals on one path,
    // interpolated (f-/t-) strings on the other.
    for part in string_like.parts() {
        match part {
            ast::StringLikePart::String(literal) => {
                check_string_or_bytes(checker, literal.range(), AnyStringFlags::from(literal.flags));
            }
            ast::StringLikePart::Bytes(literal) => {
                check_string_or_bytes(checker, literal.range(), AnyStringFlags::from(literal.flags));
            }
            ast::StringLikePart::FString(fstring) => check_interpolated_string(
                checker,
                AnyStringFlags::from(fstring.flags),
                fstring.range,
                &fstring.elements,
            ),
            ast::StringLikePart::TString(tstring) => check_interpolated_string(
                checker,
                AnyStringFlags::from(tstring.flags),
                tstring.range,
                &tstring.elements,
            ),
        }
    }
}
/// Checks for unnecessary escaped quotes in a string or bytes literal.
///
/// # Panics
///
/// If the string kind is an interpolated string (an f-string or t-string).
fn check_string_or_bytes(checker: &Checker, range: TextRange, flags: AnyStringFlags) {
    assert!(!flags.is_interpolated_string());
    // Triple-quoted and raw strings are not handled by this rule.
    if flags.is_triple_quoted() || flags.is_raw_string() {
        return;
    }
    // The literal's source text with the prefix and surrounding quotes stripped.
    let contents = raw_contents(checker.locator().slice(range), flags);
    let quote = flags.quote_style();
    let opposite_quote_char = quote.opposite().as_char();
    // Only the *opposite* quote can be unnecessarily escaped; escaping the
    // string's own quote character is required.
    if !contains_escaped_quote(contents, opposite_quote_char) {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(UnnecessaryEscapedQuote, range);
    // Safe fix: rewrite the whole literal with the escapes removed.
    diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
        flags
            .display_contents(&unescape_string(contents, opposite_quote_char))
            .to_string(),
        range,
    )));
}
/// Checks for unnecessary escaped quotes in an f-string or t-string.
fn check_interpolated_string(
    checker: &Checker,
    flags: AnyStringFlags,
    range: TextRange,
    elements: &InterpolatedStringElements,
) {
    // Triple-quoted and raw interpolated strings are not handled by this rule.
    if flags.is_triple_quoted() || flags.prefix().is_raw() {
        return;
    }
    let opposite_quote_char = flags.quote_style().opposite().as_char();
    // One replacement edit per literal element that contains an unnecessarily
    // escaped (opposite) quote.
    let mut replacements = elements.literals().filter_map(|literal| {
        let content = checker.locator().slice(literal);
        contains_escaped_quote(content, opposite_quote_char).then(|| {
            Edit::range_replacement(
                unescape_string(content, opposite_quote_char),
                literal.range(),
            )
        })
    });
    // No escaped opposite quotes anywhere: nothing to report.
    let Some(first) = replacements.next() else {
        return;
    };
    checker
        .report_diagnostic(UnnecessaryEscapedQuote, range)
        .set_fix(Fix::safe_edits(first, replacements));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_quotes/rules/mod.rs | crates/ruff_linter/src/rules/flake8_quotes/rules/mod.rs | pub(crate) use avoidable_escaped_quote::*;
pub(crate) use check_string_quotes::*;
pub(crate) use unnecessary_escaped_quote::*;
mod avoidable_escaped_quote;
mod check_string_quotes;
mod unnecessary_escaped_quote;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_bandit/settings.rs | crates/ruff_linter/src/rules/flake8_bandit/settings.rs | //! Settings for the `flake8-bandit` plugin.
use crate::display_settings;
use ruff_macros::CacheKey;
use std::fmt::{Display, Formatter};
/// The default set of directories treated as insecure temporary locations.
pub fn default_tmp_dirs() -> Vec<String> {
    vec![
        "/tmp".to_string(),
        "/var/tmp".to_string(),
        "/dev/shm".to_string(),
    ]
}
/// Resolved settings for the `flake8-bandit` plugin.
#[derive(Debug, Clone, CacheKey)]
pub struct Settings {
    /// Directories flagged as hardcoded temporary paths (exercised by the
    /// S108 tests below).
    pub hardcoded_tmp_directory: Vec<String>,
    /// Whether to also flag typed broad exceptions (exercised by the S110
    /// `check_typed_exception` test).
    pub check_typed_exception: bool,
    /// Extra fully-qualified callables to treat as markup constructors
    /// (exercised by the S704 `extend_markup_names` tests).
    pub extend_markup_names: Vec<String>,
    /// Callables whose results are allowed as markup input (exercised by the
    /// S704 `whitelisted_markup_calls` test).
    pub allowed_markup_calls: Vec<String>,
}
// Defaults: the standard temp-directory list, with all opt-in behaviors
// (typed-exception checking, extra markup names/calls) disabled.
impl Default for Settings {
    fn default() -> Self {
        Self {
            hardcoded_tmp_directory: default_tmp_dirs(),
            check_typed_exception: false,
            extend_markup_names: vec![],
            allowed_markup_calls: vec![],
        }
    }
}
// Rendered via the shared `display_settings!` macro under the
// `linter.flake8_bandit` namespace, listing every field in declaration order.
impl Display for Settings {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        display_settings! {
            formatter = f,
            namespace = "linter.flake8_bandit",
            fields = [
                self.hardcoded_tmp_directory | array,
                self.check_typed_exception,
                self.extend_markup_names | array,
                self.allowed_markup_calls | array,
            ]
        }
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_bandit/helpers.rs | crates/ruff_linter/src/rules/flake8_bandit/helpers.rs | use regex::Regex;
use ruff_python_ast::{self as ast, Expr};
use std::sync::LazyLock;
use ruff_python_semantic::SemanticModel;
/// Matches identifier segments that look like credential names, delimited by
/// `_` or the string boundaries, case-insensitively: misspelling-tolerant
/// variants of "password" (`pas+wo?r?d`), `pass`, `passphrase`, `pwd`,
/// `token`, and `secret`/`secrete`.
static PASSWORD_CANDIDATE_REGEX: LazyLock<Regex> = LazyLock::new(|| {
    Regex::new(r"(^|_)(?i)(pas+wo?r?d|pass(phrase)?|pwd|token|secrete?)($|_)").unwrap()
});
/// If `expr` is a string literal, return its contents as a `&str`;
/// otherwise, return `None`.
pub(super) fn string_literal(expr: &Expr) -> Option<&str> {
    let Expr::StringLiteral(ast::ExprStringLiteral { value, .. }) = expr else {
        return None;
    };
    Some(value.to_str())
}
/// Return `true` if `string` looks like the name of a password/secret
/// variable, per [`PASSWORD_CANDIDATE_REGEX`].
pub(super) fn matches_password_name(string: &str) -> bool {
    PASSWORD_CANDIDATE_REGEX.is_match(string)
}
/// Return `true` if the exception handler's type is absent (a bare `except:`)
/// or resolves to a broad builtin exception — `Exception` or `BaseException` —
/// either directly or as a member of a tuple of exception types.
pub(super) fn is_untyped_exception(type_: Option<&Expr>, semantic: &SemanticModel) -> bool {
    // Shared check: does this expression name `Exception` or `BaseException`?
    let is_broad_exception = |expr: &Expr| {
        semantic
            .resolve_builtin_symbol(expr)
            .is_some_and(|builtin| matches!(builtin, "Exception" | "BaseException"))
    };
    type_.is_none_or(|type_| match type_ {
        Expr::Tuple(ast::ExprTuple { elts, .. }) => elts.iter().any(|elt| is_broad_exception(elt)),
        _ => is_broad_exception(type_),
    })
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_bandit/mod.rs | crates/ruff_linter/src/rules/flake8_bandit/mod.rs | //! Rules from [flake8-bandit](https://pypi.org/project/flake8-bandit/).
mod helpers;
pub(crate) mod rules;
pub mod settings;
#[cfg(test)]
mod tests {
    use std::path::Path;
    use anyhow::Result;
    use test_case::test_case;
    use crate::registry::Rule;
    use crate::settings::LinterSettings;
    use crate::settings::types::PreviewMode;
    use crate::test::test_path;
    use crate::{assert_diagnostics, assert_diagnostics_diff};
    // Snapshot tests: lint one fixture file with a single rule enabled and
    // compare the resulting diagnostics against the stored snapshot named
    // `<code>_<fixture>`.
    #[test_case(Rule::Assert, Path::new("S101.py"))]
    #[test_case(Rule::BadFilePermissions, Path::new("S103.py"))]
    #[test_case(Rule::CallWithShellEqualsTrue, Path::new("S604.py"))]
    #[test_case(Rule::ExecBuiltin, Path::new("S102.py"))]
    #[test_case(Rule::FlaskDebugTrue, Path::new("S201.py"))]
    #[test_case(Rule::HardcodedBindAllInterfaces, Path::new("S104.py"))]
    #[test_case(Rule::HardcodedPasswordDefault, Path::new("S107.py"))]
    #[test_case(Rule::HardcodedPasswordFuncArg, Path::new("S106.py"))]
    #[test_case(Rule::HardcodedPasswordString, Path::new("S105.py"))]
    #[test_case(Rule::HardcodedSQLExpression, Path::new("S608.py"))]
    #[test_case(Rule::HardcodedTempFile, Path::new("S108.py"))]
    #[test_case(Rule::HashlibInsecureHashFunction, Path::new("S324.py"))]
    #[test_case(Rule::Jinja2AutoescapeFalse, Path::new("S701.py"))]
    #[test_case(Rule::MakoTemplates, Path::new("S702.py"))]
    #[test_case(Rule::LoggingConfigInsecureListen, Path::new("S612.py"))]
    #[test_case(Rule::ParamikoCall, Path::new("S601.py"))]
    #[test_case(Rule::RequestWithNoCertValidation, Path::new("S501.py"))]
    #[test_case(Rule::RequestWithoutTimeout, Path::new("S113.py"))]
    #[test_case(Rule::SSHNoHostKeyVerification, Path::new("S507.py"))]
    #[test_case(Rule::SnmpInsecureVersion, Path::new("S508.py"))]
    #[test_case(Rule::SnmpWeakCryptography, Path::new("S509.py"))]
    #[test_case(Rule::SslInsecureVersion, Path::new("S502.py"))]
    #[test_case(Rule::SslWithBadDefaults, Path::new("S503.py"))]
    #[test_case(Rule::SslWithNoVersion, Path::new("S504.py"))]
    #[test_case(Rule::StartProcessWithAShell, Path::new("S605.py"))]
    #[test_case(Rule::StartProcessWithNoShell, Path::new("S606.py"))]
    #[test_case(Rule::StartProcessWithPartialPath, Path::new("S607.py"))]
    #[test_case(Rule::SubprocessPopenWithShellEqualsTrue, Path::new("S602.py"))]
    #[test_case(Rule::SubprocessWithoutShellEqualsTrue, Path::new("S603.py"))]
    #[test_case(Rule::SuspiciousPickleUsage, Path::new("S301.py"))]
    #[test_case(Rule::SuspiciousEvalUsage, Path::new("S307.py"))]
    #[test_case(Rule::SuspiciousMarkSafeUsage, Path::new("S308.py"))]
    #[test_case(Rule::SuspiciousURLOpenUsage, Path::new("S310.py"))]
    #[test_case(Rule::SuspiciousNonCryptographicRandomUsage, Path::new("S311.py"))]
    #[test_case(Rule::SuspiciousTelnetUsage, Path::new("S312.py"))]
    #[test_case(Rule::SuspiciousTelnetlibImport, Path::new("S401.py"))]
    #[test_case(Rule::SuspiciousTelnetlibImport, Path::new("S401.pyi"))]
    #[test_case(Rule::SuspiciousFtplibImport, Path::new("S402.py"))]
    #[test_case(Rule::SuspiciousFtplibImport, Path::new("S402.pyi"))]
    #[test_case(Rule::SuspiciousPickleImport, Path::new("S403.py"))]
    #[test_case(Rule::SuspiciousPickleImport, Path::new("S403.pyi"))]
    #[test_case(Rule::SuspiciousSubprocessImport, Path::new("S404.py"))]
    #[test_case(Rule::SuspiciousSubprocessImport, Path::new("S404.pyi"))]
    #[test_case(Rule::SuspiciousXmlEtreeImport, Path::new("S405.py"))]
    #[test_case(Rule::SuspiciousXmlEtreeImport, Path::new("S405.pyi"))]
    #[test_case(Rule::SuspiciousXmlSaxImport, Path::new("S406.py"))]
    #[test_case(Rule::SuspiciousXmlSaxImport, Path::new("S406.pyi"))]
    #[test_case(Rule::SuspiciousXmlExpatImport, Path::new("S407.py"))]
    #[test_case(Rule::SuspiciousXmlExpatImport, Path::new("S407.pyi"))]
    #[test_case(Rule::SuspiciousXmlMinidomImport, Path::new("S408.py"))]
    #[test_case(Rule::SuspiciousXmlMinidomImport, Path::new("S408.pyi"))]
    #[test_case(Rule::SuspiciousXmlPulldomImport, Path::new("S409.py"))]
    #[test_case(Rule::SuspiciousXmlPulldomImport, Path::new("S409.pyi"))]
    #[test_case(Rule::SuspiciousLxmlImport, Path::new("S410.py"))]
    #[test_case(Rule::SuspiciousLxmlImport, Path::new("S410.pyi"))]
    #[test_case(Rule::SuspiciousXmlrpcImport, Path::new("S411.py"))]
    #[test_case(Rule::SuspiciousXmlrpcImport, Path::new("S411.pyi"))]
    #[test_case(Rule::SuspiciousHttpoxyImport, Path::new("S412.py"))]
    #[test_case(Rule::SuspiciousHttpoxyImport, Path::new("S412.pyi"))]
    #[test_case(Rule::SuspiciousPycryptoImport, Path::new("S413.py"))]
    #[test_case(Rule::SuspiciousPycryptoImport, Path::new("S413.pyi"))]
    #[test_case(Rule::SuspiciousPyghmiImport, Path::new("S415.py"))]
    #[test_case(Rule::SuspiciousPyghmiImport, Path::new("S415.pyi"))]
    #[test_case(Rule::TryExceptContinue, Path::new("S112.py"))]
    #[test_case(Rule::TryExceptPass, Path::new("S110.py"))]
    #[test_case(Rule::UnixCommandWildcardInjection, Path::new("S609.py"))]
    #[test_case(Rule::UnsafeYAMLLoad, Path::new("S506.py"))]
    #[test_case(Rule::WeakCryptographicKey, Path::new("S505.py"))]
    #[test_case(Rule::DjangoExtra, Path::new("S610.py"))]
    #[test_case(Rule::DjangoRawSql, Path::new("S611.py"))]
    #[test_case(Rule::TarfileUnsafeMembers, Path::new("S202.py"))]
    #[test_case(Rule::UnsafeMarkupUse, Path::new("S704.py"))]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("flake8_bandit").join(path).as_path(),
            &LinterSettings::for_rule(rule_code),
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
    // Rules whose behavior differs under preview mode: snapshot the *diff*
    // between the diagnostics produced with preview disabled vs. enabled.
    #[test_case(Rule::SuspiciousPickleUsage, Path::new("S301.py"))]
    #[test_case(Rule::SuspiciousEvalUsage, Path::new("S307.py"))]
    #[test_case(Rule::SuspiciousMarkSafeUsage, Path::new("S308.py"))]
    #[test_case(Rule::SuspiciousURLOpenUsage, Path::new("S310.py"))]
    #[test_case(Rule::SuspiciousNonCryptographicRandomUsage, Path::new("S311.py"))]
    #[test_case(Rule::SuspiciousTelnetUsage, Path::new("S312.py"))]
    #[test_case(Rule::SnmpInsecureVersion, Path::new("S508.py"))]
    #[test_case(Rule::SnmpWeakCryptography, Path::new("S509.py"))]
    fn preview_rules(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!(
            "preview__{}_{}",
            rule_code.noqa_code(),
            path.to_string_lossy()
        );
        assert_diagnostics_diff!(
            snapshot,
            Path::new("flake8_bandit").join(path).as_path(),
            &LinterSettings {
                preview: PreviewMode::Disabled,
                ..LinterSettings::for_rule(rule_code)
            },
            &LinterSettings {
                preview: PreviewMode::Enabled,
                ..LinterSettings::for_rule(rule_code)
            }
        );
        Ok(())
    }
    // S704 with a user-configured additional markup name (`extend-markup-names`).
    #[test_case(Rule::UnsafeMarkupUse, Path::new("S704_extend_markup_names.py"))]
    #[test_case(Rule::UnsafeMarkupUse, Path::new("S704_skip_early_out.py"))]
    fn extend_allowed_callable(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!(
            "extend_allow_callables__{}_{}",
            rule_code.noqa_code(),
            path.to_string_lossy()
        );
        let diagnostics = test_path(
            Path::new("flake8_bandit").join(path).as_path(),
            &LinterSettings {
                flake8_bandit: super::settings::Settings {
                    extend_markup_names: vec!["webhelpers.html.literal".to_string()],
                    ..Default::default()
                },
                ..LinterSettings::for_rule(rule_code)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
    // S704 with a user-configured allowed (sanitizing) call (`allowed-markup-calls`).
    #[test_case(Rule::UnsafeMarkupUse, Path::new("S704_whitelisted_markup_calls.py"))]
    fn whitelisted_markup_calls(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!(
            "whitelisted_markup_calls__{}_{}",
            rule_code.noqa_code(),
            path.to_string_lossy()
        );
        let diagnostics = test_path(
            Path::new("flake8_bandit").join(path).as_path(),
            &LinterSettings {
                flake8_bandit: super::settings::Settings {
                    allowed_markup_calls: vec!["bleach.clean".to_string()],
                    ..Default::default()
                },
                ..LinterSettings::for_rule(rule_code)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
    // S108 with an extra directory (`/foo`) appended to the default tmp list.
    #[test]
    fn check_hardcoded_tmp_additional_dirs() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_bandit/S108.py"),
            &LinterSettings {
                flake8_bandit: super::settings::Settings {
                    hardcoded_tmp_directory: vec![
                        "/tmp".to_string(),
                        "/var/tmp".to_string(),
                        "/dev/shm".to_string(),
                        "/foo".to_string(),
                    ],
                    ..Default::default()
                },
                ..LinterSettings::for_rule(Rule::HardcodedTempFile)
            },
        )?;
        assert_diagnostics!("S108_extend", diagnostics);
        Ok(())
    }
    // S110 with `check-typed-exception` enabled, so typed broad handlers are
    // also flagged.
    #[test]
    fn check_typed_exception() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_bandit/S110.py"),
            &LinterSettings {
                flake8_bandit: super::settings::Settings {
                    check_typed_exception: true,
                    ..Default::default()
                },
                ..LinterSettings::for_rule(Rule::TryExceptPass)
            },
        )?;
        assert_diagnostics!("S110_typed", diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_bandit/rules/suspicious_function_call.rs | crates/ruff_linter/src/rules/flake8_bandit/rules/suspicious_function_call.rs | //! Check for calls to suspicious functions, or calls into suspicious modules.
//!
//! See: <https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html>
use itertools::Either;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, Arguments, Decorator, Expr, ExprCall, Operator};
use ruff_python_semantic::SemanticModel;
use ruff_python_semantic::analyze::typing::find_binding_value;
use ruff_text_size::{Ranged, TextRange};
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::preview::{
is_s310_resolve_string_literal_bindings_enabled, is_suspicious_function_reference_enabled,
};
use crate::settings::LinterSettings;
/// ## What it does
/// Checks for calls to `pickle` functions or modules that wrap them.
///
/// ## Why is this bad?
/// Deserializing untrusted data with `pickle` and other deserialization
/// modules is insecure as it can allow for the creation of arbitrary objects,
/// which can then be used to achieve arbitrary code execution and otherwise
/// unexpected behavior.
///
/// Avoid deserializing untrusted data with `pickle` and other deserialization
/// modules. Instead, consider safer formats, such as JSON.
///
/// If you must deserialize untrusted data with `pickle`, consider signing the
/// data with a secret key and verifying the signature before deserializing the
/// payload. This will prevent an attacker from injecting arbitrary objects
/// into the serialized data.
///
/// In [preview], this rule will also flag references to `pickle` functions.
///
/// ## Example
/// ```python
/// import pickle
///
/// with open("foo.pickle", "rb") as file:
/// foo = pickle.load(file)
/// ```
///
/// Use instead:
/// ```python
/// import json
///
/// with open("foo.json", "rb") as file:
/// foo = json.load(file)
/// ```
///
/// ## References
/// - [Python documentation: `pickle` β Python object serialization](https://docs.python.org/3/library/pickle.html)
/// - [Common Weakness Enumeration: CWE-502](https://cwe.mitre.org/data/definitions/502.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousPickleUsage;
impl Violation for SuspiciousPickleUsage {
#[derive_message_formats]
fn message(&self) -> String {
"`pickle` and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue".to_string()
}
}
/// ## What it does
/// Checks for calls to `marshal` functions.
///
/// ## Why is this bad?
/// Deserializing untrusted data with `marshal` is insecure, as it can allow for
/// the creation of arbitrary objects, which can then be used to achieve
/// arbitrary code execution and otherwise unexpected behavior.
///
/// Avoid deserializing untrusted data with `marshal`. Instead, consider safer
/// formats, such as JSON.
///
/// If you must deserialize untrusted data with `marshal`, consider signing the
/// data with a secret key and verifying the signature before deserializing the
/// payload. This will prevent an attacker from injecting arbitrary objects
/// into the serialized data.
///
/// In [preview], this rule will also flag references to `marshal` functions.
///
/// ## Example
/// ```python
/// import marshal
///
/// with open("foo.marshal", "rb") as file:
/// foo = marshal.load(file)
/// ```
///
/// Use instead:
/// ```python
/// import json
///
/// with open("foo.json", "rb") as file:
/// foo = json.load(file)
/// ```
///
/// ## References
/// - [Python documentation: `marshal` β Internal Python object serialization](https://docs.python.org/3/library/marshal.html)
/// - [Common Weakness Enumeration: CWE-502](https://cwe.mitre.org/data/definitions/502.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousMarshalUsage;
impl Violation for SuspiciousMarshalUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Deserialization with the `marshal` module is possibly dangerous".to_string()
}
}
/// ## What it does
/// Checks for uses of weak or broken cryptographic hash functions.
///
/// ## Why is this bad?
/// Weak or broken cryptographic hash functions may be susceptible to
/// collision attacks (where two different inputs produce the same hash) or
/// pre-image attacks (where an attacker can find an input that produces a
/// given hash). This can lead to security vulnerabilities in applications
/// that rely on these hash functions.
///
/// Avoid using weak or broken cryptographic hash functions in security
/// contexts. Instead, use a known secure hash function such as SHA-256.
///
/// In [preview], this rule will also flag references to insecure hash functions.
///
/// ## Example
/// ```python
/// from cryptography.hazmat.primitives import hashes
///
/// digest = hashes.Hash(hashes.MD5())
/// digest.update(b"Hello, world!")
/// digest.finalize()
/// ```
///
/// Use instead:
/// ```python
/// from cryptography.hazmat.primitives import hashes
///
/// digest = hashes.Hash(hashes.SHA256())
/// digest.update(b"Hello, world!")
/// digest.finalize()
/// ```
///
/// ## References
/// - [Python documentation: `hashlib` β Secure hashes and message digests](https://docs.python.org/3/library/hashlib.html)
/// - [Common Weakness Enumeration: CWE-327](https://cwe.mitre.org/data/definitions/327.html)
/// - [Common Weakness Enumeration: CWE-328](https://cwe.mitre.org/data/definitions/328.html)
/// - [Common Weakness Enumeration: CWE-916](https://cwe.mitre.org/data/definitions/916.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousInsecureHashUsage;
impl Violation for SuspiciousInsecureHashUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Use of insecure MD2, MD4, MD5, or SHA1 hash function".to_string()
}
}
/// ## What it does
/// Checks for uses of weak or broken cryptographic ciphers.
///
/// ## Why is this bad?
/// Weak or broken cryptographic ciphers may be susceptible to attacks that
/// allow an attacker to decrypt ciphertext without knowing the key or
/// otherwise compromise the security of the cipher, such as forgeries.
///
/// Use strong, modern cryptographic ciphers instead of weak or broken ones.
///
/// In [preview], this rule will also flag references to insecure ciphers.
///
/// ## Example
/// ```python
/// from cryptography.hazmat.primitives.ciphers import Cipher, algorithms
///
/// algorithm = algorithms.ARC4(key)
/// cipher = Cipher(algorithm, mode=None)
/// encryptor = cipher.encryptor()
/// ```
///
/// Use instead:
/// ```python
/// from cryptography.fernet import Fernet
///
/// fernet = Fernet(key)
/// ```
///
/// ## References
/// - [Common Weakness Enumeration: CWE-327](https://cwe.mitre.org/data/definitions/327.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousInsecureCipherUsage;
impl Violation for SuspiciousInsecureCipherUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Use of insecure cipher, replace with a known secure cipher such as AES".to_string()
}
}
/// ## What it does
/// Checks for uses of weak or broken cryptographic cipher modes.
///
/// ## Why is this bad?
/// Weak or broken cryptographic ciphers may be susceptible to attacks that
/// allow an attacker to decrypt ciphertext without knowing the key or
/// otherwise compromise the security of the cipher, such as forgeries.
///
/// Use strong, modern cryptographic ciphers instead of weak or broken ones.
///
/// In [preview], this rule will also flag references to insecure cipher modes.
///
/// ## Example
/// ```python
/// from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
///
/// algorithm = algorithms.ARC4(key)
/// cipher = Cipher(algorithm, mode=modes.ECB(iv))
/// encryptor = cipher.encryptor()
/// ```
///
/// Use instead:
/// ```python
/// from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
///
/// algorithm = algorithms.ARC4(key)
/// cipher = Cipher(algorithm, mode=modes.CTR(iv))
/// encryptor = cipher.encryptor()
/// ```
///
/// ## References
/// - [Common Weakness Enumeration: CWE-327](https://cwe.mitre.org/data/definitions/327.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousInsecureCipherModeUsage;
impl Violation for SuspiciousInsecureCipherModeUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Use of insecure block cipher mode, replace with a known secure mode such as CBC or CTR"
.to_string()
}
}
/// ## What it does
/// Checks for uses of `tempfile.mktemp`.
///
/// ## Why is this bad?
/// `tempfile.mktemp` returns a pathname of a file that does not exist at the
/// time the call is made; then, the caller is responsible for creating the
/// file and subsequently using it. This is insecure because another process
/// could create a file with the same name between the time the function
/// returns and the time the caller creates the file.
///
/// `tempfile.mktemp` is deprecated in favor of `tempfile.mkstemp` which
/// creates the file when it is called. Consider using `tempfile.mkstemp`
/// instead, either directly or via a context manager such as
/// `tempfile.TemporaryFile`.
///
/// In [preview], this rule will also flag references to `tempfile.mktemp`.
///
/// ## Example
/// ```python
/// import tempfile
///
/// tmp_file = tempfile.mktemp()
/// with open(tmp_file, "w") as file:
/// file.write("Hello, world!")
/// ```
///
/// Use instead:
/// ```python
/// import tempfile
///
/// with tempfile.TemporaryFile() as file:
/// file.write("Hello, world!")
/// ```
///
/// ## References
/// - [Python documentation: `mktemp`](https://docs.python.org/3/library/tempfile.html#tempfile.mktemp)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousMktempUsage;
impl Violation for SuspiciousMktempUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Use of insecure and deprecated function (`mktemp`)".to_string()
}
}
/// ## What it does
/// Checks for uses of the builtin `eval()` function.
///
/// ## Why is this bad?
/// The `eval()` function is insecure as it enables arbitrary code execution.
///
/// If you need to evaluate an expression from a string, consider using
/// `ast.literal_eval()` instead, which will raise an exception if the
/// expression is not a valid Python literal.
///
/// In [preview], this rule will also flag references to `eval`.
///
/// ## Example
/// ```python
/// x = eval(input("Enter a number: "))
/// ```
///
/// Use instead:
/// ```python
/// from ast import literal_eval
///
/// x = literal_eval(input("Enter a number: "))
/// ```
///
/// ## References
/// - [Python documentation: `eval`](https://docs.python.org/3/library/functions.html#eval)
/// - [Python documentation: `literal_eval`](https://docs.python.org/3/library/ast.html#ast.literal_eval)
/// - [_Eval really is dangerous_ by Ned Batchelder](https://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousEvalUsage;
impl Violation for SuspiciousEvalUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Use of possibly insecure function; consider using `ast.literal_eval`".to_string()
}
}
/// ## What it does
/// Checks for uses of calls to `django.utils.safestring.mark_safe`.
///
/// ## Why is this bad?
/// Cross-site scripting (XSS) vulnerabilities allow attackers to execute
/// arbitrary JavaScript. To guard against XSS attacks, Django templates
/// assume that data is unsafe and automatically escape malicious strings
/// before rendering them.
///
/// `django.utils.safestring.mark_safe` marks a string as safe for use in HTML
/// templates, bypassing XSS protection. Its usage can be dangerous if the
/// contents of the string are dynamically generated, because it may allow
/// cross-site scripting attacks if the string is not properly escaped.
///
/// For dynamically generated strings, consider utilizing
/// `django.utils.html.format_html`.
///
/// In [preview], this rule will also flag references to `django.utils.safestring.mark_safe`.
///
/// ## Example
/// ```python
/// from django.utils.safestring import mark_safe
///
///
/// def render_username(username):
/// return mark_safe(f"<i>{username}</i>") # Dangerous if username is user-provided.
/// ```
///
/// Use instead:
/// ```python
/// from django.utils.html import format_html
///
///
/// def render_username(username):
/// return format_html("<i>{}</i>", username) # username is escaped.
/// ```
///
/// ## References
/// - [Django documentation: `mark_safe`](https://docs.djangoproject.com/en/dev/ref/utils/#django.utils.safestring.mark_safe)
/// - [Django documentation: Cross Site Scripting (XSS) protection](https://docs.djangoproject.com/en/dev/topics/security/#cross-site-scripting-xss-protection)
/// - [Common Weakness Enumeration: CWE-80](https://cwe.mitre.org/data/definitions/80.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousMarkSafeUsage;
impl Violation for SuspiciousMarkSafeUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Use of `mark_safe` may expose cross-site scripting vulnerabilities".to_string()
}
}
/// ## What it does
/// Checks for instances where URL open functions are used with unexpected schemes.
///
/// ## Why is this bad?
/// Some URL open functions allow the use of `file:` or custom schemes (for use
/// instead of `http:` or `https:`). An attacker may be able to use these
/// schemes to access or modify unauthorized resources, and cause unexpected
/// behavior.
///
/// To mitigate this risk, audit all uses of URL open functions and ensure that
/// only permitted schemes are used (e.g., allowing `http:` and `https:`, and
/// disallowing `file:` and `ftp:`).
///
/// In [preview], this rule will also flag references to URL open functions.
///
/// ## Example
/// ```python
/// from urllib.request import urlopen
///
/// url = input("Enter a URL: ")
///
/// with urlopen(url) as response:
/// ...
/// ```
///
/// Use instead:
/// ```python
/// from urllib.request import urlopen
///
/// url = input("Enter a URL: ")
///
/// if not url.startswith(("http:", "https:")):
/// raise ValueError("URL must start with 'http:' or 'https:'")
///
/// with urlopen(url) as response:
/// ...
/// ```
///
/// ## References
/// - [Python documentation: `urlopen`](https://docs.python.org/3/library/urllib.request.html#urllib.request.urlopen)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousURLOpenUsage;
impl Violation for SuspiciousURLOpenUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Audit URL open for permitted schemes. Allowing use of `file:` or custom schemes is often unexpected.".to_string()
}
}
/// ## What it does
/// Checks for uses of cryptographically weak pseudo-random number generators.
///
/// ## Why is this bad?
/// Cryptographically weak pseudo-random number generators are insecure, as they
/// are easily predictable. This can allow an attacker to guess the generated
/// numbers and compromise the security of the system.
///
/// Instead, use a cryptographically secure pseudo-random number generator
/// (such as using the [`secrets` module](https://docs.python.org/3/library/secrets.html))
/// when generating random numbers for security purposes.
///
/// In [preview], this rule will also flag references to these generators.
///
/// ## Example
/// ```python
/// import random
///
/// random.randrange(10)
/// ```
///
/// Use instead:
/// ```python
/// import secrets
///
/// secrets.randbelow(10)
/// ```
///
/// ## References
/// - [Python documentation: `random` β Generate pseudo-random numbers](https://docs.python.org/3/library/random.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousNonCryptographicRandomUsage;
impl Violation for SuspiciousNonCryptographicRandomUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Standard pseudo-random generators are not suitable for cryptographic purposes".to_string()
}
}
/// ## What it does
/// Checks for uses of insecure XML parsers.
///
/// ## Why is this bad?
/// Many XML parsers are vulnerable to XML attacks (such as entity expansion),
/// which cause excessive memory and CPU usage by exploiting recursion. An
/// attacker could use such methods to access unauthorized resources.
///
/// Consider using the `defusedxml` package when parsing untrusted XML data,
/// to protect against XML attacks.
///
/// In [preview], this rule will also flag references to insecure XML parsers.
///
/// ## Example
/// ```python
/// from xml.etree.cElementTree import parse
///
/// tree = parse("untrusted.xml") # Vulnerable to XML attacks.
/// ```
///
/// Use instead:
/// ```python
/// from defusedxml.cElementTree import parse
///
/// tree = parse("untrusted.xml")
/// ```
///
/// ## References
/// - [Python documentation: `xml` β XML processing modules](https://docs.python.org/3/library/xml.html)
/// - [PyPI: `defusedxml`](https://pypi.org/project/defusedxml/)
/// - [Common Weakness Enumeration: CWE-400](https://cwe.mitre.org/data/definitions/400.html)
/// - [Common Weakness Enumeration: CWE-776](https://cwe.mitre.org/data/definitions/776.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousXMLCElementTreeUsage;
impl Violation for SuspiciousXMLCElementTreeUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Using `xml` to parse untrusted data is known to be vulnerable to XML attacks; use `defusedxml` equivalents".to_string()
}
}
/// ## What it does
/// Checks for uses of insecure XML parsers.
///
/// ## Why is this bad?
/// Many XML parsers are vulnerable to XML attacks (such as entity expansion),
/// which cause excessive memory and CPU usage by exploiting recursion. An
/// attacker could use such methods to access unauthorized resources.
///
/// Consider using the `defusedxml` package when parsing untrusted XML data,
/// to protect against XML attacks.
///
/// In [preview], this rule will also flag references to insecure XML parsers.
///
/// ## Example
/// ```python
/// from xml.etree.ElementTree import parse
///
/// tree = parse("untrusted.xml") # Vulnerable to XML attacks.
/// ```
///
/// Use instead:
/// ```python
/// from defusedxml.ElementTree import parse
///
/// tree = parse("untrusted.xml")
/// ```
///
/// ## References
/// - [Python documentation: `xml` β XML processing modules](https://docs.python.org/3/library/xml.html)
/// - [PyPI: `defusedxml`](https://pypi.org/project/defusedxml/)
/// - [Common Weakness Enumeration: CWE-400](https://cwe.mitre.org/data/definitions/400.html)
/// - [Common Weakness Enumeration: CWE-776](https://cwe.mitre.org/data/definitions/776.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousXMLElementTreeUsage;
impl Violation for SuspiciousXMLElementTreeUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Using `xml` to parse untrusted data is known to be vulnerable to XML attacks; use `defusedxml` equivalents".to_string()
}
}
/// ## What it does
/// Checks for uses of insecure XML parsers.
///
/// ## Why is this bad?
/// Many XML parsers are vulnerable to XML attacks (such as entity expansion),
/// which cause excessive memory and CPU usage by exploiting recursion. An
/// attacker could use such methods to access unauthorized resources.
///
/// Consider using the `defusedxml` package when parsing untrusted XML data,
/// to protect against XML attacks.
///
/// In [preview], this rule will also flag references to insecure XML parsers.
///
/// ## Example
/// ```python
/// from xml.sax.expatreader import create_parser
///
/// parser = create_parser()
/// ```
///
/// Use instead:
/// ```python
/// from defusedxml.sax import create_parser
///
/// parser = create_parser()
/// ```
///
/// ## References
/// - [Python documentation: `xml` β XML processing modules](https://docs.python.org/3/library/xml.html)
/// - [PyPI: `defusedxml`](https://pypi.org/project/defusedxml/)
/// - [Common Weakness Enumeration: CWE-400](https://cwe.mitre.org/data/definitions/400.html)
/// - [Common Weakness Enumeration: CWE-776](https://cwe.mitre.org/data/definitions/776.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousXMLExpatReaderUsage;
impl Violation for SuspiciousXMLExpatReaderUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Using `xml` to parse untrusted data is known to be vulnerable to XML attacks; use `defusedxml` equivalents".to_string()
}
}
/// ## What it does
/// Checks for uses of insecure XML parsers.
///
/// ## Why is this bad?
/// Many XML parsers are vulnerable to XML attacks (such as entity expansion),
/// which cause excessive memory and CPU usage by exploiting recursion. An
/// attacker could use such methods to access unauthorized resources.
///
/// Consider using the `defusedxml` package when parsing untrusted XML data,
/// to protect against XML attacks.
///
/// In [preview], this rule will also flag references to insecure XML parsers.
///
/// ## Example
/// ```python
/// from xml.dom.expatbuilder import parse
///
/// parse("untrusted.xml")
/// ```
///
/// Use instead:
/// ```python
/// from defusedxml.expatbuilder import parse
///
/// tree = parse("untrusted.xml")
/// ```
///
/// ## References
/// - [Python documentation: `xml` β XML processing modules](https://docs.python.org/3/library/xml.html)
/// - [PyPI: `defusedxml`](https://pypi.org/project/defusedxml/)
/// - [Common Weakness Enumeration: CWE-400](https://cwe.mitre.org/data/definitions/400.html)
/// - [Common Weakness Enumeration: CWE-776](https://cwe.mitre.org/data/definitions/776.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousXMLExpatBuilderUsage;
impl Violation for SuspiciousXMLExpatBuilderUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Using `xml` to parse untrusted data is known to be vulnerable to XML attacks; use `defusedxml` equivalents".to_string()
}
}
/// ## What it does
/// Checks for uses of insecure XML parsers.
///
/// ## Why is this bad?
/// Many XML parsers are vulnerable to XML attacks (such as entity expansion),
/// which cause excessive memory and CPU usage by exploiting recursion. An
/// attacker could use such methods to access unauthorized resources.
///
/// Consider using the `defusedxml` package when parsing untrusted XML data,
/// to protect against XML attacks.
///
/// In [preview], this rule will also flag references to insecure XML parsers.
///
/// ## Example
/// ```python
/// from xml.sax import make_parser
///
/// make_parser()
/// ```
///
/// Use instead:
/// ```python
/// from defusedxml.sax import make_parser
///
/// make_parser()
/// ```
///
/// ## References
/// - [Python documentation: `xml` β XML processing modules](https://docs.python.org/3/library/xml.html)
/// - [PyPI: `defusedxml`](https://pypi.org/project/defusedxml/)
/// - [Common Weakness Enumeration: CWE-400](https://cwe.mitre.org/data/definitions/400.html)
/// - [Common Weakness Enumeration: CWE-776](https://cwe.mitre.org/data/definitions/776.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousXMLSaxUsage;
impl Violation for SuspiciousXMLSaxUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Using `xml` to parse untrusted data is known to be vulnerable to XML attacks; use `defusedxml` equivalents".to_string()
}
}
/// ## What it does
/// Checks for uses of insecure XML parsers.
///
/// ## Why is this bad?
/// Many XML parsers are vulnerable to XML attacks (such as entity expansion),
/// which cause excessive memory and CPU usage by exploiting recursion. An
/// attacker could use such methods to access unauthorized resources.
///
/// Consider using the `defusedxml` package when parsing untrusted XML data,
/// to protect against XML attacks.
///
/// In [preview], this rule will also flag references to insecure XML parsers.
///
/// ## Example
/// ```python
/// from xml.dom.minidom import parse
///
/// content = parse("untrusted.xml")
/// ```
///
/// Use instead:
/// ```python
/// from defusedxml.minidom import parse
///
/// content = parse("untrusted.xml")
/// ```
///
/// ## References
/// - [Python documentation: `xml` β XML processing modules](https://docs.python.org/3/library/xml.html)
/// - [PyPI: `defusedxml`](https://pypi.org/project/defusedxml/)
/// - [Common Weakness Enumeration: CWE-400](https://cwe.mitre.org/data/definitions/400.html)
/// - [Common Weakness Enumeration: CWE-776](https://cwe.mitre.org/data/definitions/776.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousXMLMiniDOMUsage;
impl Violation for SuspiciousXMLMiniDOMUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Using `xml` to parse untrusted data is known to be vulnerable to XML attacks; use `defusedxml` equivalents".to_string()
}
}
/// ## What it does
/// Checks for uses of insecure XML parsers.
///
/// ## Why is this bad?
/// Many XML parsers are vulnerable to XML attacks (such as entity expansion),
/// which cause excessive memory and CPU usage by exploiting recursion. An
/// attacker could use such methods to access unauthorized resources.
///
/// Consider using the `defusedxml` package when parsing untrusted XML data,
/// to protect against XML attacks.
///
/// In [preview], this rule will also flag references to insecure XML parsers.
///
/// ## Example
/// ```python
/// from xml.dom.pulldom import parse
///
/// content = parse("untrusted.xml")
/// ```
///
/// Use instead:
/// ```python
/// from defusedxml.pulldom import parse
///
/// content = parse("untrusted.xml")
/// ```
///
/// ## References
/// - [Python documentation: `xml` β XML processing modules](https://docs.python.org/3/library/xml.html)
/// - [PyPI: `defusedxml`](https://pypi.org/project/defusedxml/)
/// - [Common Weakness Enumeration: CWE-400](https://cwe.mitre.org/data/definitions/400.html)
/// - [Common Weakness Enumeration: CWE-776](https://cwe.mitre.org/data/definitions/776.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousXMLPullDOMUsage;
impl Violation for SuspiciousXMLPullDOMUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Using `xml` to parse untrusted data is known to be vulnerable to XML attacks; use `defusedxml` equivalents".to_string()
}
}
/// ## Removed
///
/// This rule was removed as the `lxml` library has been modified to address
/// known vulnerabilities and unsafe defaults. As such, the `defusedxml`
/// library is no longer necessary, `defusedxml` has [deprecated] its `lxml`
/// module.
///
/// ## What it does
/// Checks for uses of insecure XML parsers.
///
/// ## Why is this bad?
/// Many XML parsers are vulnerable to XML attacks (such as entity expansion),
/// which cause excessive memory and CPU usage by exploiting recursion. An
/// attacker could use such methods to access unauthorized resources.
///
/// In [preview], this rule will also flag references to insecure XML parsers.
///
/// ## Example
/// ```python
/// from lxml import etree
///
/// content = etree.parse("untrusted.xml")
/// ```
///
/// ## References
/// - [PyPI: `lxml`](https://pypi.org/project/lxml/)
/// - [Common Weakness Enumeration: CWE-400](https://cwe.mitre.org/data/definitions/400.html)
/// - [Common Weakness Enumeration: CWE-776](https://cwe.mitre.org/data/definitions/776.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
/// [deprecated]: https://pypi.org/project/defusedxml/0.8.0rc2/#defusedxml-lxml
#[derive(ViolationMetadata)]
#[violation_metadata(removed_since = "0.12.0")]
pub(crate) struct SuspiciousXMLETreeUsage;
impl Violation for SuspiciousXMLETreeUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Using `lxml` to parse untrusted data is known to be vulnerable to XML attacks".to_string()
}
}
/// ## What it does
/// Checks for uses of `ssl._create_unverified_context`.
///
/// ## Why is this bad?
/// [PEP 476] enabled certificate and hostname validation by default in Python
/// standard library HTTP clients. Previously, Python did not validate
/// certificates by default, which could allow an attacker to perform a "man in
/// the middle" attack by intercepting and modifying traffic between client and
/// server.
///
/// To support legacy environments, `ssl._create_unverified_context` reverts to
/// the previous behavior that does perform verification. Otherwise, use
/// `ssl.create_default_context` to create a secure context.
///
/// In [preview], this rule will also flag references to `ssl._create_unverified_context`.
///
/// ## Example
/// ```python
/// import ssl
///
/// context = ssl._create_unverified_context()
/// ```
///
/// Use instead:
/// ```python
/// import ssl
///
/// context = ssl.create_default_context()
/// ```
///
/// ## References
/// - [PEP 476 β Enabling certificate verification by default for stdlib http clients: Opting out](https://peps.python.org/pep-0476/#opting-out)
/// - [Python documentation: `ssl` β TLS/SSL wrapper for socket objects](https://docs.python.org/3/library/ssl.html)
///
/// [PEP 476]: https://peps.python.org/pep-0476/
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousUnverifiedContextUsage;
impl Violation for SuspiciousUnverifiedContextUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Python allows using an insecure context via the `_create_unverified_context` that reverts to the previous behavior that does not validate certificates or perform hostname checks.".to_string()
}
}
/// ## What it does
/// Checks for the use of Telnet-related functions.
///
/// ## Why is this bad?
/// Telnet is considered insecure because it does not encrypt data sent over
/// the connection and is vulnerable to numerous attacks.
///
/// Instead, consider using a more secure protocol such as SSH.
///
/// In [preview], this rule will also flag references to Telnet-related functions.
///
/// ## References
/// - [Python documentation: `telnetlib` β Telnet client](https://docs.python.org/3/library/telnetlib.html)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct SuspiciousTelnetUsage;
impl Violation for SuspiciousTelnetUsage {
#[derive_message_formats]
fn message(&self) -> String {
"Telnet is considered insecure. Use SSH or some other encrypted protocol.".to_string()
}
}
/// ## What it does
/// Checks for the use of FTP-related functions.
///
/// ## Why is this bad?
/// FTP is considered insecure as it does not encrypt data sent over the
/// connection and is thus vulnerable to numerous attacks.
///
/// Instead, consider using FTPS (which secures FTP using SSL/TLS) or SFTP.
///
/// In [preview], this rule will also flag references to FTP-related functions.
///
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_bandit/rules/shell_injection.rs | crates/ruff_linter/src/rules/flake8_bandit/rules/shell_injection.rs | //! Checks relating to shell injection.
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::Truthiness;
use ruff_python_ast::{self as ast, Arguments, Expr};
use ruff_python_semantic::SemanticModel;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::{
checkers::ast::Checker, registry::Rule, rules::flake8_bandit::helpers::string_literal,
};
/// ## What it does
/// Check for method calls that initiate a subprocess with a shell.
///
/// ## Why is this bad?
/// Starting a subprocess with a shell can allow attackers to execute arbitrary
/// shell commands. Consider starting the process without a shell call and
/// sanitize the input to mitigate the risk of shell injection.
///
/// ## Example
/// ```python
/// import subprocess
///
/// subprocess.run("ls -l", shell=True)
/// ```
///
/// Use instead:
/// ```python
/// import subprocess
///
/// subprocess.run(["ls", "-l"])
/// ```
///
/// ## References
/// - [Python documentation: `subprocess` β Subprocess management](https://docs.python.org/3/library/subprocess.html)
/// - [Common Weakness Enumeration: CWE-78](https://cwe.mitre.org/data/definitions/78.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.262")]
pub(crate) struct SubprocessPopenWithShellEqualsTrue {
safety: Safety,
is_exact: bool,
}
impl Violation for SubprocessPopenWithShellEqualsTrue {
#[derive_message_formats]
fn message(&self) -> String {
match (self.safety, self.is_exact) {
(Safety::SeemsSafe, true) => "`subprocess` call with `shell=True` seems safe, but may be changed in the future; consider rewriting without `shell`".to_string(),
(Safety::Unknown, true) => "`subprocess` call with `shell=True` identified, security issue".to_string(),
(Safety::SeemsSafe, false) => "`subprocess` call with truthy `shell` seems safe, but may be changed in the future; consider rewriting without `shell`".to_string(),
(Safety::Unknown, false) => "`subprocess` call with truthy `shell` identified, security issue".to_string(),
}
}
}
/// ## What it does
/// Check for method calls that initiate a subprocess without a shell.
///
/// ## Why is this bad?
/// Starting a subprocess without a shell can prevent attackers from executing
/// arbitrary shell commands; however, it is still error-prone. Consider
/// validating the input.
///
/// ## Known problems
/// Prone to false positives as it is difficult to determine whether the
/// passed arguments have been validated ([#4045]).
///
/// ## Example
/// ```python
/// import subprocess
///
/// cmd = input("Enter a command: ").split()
/// subprocess.run(cmd)
/// ```
///
/// ## References
/// - [Python documentation: `subprocess` β Subprocess management](https://docs.python.org/3/library/subprocess.html)
///
/// [#4045]: https://github.com/astral-sh/ruff/issues/4045
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.262")]
pub(crate) struct SubprocessWithoutShellEqualsTrue;
impl Violation for SubprocessWithoutShellEqualsTrue {
#[derive_message_formats]
fn message(&self) -> String {
"`subprocess` call: check for execution of untrusted input".to_string()
}
}
/// ## What it does
/// Checks for method calls that set the `shell` parameter to `true` or another
/// truthy value when invoking a subprocess.
///
/// ## Why is this bad?
/// Setting the `shell` parameter to `true` or another truthy value when
/// invoking a subprocess can introduce security vulnerabilities, as it allows
/// shell metacharacters and whitespace to be passed to child processes,
/// potentially leading to shell injection attacks.
///
/// It is recommended to avoid using `shell=True` unless absolutely necessary
/// and, when used, to ensure that all inputs are properly sanitized and quoted
/// to prevent such vulnerabilities.
///
/// ## Known problems
/// Prone to false positives as it is triggered on any function call with a
/// `shell=True` parameter.
///
/// ## Example
/// ```python
/// import my_custom_subprocess
///
/// user_input = input("Enter a command: ")
/// my_custom_subprocess.run(user_input, shell=True)
/// ```
///
/// ## References
/// - [Python documentation: Security Considerations](https://docs.python.org/3/library/subprocess.html#security-considerations)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.262")]
pub(crate) struct CallWithShellEqualsTrue {
is_exact: bool,
}
impl Violation for CallWithShellEqualsTrue {
#[derive_message_formats]
fn message(&self) -> String {
if self.is_exact {
"Function call with `shell=True` parameter identified, security issue".to_string()
} else {
"Function call with truthy `shell` parameter identified, security issue".to_string()
}
}
}
/// ## What it does
/// Checks for calls that start a process with a shell, providing guidance on
/// whether the usage is safe or not.
///
/// ## Why is this bad?
/// Starting a process with a shell can introduce security risks, such as
/// code injection vulnerabilities. It's important to be aware of whether the
/// usage of the shell is safe or not.
///
/// This rule triggers on functions like `os.system`, `popen`, etc., which
/// start processes with a shell. It evaluates whether the provided command
/// is a literal string or an expression. If the command is a literal string,
/// it's considered safe. If the command is an expression, it's considered
/// (potentially) unsafe.
///
/// ## Example
/// ```python
/// import os
///
/// # Safe usage (literal string)
/// command = "ls -l"
/// os.system(command)
///
/// # Potentially unsafe usage (expression)
/// cmd = get_user_input()
/// os.system(cmd)
/// ```
///
/// ## Note
/// The `subprocess` module provides more powerful facilities for spawning new
/// processes and retrieving their results, and using that module is preferable
/// to using `os.system` or similar functions. Consider replacing such usages
/// with `subprocess.call` or related functions.
///
/// ## References
/// - [Python documentation: `subprocess`](https://docs.python.org/3/library/subprocess.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.262")]
pub(crate) struct StartProcessWithAShell {
safety: Safety,
}
impl Violation for StartProcessWithAShell {
#[derive_message_formats]
fn message(&self) -> String {
match self.safety {
Safety::SeemsSafe => "Starting a process with a shell: seems safe, but may be changed in the future; consider rewriting without `shell`".to_string(),
Safety::Unknown => "Starting a process with a shell, possible injection detected".to_string(),
}
}
}
/// ## What it does
/// Checks for functions that start a process without a shell.
///
/// ## Why is this bad?
/// Invoking any kind of external executable via a function call can pose
/// security risks if arbitrary variables are passed to the executable, or if
/// the input is otherwise unsanitised or unvalidated.
///
/// This rule specifically flags functions in the `os` module that spawn
/// subprocesses *without* the use of a shell. Note that these typically pose a
/// much smaller security risk than subprocesses that are started *with* a
/// shell, which are flagged by [`start-process-with-a-shell`][S605] (`S605`).
/// This gives you the option of enabling one rule while disabling the other
/// if you decide that the security risk from these functions is acceptable
/// for your use case.
///
/// ## Example
/// ```python
/// import os
///
///
/// def insecure_function(arbitrary_user_input: str):
/// os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", arbitrary_user_input)
/// ```
///
/// [S605]: https://docs.astral.sh/ruff/rules/start-process-with-a-shell
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.262")]
pub(crate) struct StartProcessWithNoShell;
impl Violation for StartProcessWithNoShell {
#[derive_message_formats]
fn message(&self) -> String {
"Starting a process without a shell".to_string()
}
}
/// ## What it does
/// Checks for the starting of a process with a partial executable path.
///
/// ## Why is this bad?
/// Starting a process with a partial executable path can allow attackers to
/// execute an arbitrary executable by adjusting the `PATH` environment variable.
/// Consider using a full path to the executable instead.
///
/// ## Example
/// ```python
/// import subprocess
///
/// subprocess.Popen(["ruff", "check", "file.py"])
/// ```
///
/// Use instead:
/// ```python
/// import subprocess
///
/// subprocess.Popen(["/usr/bin/ruff", "check", "file.py"])
/// ```
///
/// ## References
/// - [Python documentation: `subprocess.Popen()`](https://docs.python.org/3/library/subprocess.html#subprocess.Popen)
/// - [Common Weakness Enumeration: CWE-426](https://cwe.mitre.org/data/definitions/426.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.262")]
pub(crate) struct StartProcessWithPartialPath;
impl Violation for StartProcessWithPartialPath {
#[derive_message_formats]
fn message(&self) -> String {
"Starting a process with a partial executable path".to_string()
}
}
/// ## What it does
/// Checks for possible wildcard injections in calls to `subprocess.Popen()`.
///
/// ## Why is this bad?
/// Wildcard injections can lead to unexpected behavior if unintended files are
/// matched by the wildcard. Consider using a more specific path instead.
///
/// ## Example
/// ```python
/// import subprocess
///
/// subprocess.Popen(["chmod", "777", "*.py"], shell=True)
/// ```
///
/// Use instead:
/// ```python
/// import subprocess
///
/// subprocess.Popen(["chmod", "777", "main.py"], shell=True)
/// ```
///
/// ## References
/// - [Common Weakness Enumeration: CWE-78](https://cwe.mitre.org/data/definitions/78.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.271")]
pub(crate) struct UnixCommandWildcardInjection;
impl Violation for UnixCommandWildcardInjection {
#[derive_message_formats]
fn message(&self) -> String {
"Possible wildcard injection in call due to `*` usage".to_string()
}
}
/// Check if an expression is a trusted input for subprocess.run.
/// We assume that any str, list[str] or tuple[str] literal can be trusted.
fn is_trusted_input(arg: &Expr) -> bool {
match arg {
Expr::StringLiteral(_) => true,
Expr::List(ast::ExprList { elts, .. }) | Expr::Tuple(ast::ExprTuple { elts, .. }) => {
elts.iter().all(|elt| matches!(elt, Expr::StringLiteral(_)))
}
Expr::Named(named) => is_trusted_input(&named.value),
_ => false,
}
}
/// S602, S603, S604, S605, S606, S607, S609
pub(crate) fn shell_injection(checker: &Checker, call: &ast::ExprCall) {
let call_kind = get_call_kind(&call.func, checker.semantic());
let shell_keyword = find_shell_keyword(&call.arguments, checker.semantic());
if matches!(call_kind, Some(CallKind::Subprocess)) {
if let Some(arg) = call.arguments.args.first() {
match shell_keyword {
// S602
Some(ShellKeyword {
truthiness: truthiness @ (Truthiness::True | Truthiness::Truthy),
}) => {
checker.report_diagnostic_if_enabled(
SubprocessPopenWithShellEqualsTrue {
safety: Safety::from(arg),
is_exact: matches!(truthiness, Truthiness::True),
},
call.func.range(),
);
}
// S603
_ => {
if !is_trusted_input(arg) {
checker.report_diagnostic_if_enabled(
SubprocessWithoutShellEqualsTrue,
call.func.range(),
);
}
}
}
}
} else if let Some(ShellKeyword {
truthiness: truthiness @ (Truthiness::True | Truthiness::Truthy),
}) = shell_keyword
{
// S604
checker.report_diagnostic_if_enabled(
CallWithShellEqualsTrue {
is_exact: matches!(truthiness, Truthiness::True),
},
call.func.range(),
);
}
// S605
if checker.is_rule_enabled(Rule::StartProcessWithAShell) {
if matches!(call_kind, Some(CallKind::Shell)) {
if let Some(arg) = call.arguments.args.first() {
checker.report_diagnostic(
StartProcessWithAShell {
safety: Safety::from(arg),
},
call.func.range(),
);
}
}
}
// S606
if checker.is_rule_enabled(Rule::StartProcessWithNoShell) {
if matches!(call_kind, Some(CallKind::NoShell)) {
checker.report_diagnostic(StartProcessWithNoShell, call.func.range());
}
}
// S607
if checker.is_rule_enabled(Rule::StartProcessWithPartialPath) {
if call_kind.is_some() {
if let Some(arg) = call.arguments.args.first() {
if is_partial_path(arg) {
checker.report_diagnostic(StartProcessWithPartialPath, arg.range());
}
}
}
}
// S609
if checker.is_rule_enabled(Rule::UnixCommandWildcardInjection) {
if matches!(call_kind, Some(CallKind::Shell))
|| matches!(
(call_kind, shell_keyword),
(
Some(CallKind::Subprocess),
Some(ShellKeyword {
truthiness: Truthiness::True | Truthiness::Truthy,
})
)
)
{
if let Some(arg) = call.arguments.args.first() {
if is_wildcard_command(arg) {
checker.report_diagnostic(UnixCommandWildcardInjection, arg.range());
}
}
}
}
}
#[derive(Copy, Clone, Debug)]
enum CallKind {
Subprocess,
Shell,
NoShell,
}
/// Return the [`CallKind`] of the given function call.
fn get_call_kind(func: &Expr, semantic: &SemanticModel) -> Option<CallKind> {
semantic
.resolve_qualified_name(func)
.and_then(|qualified_name| match qualified_name.segments() {
&[module, submodule] => match module {
"os" => match submodule {
"execl" | "execle" | "execlp" | "execlpe" | "execv" | "execve" | "execvp"
| "execvpe" | "spawnl" | "spawnle" | "spawnlp" | "spawnlpe" | "spawnv"
| "spawnve" | "spawnvp" | "spawnvpe" | "startfile" => Some(CallKind::NoShell),
"system" | "popen" | "popen2" | "popen3" | "popen4" => Some(CallKind::Shell),
_ => None,
},
"subprocess" => match submodule {
"Popen" | "call" | "check_call" | "check_output" | "run" => {
Some(CallKind::Subprocess)
}
"getoutput" | "getstatusoutput" => Some(CallKind::Shell),
_ => None,
},
"popen2" => match submodule {
"popen2" | "popen3" | "popen4" | "Popen3" | "Popen4" => Some(CallKind::Shell),
_ => None,
},
"commands" => match submodule {
"getoutput" | "getstatusoutput" => Some(CallKind::Shell),
_ => None,
},
_ => None,
},
_ => None,
})
}
#[derive(Copy, Clone, Debug)]
struct ShellKeyword {
/// Whether the `shell` keyword argument is set and evaluates to `True`.
truthiness: Truthiness,
}
/// Return the `shell` keyword argument to the given function call, if any.
fn find_shell_keyword(arguments: &Arguments, semantic: &SemanticModel) -> Option<ShellKeyword> {
arguments.find_keyword("shell").map(|keyword| ShellKeyword {
truthiness: Truthiness::from_expr(&keyword.value, |id| semantic.has_builtin_binding(id)),
})
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum Safety {
SeemsSafe,
Unknown,
}
impl From<&Expr> for Safety {
/// Return the [`Safety`] level for the [`Expr`]. This is based on Bandit's definition: string
/// literals are considered okay, but dynamically-computed values are not.
fn from(expr: &Expr) -> Self {
if expr.is_string_literal_expr() {
Self::SeemsSafe
} else {
Self::Unknown
}
}
}
/// Return `true` if the string appears to be a full file path.
///
/// ## Examples
/// ```python
/// import os
///
/// os.system("/bin/ls")
/// os.system("./bin/ls")
/// os.system(["/bin/ls"])
/// os.system(["/bin/ls", "/tmp"])
/// os.system(r"C:\\bin\ls")
fn is_full_path(text: &str) -> bool {
let mut chars = text.chars();
let Some(first_char) = chars.next() else {
return false;
};
// Ex) `/bin/ls`
if first_char == '\\' || first_char == '/' || first_char == '.' {
return true;
}
// Ex) `C:`
if first_char.is_alphabetic() {
if let Some(second_char) = chars.next() {
if second_char == ':' {
return true;
}
}
}
false
}
/// Return `true` if the [`Expr`] is a string literal or list of string literals that starts with a
/// partial path.
fn is_partial_path(expr: &Expr) -> bool {
let string_literal = match expr {
Expr::List(ast::ExprList { elts, .. }) => elts.first().and_then(string_literal),
_ => string_literal(expr),
};
string_literal.is_some_and(|text| !is_full_path(text))
}
/// Return `true` if the [`Expr`] is a wildcard command.
///
/// ## Examples
/// ```python
/// import subprocess
///
/// subprocess.Popen("/bin/chown root: *", shell=True)
/// subprocess.Popen(["/usr/local/bin/rsync", "*", "some_where:"], shell=True)
/// ```
fn is_wildcard_command(expr: &Expr) -> bool {
    // Commands that are risky when combined with a `*` wildcard.
    const COMMANDS: [&str; 4] = ["chown", "chmod", "tar", "rsync"];

    let mentions_command =
        |text: &str| COMMANDS.iter().copied().any(|command| text.contains(command));

    if let Expr::List(list) = expr {
        // For a list, the `*` and the command may appear in different
        // elements, so track each independently across all string elements.
        let mut seen_star = false;
        let mut seen_command = false;
        for item in list {
            if let Some(text) = string_literal(item) {
                seen_star |= text.contains('*');
                seen_command |= mentions_command(text);
            }
            if seen_star && seen_command {
                return true;
            }
        }
        false
    } else {
        // For a single string, both must appear in the same literal.
        string_literal(expr).is_some_and(|text| text.contains('*') && mentions_command(text))
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_bandit/rules/snmp_weak_cryptography.rs | crates/ruff_linter/src/rules/flake8_bandit/rules/snmp_weak_cryptography.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::preview::is_extended_snmp_api_path_detection_enabled;
/// ## What it does
/// Checks for uses of the SNMPv3 protocol without encryption.
///
/// ## Why is this bad?
/// Unencrypted SNMPv3 communication can be intercepted and read by
/// unauthorized parties. Instead, enable encryption when using SNMPv3.
/// Supplying both an authentication key and a privacy (encryption) key, as in
/// the second example below, enables encrypted communication.
///
/// ## Example
/// ```python
/// from pysnmp.hlapi import UsmUserData
///
/// UsmUserData("user")
/// ```
///
/// Use instead:
/// ```python
/// from pysnmp.hlapi import UsmUserData
///
/// UsmUserData("user", "authkey", "privkey")
/// ```
///
/// ## References
/// - [Common Weakness Enumeration: CWE-319](https://cwe.mitre.org/data/definitions/319.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.218")]
pub(crate) struct SnmpWeakCryptography;
impl Violation for SnmpWeakCryptography {
    // The user-facing diagnostic message reported for S509.
    #[derive_message_formats]
    fn message(&self) -> String {
        "You should not use SNMPv3 without encryption. `noAuthNoPriv` & `authNoPriv` is insecure."
            .to_string()
    }
}
/// S509
pub(crate) fn snmp_weak_cryptography(checker: &Checker, call: &ast::ExprCall) {
    // A call with three or more arguments supplies authentication and privacy
    // keys and is not flagged.
    if call.arguments.len() >= 3 {
        return;
    }

    let Some(qualified_name) = checker.semantic().resolve_qualified_name(&call.func) else {
        return;
    };

    // In preview, also match re-exports nested under `pysnmp.hlapi`.
    let is_usm_user_data = if is_extended_snmp_api_path_detection_enabled(checker.settings()) {
        matches!(
            qualified_name.segments(),
            ["pysnmp", "hlapi", .., "UsmUserData"]
        )
    } else {
        matches!(
            qualified_name.segments(),
            ["pysnmp", "hlapi", "UsmUserData"]
        )
    };

    if is_usm_user_data {
        checker.report_diagnostic(SnmpWeakCryptography, call.func.range());
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_bandit/rules/tarfile_unsafe_members.rs | crates/ruff_linter/src/rules/flake8_bandit/rules/tarfile_unsafe_members.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast};
use ruff_python_semantic::Modules;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for uses of `tarfile.extractall`.
///
/// ## Why is this bad?
/// Extracting archives from untrusted sources without prior inspection is
/// a security risk, as maliciously crafted archives may contain files that
/// will be written outside of the target directory. For example, the archive
/// could include files with absolute paths (e.g., `/etc/passwd`), or relative
/// paths with parent directory references (e.g., `../etc/passwd`).
///
/// On Python 3.12 and later, use `filter='data'` to prevent the most dangerous
/// security issues (see: [PEP 706]). On earlier versions, set the `members`
/// argument to a trusted subset of the archive's members.
///
/// ## Example
/// ```python
/// import tarfile
/// import tempfile
///
/// tar = tarfile.open(filename)
/// tar.extractall(path=tempfile.mkdtemp())
/// tar.close()
/// ```
///
/// ## References
/// - [Common Weakness Enumeration: CWE-22](https://cwe.mitre.org/data/definitions/22.html)
/// - [Python documentation: `TarFile.extractall`](https://docs.python.org/3/library/tarfile.html#tarfile.TarFile.extractall)
/// - [Python documentation: Extraction filters](https://docs.python.org/3/library/tarfile.html#tarfile-extraction-filter)
///
/// [PEP 706]: https://peps.python.org/pep-0706/#backporting-forward-compatibility
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.2.0")]
pub(crate) struct TarfileUnsafeMembers;
impl Violation for TarfileUnsafeMembers {
    // The user-facing diagnostic message reported for S202.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Uses of `tarfile.extractall()`".to_string()
    }
}
/// S202
pub(crate) fn tarfile_unsafe_members(checker: &Checker, call: &ast::ExprCall) {
    // Only applies when the `tarfile` module has been seen in this module.
    if !checker.semantic().seen_module(Modules::TARFILE) {
        return;
    }

    // The call must be an attribute access named `extractall`.
    let Some(attribute) = call.func.as_attribute_expr() else {
        return;
    };
    if attribute.attr.as_str() != "extractall" {
        return;
    }

    // An explicit `filter="data"` or `filter="tar"` string literal opts in to
    // safe extraction, so the call is not flagged.
    let has_safe_filter = call
        .arguments
        .find_keyword("filter")
        .and_then(|keyword| keyword.value.as_string_literal_expr())
        .is_some_and(|literal| matches!(literal.value.to_str(), "data" | "tar"));
    if has_safe_filter {
        return;
    }

    checker.report_diagnostic(TarfileUnsafeMembers, call.func.range());
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_bandit/rules/snmp_insecure_version.rs | crates/ruff_linter/src/rules/flake8_bandit/rules/snmp_insecure_version.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, Expr, Int};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::preview::is_extended_snmp_api_path_detection_enabled;
/// ## What it does
/// Checks for uses of SNMPv1 or SNMPv2 (i.e., `CommunityData` calls where
/// `mpModel` is set to `0` or `1`).
///
/// ## Why is this bad?
/// The SNMPv1 and SNMPv2 protocols are considered insecure as they do
/// not support encryption. Instead, prefer SNMPv3, which supports
/// encryption.
///
/// ## Example
/// ```python
/// from pysnmp.hlapi import CommunityData
///
/// CommunityData("public", mpModel=0)
/// ```
///
/// Use instead:
/// ```python
/// from pysnmp.hlapi import CommunityData
///
/// CommunityData("public", mpModel=2)
/// ```
///
/// ## References
/// - [Cybersecurity and Infrastructure Security Agency (CISA): Alert TA17-156A](https://www.cisa.gov/news-events/alerts/2017/06/05/reducing-risk-snmp-abuse)
/// - [Common Weakness Enumeration: CWE-319](https://cwe.mitre.org/data/definitions/319.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.218")]
pub(crate) struct SnmpInsecureVersion;
impl Violation for SnmpInsecureVersion {
    // The user-facing diagnostic message reported for S508.
    #[derive_message_formats]
    fn message(&self) -> String {
        "The use of SNMPv1 and SNMPv2 is insecure. Use SNMPv3 if able.".to_string()
    }
}
/// S508
pub(crate) fn snmp_insecure_version(checker: &Checker, call: &ast::ExprCall) {
    let Some(qualified_name) = checker.semantic().resolve_qualified_name(&call.func) else {
        return;
    };

    // In preview, also match re-exports nested under `pysnmp.hlapi`.
    let is_community_data = if is_extended_snmp_api_path_detection_enabled(checker.settings()) {
        matches!(
            qualified_name.segments(),
            ["pysnmp", "hlapi", .., "CommunityData"]
        )
    } else {
        matches!(
            qualified_name.segments(),
            ["pysnmp", "hlapi", "CommunityData"]
        )
    };
    if !is_community_data {
        return;
    }

    let Some(keyword) = call.arguments.find_keyword("mpModel") else {
        return;
    };

    // Flag only the insecure versions: `mpModel=0` or `mpModel=1`.
    if matches!(
        keyword.value,
        Expr::NumberLiteral(ast::ExprNumberLiteral {
            value: ast::Number::Int(Int::ZERO | Int::ONE),
            ..
        })
    ) {
        checker.report_diagnostic(SnmpInsecureVersion, keyword.range());
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.