repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_tidy_imports/matchers.rs | crates/ruff_linter/src/rules/flake8_tidy_imports/matchers.rs | /// Match an imported member against the ban policy. For example, given `from foo import bar`,
/// `foo` is the module and `bar` is the member. Performs an exact match.
#[derive(Debug)]
pub(crate) struct MatchName<'a> {
    pub(crate) module: &'a str,
    pub(crate) member: &'a str,
}

impl MatchName<'_> {
    /// Return `true` if `banned_module` names exactly `{module}.{member}`.
    ///
    /// Works by peeling the module, the dot, and the member off the banned
    /// name in turn, so no intermediate string is allocated.
    fn is_match(&self, banned_module: &str) -> bool {
        let Some(rest) = banned_module.strip_prefix(self.module) else {
            return false;
        };
        let Some(rest) = rest.strip_prefix('.') else {
            return false;
        };
        // An exact match requires that nothing follows the member.
        rest == self.member
    }
}
/// Match an imported module against the ban policy. For example, given `import foo.bar`,
/// `foo.bar` is the module. Matches against the module name or any of its parents.
#[derive(Debug)]
pub(crate) struct MatchNameOrParent<'a> {
    pub(crate) module: &'a str,
}

impl MatchNameOrParent<'_> {
    /// Return `true` if `banned_module` equals the imported module or any of
    /// its dotted parents.
    ///
    /// Ex) banned `foo` matches import `foo` (empty suffix) as well as import
    /// `foo.bar` (suffix begins with a `.`), but not import `foobar`.
    fn is_match(&self, banned_module: &str) -> bool {
        self.module
            .strip_prefix(banned_module)
            .is_some_and(|suffix| suffix.is_empty() || suffix.starts_with('.'))
    }
}
#[derive(Debug)]
pub(crate) enum NameMatchPolicy<'a> {
    /// Only match an exact module name (e.g., given `import foo.bar`, only match `foo.bar`).
    MatchName(MatchName<'a>),
    /// Match an exact module name or any of its parents (e.g., given `import foo.bar`, match
    /// `foo.bar` or `foo`).
    MatchNameOrParent(MatchNameOrParent<'a>),
}

impl NameMatchPolicy<'_> {
    /// Return the first banned module that this policy matches, if any.
    pub(crate) fn find<'a>(
        &self,
        mut banned_modules: impl Iterator<Item = &'a str>,
    ) -> Option<String> {
        banned_modules
            .find(|banned_module| match self {
                NameMatchPolicy::MatchName(matcher) => matcher.is_match(banned_module),
                NameMatchPolicy::MatchNameOrParent(matcher) => matcher.is_match(banned_module),
            })
            .map(ToString::to_string)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_tidy_imports/mod.rs | crates/ruff_linter/src/rules/flake8_tidy_imports/mod.rs | //! Rules from [flake8-tidy-imports](https://pypi.org/project/flake8-tidy-imports/).
pub(crate) mod matchers;
pub(crate) mod rules;
pub mod settings;
#[cfg(test)]
mod tests {
    use std::path::Path;

    use anyhow::Result;
    use rustc_hash::FxHashMap;

    use crate::assert_diagnostics;
    use crate::registry::Rule;
    use crate::rules::flake8_tidy_imports;
    use crate::rules::flake8_tidy_imports::settings::{ApiBan, Strictness};
    use crate::settings::LinterSettings;
    use crate::test::test_path;

    /// TID251: ban a whole module (`cgi`) and a single member (`typing.TypedDict`),
    /// then snapshot the diagnostics produced for the fixture.
    #[test]
    fn banned_api() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_tidy_imports/TID251.py"),
            &LinterSettings {
                flake8_tidy_imports: flake8_tidy_imports::settings::Settings {
                    banned_api: FxHashMap::from_iter([
                        (
                            "cgi".to_string(),
                            ApiBan {
                                msg: "The cgi module is deprecated.".to_string(),
                            },
                        ),
                        (
                            "typing.TypedDict".to_string(),
                            ApiBan {
                                msg: "Use typing_extensions.TypedDict instead.".to_string(),
                            },
                        ),
                    ]),
                    ..Default::default()
                },
                ..LinterSettings::for_rules(vec![Rule::BannedApi])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    /// TID251: same rule, but resolving bans against a file inside a
    /// namespace package (`my_package`), including a fully-qualified ban.
    #[test]
    fn banned_api_package() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_tidy_imports/TID/my_package/sublib/api/application.py"),
            &LinterSettings {
                flake8_tidy_imports: flake8_tidy_imports::settings::Settings {
                    banned_api: FxHashMap::from_iter([
                        (
                            "attrs".to_string(),
                            ApiBan {
                                msg: "The attrs module is deprecated.".to_string(),
                            },
                        ),
                        (
                            "my_package.sublib.protocol".to_string(),
                            ApiBan {
                                msg: "The protocol module is deprecated.".to_string(),
                            },
                        ),
                    ]),
                    ..Default::default()
                },
                namespace_packages: vec![Path::new("my_package").to_path_buf()],
                ..LinterSettings::for_rules(vec![Rule::BannedApi])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    /// TID252 with `Strictness::Parents`: only relative imports that reach a
    /// parent module (`from .. import x`) are reported.
    #[test]
    fn ban_parent_imports() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_tidy_imports/TID252.py"),
            &LinterSettings {
                flake8_tidy_imports: flake8_tidy_imports::settings::Settings {
                    ban_relative_imports: Strictness::Parents,
                    ..Default::default()
                },
                ..LinterSettings::for_rules(vec![Rule::RelativeImports])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    /// TID252 with `Strictness::All`: every relative import is reported.
    #[test]
    fn ban_all_imports() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_tidy_imports/TID252.py"),
            &LinterSettings {
                flake8_tidy_imports: flake8_tidy_imports::settings::Settings {
                    ban_relative_imports: Strictness::All,
                    ..Default::default()
                },
                ..LinterSettings::for_rules(vec![Rule::RelativeImports])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    /// TID252 (`Parents`) inside a namespace package, exercising module-path
    /// resolution for the fix.
    #[test]
    fn ban_parent_imports_package() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_tidy_imports/TID/my_package/sublib/api/application.py"),
            &LinterSettings {
                flake8_tidy_imports: flake8_tidy_imports::settings::Settings {
                    ban_relative_imports: Strictness::Parents,
                    ..Default::default()
                },
                namespace_packages: vec![Path::new("my_package").to_path_buf()],
                ..LinterSettings::for_rules(vec![Rule::RelativeImports])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    /// TID253: ban module-level imports of `torch` and `tensorflow`.
    #[test]
    fn banned_module_level_imports() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_tidy_imports/TID253.py"),
            &LinterSettings {
                flake8_tidy_imports: flake8_tidy_imports::settings::Settings {
                    banned_module_level_imports: vec![
                        "torch".to_string(),
                        "tensorflow".to_string(),
                    ],
                    ..Default::default()
                },
                ..LinterSettings::for_rules(vec![Rule::BannedModuleLevelImports])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_tidy_imports/rules/relative_imports.rs | crates/ruff_linter/src/rules/flake8_tidy_imports/rules/relative_imports.rs | use ruff_python_ast::{self as ast, Identifier, Stmt};
use ruff_text_size::{Ranged, TextRange};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::resolve_imported_module_path;
use ruff_python_codegen::Generator;
use ruff_python_stdlib::identifiers::is_identifier;
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
use crate::rules::flake8_tidy_imports::settings::Strictness;
/// ## What it does
/// Checks for relative imports.
///
/// ## Why is this bad?
/// Absolute imports, or relative imports from siblings, are recommended by [PEP 8]:
///
/// > Absolute imports are recommended, as they are usually more readable and tend to be better behaved...
/// > ```python
/// > import mypkg.sibling
/// > from mypkg import sibling
/// > from mypkg.sibling import example
/// > ```
/// > However, explicit relative imports are an acceptable alternative to absolute imports,
/// > especially when dealing with complex package layouts where using absolute imports would be
/// > unnecessarily verbose:
/// > ```python
/// > from . import sibling
/// > from .sibling import example
/// > ```
///
/// ## Example
/// ```python
/// from .. import foo
/// ```
///
/// Use instead:
/// ```python
/// from mypkg import foo
/// ```
///
/// ## Options
/// - `lint.flake8-tidy-imports.ban-relative-imports`
///
/// [PEP 8]: https://peps.python.org/pep-0008/#imports
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.169")]
pub(crate) struct RelativeImports {
    /// Which relative imports are banned: all of them, or only those reaching
    /// a parent module (see `Strictness`).
    strictness: Strictness,
}
impl Violation for RelativeImports {
    // `Sometimes`: `fix_banned_relative_import` (below) returns `None` when the
    // absolute module path cannot be resolved or is not a valid module name.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        // The message mirrors the configured strictness level.
        match self.strictness {
            Strictness::Parents => {
                "Prefer absolute imports over relative imports from parent modules".to_string()
            }
            Strictness::All => "Prefer absolute imports over relative imports".to_string(),
        }
    }

    fn fix_title(&self) -> Option<String> {
        let RelativeImports { strictness } = self;
        Some(match strictness {
            Strictness::Parents => {
                "Replace relative imports from parent modules with absolute imports".to_string()
            }
            Strictness::All => "Replace relative imports with absolute imports".to_string(),
        })
    }
}
/// Attempt to build a [`Fix`] that rewrites a banned relative import as an
/// absolute one (e.g., `from .. import foo` into `from mypkg import foo`).
///
/// Returns `None` when the absolute module path cannot be resolved, or when
/// the resolved path would not form a valid module name.
fn fix_banned_relative_import(
    stmt: &Stmt,
    level: u32,
    module: Option<&str>,
    module_path: Option<&[String]>,
    generator: Generator,
) -> Option<Fix> {
    // Only fix if the module path is known.
    let module_path = resolve_imported_module_path(level, module, module_path)?;

    // Require import to be a valid module:
    // https://peps.python.org/pep-0008/#package-and-module-names
    if !module_path.split('.').all(is_identifier) {
        return None;
    }

    // Callers only invoke this for `from ... import ...` statements.
    let Stmt::ImportFrom(ast::StmtImportFrom { names, .. }) = stmt else {
        panic!("Expected Stmt::ImportFrom");
    };

    // Rebuild the statement with the absolute module path and `level: 0`
    // (i.e., no leading dots), keeping the imported names unchanged.
    let node = ast::StmtImportFrom {
        module: Some(Identifier::new(
            module_path.to_string(),
            TextRange::default(),
        )),
        names: names.clone(),
        level: 0,
        range: TextRange::default(),
        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
    };
    let content = generator.stmt(&node.into());

    // NOTE(review): marked unsafe — presumably because the resolved absolute
    // path may not match the runtime package layout; confirm before relying on it.
    Some(Fix::unsafe_edit(Edit::range_replacement(
        content,
        stmt.range(),
    )))
}
/// TID252
pub(crate) fn banned_relative_import(
    checker: &Checker,
    stmt: &Stmt,
    level: u32,
    module: Option<&str>,
    module_path: Option<&[String]>,
    strictness: Strictness,
) {
    // Relative imports up to this depth are permitted by the active strictness:
    // `All` bans any leading dot; `Parents` allows a single dot (siblings).
    let allowed_depth = match strictness {
        Strictness::All => 0,
        Strictness::Parents => 1,
    };
    if level <= allowed_depth {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(RelativeImports { strictness }, stmt.range());
    // Attach a fix when the absolute module path can be resolved.
    if let Some(fix) =
        fix_banned_relative_import(stmt, level, module, module_path, checker.generator())
    {
        diagnostic.set_fix(fix);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_tidy_imports/rules/banned_module_level_imports.rs | crates/ruff_linter/src/rules/flake8_tidy_imports/rules/banned_module_level_imports.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::resolve_imported_module_path;
use ruff_python_ast::{Alias, AnyNodeRef, Stmt, StmtImport, StmtImportFrom};
use ruff_text_size::Ranged;
use std::borrow::Cow;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_tidy_imports::matchers::{MatchName, MatchNameOrParent, NameMatchPolicy};
/// ## What it does
/// Checks for module-level imports that should instead be imported lazily
/// (e.g., within a function definition, or an `if TYPE_CHECKING:` block, or
/// some other nested context).
///
/// ## Why is this bad?
/// Some modules are expensive to import. For example, importing `torch` or
/// `tensorflow` can introduce a noticeable delay in the startup time of a
/// Python program.
///
/// In such cases, you may want to enforce that the module is imported lazily
/// as needed, rather than at the top of the file. This could involve inlining
/// the import into the function that uses it, rather than importing it
/// unconditionally, to ensure that the module is only imported when necessary.
///
/// ## Example
/// ```python
/// import tensorflow as tf
///
///
/// def show_version():
/// print(tf.__version__)
/// ```
///
/// Use instead:
/// ```python
/// def show_version():
/// import tensorflow as tf
///
/// print(tf.__version__)
/// ```
///
/// ## Options
/// - `lint.flake8-tidy-imports.banned-module-level-imports`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.285")]
pub(crate) struct BannedModuleLevelImports {
    /// The banned module name that was matched (e.g., `torch`).
    name: String,
}
impl Violation for BannedModuleLevelImports {
    // User-facing diagnostic message; no autofix is offered for this rule.
    #[derive_message_formats]
    fn message(&self) -> String {
        let BannedModuleLevelImports { name } = self;
        format!("`{name}` is banned at the module level")
    }
}
/// TID253
pub(crate) fn banned_module_level_imports(checker: &Checker, stmt: &Stmt) {
    // Only imports at the module's top level are banned; nested (lazy)
    // imports are precisely the suggested alternative.
    if !checker.semantic().at_top_level() {
        return;
    }
    // An import statement can yield several (policy, node) pairs — one for
    // the statement's module plus one per imported member.
    for (policy, node) in &BannedModuleImportPolicies::new(stmt, checker) {
        if let Some(banned_module) = policy.find(
            checker
                .settings()
                .flake8_tidy_imports
                .banned_module_level_imports(),
        ) {
            checker.report_diagnostic(
                BannedModuleLevelImports {
                    name: banned_module,
                },
                node.range(),
            );
        }
    }
}
/// The match policies derived from a single statement, for TID253.
pub(crate) enum BannedModuleImportPolicies<'a> {
    /// An `import foo` statement.
    Import(&'a StmtImport),
    /// A `from foo import bar` statement.
    ImportFrom {
        /// The resolved module path; `None` when resolution fails.
        module: Option<Cow<'a, str>>,
        node: &'a StmtImportFrom,
    },
    /// Any other statement — yields no policies.
    NonImport,
}

impl<'a> BannedModuleImportPolicies<'a> {
    pub(crate) fn new(stmt: &'a Stmt, checker: &Checker) -> Self {
        match stmt {
            Stmt::Import(import) => Self::Import(import),
            Stmt::ImportFrom(import @ StmtImportFrom { module, level, .. }) => {
                // Resolve relative imports (`from . import x`) against the
                // current module's qualified name.
                let module = resolve_imported_module_path(
                    *level,
                    module.as_deref(),
                    checker.module.qualified_name(),
                );
                Self::ImportFrom {
                    module,
                    node: import,
                }
            }
            _ => Self::NonImport,
        }
    }
}
impl<'a> IntoIterator for &'a BannedModuleImportPolicies<'a> {
    type Item = <Self::IntoIter as Iterator>::Item;
    type IntoIter = BannedModuleImportPoliciesIter<'a>;

    fn into_iter(self) -> Self::IntoIter {
        match self {
            BannedModuleImportPolicies::Import(import) => {
                BannedModuleImportPoliciesIter::Import(import.names.iter())
            }
            BannedModuleImportPolicies::ImportFrom { module, node } => {
                BannedModuleImportPoliciesIter::ImportFrom {
                    module: module.as_deref(),
                    names: node.names.iter(),
                    // `Some` until the statement-level policy has been yielded
                    // (see the `Iterator` impl, which `take`s it first).
                    import: Some(node),
                }
            }
            BannedModuleImportPolicies::NonImport => BannedModuleImportPoliciesIter::NonImport,
        }
    }
}
/// Iterator over the `(policy, node)` pairs for an import statement.
pub(crate) enum BannedModuleImportPoliciesIter<'a> {
    /// One policy per `import foo` alias.
    Import(std::slice::Iter<'a, Alias>),
    ImportFrom {
        /// The resolved module path; `None` suppresses all items.
        module: Option<&'a str>,
        names: std::slice::Iter<'a, Alias>,
        /// Taken on the first `next` call, so the statement-level policy is
        /// yielded exactly once before the per-member policies.
        import: Option<&'a StmtImportFrom>,
    },
    NonImport,
}
impl<'a> Iterator for BannedModuleImportPoliciesIter<'a> {
    type Item = (NameMatchPolicy<'a>, AnyNodeRef<'a>);

    fn next(&mut self) -> Option<Self::Item> {
        match self {
            // `import foo.bar`: match the module or any of its parents, one
            // policy per alias.
            Self::Import(names) => {
                let name = names.next()?;
                Some((
                    NameMatchPolicy::MatchNameOrParent(MatchNameOrParent { module: &name.name }),
                    name.into(),
                ))
            }
            Self::ImportFrom {
                module,
                import,
                names,
            } => {
                // If the module path could not be resolved, nothing to match.
                let module = module.as_ref()?;
                // First, yield a statement-level policy for the module itself:
                // `from foo import ...` is a module-level use of `foo`.
                if let Some(import) = import.take() {
                    return Some((
                        NameMatchPolicy::MatchNameOrParent(MatchNameOrParent { module }),
                        import.into(),
                    ));
                }
                // Then, yield an exact-match policy per imported member,
                // skipping star imports.
                loop {
                    let alias = names.next()?;
                    if &alias.name == "*" {
                        continue;
                    }
                    break Some((
                        NameMatchPolicy::MatchName(MatchName {
                            module,
                            member: &alias.name,
                        }),
                        alias.into(),
                    ));
                }
            }
            Self::NonImport => None,
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_tidy_imports/rules/mod.rs | crates/ruff_linter/src/rules/flake8_tidy_imports/rules/mod.rs | pub(crate) use banned_api::*;
pub(crate) use banned_module_level_imports::*;
pub(crate) use relative_imports::*;
mod banned_api;
mod banned_module_level_imports;
mod relative_imports;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_tidy_imports/rules/banned_api.rs | crates/ruff_linter/src/rules/flake8_tidy_imports/rules/banned_api.rs | use ruff_python_ast::Expr;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::name::QualifiedName;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_tidy_imports::matchers::NameMatchPolicy;
/// ## What it does
/// Checks for banned imports.
///
/// ## Why is this bad?
/// Projects may want to ensure that specific modules or module members are
/// not imported or accessed.
///
/// Security or other company policies may be a reason to impose
/// restrictions on importing external Python libraries. In some cases,
/// projects may adopt conventions around the use of certain modules or
/// module members that are not enforceable by the language itself.
///
/// This rule enforces certain import conventions project-wide automatically.
///
/// ## Options
/// - `lint.flake8-tidy-imports.banned-api`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.201")]
pub(crate) struct BannedApi {
    /// The banned module or member that was matched.
    name: String,
    /// The user-configured reason for the ban (from `ApiBan::msg`).
    message: String,
}
impl Violation for BannedApi {
    // Combines the matched name with the user-configured ban reason.
    #[derive_message_formats]
    fn message(&self) -> String {
        let BannedApi { name, message } = self;
        format!("`{name}` is banned: {message}")
    }
}
/// TID251
pub(crate) fn banned_api<T: Ranged>(checker: &Checker, policy: &NameMatchPolicy, node: &T) {
    let banned_api = &checker.settings().flake8_tidy_imports.banned_api;
    // Find the first configured ban that this import matches.
    let Some(banned_module) = policy.find(banned_api.keys().map(AsRef::as_ref)) else {
        return;
    };
    // Look up the ban to retrieve its user-facing message.
    let Some(reason) = banned_api.get(&banned_module) else {
        return;
    };
    checker.report_diagnostic(
        BannedApi {
            name: banned_module,
            message: reason.msg.clone(),
        },
        node.range(),
    );
}
/// TID251
pub(crate) fn banned_attribute_access(checker: &Checker, expr: &Expr) {
    let banned_api = &checker.settings().flake8_tidy_imports.banned_api;
    // Avoid name resolution entirely when no bans are configured.
    if banned_api.is_empty() {
        return;
    }
    let Some(qualified_name) = checker.semantic().resolve_qualified_name(expr) else {
        return;
    };
    // Report the first ban whose dotted path matches the resolved name exactly.
    let Some((banned_path, ban)) = banned_api
        .iter()
        .find(|(banned_path, _)| qualified_name == QualifiedName::from_dotted_name(banned_path))
    else {
        return;
    };
    checker.report_diagnostic(
        BannedApi {
            name: banned_path.clone(),
            message: ban.msg.clone(),
        },
        expr.range(),
    );
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_type_checking/settings.rs | crates/ruff_linter/src/rules/flake8_type_checking/settings.rs | //! Settings for the `flake8-type-checking` plugin.
use crate::display_settings;
use ruff_macros::CacheKey;
use std::fmt::{Display, Formatter};
#[derive(Debug, Clone, CacheKey)]
pub struct Settings {
    pub strict: bool,
    /// Modules exempt from enforcement (`typing` and `typing_extensions` by
    /// default — see the `Default` impl below).
    pub exempt_modules: Vec<String>,
    // NOTE(review): these two appear to feed `runtime_required_class` /
    // `runtime_required_decorators` in this plugin's helpers — confirm.
    pub runtime_required_base_classes: Vec<String>,
    pub runtime_required_decorators: Vec<String>,
    /// When enabled, runtime-evaluated annotations may be quoted instead of
    /// treated as runtime-required (see `TypingReference::from_references`).
    pub quote_annotations: bool,
}
impl Default for Settings {
    fn default() -> Self {
        Self {
            strict: false,
            // `typing` and `typing_extensions` are exempt by default.
            exempt_modules: vec!["typing".to_string(), "typing_extensions".to_string()],
            runtime_required_base_classes: vec![],
            runtime_required_decorators: vec![],
            quote_annotations: false,
        }
    }
}
impl Display for Settings {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Render each field under the `linter.flake8_type_checking` namespace
        // via the shared `display_settings!` macro.
        display_settings! {
            formatter = f,
            namespace = "linter.flake8_type_checking",
            fields = [
                self.strict,
                self.exempt_modules | array,
                self.runtime_required_base_classes | array,
                self.runtime_required_decorators | array,
                self.quote_annotations
            ]
        }
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_type_checking/imports.rs | crates/ruff_linter/src/rules/flake8_type_checking/imports.rs | use ruff_python_semantic::{AnyImport, Binding, ResolvedReferenceId};
use ruff_text_size::{Ranged, TextRange};
/// An import with its surrounding context.
pub(crate) struct ImportBinding<'a> {
/// The qualified name of the import (e.g., `typing.List` for `from typing import List`).
pub(crate) import: AnyImport<'a, 'a>,
/// The binding for the imported symbol.
pub(crate) binding: &'a Binding<'a>,
/// The first reference to the imported symbol.
pub(crate) reference_id: ResolvedReferenceId,
/// The trimmed range of the import (e.g., `List` in `from typing import List`).
pub(crate) range: TextRange,
/// The range of the import's parent statement.
pub(crate) parent_range: Option<TextRange>,
/// Whether the binding needs `from __future__ import annotations` to be imported.
pub(crate) needs_future_import: bool,
}
impl Ranged for ImportBinding<'_> {
    /// The trimmed range of the import itself (not the parent statement).
    fn range(&self) -> TextRange {
        self.range
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_type_checking/helpers.rs | crates/ruff_linter/src/rules/flake8_type_checking/helpers.rs | use std::cmp::Reverse;
use ruff_python_ast::helpers::{map_callable, map_subscript};
use ruff_python_ast::name::QualifiedName;
use ruff_python_ast::str::Quote;
use ruff_python_ast::visitor::transformer::{Transformer, walk_expr};
use ruff_python_ast::{self as ast, Decorator, Expr, StringLiteralFlags};
use ruff_python_codegen::{Generator, Stylist};
use ruff_python_parser::typing::parse_type_annotation;
use ruff_python_semantic::{
Binding, BindingKind, Modules, NodeId, ScopeKind, SemanticModel, analyze,
};
use ruff_text_size::{Ranged, TextRange};
use crate::Edit;
use crate::Locator;
use crate::settings::LinterSettings;
/// Represents the kind of an existing or potential typing-only annotation.
///
/// Note that the order of variants is important here. `Runtime` has the highest precedence when
/// calling [`TypingReference::combine`] on two references, followed by `Future`, `Quote`, and
/// `TypingOnly` with the lowest precedence.
// NOTE: `PartialOrd`/`Ord` are derived from declaration order and are
// load-bearing: `combine` below takes the minimum, i.e. the
// highest-precedence variant. Do not reorder the variants.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub(crate) enum TypingReference {
    /// The reference is in a runtime-evaluated context.
    Runtime,
    /// The reference is in a runtime-evaluated context, but the
    /// `lint.future-annotations` setting is enabled.
    ///
    /// This takes precedence if both quoting and future imports are enabled.
    Future,
    /// The reference is in a runtime-evaluated context, but the
    /// `lint.flake8-type-checking.quote-annotations` setting is enabled.
    Quote,
    /// The reference is in a typing-only context.
    TypingOnly,
}
impl TypingReference {
    /// Determine the kind of [`TypingReference`] for all references to a binding.
    ///
    /// Returns early with [`TypingReference::Runtime`] as soon as any single
    /// reference requires the binding at runtime; otherwise folds all
    /// references together via [`TypingReference::combine`].
    pub(crate) fn from_references(
        binding: &Binding,
        semantic: &SemanticModel,
        settings: &LinterSettings,
    ) -> Self {
        let references = binding
            .references()
            .map(|reference_id| semantic.reference(reference_id));
        // Start from the weakest kind; each reference can only strengthen it.
        let mut kind = Self::TypingOnly;
        for reference in references {
            if reference.in_type_checking_block() {
                kind = kind.combine(Self::TypingOnly);
                continue;
            }
            // if we're not in a type checking block, we necessarily need to be within a
            // type definition to be considered a typing reference
            if !reference.in_type_definition() {
                return Self::Runtime;
            }
            if reference.in_typing_only_annotation() || reference.in_string_type_definition() {
                kind = kind.combine(Self::TypingOnly);
                continue;
            }
            // prefer `from __future__ import annotations` to quoting
            if settings.future_annotations
                && !reference.in_typing_only_annotation()
                && reference.in_runtime_evaluated_annotation()
            {
                kind = kind.combine(Self::Future);
                continue;
            }
            if settings.flake8_type_checking.quote_annotations
                && reference.in_runtime_evaluated_annotation()
            {
                kind = kind.combine(Self::Quote);
                continue;
            }
            // A type-definition reference that can be neither quoted nor
            // deferred: the binding is runtime-required.
            return Self::Runtime;
        }
        kind
    }

    /// Logically combine two `TypingReference`s into one.
    ///
    /// `TypingReference::Runtime` has the highest precedence, followed by
    /// `TypingReference::Future`, `TypingReference::Quote`, and then `TypingReference::TypingOnly`.
    fn combine(self, other: TypingReference) -> TypingReference {
        // Relies on the derived `Ord` over declaration order.
        self.min(other)
    }

    fn is_runtime(self) -> bool {
        matches!(self, Self::Runtime)
    }
}
/// Returns `true` if the [`Binding`] represents a runtime-required import.
pub(crate) fn is_valid_runtime_import(
    binding: &Binding,
    semantic: &SemanticModel,
    settings: &LinterSettings,
) -> bool {
    // Only import bindings can qualify.
    let is_import = matches!(
        binding.kind,
        BindingKind::Import(..) | BindingKind::FromImport(..) | BindingKind::SubmoduleImport(..)
    );
    // The import must be bound at runtime and actually used at runtime.
    is_import
        && binding.context.is_runtime()
        && TypingReference::from_references(binding, semantic, settings).is_runtime()
}
/// Returns `true` if a function's parameters should be treated as runtime-required.
pub(crate) fn runtime_required_function(
    function_def: &ast::StmtFunctionDef,
    decorators: &[String],
    semantic: &SemanticModel,
) -> bool {
    // A function is runtime-required iff one of its decorators is configured
    // as runtime-required.
    runtime_required_decorators(&function_def.decorator_list, decorators, semantic)
}
/// Returns `true` if a class's assignments should be treated as runtime-required.
pub(crate) fn runtime_required_class(
    class_def: &ast::StmtClassDef,
    base_classes: &[String],
    decorators: &[String],
    semantic: &SemanticModel,
) -> bool {
    // Either a runtime-required base class or a runtime-required decorator
    // makes the class runtime-required (short-circuits on the first hit).
    runtime_required_base_class(class_def, base_classes, semantic)
        || runtime_required_decorators(&class_def.decorator_list, decorators, semantic)
}
/// Return `true` if a class is a subclass of a runtime-required base class.
fn runtime_required_base_class(
class_def: &ast::StmtClassDef,
base_classes: &[String],
semantic: &SemanticModel,
) -> bool {
analyze::class::any_qualified_base_class(class_def, semantic, &|qualified_name| {
base_classes
.iter()
.any(|base_class| QualifiedName::from_dotted_name(base_class) == qualified_name)
})
}
/// Return `true` if any decorator in `decorator_list` resolves to one of the
/// configured runtime-required `decorators` (given as dotted paths).
fn runtime_required_decorators(
    decorator_list: &[Decorator],
    decorators: &[String],
    semantic: &SemanticModel,
) -> bool {
    if decorators.is_empty() {
        return false;
    }

    decorator_list.iter().any(|decorator| {
        // Strip any call parentheses (`@app.get(...)` -> `app.get`).
        let expression = map_callable(&decorator.expression);
        semantic
            // First try to resolve the qualified name normally for cases like:
            // ```python
            // from mymodule import app
            //
            // @app.get(...)
            // def test(): ...
            // ```
            .resolve_qualified_name(expression)
            // If we can't resolve the name, then try resolving the assignment
            // in order to support cases like:
            // ```python
            // from fastapi import FastAPI
            //
            // app = FastAPI()
            //
            // @app.get(...)
            // def test(): ...
            // ```
            .or_else(|| analyze::typing::resolve_assignment(expression, semantic))
            .is_some_and(|qualified_name| {
                decorators
                    .iter()
                    .any(|decorator| QualifiedName::from_dotted_name(decorator) == qualified_name)
            })
    })
}
/// Returns `true` if an annotation will be inspected at runtime by the `dataclasses` module.
///
/// Specifically, detects whether an annotation is to either `dataclasses.InitVar` or
/// `typing.ClassVar` within a `@dataclass` class definition.
///
/// See: <https://docs.python.org/3/library/dataclasses.html#init-only-variables>
pub(crate) fn is_dataclass_meta_annotation(annotation: &Expr, semantic: &SemanticModel) -> bool {
    // Cheap pre-check: the module must have imported `dataclasses` at all.
    if !semantic.seen_module(Modules::DATACLASSES) {
        return false;
    }

    // Determine whether the assignment is in a `@dataclass` class definition.
    if let ScopeKind::Class(class_def) = semantic.current_scope().kind {
        if class_def.decorator_list.iter().any(|decorator| {
            semantic
                // `map_callable` handles both `@dataclass` and `@dataclass(...)`.
                .resolve_qualified_name(map_callable(&decorator.expression))
                .is_some_and(|qualified_name| {
                    matches!(qualified_name.segments(), ["dataclasses", "dataclass"])
                })
        }) {
            // Determine whether the annotation is `typing.ClassVar`, `dataclasses.InitVar`, or `dataclasses.KW_ONLY`.
            // (`map_subscript` unwraps e.g. `InitVar[int]` to `InitVar`.)
            return semantic
                .resolve_qualified_name(map_subscript(annotation))
                .is_some_and(|qualified_name| {
                    matches!(
                        qualified_name.segments(),
                        ["dataclasses", "InitVar" | "KW_ONLY"]
                    ) || semantic.match_typing_qualified_name(&qualified_name, "ClassVar")
                });
        }
    }

    false
}
/// Returns `true` if a function is registered as a `singledispatch` or `singledispatchmethod` interface.
///
/// For example, `fun` below is a `singledispatch` interface:
/// ```python
/// from functools import singledispatch
///
///
/// @singledispatch
/// def fun(arg, verbose=False):
///     ...
/// ```
pub(crate) fn is_singledispatch_interface(
    function_def: &ast::StmtFunctionDef,
    semantic: &SemanticModel,
) -> bool {
    function_def.decorator_list.iter().any(|decorator| {
        // The decorator must resolve to `functools.singledispatch` or
        // `functools.singledispatchmethod`.
        let Some(qualified_name) = semantic.resolve_qualified_name(&decorator.expression) else {
            return false;
        };
        matches!(
            qualified_name.segments(),
            ["functools", "singledispatch" | "singledispatchmethod"]
        )
    })
}
/// Returns `true` if a function is registered as a `singledispatch` or `singledispatchmethod` implementation.
///
/// For example, `_` below is a `singledispatch` implementation:
/// ```python
/// from functools import singledispatch
///
///
/// @singledispatch
/// def fun(arg, verbose=False):
///     ...
///
/// @fun.register
/// def _(arg: int, verbose=False):
///     ...
/// ```
pub(crate) fn is_singledispatch_implementation(
    function_def: &ast::StmtFunctionDef,
    semantic: &SemanticModel,
) -> bool {
    function_def.decorator_list.iter().any(|decorator| {
        // The decorator must be an attribute access of the form `<fun>.register`.
        let Expr::Attribute(attribute) = &decorator.expression else {
            return false;
        };
        if attribute.attr.as_str() != "register" {
            return false;
        }

        // Resolve `<fun>` back to the binding it refers to.
        let Some(id) = semantic.lookup_attribute(attribute.value.as_ref()) else {
            return false;
        };

        // `<fun>` must itself be a function definition...
        let binding = semantic.binding(id);
        let Some(function_def) = binding
            .kind
            .as_function_definition()
            .map(|id| &semantic.scopes[*id])
            .and_then(|scope| scope.kind.as_function())
        else {
            return false;
        };

        // ...and specifically one decorated as a `singledispatch` interface.
        is_singledispatch_interface(function_def, semantic)
    })
}
/// Wrap a type annotation in quotes.
///
/// This requires more than just wrapping the reference itself in quotes. For example:
/// - When quoting `Series` in `Series[pd.Timestamp]`, we want `"Series[pd.Timestamp]"`.
/// - When quoting `kubernetes` in `kubernetes.SecurityContext`, we want `"kubernetes.SecurityContext"`.
/// - When quoting `Series` in `Series["pd.Timestamp"]`, we want `"Series[pd.Timestamp]"`.
/// - When quoting `Series` in `Series[Literal["pd.Timestamp"]]`, we want `"Series[Literal['pd.Timestamp']]"`.
///
/// In general, when expanding a component of a call chain, we want to quote the entire call chain.
///
/// Implemented by recursing up the parent-expression chain until the current
/// node is no longer the quotable head of a subscript, attribute, call, or
/// `X | Y` union, then delegating to [`quote_type_expression`].
pub(crate) fn quote_annotation(
    node_id: NodeId,
    semantic: &SemanticModel,
    stylist: &Stylist,
    locator: &Locator,
    flags: StringLiteralFlags,
) -> Edit {
    let expr = semantic.expression(node_id).expect("Expression not found");
    if let Some(parent_id) = semantic.parent_expression_id(node_id) {
        match semantic.expression(parent_id) {
            Some(Expr::Subscript(parent)) => {
                if expr == parent.value.as_ref() {
                    // If we're quoting the value of a subscript, we need to quote the entire
                    // expression. For example, when quoting `DataFrame` in `DataFrame[int]`, we
                    // should generate `"DataFrame[int]"`.
                    return quote_annotation(parent_id, semantic, stylist, locator, flags);
                }
            }
            Some(Expr::Attribute(parent)) => {
                if expr == parent.value.as_ref() {
                    // If we're quoting the value of an attribute, we need to quote the entire
                    // expression. For example, when quoting `DataFrame` in `pd.DataFrame`, we
                    // should generate `"pd.DataFrame"`.
                    return quote_annotation(parent_id, semantic, stylist, locator, flags);
                }
            }
            Some(Expr::Call(parent)) => {
                if expr == parent.func.as_ref() {
                    // If we're quoting the function of a call, we need to quote the entire
                    // expression. For example, when quoting `DataFrame` in `DataFrame()`, we
                    // should generate `"DataFrame()"`.
                    return quote_annotation(parent_id, semantic, stylist, locator, flags);
                }
            }
            Some(Expr::BinOp(parent)) => {
                if parent.op.is_bit_or() {
                    // If we're quoting the left or right side of a binary operation, we need to
                    // quote the entire expression. For example, when quoting `DataFrame` in
                    // `DataFrame | Series`, we should generate `"DataFrame | Series"`.
                    return quote_annotation(parent_id, semantic, stylist, locator, flags);
                }
            }
            _ => {}
        }
    }
    quote_type_expression(expr, semantic, stylist, locator, flags)
}
/// Wrap a type expression in quotes.
///
/// This function assumes that the callee already expanded expression components
/// to the minimum acceptable range for quoting, i.e. the parent node may not be
/// a [`Expr::Subscript`], [`Expr::Attribute`], `[Expr::Call]` or `[Expr::BinOp]`.
///
/// In most cases you want to call [`quote_annotation`] instead, which provides
/// that guarantee by expanding the expression before calling into this function.
pub(crate) fn quote_type_expression(
    expr: &Expr,
    semantic: &SemanticModel,
    stylist: &Stylist,
    locator: &Locator,
    flags: StringLiteralFlags,
) -> Edit {
    // Stringify the whole expression, then replace it in place.
    let quoted = QuoteAnnotator::new(semantic, stylist, locator, flags).into_annotation(expr);
    Edit::range_replacement(quoted, expr.range())
}
/// Filter out any [`Edit`]s that are completely contained by any other [`Edit`].
pub(crate) fn filter_contained(edits: Vec<Edit>) -> Vec<Edit> {
let mut edits = edits;
// Sort such that the largest edits are prioritized.
edits.sort_unstable_by_key(|edit| (edit.start(), Reverse(edit.end())));
// Remove any edits that are completely contained by another edit.
let mut filtered: Vec<Edit> = Vec::with_capacity(edits.len());
for edit in edits {
if !filtered
.iter()
.any(|filtered_edit| filtered_edit.range().contains_range(edit.range()))
{
filtered.push(edit);
}
}
filtered
}
pub(crate) struct QuoteAnnotator<'a> {
semantic: &'a SemanticModel<'a>,
stylist: &'a Stylist<'a>,
locator: &'a Locator<'a>,
flags: StringLiteralFlags,
}
impl<'a> QuoteAnnotator<'a> {
fn new(
semantic: &'a SemanticModel<'a>,
stylist: &'a Stylist<'a>,
locator: &'a Locator<'a>,
flags: StringLiteralFlags,
) -> Self {
Self {
semantic,
stylist,
locator,
flags,
}
}
fn into_annotation(self, expr: &Expr) -> String {
let mut expr_without_forward_references = expr.clone();
self.visit_expr(&mut expr_without_forward_references);
let generator = Generator::from(self.stylist);
// we first generate the annotation with the inverse quote, so we can
// generate the string literal with the preferred quote
let subgenerator = Generator::new(self.stylist.indentation(), self.stylist.line_ending());
let annotation = subgenerator.expr(&expr_without_forward_references);
generator.expr(&Expr::from(ast::StringLiteral {
range: TextRange::default(),
node_index: ruff_python_ast::AtomicNodeIndex::NONE,
value: annotation.into_boxed_str(),
flags: self.flags,
}))
}
fn visit_annotated_slice(&self, slice: &mut Expr) {
// we only want to walk the first tuple element if it exists,
// anything else should not be transformed
if let Expr::Tuple(ast::ExprTuple { elts, .. }) = slice {
if !elts.is_empty() {
self.visit_expr(&mut elts[0]);
// The outer annotation will use the preferred quote.
// As such, any quotes found in metadata elements inside an `Annotated` slice
// should use the opposite quote to the preferred quote.
for elt in elts.iter_mut().skip(1) {
QuoteRewriter::new(self.stylist).visit_expr(elt);
}
}
}
}
}
impl Transformer for QuoteAnnotator<'_> {
fn visit_expr(&self, expr: &mut Expr) {
match expr {
Expr::Subscript(ast::ExprSubscript { value, slice, .. }) => {
if let Some(qualified_name) = self.semantic.resolve_qualified_name(value) {
if self
.semantic
.match_typing_qualified_name(&qualified_name, "Literal")
{
// The outer annotation will use the preferred quote.
// As such, any quotes found inside a `Literal` slice
// should use the opposite quote to the preferred quote.
QuoteRewriter::new(self.stylist).visit_expr(slice);
} else if self
.semantic
.match_typing_qualified_name(&qualified_name, "Annotated")
{
self.visit_annotated_slice(slice);
} else {
self.visit_expr(slice);
}
}
}
Expr::StringLiteral(literal) => {
// try to parse the forward reference and replace the string
// literal node with the parsed expression, if we fail to
// parse the forward reference, we just keep treating this
// like a regular string literal
if let Ok(annotation) = parse_type_annotation(literal, self.locator.contents()) {
*expr = annotation.expression().clone();
// we need to visit the parsed expression too
// since it may contain forward references itself
self.visit_expr(expr);
}
}
_ => {
walk_expr(self, expr);
}
}
}
}
/// A [`Transformer`] struct that rewrites all strings in an expression
/// to use a specified quotation style
#[derive(Debug)]
struct QuoteRewriter {
preferred_inner_quote: Quote,
}
impl QuoteRewriter {
fn new(stylist: &Stylist) -> Self {
Self {
preferred_inner_quote: stylist.quote().opposite(),
}
}
}
impl Transformer for QuoteRewriter {
fn visit_string_literal(&self, literal: &mut ast::StringLiteral) {
literal.flags = literal.flags.with_quote_style(self.preferred_inner_quote);
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_type_checking/mod.rs | crates/ruff_linter/src/rules/flake8_type_checking/mod.rs | //! Rules from [flake8-type-checking](https://pypi.org/project/flake8-type-checking/).
pub(crate) mod helpers;
mod imports;
pub(crate) mod rules;
pub mod settings;
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use itertools::Itertools;
use ruff_python_ast::PythonVersion;
use test_case::test_case;
use crate::registry::{Linter, Rule};
use crate::test::{test_path, test_snippet};
use crate::{assert_diagnostics, settings};
#[test_case(Rule::EmptyTypeCheckingBlock, Path::new("TC005.py"))]
#[test_case(Rule::RuntimeCastValue, Path::new("TC006.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_1.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_10.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_11.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_12.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_13.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_14.pyi"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_15.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_16.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_17.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_2.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_3.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_4.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_5.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_6.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_7.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_8.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TC004_9.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("quote.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("whitespace.py"))]
#[test_case(Rule::RuntimeStringUnion, Path::new("TC010_1.py"))]
#[test_case(Rule::RuntimeStringUnion, Path::new("TC010_2.py"))]
#[test_case(Rule::TypingOnlyFirstPartyImport, Path::new("TC001.py"))]
#[test_case(Rule::TypingOnlyStandardLibraryImport, Path::new("TC003.py"))]
#[test_case(Rule::TypingOnlyStandardLibraryImport, Path::new("init_var.py"))]
#[test_case(Rule::TypingOnlyStandardLibraryImport, Path::new("kw_only.py"))]
#[test_case(Rule::TypingOnlyStandardLibraryImport, Path::new("snapshot.py"))]
#[test_case(
Rule::TypingOnlyStandardLibraryImport,
Path::new("singledispatchmethod.py")
)]
#[test_case(Rule::TypingOnlyThirdPartyImport, Path::new("TC002.py"))]
#[test_case(Rule::TypingOnlyThirdPartyImport, Path::new("quote.py"))]
#[test_case(Rule::TypingOnlyThirdPartyImport, Path::new("singledispatch.py"))]
#[test_case(Rule::TypingOnlyThirdPartyImport, Path::new("strict.py"))]
#[test_case(Rule::TypingOnlyThirdPartyImport, Path::new("typing_modules_1.py"))]
#[test_case(Rule::TypingOnlyThirdPartyImport, Path::new("typing_modules_2.py"))]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.name(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_type_checking").join(path).as_path(),
&settings::LinterSettings::for_rule(rule_code),
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(&[Rule::TypingOnlyFirstPartyImport], Path::new("TC001.py"))]
#[test_case(&[Rule::TypingOnlyThirdPartyImport], Path::new("TC002.py"))]
#[test_case(&[Rule::TypingOnlyStandardLibraryImport], Path::new("TC003.py"))]
#[test_case(
&[
Rule::TypingOnlyFirstPartyImport,
Rule::TypingOnlyThirdPartyImport,
Rule::TypingOnlyStandardLibraryImport,
],
Path::new("TC001-3_future.py")
)]
#[test_case(&[Rule::TypingOnlyFirstPartyImport], Path::new("TC001_future.py"))]
#[test_case(&[Rule::TypingOnlyFirstPartyImport], Path::new("TC001_future_present.py"))]
fn add_future_import(rules: &[Rule], path: &Path) -> Result<()> {
let name = rules.iter().map(Rule::noqa_code).join("-");
let snapshot = format!("add_future_import__{}_{}", name, path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_type_checking").join(path).as_path(),
&settings::LinterSettings {
future_annotations: true,
// also enable quoting annotations to check the interaction. the future import
// should take precedence.
flake8_type_checking: super::settings::Settings {
quote_annotations: true,
..Default::default()
},
..settings::LinterSettings::for_rules(rules.iter().copied())
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Rule::TypingOnlyStandardLibraryImport, Path::new("TC003.py"))]
fn add_future_import_dataclass_kw_only_py313(rule: Rule, path: &Path) -> Result<()> {
let snapshot = format!(
"add_future_import_kw_only__{}_{}",
rule.noqa_code(),
path.to_string_lossy()
);
let diagnostics = test_path(
Path::new("flake8_type_checking").join(path).as_path(),
&settings::LinterSettings {
future_annotations: true,
// The issue in #21121 also didn't trigger on Python 3.14
unresolved_target_version: PythonVersion::PY313.into(),
..settings::LinterSettings::for_rule(rule)
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
// we test these rules as a pair, since they're opposites of one another
// so we want to make sure their fixes are not going around in circles.
#[test_case(Rule::UnquotedTypeAlias, Path::new("TC007.py"))]
#[test_case(Rule::QuotedTypeAlias, Path::new("TC008.py"))]
#[test_case(Rule::QuotedTypeAlias, Path::new("TC008_typing_execution_context.py"))]
fn type_alias_rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.name(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_type_checking").join(path).as_path(),
&settings::LinterSettings::for_rules(vec![
Rule::UnquotedTypeAlias,
Rule::QuotedTypeAlias,
]),
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Rule::QuotedTypeAlias, Path::new("TC008_union_syntax_pre_py310.py"))]
fn type_alias_rules_pre_py310(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("pre_py310_{}_{}", rule_code.name(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_type_checking").join(path).as_path(),
&settings::LinterSettings {
unresolved_target_version: PythonVersion::PY39.into(),
..settings::LinterSettings::for_rule(rule_code)
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("quote.py"))]
#[test_case(Rule::TypingOnlyThirdPartyImport, Path::new("quote.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("quote2.py"))]
#[test_case(Rule::TypingOnlyThirdPartyImport, Path::new("quote2.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("quote3.py"))]
#[test_case(Rule::TypingOnlyThirdPartyImport, Path::new("quote3.py"))]
fn quote(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("quote_{}_{}", rule_code.name(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_type_checking").join(path).as_path(),
&settings::LinterSettings {
flake8_type_checking: super::settings::Settings {
quote_annotations: true,
..Default::default()
},
..settings::LinterSettings::for_rule(rule_code)
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Rule::TypingOnlyThirdPartyImport, Path::new("strict.py"))]
#[test_case(Rule::TypingOnlyStandardLibraryImport, Path::new("init_var.py"))]
#[test_case(Rule::TypingOnlyStandardLibraryImport, Path::new("kw_only.py"))]
fn strict(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("strict_{}_{}", rule_code.name(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_type_checking").join(path).as_path(),
&settings::LinterSettings {
flake8_type_checking: super::settings::Settings {
strict: true,
..Default::default()
},
..settings::LinterSettings::for_rule(rule_code)
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Rule::TypingOnlyThirdPartyImport, Path::new("exempt_modules.py"))]
fn exempt_modules(rule_code: Rule, path: &Path) -> Result<()> {
let diagnostics = test_path(
Path::new("flake8_type_checking").join(path).as_path(),
&settings::LinterSettings {
flake8_type_checking: super::settings::Settings {
exempt_modules: vec!["pandas".to_string()],
..Default::default()
},
..settings::LinterSettings::for_rule(rule_code)
},
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test_case(
Rule::TypingOnlyStandardLibraryImport,
Path::new("exempt_type_checking_1.py")
)]
#[test_case(
Rule::TypingOnlyStandardLibraryImport,
Path::new("exempt_type_checking_2.py")
)]
#[test_case(
Rule::TypingOnlyStandardLibraryImport,
Path::new("exempt_type_checking_3.py")
)]
fn exempt_type_checking(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.name(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_type_checking").join(path).as_path(),
&settings::LinterSettings {
flake8_type_checking: super::settings::Settings {
exempt_modules: vec![],
strict: true,
..Default::default()
},
..settings::LinterSettings::for_rule(rule_code)
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(
Rule::RuntimeImportInTypeCheckingBlock,
Path::new("runtime_evaluated_base_classes_1.py")
)]
#[test_case(
Rule::TypingOnlyThirdPartyImport,
Path::new("runtime_evaluated_base_classes_2.py")
)]
#[test_case(
Rule::TypingOnlyStandardLibraryImport,
Path::new("runtime_evaluated_base_classes_3.py")
)]
#[test_case(
Rule::TypingOnlyStandardLibraryImport,
Path::new("runtime_evaluated_base_classes_4.py")
)]
#[test_case(
Rule::TypingOnlyThirdPartyImport,
Path::new("runtime_evaluated_base_classes_5.py")
)]
fn runtime_evaluated_base_classes(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.name(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_type_checking").join(path).as_path(),
&settings::LinterSettings {
flake8_type_checking: super::settings::Settings {
runtime_required_base_classes: vec![
"pydantic.BaseModel".to_string(),
"sqlalchemy.orm.DeclarativeBase".to_string(),
],
..Default::default()
},
..settings::LinterSettings::for_rule(rule_code)
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(
Rule::RuntimeImportInTypeCheckingBlock,
Path::new("runtime_evaluated_decorators_1.py")
)]
#[test_case(
Rule::TypingOnlyThirdPartyImport,
Path::new("runtime_evaluated_decorators_2.py")
)]
#[test_case(
Rule::TypingOnlyStandardLibraryImport,
Path::new("runtime_evaluated_decorators_3.py")
)]
fn runtime_evaluated_decorators(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.name(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_type_checking").join(path).as_path(),
&settings::LinterSettings {
flake8_type_checking: super::settings::Settings {
runtime_required_decorators: vec![
"attrs.define".to_string(),
"attrs.frozen".to_string(),
"pydantic.validate_call".to_string(),
],
..Default::default()
},
..settings::LinterSettings::for_rule(rule_code)
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Rule::TypingOnlyStandardLibraryImport, Path::new("module/direct.py"))]
#[test_case(Rule::TypingOnlyStandardLibraryImport, Path::new("module/import.py"))]
#[test_case(
Rule::TypingOnlyStandardLibraryImport,
Path::new("module/undefined.py")
)]
fn base_class_same_file(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.name(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_type_checking").join(path).as_path(),
&settings::LinterSettings {
flake8_type_checking: super::settings::Settings {
runtime_required_base_classes: vec!["module.direct.MyBaseClass".to_string()],
..Default::default()
},
..settings::LinterSettings::for_rule(rule_code)
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("module/app.py"))]
#[test_case(Rule::TypingOnlyStandardLibraryImport, Path::new("module/routes.py"))]
fn decorator_same_file(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.name(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_type_checking").join(path).as_path(),
&settings::LinterSettings {
flake8_type_checking: super::settings::Settings {
runtime_required_decorators: vec![
"fastapi.FastAPI.get".to_string(),
"fastapi.FastAPI.put".to_string(),
"module.app.AppContainer.app.get".to_string(),
"module.app.AppContainer.app.put".to_string(),
"module.app.app.get".to_string(),
"module.app.app.put".to_string(),
"module.app.app_container.app.get".to_string(),
"module.app.app_container.app.put".to_string(),
],
..Default::default()
},
..settings::LinterSettings::for_rule(rule_code)
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(
r"
from __future__ import annotations
import pandas as pd
def f(x: pd.DataFrame):
pass
",
"no_typing_import"
)]
#[test_case(
r"
from __future__ import annotations
from typing import TYPE_CHECKING
import pandas as pd
def f(x: pd.DataFrame):
pass
",
"typing_import_before_package_import"
)]
#[test_case(
r"
from __future__ import annotations
import pandas as pd
from typing import TYPE_CHECKING
def f(x: pd.DataFrame):
pass
",
"typing_import_after_package_import"
)]
#[test_case(
r"
from __future__ import annotations
import pandas as pd
def f(x: pd.DataFrame):
pass
from typing import TYPE_CHECKING
",
"typing_import_after_usage"
)]
#[test_case(
r"
from __future__ import annotations
from typing import TYPE_CHECKING
import pandas as pd
if TYPE_CHECKING:
import os
def f(x: pd.DataFrame):
pass
",
"type_checking_block_own_line"
)]
#[test_case(
r"
from __future__ import annotations
from typing import TYPE_CHECKING
import pandas as pd
if TYPE_CHECKING: import os
def f(x: pd.DataFrame):
pass
",
"type_checking_block_inline"
)]
#[test_case(
r"
from __future__ import annotations
from typing import TYPE_CHECKING
import pandas as pd
if TYPE_CHECKING:
# This is a comment.
import os
def f(x: pd.DataFrame):
pass
",
"type_checking_block_comment"
)]
#[test_case(
r"
from __future__ import annotations
from typing import TYPE_CHECKING
import pandas as pd
def f(x: pd.DataFrame):
pass
if TYPE_CHECKING:
import os
",
"type_checking_block_after_usage"
)]
#[test_case(
r"
from __future__ import annotations
from pandas import (
DataFrame, # DataFrame
Series, # Series
)
def f(x: DataFrame):
pass
",
"import_from"
)]
#[test_case(
r"
from __future__ import annotations
from typing import TYPE_CHECKING
from pandas import (
DataFrame, # DataFrame
Series, # Series
)
if TYPE_CHECKING:
import os
def f(x: DataFrame):
pass
",
"import_from_type_checking_block"
)]
#[test_case(
r"
from __future__ import annotations
from typing import TYPE_CHECKING
from pandas import (
DataFrame, # DataFrame
Series, # Series
)
def f(x: DataFrame, y: Series):
pass
",
"multiple_members"
)]
#[test_case(
r"
from __future__ import annotations
from typing import TYPE_CHECKING
import os, sys
def f(x: os, y: sys):
pass
",
"multiple_modules_same_type"
)]
#[test_case(
r"
from __future__ import annotations
from typing import TYPE_CHECKING
import os, pandas
def f(x: os, y: pandas):
pass
",
"multiple_modules_different_types"
)]
#[test_case(
r"
from __future__ import annotations
from typing import TYPE_CHECKING, TypeAlias
if TYPE_CHECKING:
from foo import Foo # TC004
a: TypeAlias = Foo | None # OK
",
"tc004_precedence_over_tc007"
)]
#[test_case(
r"
from __future__ import annotations
from typing import TypeAlias
a: TypeAlias = 'int | None' # TC008
b: TypeAlias = 'int' | None # TC010
",
"tc010_precedence_over_tc008"
)]
fn contents(contents: &str, snapshot: &str) {
let diagnostics = test_snippet(
contents,
&settings::LinterSettings::for_rules(Linter::Flake8TypeChecking.rules()),
);
assert_diagnostics!(snapshot, diagnostics);
}
#[test_case(
r"
from __future__ import annotations
TYPE_CHECKING = False
if TYPE_CHECKING:
from types import TracebackType
def foo(tb: TracebackType): ...
",
"github_issue_15681_regression_test"
)]
#[test_case(
r"
from __future__ import annotations
import pathlib # TC003
TYPE_CHECKING = False
if TYPE_CHECKING:
from types import TracebackType
def foo(tb: TracebackType) -> pathlib.Path: ...
",
"github_issue_15681_fix_test"
)]
#[test_case(
r"
from __future__ import annotations
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Any, Literal, Never, Self
else:
def __getattr__(name: str):
pass
__all__ = ['TYPE_CHECKING', 'Any', 'Literal', 'Never', 'Self']
",
"github_issue_16045"
)]
fn contents_preview(contents: &str, snapshot: &str) {
let diagnostics = test_snippet(
contents,
&settings::LinterSettings {
preview: settings::types::PreviewMode::Enabled,
..settings::LinterSettings::for_rules(Linter::Flake8TypeChecking.rules())
},
);
assert_diagnostics!(snapshot, diagnostics);
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_type_checking/rules/runtime_import_in_type_checking_block.rs | crates/ruff_linter/src/rules/flake8_type_checking/rules/runtime_import_in_type_checking_block.rs | use std::borrow::Cow;
use anyhow::Result;
use rustc_hash::FxHashMap;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::{Imported, NodeId, Scope, ScopeId};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::fix;
use crate::importer::ImportedMembers;
use crate::rules::flake8_type_checking::helpers::{filter_contained, quote_annotation};
use crate::rules::flake8_type_checking::imports::ImportBinding;
use crate::{Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for imports that are required at runtime but are only defined in
/// type-checking blocks.
///
/// ## Why is this bad?
/// The type-checking block is not executed at runtime, so if the only definition
/// of a symbol is in a type-checking block, it will not be available at runtime.
///
/// If [`lint.flake8-type-checking.quote-annotations`] is set to `true`,
/// annotations will be wrapped in quotes if doing so would enable the
/// corresponding import to remain in the type-checking block.
///
/// ## Example
/// ```python
/// from typing import TYPE_CHECKING
///
/// if TYPE_CHECKING:
/// import foo
///
///
/// def bar() -> None:
/// foo.bar() # raises NameError: name 'foo' is not defined
/// ```
///
/// Use instead:
/// ```python
/// import foo
///
///
/// def bar() -> None:
/// foo.bar()
/// ```
///
/// ## Options
/// - `lint.flake8-type-checking.quote-annotations`
///
/// ## References
/// - [PEP 563: Runtime annotation resolution and `TYPE_CHECKING`](https://peps.python.org/pep-0563/#runtime-annotation-resolution-and-type-checking)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.8.0")]
pub(crate) struct RuntimeImportInTypeCheckingBlock {
qualified_name: String,
strategy: Strategy,
}
impl Violation for RuntimeImportInTypeCheckingBlock {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
let Self {
qualified_name,
strategy,
} = self;
match strategy {
Strategy::MoveImport => format!(
"Move import `{qualified_name}` out of type-checking block. Import is used for more than type hinting."
),
Strategy::QuoteUsages => format!(
"Quote references to `{qualified_name}`. Import is in a type-checking block."
),
}
}
fn fix_title(&self) -> Option<String> {
let Self { strategy, .. } = self;
match strategy {
Strategy::MoveImport => Some("Move out of type-checking block".to_string()),
Strategy::QuoteUsages => Some("Quote references".to_string()),
}
}
}
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
enum Action {
/// The import should be moved out of the type-checking block.
Move,
/// All usages of the import should be wrapped in quotes.
Quote,
/// The import should be ignored.
Ignore,
}
/// TC004
pub(crate) fn runtime_import_in_type_checking_block(checker: &Checker, scope: &Scope) {
// Collect all runtime imports by statement.
let mut actions: FxHashMap<(NodeId, Action), Vec<ImportBinding>> = FxHashMap::default();
// If we have a module-level __getattr__ method we don't necessarily know that the
// references in __all__ refer to typing-only imports, the __getattr__ might be
// able to handle that attribute access and return the correct thing at runtime.
let ignore_dunder_all_references = checker
.semantic()
.lookup_symbol_in_scope("__getattr__", ScopeId::global(), false)
.is_some();
for binding_id in scope.binding_ids() {
let binding = checker.semantic().binding(binding_id);
let Some(import) = binding.as_any_import() else {
continue;
};
let Some(reference_id) = binding.references.first().copied() else {
continue;
};
if binding.context.is_typing()
&& binding.references().any(|reference_id| {
let reference = checker.semantic().reference(reference_id);
reference.in_runtime_context()
&& !(ignore_dunder_all_references && reference.in_dunder_all_definition())
})
{
let Some(node_id) = binding.source else {
continue;
};
let import = ImportBinding {
import,
reference_id,
binding,
range: binding.range(),
parent_range: binding.parent_range(checker.semantic()),
needs_future_import: false, // TODO(brent) See #19359.
};
if checker.rule_is_ignored(Rule::RuntimeImportInTypeCheckingBlock, import.start())
|| import.parent_range.is_some_and(|parent_range| {
checker.rule_is_ignored(
Rule::RuntimeImportInTypeCheckingBlock,
parent_range.start(),
)
})
{
actions
.entry((node_id, Action::Ignore))
.or_default()
.push(import);
} else {
// Determine whether the member should be fixed by moving the import out of the
// type-checking block, or by quoting its references.
// TODO: We should check `reference.in_annotated_type_alias()`
// as well to match the behavior of the flake8 plugin
// although maybe the best way forward is to add an
// additional setting to configure whether quoting
// or moving the import is preferred for type aliases
// since some people will consistently use their
// type aliases at runtimes, while others won't, so
// the best solution is unclear.
if checker.settings().flake8_type_checking.quote_annotations
&& binding.references().all(|reference_id| {
let reference = checker.semantic().reference(reference_id);
reference.in_typing_context() || reference.in_runtime_evaluated_annotation()
})
{
actions
.entry((node_id, Action::Quote))
.or_default()
.push(import);
} else {
actions
.entry((node_id, Action::Move))
.or_default()
.push(import);
}
}
}
}
for ((node_id, action), imports) in actions {
match action {
// Generate a diagnostic for every import, but share a fix across all imports within the same
// statement (excluding those that are ignored).
Action::Move => {
let fix = move_imports(checker, node_id, &imports).ok();
for ImportBinding {
import,
range,
parent_range,
..
} in imports
{
let mut diagnostic = checker.report_diagnostic(
RuntimeImportInTypeCheckingBlock {
qualified_name: import.qualified_name().to_string(),
strategy: Strategy::MoveImport,
},
range,
);
if let Some(range) = parent_range {
diagnostic.set_parent(range.start());
}
if let Some(fix) = fix.as_ref() {
diagnostic.set_fix(fix.clone());
}
}
}
// Generate a diagnostic for every import, but share a fix across all imports within the same
// statement (excluding those that are ignored).
Action::Quote => {
let fix = quote_imports(checker, node_id, &imports);
for ImportBinding {
import,
range,
parent_range,
..
} in imports
{
let mut diagnostic = checker.report_diagnostic(
RuntimeImportInTypeCheckingBlock {
qualified_name: import.qualified_name().to_string(),
strategy: Strategy::QuoteUsages,
},
range,
);
if let Some(range) = parent_range {
diagnostic.set_parent(range.start());
}
diagnostic.set_fix(fix.clone());
}
}
// Separately, generate a diagnostic for every _ignored_ import, to ensure that the
// suppression comments aren't marked as unused.
Action::Ignore => {
for ImportBinding {
import,
range,
parent_range,
..
} in imports
{
let mut diagnostic = checker.report_diagnostic(
RuntimeImportInTypeCheckingBlock {
qualified_name: import.qualified_name().to_string(),
strategy: Strategy::MoveImport,
},
range,
);
if let Some(range) = parent_range {
diagnostic.set_parent(range.start());
}
}
}
}
}
}
/// Generate a [`Fix`] to quote runtime usages for imports in a type-checking block.
fn quote_imports(checker: &Checker, node_id: NodeId, imports: &[ImportBinding]) -> Fix {
let quote_reference_edits = filter_contained(
imports
.iter()
.flat_map(|ImportBinding { binding, .. }| {
binding.references.iter().filter_map(|reference_id| {
let reference = checker.semantic().reference(*reference_id);
if reference.in_runtime_context() {
Some(quote_annotation(
reference.expression_id()?,
checker.semantic(),
checker.stylist(),
checker.locator(),
checker.default_string_flags(),
))
} else {
None
}
})
})
.collect::<Vec<_>>(),
);
let mut rest = quote_reference_edits.into_iter();
let head = rest.next().expect("Expected at least one reference");
Fix::unsafe_edits(head, rest).isolate(Checker::isolation(
checker.semantic().parent_statement_id(node_id),
))
}
/// Generate a [`Fix`] to remove runtime imports from a type-checking block.
fn move_imports(checker: &Checker, node_id: NodeId, imports: &[ImportBinding]) -> Result<Fix> {
let statement = checker.semantic().statement(node_id);
let parent = checker.semantic().parent_statement(node_id);
let member_names: Vec<Cow<'_, str>> = imports
.iter()
.map(|ImportBinding { import, .. }| import)
.map(Imported::member_name)
.collect();
// Find the first reference across all imports.
let at = imports
.iter()
.map(|ImportBinding { reference_id, .. }| {
checker.semantic().reference(*reference_id).start()
})
.min()
.expect("Expected at least one import");
// Step 1) Remove the import.
let remove_import_edit = fix::edits::remove_unused_imports(
member_names.iter().map(AsRef::as_ref),
statement,
parent,
checker.locator(),
checker.stylist(),
checker.indexer(),
)?;
// Step 2) Add the import to the top-level.
let add_import_edit = checker.importer().runtime_import_edit(
&ImportedMembers {
statement,
names: member_names.iter().map(AsRef::as_ref).collect(),
},
at,
)?;
Ok(
Fix::unsafe_edits(remove_import_edit, add_import_edit.into_edits()).isolate(
Checker::isolation(checker.semantic().parent_statement_id(node_id)),
),
)
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Strategy {
/// The import should be moved out of the type-checking block.
///
/// This is required when at least one reference to the symbol is in a runtime-required context.
/// For example, given `from foo import Bar`, `x = Bar()` would be runtime-required.
MoveImport,
/// All usages of the import should be wrapped in quotes.
///
/// This is acceptable when all references to the symbol are in a runtime-evaluated, but not
/// runtime-required context. For example, given `from foo import Bar`, `x: Bar` would be
/// runtime-evaluated, but not runtime-required.
QuoteUsages,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_type_checking/rules/empty_type_checking_block.rs | crates/ruff_linter/src/rules/flake8_type_checking/rules/empty_type_checking_block.rs | use ruff_python_ast as ast;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::analyze::typing;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix;
use crate::{AlwaysFixableViolation, Fix};
/// ## What it does
/// Checks for an empty type-checking block.
///
/// ## Why is this bad?
/// The type-checking block does not do anything and should be removed to avoid
/// confusion.
///
/// ## Example
/// ```python
/// from typing import TYPE_CHECKING
///
/// if TYPE_CHECKING:
/// pass
///
/// print("Hello, world!")
/// ```
///
/// Use instead:
/// ```python
/// print("Hello, world!")
/// ```
///
/// ## References
/// - [PEP 563: Runtime annotation resolution and `TYPE_CHECKING`](https://peps.python.org/pep-0563/#runtime-annotation-resolution-and-type-checking)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.8.0")]
pub(crate) struct EmptyTypeCheckingBlock;
impl AlwaysFixableViolation for EmptyTypeCheckingBlock {
#[derive_message_formats]
fn message(&self) -> String {
"Found empty type-checking block".to_string()
}
fn fix_title(&self) -> String {
"Delete empty type-checking block".to_string()
}
}
/// TC005
pub(crate) fn empty_type_checking_block(checker: &Checker, stmt: &ast::StmtIf) {
if !typing::is_type_checking_block(stmt, checker.semantic()) {
return;
}
if !stmt.elif_else_clauses.is_empty() {
return;
}
let [stmt] = stmt.body.as_slice() else {
return;
};
if !stmt.is_pass_stmt() {
return;
}
let mut diagnostic = checker.report_diagnostic(EmptyTypeCheckingBlock, stmt.range());
// Delete the entire type-checking block.
let stmt = checker.semantic().current_statement();
let parent = checker.semantic().current_statement_parent();
let edit = fix::edits::delete_stmt(stmt, parent, checker.locator(), checker.indexer());
diagnostic.set_fix(Fix::safe_edit(edit).isolate(Checker::isolation(
checker.semantic().current_statement_parent_id(),
)));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_type_checking/rules/runtime_cast_value.rs | crates/ruff_linter/src/rules/flake8_type_checking/rules/runtime_cast_value.rs | use ruff_python_ast::Expr;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::rules::flake8_type_checking::helpers::quote_type_expression;
use crate::{AlwaysFixableViolation, Fix};
/// ## What it does
/// Checks for unquoted type expressions in `typing.cast()` calls.
///
/// ## Why is this bad?
/// This rule helps enforce a consistent style across your codebase.
///
/// It's often necessary to quote the first argument passed to `cast()`,
/// as type expressions can involve forward references, or references
/// to symbols which are only imported in `typing.TYPE_CHECKING` blocks.
/// This can lead to a visual inconsistency across different `cast()` calls,
/// where some type expressions are quoted but others are not. By enabling
/// this rule, you ensure that all type expressions passed to `cast()` are
/// quoted, enforcing stylistic consistency across all of your `cast()` calls.
///
/// In some cases where `cast()` is used in a hot loop, this rule may also
/// help avoid overhead from repeatedly evaluating complex type expressions at
/// runtime.
///
/// ## Example
/// ```python
/// from typing import cast
///
/// x = cast(dict[str, int], foo)
/// ```
///
/// Use instead:
/// ```python
/// from typing import cast
///
/// x = cast("dict[str, int]", foo)
/// ```
///
/// ## Fix safety
/// This fix is safe as long as the type expression doesn't span multiple
/// lines and includes comments on any of the lines apart from the last one.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.10.0")]
pub(crate) struct RuntimeCastValue;
impl AlwaysFixableViolation for RuntimeCastValue {
#[derive_message_formats]
fn message(&self) -> String {
"Add quotes to type expression in `typing.cast()`".to_string()
}
fn fix_title(&self) -> String {
"Add quotes".to_string()
}
}
/// TC006
pub(crate) fn runtime_cast_value(checker: &Checker, type_expr: &Expr) {
if type_expr.is_string_literal_expr() {
return;
}
let mut diagnostic = checker.report_diagnostic(RuntimeCastValue, type_expr.range());
let edit = quote_type_expression(
type_expr,
checker.semantic(),
checker.stylist(),
checker.locator(),
checker.default_string_flags(),
);
if checker.comment_ranges().intersects(type_expr.range()) {
diagnostic.set_fix(Fix::unsafe_edit(edit));
} else {
diagnostic.set_fix(Fix::safe_edit(edit));
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_type_checking/rules/runtime_string_union.rs | crates/ruff_linter/src/rules/flake8_type_checking/rules/runtime_string_union.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_ast::{Expr, Operator};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for the presence of string literals in `X | Y`-style union types.
///
/// ## Why is this bad?
/// [PEP 604] introduced a new syntax for union type annotations based on the
/// `|` operator.
///
/// While Python's type annotations can typically be wrapped in strings to
/// avoid runtime evaluation, the use of a string member within an `X | Y`-style
/// union type will cause a runtime error.
///
/// Instead, remove the quotes, wrap the _entire_ union in quotes, or use
/// `from __future__ import annotations` to disable runtime evaluation of
/// annotations entirely.
///
/// ## Example
/// ```python
/// var: "Foo" | None
///
///
/// class Foo: ...
/// ```
///
/// Use instead:
/// ```python
/// from __future__ import annotations
///
/// var: Foo | None
///
///
/// class Foo: ...
/// ```
///
/// Or, extend the quotes to include the entire union:
/// ```python
/// var: "Foo | None"
///
///
/// class Foo: ...
/// ```
///
/// ## References
/// - [PEP 563 - Postponed Evaluation of Annotations](https://peps.python.org/pep-0563/)
/// - [PEP 604 – Allow writing union types as `X | Y`](https://peps.python.org/pep-0604/)
///
/// [PEP 604]: https://peps.python.org/pep-0604/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.8.0")]
pub(crate) struct RuntimeStringUnion;
impl Violation for RuntimeStringUnion {
#[derive_message_formats]
fn message(&self) -> String {
"Invalid string member in `X | Y`-style union type".to_string()
}
}
/// TC010
pub(crate) fn runtime_string_union(checker: &Checker, expr: &Expr) {
if !checker.semantic().in_type_definition() {
return;
}
if !checker.semantic().execution_context().is_runtime() {
return;
}
// Search for strings within the binary operator.
let mut strings = Vec::new();
traverse_op(expr, &mut strings);
for string in strings {
checker.report_diagnostic(RuntimeStringUnion, string.range());
}
}
/// Collect all string members in possibly-nested binary `|` expressions.
fn traverse_op<'a>(expr: &'a Expr, strings: &mut Vec<&'a Expr>) {
match expr {
Expr::StringLiteral(_) => {
strings.push(expr);
}
Expr::BytesLiteral(_) => {
strings.push(expr);
}
Expr::BinOp(ast::ExprBinOp {
left,
right,
op: Operator::BitOr,
..
}) => {
traverse_op(left, strings);
traverse_op(right, strings);
}
_ => {}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_type_checking/rules/mod.rs | crates/ruff_linter/src/rules/flake8_type_checking/rules/mod.rs | pub(crate) use empty_type_checking_block::*;
pub(crate) use runtime_cast_value::*;
pub(crate) use runtime_import_in_type_checking_block::*;
pub(crate) use runtime_string_union::*;
pub(crate) use type_alias_quotes::*;
pub(crate) use typing_only_runtime_import::*;
mod empty_type_checking_block;
mod runtime_cast_value;
mod runtime_import_in_type_checking_block;
mod runtime_string_union;
mod type_alias_quotes;
mod typing_only_runtime_import;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_type_checking/rules/type_alias_quotes.rs | crates/ruff_linter/src/rules/flake8_type_checking/rules/type_alias_quotes.rs | use ast::{ExprContext, Operator};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_ast::{Expr, Stmt};
use ruff_python_semantic::{Binding, SemanticModel, TypingOnlyBindingsStatus};
use ruff_python_stdlib::typing::{is_pep_593_generic_type, is_standard_library_literal};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::registry::Rule;
use crate::rules::flake8_type_checking::helpers::quote_type_expression;
use crate::{AlwaysFixableViolation, Edit, Fix, FixAvailability, Violation};
use ruff_python_ast::PythonVersion;
use ruff_python_ast::token::parenthesized_range;
/// ## What it does
/// Checks if [PEP 613] explicit type aliases contain references to
/// symbols that are not available at runtime.
///
/// ## Why is this bad?
/// Referencing type-checking only symbols results in a `NameError` at runtime.
///
/// ## Example
/// ```python
/// from typing import TYPE_CHECKING, TypeAlias
///
/// if TYPE_CHECKING:
/// from foo import Foo
/// OptFoo: TypeAlias = Foo | None
/// ```
///
/// Use instead:
/// ```python
/// from typing import TYPE_CHECKING, TypeAlias
///
/// if TYPE_CHECKING:
/// from foo import Foo
/// OptFoo: TypeAlias = "Foo | None"
/// ```
///
/// ## Fix safety
/// This rule's fix is currently always marked as unsafe, since runtime
/// typing libraries may try to access/resolve the type alias in a way
/// that we can't statically determine during analysis and relies on the
/// type alias not containing any forward references.
///
/// ## References
/// - [PEP 613 – Explicit Type Aliases](https://peps.python.org/pep-0613/)
///
/// [PEP 613]: https://peps.python.org/pep-0613/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.10.0")]
pub(crate) struct UnquotedTypeAlias;
impl Violation for UnquotedTypeAlias {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
"Add quotes to type alias".to_string()
}
fn fix_title(&self) -> Option<String> {
Some("Add quotes".to_string())
}
}
/// ## What it does
/// Checks for unnecessary quotes in [PEP 613] explicit type aliases
/// and [PEP 695] type statements.
///
/// ## Why is this bad?
/// Unnecessary string forward references can lead to additional overhead
/// in runtime libraries making use of type hints. They can also have bad
/// interactions with other runtime uses like [PEP 604] type unions.
///
/// PEP-613 type aliases are only flagged by the rule if Ruff can have high
/// confidence that the quotes are unnecessary. Specifically, any PEP-613
/// type alias where the type expression on the right-hand side contains
/// subscripts or attribute accesses will not be flagged. This is because
/// type aliases can reference types that are, for example, generic in stub
/// files but not at runtime. That can mean that a type checker expects the
/// referenced type to be subscripted with type arguments despite the fact
/// that doing so would fail at runtime if the type alias value was not
/// quoted. Similarly, a type alias might need to reference a module-level
/// attribute that exists in a stub file but not at runtime, meaning that
/// the type alias value would need to be quoted to avoid a runtime error.
///
/// ## Example
/// Given:
/// ```python
/// from typing import TypeAlias
///
/// OptInt: TypeAlias = "int | None"
/// ```
///
/// Use instead:
/// ```python
/// from typing import TypeAlias
///
/// OptInt: TypeAlias = int | None
/// ```
///
/// Given:
/// ```python
/// type OptInt = "int | None"
/// ```
///
/// Use instead:
/// ```python
/// type OptInt = int | None
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as safe, unless the type annotation contains comments.
///
/// ## See also
/// This rule only applies to type aliases in non-stub files. For removing quotes in other
/// contexts or in stub files, see:
///
/// - [`quoted-annotation-in-stub`][PYI020]: A rule that
/// removes all quoted annotations from stub files
/// - [`quoted-annotation`][UP037]: A rule that removes unnecessary quotes
/// from *annotations* in runtime files.
///
/// ## References
/// - [PEP 613 – Explicit Type Aliases](https://peps.python.org/pep-0613/)
/// - [PEP 695: Generic Type Alias](https://peps.python.org/pep-0695/#generic-type-alias)
/// - [PEP 604 – Allow writing union types as `X | Y`](https://peps.python.org/pep-0604/)
///
/// [PEP 604]: https://peps.python.org/pep-0604/
/// [PEP 613]: https://peps.python.org/pep-0613/
/// [PEP 695]: https://peps.python.org/pep-0695/#generic-type-alias
/// [PYI020]: https://docs.astral.sh/ruff/rules/quoted-annotation-in-stub/
/// [UP037]: https://docs.astral.sh/ruff/rules/quoted-annotation/
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.8.1")]
pub(crate) struct QuotedTypeAlias;
impl AlwaysFixableViolation for QuotedTypeAlias {
#[derive_message_formats]
fn message(&self) -> String {
"Remove quotes from type alias".to_string()
}
fn fix_title(&self) -> String {
"Remove quotes".to_string()
}
}
/// TC007
pub(crate) fn unquoted_type_alias(checker: &Checker, binding: &Binding) {
if binding.context.is_typing() {
return;
}
if !binding.is_annotated_type_alias() {
return;
}
let Some(Stmt::AnnAssign(ast::StmtAnnAssign {
value: Some(expr), ..
})) = binding.statement(checker.semantic())
else {
return;
};
let mut names = Vec::new();
collect_typing_references(checker, expr, &mut names);
if names.is_empty() {
return;
}
// We generate a diagnostic for every name that needs to be quoted
// but we currently emit a single shared fix that quotes the entire
// expression.
//
// Eventually we may try to be more clever and come up with the
// minimal set of subexpressions that need to be quoted.
let parent = expr.range().start();
let edit = quote_type_expression(
expr,
checker.semantic(),
checker.stylist(),
checker.locator(),
checker.default_string_flags(),
);
for name in names {
let mut diagnostic = checker.report_diagnostic(UnquotedTypeAlias, name.range());
diagnostic.set_parent(parent);
diagnostic.set_fix(Fix::unsafe_edit(edit.clone()));
}
}
/// Traverses the type expression and collects `[Expr::Name]` nodes that are
/// not available at runtime and thus need to be quoted, unless they would
/// become available through `[Rule::RuntimeImportInTypeCheckingBlock]`.
fn collect_typing_references<'a>(
checker: &Checker,
expr: &'a Expr,
names: &mut Vec<&'a ast::ExprName>,
) {
match expr {
Expr::BinOp(ast::ExprBinOp { left, right, .. }) => {
collect_typing_references(checker, left, names);
collect_typing_references(checker, right, names);
}
Expr::Starred(ast::ExprStarred {
value,
ctx: ExprContext::Load,
..
})
| Expr::Attribute(ast::ExprAttribute { value, .. }) => {
collect_typing_references(checker, value, names);
}
Expr::Subscript(ast::ExprSubscript { value, slice, .. }) => {
collect_typing_references(checker, value, names);
if let Some(qualified_name) = checker.semantic().resolve_qualified_name(value) {
if is_standard_library_literal(qualified_name.segments()) {
return;
}
if is_pep_593_generic_type(qualified_name.segments()) {
// First argument is a type (including forward references); the
// rest are arbitrary Python objects.
if let Expr::Tuple(ast::ExprTuple { elts, .. }) = slice.as_ref() {
let mut iter = elts.iter();
if let Some(expr) = iter.next() {
collect_typing_references(checker, expr, names);
}
}
return;
}
}
collect_typing_references(checker, slice, names);
}
Expr::List(ast::ExprList { elts, .. }) | Expr::Tuple(ast::ExprTuple { elts, .. }) => {
for elt in elts {
collect_typing_references(checker, elt, names);
}
}
Expr::Name(name) => {
let Some(binding_id) = checker.semantic().resolve_name(name) else {
return;
};
if checker
.semantic()
.simulate_runtime_load(name, TypingOnlyBindingsStatus::Disallowed)
.is_some()
{
return;
}
// if TC004 is enabled we shouldn't emit a TC007 for a reference to
// a binding that would emit a TC004, otherwise the fixes will never
// stabilize and keep going in circles
if checker.is_rule_enabled(Rule::RuntimeImportInTypeCheckingBlock)
&& checker
.semantic()
.binding(binding_id)
.references()
.any(|id| checker.semantic().reference(id).in_runtime_context())
{
return;
}
names.push(name);
}
_ => {}
}
}
/// TC008
pub(crate) fn quoted_type_alias(
checker: &Checker,
expr: &Expr,
annotation_expr: &ast::ExprStringLiteral,
) {
if checker.is_rule_enabled(Rule::RuntimeStringUnion) {
// this should return a TC010 error instead
if let Some(Expr::BinOp(ast::ExprBinOp {
op: Operator::BitOr,
..
})) = checker.semantic().current_expression_parent()
{
return;
}
}
// explicit type aliases require some additional checks to avoid false positives
if checker.semantic().in_annotated_type_alias_value()
&& quotes_are_unremovable(checker.semantic(), expr, checker.target_version())
{
return;
}
let range = annotation_expr.range();
let mut diagnostic = checker.report_diagnostic(QuotedTypeAlias, range);
let fix_string = annotation_expr.value.to_string();
let fix_string = if (fix_string.contains('\n') || fix_string.contains('\r'))
&& parenthesized_range(
// Check for parentheses outside the string ("""...""")
annotation_expr.into(),
checker.semantic().current_statement().into(),
checker.source_tokens(),
)
.is_none()
&& parenthesized_range(
// Check for parentheses inside the string """(...)"""
expr.into(),
annotation_expr.into(),
checker.tokens(),
)
.is_none()
{
format!("({fix_string})")
} else {
fix_string
};
let edit = Edit::range_replacement(fix_string, range);
if checker.comment_ranges().intersects(range) {
diagnostic.set_fix(Fix::unsafe_edit(edit));
} else {
diagnostic.set_fix(Fix::safe_edit(edit));
}
}
/// Traverses the type expression and checks if the expression can safely
/// be unquoted
fn quotes_are_unremovable(
semantic: &SemanticModel,
expr: &Expr,
target_version: PythonVersion,
) -> bool {
match expr {
Expr::BinOp(ast::ExprBinOp {
left, right, op, ..
}) => {
match op {
Operator::BitOr => {
if target_version < PythonVersion::PY310 {
return true;
}
quotes_are_unremovable(semantic, left, target_version)
|| quotes_are_unremovable(semantic, right, target_version)
}
// for now we'll treat uses of other operators as unremovable quotes
// since that would make it an invalid type expression anyways. We skip
// walking the nested non-type expressions from `typing.Annotated`, so
// we don't produce false negatives in this branch.
_ => true,
}
}
Expr::Starred(ast::ExprStarred {
value,
ctx: ExprContext::Load,
..
}) => quotes_are_unremovable(semantic, value, target_version),
Expr::Subscript(ast::ExprSubscript { value, slice, .. }) => {
// for subscripts we don't know whether it's safe to do at runtime
// since the operation may only be available at type checking time.
// E.g. stubs only generics.
if !semantic.in_type_checking_block() {
return true;
}
if quotes_are_unremovable(semantic, value, target_version) {
return true;
}
// for `typing.Annotated`, only analyze the first argument, since the rest may
// contain arbitrary expressions.
if let Some(qualified_name) = semantic.resolve_qualified_name(value) {
if semantic.match_typing_qualified_name(&qualified_name, "Annotated") {
if let Expr::Tuple(ast::ExprTuple { elts, .. }) = slice.as_ref() {
return !elts.is_empty()
&& quotes_are_unremovable(semantic, &elts[0], target_version);
}
return false;
}
}
quotes_are_unremovable(semantic, slice, target_version)
}
Expr::Attribute(ast::ExprAttribute { value, .. }) => {
// for attributes we also don't know whether it's safe
if !semantic.in_type_checking_block() {
return true;
}
quotes_are_unremovable(semantic, value, target_version)
}
Expr::List(ast::ExprList { elts, .. }) | Expr::Tuple(ast::ExprTuple { elts, .. }) => {
for elt in elts {
if quotes_are_unremovable(semantic, elt, target_version) {
return true;
}
}
false
}
Expr::Name(name) => {
semantic.resolve_name(name).is_some()
&& semantic
.simulate_runtime_load(name, semantic.in_type_checking_block().into())
.is_none()
}
_ => false,
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_type_checking/rules/typing_only_runtime_import.rs | crates/ruff_linter/src/rules/flake8_type_checking/rules/typing_only_runtime_import.rs | use std::borrow::Cow;
use anyhow::Result;
use rustc_hash::FxHashMap;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::{Binding, Imported, NodeId, Scope};
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::{Checker, DiagnosticGuard};
use crate::codes::Rule;
use crate::fix;
use crate::importer::ImportedMembers;
use crate::rules::flake8_type_checking::helpers::{
TypingReference, filter_contained, quote_annotation,
};
use crate::rules::flake8_type_checking::imports::ImportBinding;
use crate::rules::isort::{ImportSection, ImportType, categorize};
use crate::{Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for first-party imports that are only used for type annotations, but
/// aren't defined in a type-checking block.
///
/// ## Why is this bad?
/// Unused imports add a performance overhead at runtime, and risk creating
/// import cycles. If an import is _only_ used in typing-only contexts, it can
/// instead be imported conditionally under an `if TYPE_CHECKING:` block to
/// minimize runtime overhead.
///
/// If [`lint.flake8-type-checking.quote-annotations`] is set to `true`,
/// annotations will be wrapped in quotes if doing so would enable the
/// corresponding import to be moved into an `if TYPE_CHECKING:` block.
///
/// If a class _requires_ that type annotations be available at runtime (as is
/// the case for Pydantic, SQLAlchemy, and other libraries), consider using
/// the [`lint.flake8-type-checking.runtime-evaluated-base-classes`] and
/// [`lint.flake8-type-checking.runtime-evaluated-decorators`] settings to mark them
/// as such.
///
/// If [`lint.future-annotations`] is set to `true`, `from __future__ import
/// annotations` will be added if doing so would enable an import to be
/// moved into an `if TYPE_CHECKING:` block. This takes precedence over the
/// [`lint.flake8-type-checking.quote-annotations`] setting described above if
/// both settings are enabled.
///
///
/// ## Example
/// ```python
/// from __future__ import annotations
///
/// from . import local_module
///
///
/// def func(sized: local_module.Container) -> int:
/// return len(sized)
/// ```
///
/// Use instead:
/// ```python
/// from __future__ import annotations
///
/// from typing import TYPE_CHECKING
///
/// if TYPE_CHECKING:
/// from . import local_module
///
///
/// def func(sized: local_module.Container) -> int:
/// return len(sized)
/// ```
///
/// ## Options
/// - `lint.flake8-type-checking.quote-annotations`
/// - `lint.flake8-type-checking.runtime-evaluated-base-classes`
/// - `lint.flake8-type-checking.runtime-evaluated-decorators`
/// - `lint.flake8-type-checking.strict`
/// - `lint.typing-modules`
/// - `lint.future-annotations`
///
/// ## References
/// - [PEP 563: Runtime annotation resolution and `TYPE_CHECKING`](https://peps.python.org/pep-0563/#runtime-annotation-resolution-and-type-checking)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.8.0")]
pub(crate) struct TypingOnlyFirstPartyImport {
qualified_name: String,
}
impl Violation for TypingOnlyFirstPartyImport {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
format!(
"Move application import `{}` into a type-checking block",
self.qualified_name
)
}
fn fix_title(&self) -> Option<String> {
Some("Move into type-checking block".to_string())
}
}
/// ## What it does
/// Checks for third-party imports that are only used for type annotations, but
/// aren't defined in a type-checking block.
///
/// ## Why is this bad?
/// Unused imports add a performance overhead at runtime, and risk creating
/// import cycles. If an import is _only_ used in typing-only contexts, it can
/// instead be imported conditionally under an `if TYPE_CHECKING:` block to
/// minimize runtime overhead.
///
/// If [`lint.flake8-type-checking.quote-annotations`] is set to `true`,
/// annotations will be wrapped in quotes if doing so would enable the
/// corresponding import to be moved into an `if TYPE_CHECKING:` block.
///
/// If a class _requires_ that type annotations be available at runtime (as is
/// the case for Pydantic, SQLAlchemy, and other libraries), consider using
/// the [`lint.flake8-type-checking.runtime-evaluated-base-classes`] and
/// [`lint.flake8-type-checking.runtime-evaluated-decorators`] settings to mark them
/// as such.
///
/// If [`lint.future-annotations`] is set to `true`, `from __future__ import
/// annotations` will be added if doing so would enable an import to be
/// moved into an `if TYPE_CHECKING:` block. This takes precedence over the
/// [`lint.flake8-type-checking.quote-annotations`] setting described above if
/// both settings are enabled.
///
/// ## Example
/// ```python
/// from __future__ import annotations
///
/// import pandas as pd
///
///
/// def func(df: pd.DataFrame) -> int:
/// return len(df)
/// ```
///
/// Use instead:
/// ```python
/// from __future__ import annotations
///
/// from typing import TYPE_CHECKING
///
/// if TYPE_CHECKING:
/// import pandas as pd
///
///
/// def func(df: pd.DataFrame) -> int:
/// return len(df)
/// ```
///
/// ## Options
/// - `lint.flake8-type-checking.quote-annotations`
/// - `lint.flake8-type-checking.runtime-evaluated-base-classes`
/// - `lint.flake8-type-checking.runtime-evaluated-decorators`
/// - `lint.flake8-type-checking.strict`
/// - `lint.typing-modules`
/// - `lint.future-annotations`
///
/// ## References
/// - [PEP 563: Runtime annotation resolution and `TYPE_CHECKING`](https://peps.python.org/pep-0563/#runtime-annotation-resolution-and-type-checking)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.8.0")]
pub(crate) struct TypingOnlyThirdPartyImport {
qualified_name: String,
}
impl Violation for TypingOnlyThirdPartyImport {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
format!(
"Move third-party import `{}` into a type-checking block",
self.qualified_name
)
}
fn fix_title(&self) -> Option<String> {
Some("Move into type-checking block".to_string())
}
}
/// ## What it does
/// Checks for standard library imports that are only used for type
/// annotations, but aren't defined in a type-checking block.
///
/// ## Why is this bad?
/// Unused imports add a performance overhead at runtime, and risk creating
/// import cycles. If an import is _only_ used in typing-only contexts, it can
/// instead be imported conditionally under an `if TYPE_CHECKING:` block to
/// minimize runtime overhead.
///
/// If [`lint.flake8-type-checking.quote-annotations`] is set to `true`,
/// annotations will be wrapped in quotes if doing so would enable the
/// corresponding import to be moved into an `if TYPE_CHECKING:` block.
///
/// If a class _requires_ that type annotations be available at runtime (as is
/// the case for Pydantic, SQLAlchemy, and other libraries), consider using
/// the [`lint.flake8-type-checking.runtime-evaluated-base-classes`] and
/// [`lint.flake8-type-checking.runtime-evaluated-decorators`] settings to mark them
/// as such.
///
/// If [`lint.future-annotations`] is set to `true`, `from __future__ import
/// annotations` will be added if doing so would enable an import to be
/// moved into an `if TYPE_CHECKING:` block. This takes precedence over the
/// [`lint.flake8-type-checking.quote-annotations`] setting described above if
/// both settings are enabled.
///
/// ## Example
/// ```python
/// from __future__ import annotations
///
/// from pathlib import Path
///
///
/// def func(path: Path) -> str:
/// return str(path)
/// ```
///
/// Use instead:
/// ```python
/// from __future__ import annotations
///
/// from typing import TYPE_CHECKING
///
/// if TYPE_CHECKING:
/// from pathlib import Path
///
///
/// def func(path: Path) -> str:
/// return str(path)
/// ```
///
/// ## Options
/// - `lint.flake8-type-checking.quote-annotations`
/// - `lint.flake8-type-checking.runtime-evaluated-base-classes`
/// - `lint.flake8-type-checking.runtime-evaluated-decorators`
/// - `lint.flake8-type-checking.strict`
/// - `lint.typing-modules`
/// - `lint.future-annotations`
///
/// ## References
/// - [PEP 563: Runtime annotation resolution and `TYPE_CHECKING`](https://peps.python.org/pep-0563/#runtime-annotation-resolution-and-type-checking)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.8.0")]
pub(crate) struct TypingOnlyStandardLibraryImport {
qualified_name: String,
}
impl Violation for TypingOnlyStandardLibraryImport {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
format!(
"Move standard library import `{}` into a type-checking block",
self.qualified_name
)
}
fn fix_title(&self) -> Option<String> {
Some("Move into type-checking block".to_string())
}
}
/// TC001, TC002, TC003
///
/// Flag imports in `scope` that are only referenced in typing contexts and
/// could therefore be moved into a `TYPE_CHECKING` block. Diagnostics are
/// grouped per import statement and import category so that every flagged
/// member of a statement shares a single fix; suppressed imports are routed
/// separately so their suppression comments aren't reported as unused.
pub(crate) fn typing_only_runtime_import(
    checker: &Checker,
    scope: &Scope,
    runtime_imports: &[&Binding],
) {
    // Collect all typing-only imports by statement and import type.
    let mut errors_by_statement: FxHashMap<(NodeId, ImportType), Vec<ImportBinding>> =
        FxHashMap::default();
    let mut ignores_by_statement: FxHashMap<(NodeId, ImportType), Vec<ImportBinding>> =
        FxHashMap::default();

    for binding_id in scope.binding_ids() {
        let binding = checker.semantic().binding(binding_id);

        // If we can't add a `__future__` import and in un-strict mode, don't flag typing-only
        // imports that are implicitly loaded by way of a valid runtime import.
        if !checker.settings().future_annotations
            && !checker.settings().flake8_type_checking.strict
            && runtime_imports
                .iter()
                .any(|import| is_implicit_import(binding, import))
        {
            continue;
        }

        // Only bindings created by an import statement are candidates.
        let Some(import) = binding.as_any_import() else {
            continue;
        };

        // Skip imports that are never referenced at all.
        let Some(reference_id) = binding.references.first().copied() else {
            continue;
        };

        // Only consider bindings created in a runtime context.
        if !binding.context.is_runtime() {
            continue;
        }

        // Classify how the binding's references use it (runtime vs. typing-only).
        let typing_reference =
            TypingReference::from_references(binding, checker.semantic(), checker.settings());
        let needs_future_import = match typing_reference {
            TypingReference::Runtime => continue,
            // We can only get the `Future` variant if `future_annotations` is
            // enabled, so we can unconditionally set this here.
            TypingReference::Future => true,
            TypingReference::TypingOnly | TypingReference::Quote => false,
        };

        // Skip modules the user has explicitly exempted (including parents).
        let qualified_name = import.qualified_name();
        if is_exempt(
            &qualified_name.to_string(),
            &checker
                .settings()
                .flake8_type_checking
                .exempt_modules
                .iter()
                .map(String::as_str)
                .collect::<Vec<_>>(),
        ) {
            continue;
        }

        let source_name = import.source_name().join(".");

        // Categorize the import, using coarse-grained categorization.
        let import_type = match categorize(
            &source_name,
            qualified_name.is_unresolved_import(),
            &checker.settings().src,
            checker.package(),
            checker.settings().isort.detect_same_package,
            &checker.settings().isort.known_modules,
            checker.target_version(),
            checker.settings().isort.no_sections,
            &checker.settings().isort.section_order,
            &checker.settings().isort.default_section,
        ) {
            ImportSection::Known(ImportType::LocalFolder | ImportType::FirstParty) => {
                ImportType::FirstParty
            }
            ImportSection::Known(ImportType::ThirdParty) | ImportSection::UserDefined(_) => {
                ImportType::ThirdParty
            }
            ImportSection::Known(ImportType::StandardLibrary) => ImportType::StandardLibrary,
            // `__future__` imports must stay where they are.
            ImportSection::Known(ImportType::Future) => {
                continue;
            }
        };

        // Each category maps to its own rule (TC001/TC002/TC003).
        if !checker.is_rule_enabled(rule_for(import_type)) {
            continue;
        }

        let Some(node_id) = binding.source else {
            continue;
        };

        let import = ImportBinding {
            import,
            reference_id,
            binding,
            range: binding.range(),
            parent_range: binding.parent_range(checker.semantic()),
            needs_future_import,
        };

        // Route suppressed imports separately: they still get diagnostics below,
        // so their suppression comments are counted as used.
        if checker.rule_is_ignored(rule_for(import_type), import.start())
            || import.parent_range.is_some_and(|parent_range| {
                checker.rule_is_ignored(rule_for(import_type), parent_range.start())
            })
        {
            ignores_by_statement
                .entry((node_id, import_type))
                .or_default()
                .push(import);
        } else {
            errors_by_statement
                .entry((node_id, import_type))
                .or_default()
                .push(import);
        }
    }

    // Generate a diagnostic for every import, but share a fix across all imports within the same
    // statement (excluding those that are ignored).
    for ((node_id, import_type), imports) in errors_by_statement {
        let fix = fix_imports(checker, node_id, &imports).ok();

        for ImportBinding {
            import,
            range,
            parent_range,
            ..
        } in imports
        {
            let mut diagnostic = diagnostic_for(
                checker,
                import_type,
                import.qualified_name().to_string(),
                range,
            );
            if let Some(range) = parent_range {
                diagnostic.set_parent(range.start());
            }
            if let Some(fix) = fix.as_ref() {
                diagnostic.set_fix(fix.clone());
            }
        }
    }

    // Separately, generate a diagnostic for every _ignored_ import, to ensure that the
    // suppression comments aren't marked as unused.
    for ((_, import_type), imports) in ignores_by_statement {
        for ImportBinding {
            import,
            range,
            parent_range,
            ..
        } in imports
        {
            let mut diagnostic = diagnostic_for(
                checker,
                import_type,
                import.qualified_name().to_string(),
                range,
            );
            if let Some(range) = parent_range {
                diagnostic.set_parent(range.start());
            }
        }
    }
}
/// Map a coarse import category to the [`Rule`] that reports it.
fn rule_for(import_type: ImportType) -> Rule {
    match import_type {
        ImportType::FirstParty => Rule::TypingOnlyFirstPartyImport,
        ImportType::ThirdParty => Rule::TypingOnlyThirdPartyImport,
        ImportType::StandardLibrary => Rule::TypingOnlyStandardLibraryImport,
        _ => unreachable!("Unexpected import type"),
    }
}
/// Report the diagnostic matching the given import category, returning the
/// guard so the caller can attach a parent and/or fix.
fn diagnostic_for<'a, 'b>(
    checker: &'a Checker<'b>,
    import_type: ImportType,
    qualified_name: String,
    range: TextRange,
) -> DiagnosticGuard<'a, 'b> {
    match import_type {
        ImportType::FirstParty => {
            checker.report_diagnostic(TypingOnlyFirstPartyImport { qualified_name }, range)
        }
        ImportType::ThirdParty => {
            checker.report_diagnostic(TypingOnlyThirdPartyImport { qualified_name }, range)
        }
        ImportType::StandardLibrary => {
            checker.report_diagnostic(TypingOnlyStandardLibraryImport { qualified_name }, range)
        }
        _ => unreachable!("Unexpected import type"),
    }
}
/// Return `true` if `this` is implicitly loaded via importing `that`, i.e.
/// both bindings are imports of the same module.
fn is_implicit_import(this: &Binding, that: &Binding) -> bool {
    match (this.as_any_import(), that.as_any_import()) {
        (Some(this_import), Some(that_import)) => {
            this_import.module_name() == that_import.module_name()
        }
        _ => false,
    }
}
/// Return `true` if `name` is exempt from typing-only enforcement.
///
/// A name is exempt if it, or any of its parent modules (`a.b.c` → `a.b` →
/// `a`), appears in `exempt_modules`.
fn is_exempt(name: &str, exempt_modules: &[&str]) -> bool {
    std::iter::successors(Some(name), |current| {
        // Drop the trailing `.segment` to walk up to the parent module.
        let current = *current;
        current.rfind('.').map(|dot| &current[..dot])
    })
    .any(|candidate| exempt_modules.contains(&candidate))
}
/// Generate a [`Fix`] to remove typing-only imports from a runtime context.
///
/// The fix has three parts: (1) remove the flagged members from the original
/// import statement, (2) re-add them inside a `TYPE_CHECKING` block, and
/// (3) either add a `from __future__ import annotations` import or quote any
/// remaining runtime usages of the moved symbols.
fn fix_imports(checker: &Checker, node_id: NodeId, imports: &[ImportBinding]) -> Result<Fix> {
    let statement = checker.semantic().statement(node_id);
    let parent = checker.semantic().parent_statement(node_id);

    // The member names of the flagged imports within this statement.
    let member_names: Vec<Cow<'_, str>> = imports
        .iter()
        .map(|ImportBinding { import, .. }| import)
        .map(Imported::member_name)
        .collect();

    // Find the first reference across all imports.
    let at = imports
        .iter()
        .map(|ImportBinding { reference_id, .. }| {
            checker.semantic().reference(*reference_id).start()
        })
        .min()
        .expect("Expected at least one import");

    // Whether any moved import requires a `__future__` annotations import.
    let add_future_import = imports.iter().any(|binding| binding.needs_future_import);

    // Step 1) Remove the import.
    let remove_import_edit = fix::edits::remove_unused_imports(
        member_names.iter().map(AsRef::as_ref),
        statement,
        parent,
        checker.locator(),
        checker.stylist(),
        checker.indexer(),
    )?;

    // Step 2) Add the import to a `TYPE_CHECKING` block.
    let (type_checking_edit, add_import_edit) = checker
        .importer()
        .typing_import_edit(
            &ImportedMembers {
                statement,
                names: member_names.iter().map(AsRef::as_ref).collect(),
            },
            at,
            checker.semantic(),
        )?
        .into_edits();

    // Step 3) Either add a `__future__` import or quote any runtime usages of the referenced
    // symbol.
    let fix = if add_future_import {
        let future_import = checker.importer().add_future_import();
        // The order here is very important. We first need to add the `__future__` import, if
        // needed, since it's a syntax error to come later. Then `type_checking_edit` imports
        // `TYPE_CHECKING`, if available. Then we can add and/or remove existing imports.
        Fix::unsafe_edits(
            future_import,
            std::iter::once(type_checking_edit)
                .chain(add_import_edit)
                .chain(std::iter::once(remove_import_edit)),
        )
    } else {
        // Quote (stringify) any references that still occur in a runtime
        // context, since the symbol will no longer be bound at runtime.
        let quote_reference_edits = filter_contained(
            imports
                .iter()
                .flat_map(|ImportBinding { binding, .. }| {
                    binding.references.iter().filter_map(|reference_id| {
                        let reference = checker.semantic().reference(*reference_id);
                        if reference.in_runtime_context() {
                            Some(quote_annotation(
                                reference.expression_id()?,
                                checker.semantic(),
                                checker.stylist(),
                                checker.locator(),
                                checker.default_string_flags(),
                            ))
                        } else {
                            None
                        }
                    })
                })
                .collect::<Vec<_>>(),
        );
        Fix::unsafe_edits(
            type_checking_edit,
            add_import_edit
                .into_iter()
                .chain(std::iter::once(remove_import_edit))
                .chain(quote_reference_edits),
        )
    };

    // Scope the fix to the parent statement's isolation group.
    Ok(fix.isolate(Checker::isolation(
        checker.semantic().parent_statement_id(node_id),
    )))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_logging/helpers.rs | crates/ruff_linter/src/rules/flake8_logging/helpers.rs | use ruff_python_ast::{Stmt, StmtTry};
use ruff_python_semantic::SemanticModel;
use ruff_text_size::{Ranged, TextSize};
/// Return `true` if `offset` does not fall within any `except` handler of the
/// enclosing `try` statements, searching outward but stopping at the nearest
/// enclosing function definition.
pub(super) fn outside_handlers(offset: TextSize, semantic: &SemanticModel) -> bool {
    for stmt in semantic.current_statements() {
        match stmt {
            // Don't look past the current function boundary.
            Stmt::FunctionDef(_) => break,
            Stmt::Try(StmtTry { handlers, .. }) => {
                let in_handler = handlers
                    .iter()
                    .any(|handler| handler.range().contains(offset));
                if in_handler {
                    return false;
                }
            }
            _ => {}
        }
    }
    true
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_logging/mod.rs | crates/ruff_linter/src/rules/flake8_logging/mod.rs | //! Rules from [flake8-logging](https://pypi.org/project/flake8-logging/).
mod helpers;
pub(crate) mod rules;
#[cfg(test)]
mod tests {
    use std::path::Path;

    use anyhow::Result;
    use test_case::test_case;

    use crate::assert_diagnostics;
    use crate::registry::Rule;
    use crate::settings::LinterSettings;
    use crate::test::test_path;

    // Each case lints one fixture file with a single rule enabled and
    // compares the resulting diagnostics against a stored snapshot.
    #[test_case(Rule::DirectLoggerInstantiation, Path::new("LOG001.py"))]
    #[test_case(Rule::InvalidGetLoggerArgument, Path::new("LOG002.py"))]
    #[test_case(Rule::ExceptionWithoutExcInfo, Path::new("LOG007.py"))]
    #[test_case(Rule::UndocumentedWarn, Path::new("LOG009.py"))]
    #[test_case(Rule::LogExceptionOutsideExceptHandler, Path::new("LOG004_0.py"))]
    #[test_case(Rule::LogExceptionOutsideExceptHandler, Path::new("LOG004_1.py"))]
    #[test_case(Rule::ExcInfoOutsideExceptHandler, Path::new("LOG014_0.py"))]
    #[test_case(Rule::ExcInfoOutsideExceptHandler, Path::new("LOG014_1.py"))]
    #[test_case(Rule::RootLoggerCall, Path::new("LOG015.py"))]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        // Snapshot name: "<rule code>_<fixture file name>".
        let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("flake8_logging").join(path).as_path(),
            &LinterSettings::for_rule(rule_code),
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_logging/rules/invalid_get_logger_argument.rs | crates/ruff_linter/src/rules/flake8_logging/rules/invalid_get_logger_argument.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, Expr};
use ruff_python_semantic::Modules;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for any usage of `__cached__` and `__file__` as an argument to
/// `logging.getLogger()`.
///
/// ## Why is this bad?
/// The [logging documentation] recommends this pattern:
///
/// ```python
/// logging.getLogger(__name__)
/// ```
///
/// Here, `__name__` is the fully qualified module name, such as `foo.bar`,
/// which is the intended format for logger names.
///
/// This rule detects probably-mistaken usage of similar module-level dunder constants:
///
/// * `__cached__` - the pathname of the module's compiled version, such as `foo/__pycache__/bar.cpython-311.pyc`.
/// * `__file__` - the pathname of the module, such as `foo/bar.py`.
///
/// ## Example
/// ```python
/// import logging
///
/// logger = logging.getLogger(__file__)
/// ```
///
/// Use instead:
/// ```python
/// import logging
///
/// logger = logging.getLogger(__name__)
/// ```
///
/// ## Fix safety
/// This fix is always unsafe, as changing the arguments to `getLogger` can change the
/// received logger object, and thus program behavior.
///
/// [logging documentation]: https://docs.python.org/3/library/logging.html#logger-objects
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.2.0")]
pub(crate) struct InvalidGetLoggerArgument;

impl Violation for InvalidGetLoggerArgument {
    // A fix is only offered when the `__name__` builtin is available at the
    // call site (see `invalid_get_logger_argument`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "Use `__name__` with `logging.getLogger()`".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        Some("Replace with `__name__`".to_string())
    }
}
/// LOG002
pub(crate) fn invalid_get_logger_argument(checker: &Checker, call: &ast::ExprCall) {
    let semantic = checker.semantic();
    if !semantic.seen_module(Modules::LOGGING) {
        return;
    }

    // The logger name: the first positional argument, or `name=`.
    let Some(Expr::Name(expr @ ast::ExprName { id, .. })) =
        call.arguments.find_argument_value("name", 0)
    else {
        return;
    };

    // Only flag the module-level dunders, and only when they refer to the
    // builtin (i.e., haven't been shadowed).
    let is_suspect_dunder = matches!(id.as_ref(), "__file__" | "__cached__");
    if !is_suspect_dunder || !semantic.has_builtin_binding(id) {
        return;
    }

    let is_get_logger = semantic
        .resolve_qualified_name(call.func.as_ref())
        .is_some_and(|name| matches!(name.segments(), ["logging", "getLogger"]));
    if !is_get_logger {
        return;
    }

    let mut diagnostic = checker.report_diagnostic(InvalidGetLoggerArgument, expr.range());
    // Offer the `__name__` replacement only if the builtin is available.
    if semantic.has_builtin_binding("__name__") {
        let edit = Edit::range_replacement("__name__".to_string(), expr.range());
        diagnostic.set_fix(Fix::unsafe_edit(edit));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_logging/rules/root_logger_call.rs | crates/ruff_linter/src/rules/flake8_logging/rules/root_logger_call.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::ExprCall;
use ruff_python_semantic::Modules;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for usages of the following `logging` top-level functions:
/// `debug`, `info`, `warn`, `warning`, `error`, `critical`, `log`, `exception`.
///
/// ## Why is this bad?
/// Using the root logger causes the messages to have no source information,
/// making them less useful for debugging.
///
/// ## Example
/// ```python
/// import logging
///
/// logging.info("Foobar")
/// ```
///
/// Use instead:
/// ```python
/// import logging
///
/// logger = logging.getLogger(__name__)
/// logger.info("Foobar")
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.10.0")]
pub(crate) struct RootLoggerCall {
    /// The name of the `logging` function that was called (e.g. `info`).
    attr: String,
}

impl Violation for RootLoggerCall {
    #[derive_message_formats]
    fn message(&self) -> String {
        format!("`{}()` call on root logger", self.attr)
    }

    fn fix_title(&self) -> Option<String> {
        Some("Use own logger instead".to_string())
    }
}
/// LOG015
pub(crate) fn root_logger_call(checker: &Checker, call: &ExprCall) {
    let semantic = checker.semantic();
    if !semantic.seen_module(Modules::LOGGING) {
        return;
    }

    // Only flag top-level `logging.<method>(...)` calls.
    let Some(qualified_name) = semantic.resolve_qualified_name(&call.func) else {
        return;
    };
    let ["logging", attr] = qualified_name.segments() else {
        return;
    };
    if !is_logger_method_name(attr) {
        return;
    }

    checker.report_diagnostic(
        RootLoggerCall {
            attr: (*attr).to_string(),
        },
        call.range,
    );
}
/// Return `true` if `attr` is one of the `logging` top-level message
/// functions this rule flags.
#[inline]
fn is_logger_method_name(attr: &str) -> bool {
    [
        "debug",
        "info",
        "warn",
        "warning",
        "error",
        "critical",
        "log",
        "exception",
    ]
    .contains(&attr)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_logging/rules/direct_logger_instantiation.rs | crates/ruff_linter/src/rules/flake8_logging/rules/direct_logger_instantiation.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_semantic::Modules;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::importer::ImportRequest;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for direct instantiation of `logging.Logger`, as opposed to using
/// `logging.getLogger()`.
///
/// ## Why is this bad?
/// The [Logger Objects] documentation states that:
///
/// > Note that Loggers should NEVER be instantiated directly, but always
/// > through the module-level function `logging.getLogger(name)`.
///
/// If a logger is directly instantiated, it won't be added to the logger
/// tree, and will bypass all configuration. Messages logged to it will
/// only be sent to the "handler of last resort", skipping any filtering
/// or formatting.
///
/// ## Example
/// ```python
/// import logging
///
/// logger = logging.Logger(__name__)
/// ```
///
/// Use instead:
/// ```python
/// import logging
///
/// logger = logging.getLogger(__name__)
/// ```
///
/// ## Fix safety
/// This fix is always unsafe, as changing from `Logger` to `getLogger`
/// changes program behavior by adding the logger to the logging tree.
///
/// [Logger Objects]: https://docs.python.org/3/library/logging.html#logger-objects
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.2.0")]
pub(crate) struct DirectLoggerInstantiation;

impl Violation for DirectLoggerInstantiation {
    // A fix is only offered when the `getLogger` symbol can be imported or
    // reused (see `direct_logger_instantiation`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "Use `logging.getLogger()` to instantiate loggers".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        Some("Replace with `logging.getLogger()`".to_string())
    }
}
/// LOG001
pub(crate) fn direct_logger_instantiation(checker: &Checker, call: &ast::ExprCall) {
    if !checker.semantic().seen_module(Modules::LOGGING) {
        return;
    }

    let is_logger_class = checker
        .semantic()
        .resolve_qualified_name(call.func.as_ref())
        .is_some_and(|name| matches!(name.segments(), ["logging", "Logger"]));
    if !is_logger_class {
        return;
    }

    let mut diagnostic = checker.report_diagnostic(DirectLoggerInstantiation, call.func.range());
    diagnostic.try_set_fix(|| {
        // Rewrite `logging.Logger(...)` to `logging.getLogger(...)`,
        // importing (or reusing) the `getLogger` symbol as needed.
        let (import_edit, binding) = checker.importer().get_or_import_symbol(
            &ImportRequest::import("logging", "getLogger"),
            call.func.start(),
            checker.semantic(),
        )?;
        Ok(Fix::unsafe_edits(
            import_edit,
            [Edit::range_replacement(binding, call.func.range())],
        ))
    });
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_logging/rules/undocumented_warn.rs | crates/ruff_linter/src/rules/flake8_logging/rules/undocumented_warn.rs | use ruff_python_ast::Expr;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::Modules;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::importer::ImportRequest;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for uses of `logging.WARN`.
///
/// ## Why is this bad?
/// The `logging.WARN` constant is an undocumented alias for `logging.WARNING`.
///
/// Although it’s not explicitly deprecated, `logging.WARN` is not mentioned
/// in the `logging` documentation. Prefer `logging.WARNING` instead.
///
/// ## Example
/// ```python
/// import logging
///
///
/// logging.basicConfig(level=logging.WARN)
/// ```
///
/// Use instead:
/// ```python
/// import logging
///
///
/// logging.basicConfig(level=logging.WARNING)
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.2.0")]
pub(crate) struct UndocumentedWarn;

impl Violation for UndocumentedWarn {
    // A fix is only offered when the `WARNING` symbol can be imported or
    // reused (see `undocumented_warn`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "Use of undocumented `logging.WARN` constant".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        Some("Replace `logging.WARN` with `logging.WARNING`".to_string())
    }
}
/// LOG009
pub(crate) fn undocumented_warn(checker: &Checker, expr: &Expr) {
    if !checker.semantic().seen_module(Modules::LOGGING) {
        return;
    }

    let is_warn = checker
        .semantic()
        .resolve_qualified_name(expr)
        .is_some_and(|name| matches!(name.segments(), ["logging", "WARN"]));
    if !is_warn {
        return;
    }

    let mut diagnostic = checker.report_diagnostic(UndocumentedWarn, expr.range());
    diagnostic.try_set_fix(|| {
        // Replace the reference with `WARNING`, importing (or reusing) the
        // symbol as needed. The replacement is offered as a safe fix.
        let (import_edit, binding) = checker.importer().get_or_import_symbol(
            &ImportRequest::import("logging", "WARNING"),
            expr.start(),
            checker.semantic(),
        )?;
        Ok(Fix::safe_edits(
            import_edit,
            [Edit::range_replacement(binding, expr.range())],
        ))
    });
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_logging/rules/exc_info_outside_except_handler.rs | crates/ruff_linter/src/rules/flake8_logging/rules/exc_info_outside_except_handler.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::Truthiness;
use ruff_python_ast::{Expr, ExprAttribute, ExprCall};
use ruff_python_semantic::analyze::logging::is_logger_candidate;
use ruff_python_stdlib::logging::LoggingLevel;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix::edits::{Parentheses, remove_argument};
use crate::rules::flake8_logging::helpers::outside_handlers;
use crate::{Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for logging calls with `exc_info=` outside exception handlers.
///
/// ## Why is this bad?
/// Using `exc_info=True` outside of an exception handler
/// attaches `None` as the exception information, leading to confusing messages:
///
/// ```pycon
/// >>> logging.warning("Uh oh", exc_info=True)
/// WARNING:root:Uh oh
/// NoneType: None
/// ```
///
/// ## Example
///
/// ```python
/// import logging
///
///
/// logging.warning("Foobar", exc_info=True)
/// ```
///
/// Use instead:
///
/// ```python
/// import logging
///
///
/// logging.warning("Foobar")
/// ```
///
/// ## Fix safety
/// The fix is always marked as unsafe, as it changes runtime behavior.
///
/// ## Options
///
/// - `lint.logger-objects`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.12.0")]
pub(crate) struct ExcInfoOutsideExceptHandler;

impl Violation for ExcInfoOutsideExceptHandler {
    // A fix is only offered when the `exc_info` argument can be removed from
    // the call (see `exc_info_outside_except_handler`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "`exc_info=` outside exception handlers".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        Some("Remove `exc_info=`".to_string())
    }
}
/// LOG014
pub(crate) fn exc_info_outside_except_handler(checker: &Checker, call: &ExprCall) {
    let semantic = checker.semantic();

    // Only flag calls that occur outside any enclosing `except` handler.
    if !outside_handlers(call.start(), semantic) {
        return;
    }

    // The call must be a logging call: either a method on a logger object
    // (e.g. `logger.warning(...)`) or a `logging` module-level function.
    match &*call.func {
        func @ Expr::Attribute(ExprAttribute { attr, .. }) => {
            if !is_logger_candidate(func, semantic, &checker.settings().logger_objects) {
                return;
            }
            // The attribute must name a recognized logging level.
            if LoggingLevel::from_attribute(attr).is_none() {
                return;
            }
        }
        func @ Expr::Name(_) => {
            let Some(qualified_name) = semantic.resolve_qualified_name(func) else {
                return;
            };
            let ["logging", attr] = qualified_name.segments() else {
                return;
            };
            // In addition to the level functions, `logging.log(...)` is
            // also accepted here.
            if *attr != "log" && LoggingLevel::from_attribute(attr).is_none() {
                return;
            }
        }
        _ => return,
    }

    // Only flag literal `exc_info` values that are statically truthy.
    let Some(exc_info) = call.arguments.find_keyword("exc_info") else {
        return;
    };
    if !exc_info.value.is_literal_expr() {
        return;
    }
    let truthiness = Truthiness::from_expr(&exc_info.value, |id| semantic.has_builtin_binding(id));
    if truthiness.into_bool() != Some(true) {
        return;
    }

    let arguments = &call.arguments;
    let mut diagnostic = checker.report_diagnostic(ExcInfoOutsideExceptHandler, exc_info.range);
    diagnostic.try_set_fix(|| {
        // Remove the `exc_info=...` keyword argument entirely.
        let edit = remove_argument(
            exc_info,
            arguments,
            Parentheses::Preserve,
            checker.source(),
            checker.tokens(),
        )?;
        Ok(Fix::unsafe_edit(edit))
    });
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_logging/rules/mod.rs | crates/ruff_linter/src/rules/flake8_logging/rules/mod.rs | pub(crate) use direct_logger_instantiation::*;
pub(crate) use exc_info_outside_except_handler::*;
pub(crate) use exception_without_exc_info::*;
pub(crate) use invalid_get_logger_argument::*;
pub(crate) use log_exception_outside_except_handler::*;
pub(crate) use root_logger_call::*;
pub(crate) use undocumented_warn::*;
mod direct_logger_instantiation;
mod exc_info_outside_except_handler;
mod exception_without_exc_info;
mod invalid_get_logger_argument;
mod log_exception_outside_except_handler;
mod root_logger_call;
mod undocumented_warn;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_logging/rules/log_exception_outside_except_handler.rs | crates/ruff_linter/src/rules/flake8_logging/rules/log_exception_outside_except_handler.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{Expr, ExprAttribute, ExprCall};
use ruff_python_semantic::analyze::logging;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::rules::flake8_logging::helpers::outside_handlers;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for `.exception()` logging calls outside of exception handlers.
///
/// ## Why is this bad?
/// [The documentation] states:
/// > This function should only be called from an exception handler.
///
/// Calling `.exception()` outside of an exception handler
/// attaches `None` as exception information, leading to confusing messages:
///
/// ```pycon
/// >>> logging.exception("example")
/// ERROR:root:example
/// NoneType: None
/// ```
///
/// ## Example
///
/// ```python
/// import logging
///
/// logging.exception("Foobar")
/// ```
///
/// Use instead:
///
/// ```python
/// import logging
///
/// logging.error("Foobar")
/// ```
///
/// ## Fix safety
/// The fix, if available, will always be marked as unsafe, as it changes runtime behavior.
///
/// ## Options
///
/// - `lint.logger-objects`
///
/// [The documentation]: https://docs.python.org/3/library/logging.html#logging.exception
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.9.5")]
pub(crate) struct LogExceptionOutsideExceptHandler;

impl Violation for LogExceptionOutsideExceptHandler {
    // A fix is only offered for logger-object calls, not for the
    // module-level `logging.exception(...)` form
    // (see `log_exception_outside_except_handler`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "`.exception()` call outside exception handlers".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        Some("Replace with `.error()`".to_string())
    }
}
/// LOG004
pub(crate) fn log_exception_outside_except_handler(checker: &Checker, call: &ExprCall) {
    let semantic = checker.semantic();

    // Only flag `.exception()` calls made outside any `except` handler.
    if !outside_handlers(call.start(), semantic) {
        return;
    }

    // Determine whether the call is flaggable and, for logger-object calls,
    // build the `exception` -> `error` replacement fix. No fix is offered
    // for the module-level `logging.exception(...)` form.
    let fix = match &*call.func {
        func @ Expr::Attribute(ExprAttribute { attr, .. }) => {
            let logger_objects = &checker.settings().logger_objects;
            if !logging::is_logger_candidate(func, semantic, logger_objects) {
                return;
            }
            if attr != "exception" {
                return;
            }
            let edit = Edit::range_replacement("error".to_string(), attr.range);
            Some(Fix::unsafe_edit(edit))
        }
        func @ Expr::Name(_) => {
            let Some(qualified_name) = semantic.resolve_qualified_name(func) else {
                return;
            };
            if !matches!(qualified_name.segments(), ["logging", "exception"]) {
                return;
            }
            None
        }
        _ => return,
    };

    let mut diagnostic = checker.report_diagnostic(LogExceptionOutsideExceptHandler, call.range);
    if let Some(fix) = fix {
        diagnostic.set_fix(fix);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_logging/rules/exception_without_exc_info.rs | crates/ruff_linter/src/rules/flake8_logging/rules/exception_without_exc_info.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::Truthiness;
use ruff_python_ast::{self as ast, Expr, ExprCall};
use ruff_python_semantic::analyze::logging;
use ruff_python_stdlib::logging::LoggingLevel;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for uses of `logging.exception()` with `exc_info` set to `False`.
///
/// ## Why is this bad?
/// The `logging.exception()` method captures the exception automatically, but
/// accepts an optional `exc_info` argument to override this behavior. Setting
/// `exc_info` to `False` disables the automatic capture of the exception and
/// stack trace.
///
/// Instead of setting `exc_info` to `False`, prefer `logging.error()`, which
/// has equivalent behavior to `logging.exception()` with `exc_info` set to
/// `False`, but is clearer in intent.
///
/// ## Example
/// ```python
/// logging.exception("...", exc_info=False)
/// ```
///
/// Use instead:
/// ```python
/// logging.error("...")
/// ```
///
/// ## Options
///
/// - `lint.logger-objects`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.2.0")]
pub(crate) struct ExceptionWithoutExcInfo;

// This violation offers no autofix: only a message is emitted.
impl Violation for ExceptionWithoutExcInfo {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Use of `logging.exception` with falsy `exc_info`".to_string()
    }
}
/// LOG007
pub(crate) fn exception_without_exc_info(checker: &Checker, call: &ExprCall) {
    // The call must be an `exception()` call: either a method on a logger
    // object, or the module-level `logging.exception(...)`.
    match call.func.as_ref() {
        Expr::Attribute(ast::ExprAttribute { attr, .. }) => {
            if !matches!(
                LoggingLevel::from_attribute(attr.as_str()),
                Some(LoggingLevel::Exception)
            ) {
                return;
            }
            if !logging::is_logger_candidate(
                &call.func,
                checker.semantic(),
                &checker.settings().logger_objects,
            ) {
                return;
            }
        }
        Expr::Name(_) => {
            if !checker
                .semantic()
                .resolve_qualified_name(call.func.as_ref())
                .is_some_and(|qualified_name| {
                    matches!(qualified_name.segments(), ["logging", "exception"])
                })
            {
                return;
            }
        }
        _ => return,
    }

    // Flag only when `exc_info` is explicitly set to a statically-falsey value.
    if exc_info_arg_is_falsey(call, checker) {
        checker.report_diagnostic(ExceptionWithoutExcInfo, call.range());
    }
}
/// Return `true` if the call carries an `exc_info=` keyword whose value is
/// statically falsey (e.g. `False`, `0`, `None`).
fn exc_info_arg_is_falsey(call: &ExprCall, checker: &Checker) -> bool {
    let Some(keyword) = call.arguments.find_keyword("exc_info") else {
        return false;
    };
    let truthiness = Truthiness::from_expr(&keyword.value, |id| {
        checker.semantic().has_builtin_binding(id)
    });
    truthiness.into_bool() == Some(false)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_print/mod.rs | crates/ruff_linter/src/rules/flake8_print/mod.rs | //! Rules from [flake8-print](https://pypi.org/project/flake8-print/).
pub(crate) mod rules;
#[cfg(test)]
mod tests {
    use std::path::Path;

    use anyhow::Result;
    use test_case::test_case;

    use crate::registry::Rule;
    use crate::test::test_path;
    use crate::{assert_diagnostics, settings};

    // Each case lints one fixture file with a single rule enabled and
    // compares the resulting diagnostics against a stored snapshot.
    #[test_case(Rule::Print, Path::new("T201.py"))]
    #[test_case(Rule::PPrint, Path::new("T203.py"))]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        // Snapshot name: "<rule code>_<fixture file name>".
        let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("flake8_print").join(path).as_path(),
            &settings::LinterSettings::for_rule(rule_code),
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_print/rules/mod.rs | crates/ruff_linter/src/rules/flake8_print/rules/mod.rs | pub(crate) use print_call::*;
mod print_call;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_print/rules/print_call.rs | crates/ruff_linter/src/rules/flake8_print/rules/print_call.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix::edits::delete_stmt;
use crate::{Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for `print` statements.
///
/// ## Why is this bad?
/// `print` statements used for debugging should be omitted from production
/// code. They can lead to the accidental inclusion of sensitive information in
/// logs, and are not configurable by clients, unlike `logging` statements.
///
/// `print` statements used to produce output as a part of a command-line
/// interface program are not typically a problem.
///
/// ## Example
/// ```python
/// def sum_less_than_four(a, b):
///     print(f"Calling sum_less_than_four")
///     return a + b < 4
/// ```
///
/// The automatic fix will remove the print statement entirely:
///
/// ```python
/// def sum_less_than_four(a, b):
///     return a + b < 4
/// ```
///
/// To keep the line for logging purposes, instead use something like:
///
/// ```python
/// import logging
///
/// logging.basicConfig(level=logging.INFO)
/// logger = logging.getLogger(__name__)
///
///
/// def sum_less_than_four(a, b):
///     logger.debug("Calling sum_less_than_four")
///     return a + b < 4
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe, as it will remove `print` statements
/// that are used beyond debugging purposes.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.57")]
pub(crate) struct Print;

impl Violation for Print {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "`print` found".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        Some("Remove `print`".to_string())
    }
}
/// ## What it does
/// Checks for `pprint` statements.
///
/// ## Why is this bad?
/// Like `print` statements, `pprint` statements used for debugging should
/// be omitted from production code. They can lead the accidental inclusion
/// of sensitive information in logs, and are not configurable by clients,
/// unlike `logging` statements.
///
/// `pprint` statements used to produce output as a part of a command-line
/// interface program are not typically a problem.
///
/// ## Example
/// ```python
/// import pprint
///
///
/// def merge_dicts(dict_a, dict_b):
/// dict_c = {**dict_a, **dict_b}
/// pprint.pprint(dict_c)
/// return dict_c
/// ```
///
/// Use instead:
/// ```python
/// def merge_dicts(dict_a, dict_b):
/// dict_c = {**dict_a, **dict_b}
/// return dict_c
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe, as it will remove `pprint` statements
/// that are used beyond debugging purposes.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.57")]
pub(crate) struct PPrint;
impl Violation for PPrint {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
"`pprint` found".to_string()
}
fn fix_title(&self) -> Option<String> {
Some("Remove `pprint`".to_string())
}
}
/// T201, T203
pub(crate) fn print_call(checker: &Checker, call: &ast::ExprCall) {
let semantic = checker.semantic();
let Some(qualified_name) = semantic.resolve_qualified_name(&call.func) else {
return;
};
let diagnostic = match qualified_name.segments() {
["" | "builtins", "print"] => {
// If the print call has a `file=` argument (that isn't `None`, `"sys.stdout"`,
// or `"sys.stderr"`), don't trigger T201.
if let Some(keyword) = call.arguments.find_keyword("file") {
if !keyword.value.is_none_literal_expr() {
if semantic.resolve_qualified_name(&keyword.value).is_none_or(
|qualified_name| {
!matches!(qualified_name.segments(), ["sys", "stdout" | "stderr"])
},
) {
return;
}
}
}
checker.report_diagnostic_if_enabled(Print, call.func.range())
}
["pprint", "pprint"] => checker.report_diagnostic_if_enabled(PPrint, call.func.range()),
_ => return,
};
let Some(mut diagnostic) = diagnostic else {
return;
};
// Remove the `print`, if it's a standalone statement.
if semantic.current_expression_parent().is_none() {
let statement = semantic.current_statement();
let parent = semantic.current_statement_parent();
let edit = delete_stmt(statement, parent, checker.locator(), checker.indexer());
diagnostic.set_fix(
Fix::unsafe_edit(edit)
.isolate(Checker::isolation(semantic.current_statement_parent_id())),
);
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/settings.rs | crates/ruff_linter/src/rules/pydocstyle/settings.rs | //! Settings for the `pydocstyle` plugin.
use std::collections::BTreeSet;
use std::fmt;
use std::iter::FusedIterator;
use serde::{Deserialize, Serialize};
use ruff_macros::CacheKey;
use ruff_python_ast::name::QualifiedName;
use crate::display_settings;
use crate::registry::Rule;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, CacheKey)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum Convention {
/// Use Google-style docstrings.
Google,
/// Use NumPy-style docstrings.
Numpy,
/// Use PEP257-style docstrings.
Pep257,
}
impl Convention {
pub const fn rules_to_be_ignored(self) -> &'static [Rule] {
match self {
Convention::Google => &[
Rule::IncorrectBlankLineBeforeClass,
Rule::IncorrectBlankLineAfterClass,
Rule::MultiLineSummarySecondLine,
Rule::OverindentedSectionUnderline,
Rule::MissingTrailingPeriod,
Rule::NonImperativeMood,
Rule::DocstringStartsWithThis,
Rule::MissingNewLineAfterSectionName,
Rule::MissingDashedUnderlineAfterSection,
Rule::MissingSectionUnderlineAfterName,
Rule::MismatchedSectionUnderlineLength,
Rule::MissingBlankLineAfterLastSection,
],
Convention::Numpy => &[
Rule::UndocumentedPublicInit,
Rule::IncorrectBlankLineBeforeClass,
Rule::MultiLineSummaryFirstLine,
Rule::MultiLineSummarySecondLine,
Rule::SignatureInDocstring,
Rule::MissingBlankLineAfterLastSection,
Rule::MissingTerminalPunctuation,
Rule::MissingSectionNameColon,
Rule::UndocumentedParam,
],
Convention::Pep257 => &[
Rule::IncorrectBlankLineBeforeClass,
Rule::MultiLineSummaryFirstLine,
Rule::MultiLineSummarySecondLine,
Rule::OverindentedSection,
Rule::OverindentedSectionUnderline,
Rule::DocstringStartsWithThis,
Rule::NonCapitalizedSectionName,
Rule::MissingNewLineAfterSectionName,
Rule::MissingDashedUnderlineAfterSection,
Rule::MissingSectionUnderlineAfterName,
Rule::MismatchedSectionUnderlineLength,
Rule::NoBlankLineAfterSection,
Rule::NoBlankLineBeforeSection,
Rule::MissingBlankLineAfterLastSection,
Rule::MissingTerminalPunctuation,
Rule::MissingSectionNameColon,
Rule::UndocumentedParam,
],
}
}
}
impl fmt::Display for Convention {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Google => write!(f, "google"),
Self::Numpy => write!(f, "numpy"),
Self::Pep257 => write!(f, "pep257"),
}
}
}
#[derive(Debug, Clone, Default, CacheKey)]
pub struct Settings {
pub convention: Option<Convention>,
pub ignore_decorators: BTreeSet<String>,
pub property_decorators: BTreeSet<String>,
pub ignore_var_parameters: bool,
}
impl Settings {
pub fn convention(&self) -> Option<Convention> {
self.convention
}
pub fn ignore_decorators(&self) -> DecoratorIterator<'_> {
DecoratorIterator::new(&self.ignore_decorators)
}
pub fn property_decorators(&self) -> DecoratorIterator<'_> {
DecoratorIterator::new(&self.property_decorators)
}
pub fn ignore_var_parameters(&self) -> bool {
self.ignore_var_parameters
}
}
impl fmt::Display for Settings {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
display_settings! {
formatter = f,
namespace = "linter.pydocstyle",
fields = [
self.convention | optional,
self.ignore_decorators | set,
self.property_decorators | set,
self.ignore_var_parameters
]
}
Ok(())
}
}
#[derive(Debug, Clone)]
pub struct DecoratorIterator<'a> {
decorators: std::collections::btree_set::Iter<'a, String>,
}
impl<'a> DecoratorIterator<'a> {
fn new(decorators: &'a BTreeSet<String>) -> Self {
Self {
decorators: decorators.iter(),
}
}
}
impl<'a> Iterator for DecoratorIterator<'a> {
type Item = QualifiedName<'a>;
fn next(&mut self) -> Option<QualifiedName<'a>> {
self.decorators
.next()
.map(|deco| QualifiedName::from_dotted_name(deco))
}
}
impl FusedIterator for DecoratorIterator<'_> {}
impl ExactSizeIterator for DecoratorIterator<'_> {
fn len(&self) -> usize {
self.decorators.len()
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/helpers.rs | crates/ruff_linter/src/rules/pydocstyle/helpers.rs | use std::cmp::Ordering;
use ruff_python_ast::helpers::map_callable;
use ruff_python_semantic::{Definition, SemanticModel};
use ruff_python_trivia::Cursor;
use ruff_source_file::{Line, UniversalNewlines};
use ruff_text_size::{TextRange, TextSize};
use crate::docstrings::Docstring;
use crate::docstrings::sections::{SectionContexts, SectionKind};
use crate::docstrings::styles::SectionStyle;
use crate::rules::pydocstyle::settings::{Convention, Settings};
/// Return the index of the first logical line in a string.
pub(super) fn logical_line(content: &str) -> Option<usize> {
// Find the first logical line.
let mut logical_line = None;
for (i, line) in content.universal_newlines().enumerate() {
let trimmed = line.trim();
if trimmed.is_empty() || trimmed.chars().all(|c| matches!(c, '-' | '~' | '=' | '#')) {
// Empty line, or underline. If this is the line _after_ the first logical line, stop.
if logical_line.is_some() {
break;
}
} else {
// Non-empty line. Store the index.
logical_line = Some(i);
}
}
logical_line
}
/// Normalize a word by removing all non-alphanumeric characters
/// and converting it to lowercase.
pub(super) fn normalize_word(first_word: &str) -> String {
first_word
.replace(|c: char| !c.is_alphanumeric(), "")
.to_lowercase()
}
/// Return true if a line ends with an odd number of backslashes (i.e., ends with an escape).
pub(super) fn ends_with_backslash(line: &str) -> bool {
line.chars().rev().take_while(|c| *c == '\\').count() % 2 == 1
}
/// Check decorator list to see if function should be ignored.
pub(crate) fn should_ignore_definition(
definition: &Definition,
settings: &Settings,
semantic: &SemanticModel,
) -> bool {
let ignore_decorators = settings.ignore_decorators();
if ignore_decorators.len() == 0 {
return false;
}
let Some(function) = definition.as_function_def() else {
return false;
};
function.decorator_list.iter().any(|decorator| {
semantic
.resolve_qualified_name(map_callable(&decorator.expression))
.is_some_and(|qualified_name| {
ignore_decorators
.clone()
.any(|decorator| decorator == qualified_name)
})
})
}
pub(crate) fn get_section_contexts<'a>(
docstring: &'a Docstring<'a>,
convention: Option<Convention>,
) -> SectionContexts<'a> {
match convention {
Some(Convention::Google) => {
SectionContexts::from_docstring(docstring, SectionStyle::Google)
}
Some(Convention::Numpy) => SectionContexts::from_docstring(docstring, SectionStyle::Numpy),
Some(Convention::Pep257) | None => {
// There are some overlapping section names, between the Google and NumPy conventions
// (e.g., "Returns", "Raises"). Break ties by checking for the presence of some of the
// section names that are unique to each convention.
// If the docstring contains `Parameters:` or `Other Parameters:`, use the NumPy
// convention.
let numpy_sections = SectionContexts::from_docstring(docstring, SectionStyle::Numpy);
if numpy_sections.iter().any(|context| {
matches!(
context.kind(),
SectionKind::Parameters
| SectionKind::OtherParams
| SectionKind::OtherParameters
)
}) {
return numpy_sections;
}
// If the docstring contains any argument specifier, use the Google convention.
let google_sections = SectionContexts::from_docstring(docstring, SectionStyle::Google);
if google_sections.iter().any(|context| {
matches!(
context.kind(),
SectionKind::Args
| SectionKind::Arguments
| SectionKind::KeywordArgs
| SectionKind::KeywordArguments
| SectionKind::OtherArgs
| SectionKind::OtherArguments
)
}) {
return google_sections;
}
// Otherwise, If one convention matched more sections, return that...
match google_sections.len().cmp(&numpy_sections.len()) {
Ordering::Greater => return google_sections,
Ordering::Less => return numpy_sections,
Ordering::Equal => {}
}
// 0 sections of either convention? Default to numpy
if google_sections.len() == 0 {
return numpy_sections;
}
for section in &google_sections {
// If any section has something that could be an underline
// on the following line, assume Numpy.
// If it *doesn't* have an underline and it *does* have a colon
// at the end of a section name, assume Google.
if let Some(following_line) = section.following_lines().next() {
if find_underline(&following_line, '-').is_some() {
return numpy_sections;
}
}
if section.summary_after_section_name().starts_with(':') {
return google_sections;
}
}
// If all else fails, default to numpy
numpy_sections
}
}
}
/// Returns the [`TextRange`] of the underline, if a line consists of only dashes.
pub(super) fn find_underline(line: &Line, dash: char) -> Option<TextRange> {
let mut cursor = Cursor::new(line.as_str());
// Eat leading whitespace.
cursor.eat_while(char::is_whitespace);
// Determine the start of the dashes.
let offset = cursor.token_len();
// Consume the dashes.
cursor.start_token();
cursor.eat_while(|c| c == dash);
// Determine the end of the dashes.
let len = cursor.token_len();
// If there are no dashes, return None.
if len == TextSize::new(0) {
return None;
}
// Eat trailing whitespace.
cursor.eat_while(char::is_whitespace);
// If there are any characters after the dashes, return None.
if !cursor.is_eof() {
return None;
}
Some(TextRange::at(offset, len) + line.start())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/mod.rs | crates/ruff_linter/src/rules/pydocstyle/mod.rs | //! Rules from [pydocstyle](https://pypi.org/project/pydocstyle/).
pub(crate) mod helpers;
pub(crate) mod rules;
pub mod settings;
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use crate::registry::Rule;
use super::settings::{Convention, Settings};
use crate::test::test_path;
use crate::{assert_diagnostics, settings};
#[test_case(Rule::MissingBlankLineAfterLastSection, Path::new("sections.py"))]
#[test_case(Rule::NoBlankLineAfterSection, Path::new("sections.py"))]
#[test_case(Rule::MissingBlankLineAfterLastSection, Path::new("D413.py"))]
#[test_case(Rule::MissingBlankLineAfterSummary, Path::new("D.py"))]
#[test_case(Rule::NoBlankLineBeforeSection, Path::new("sections.py"))]
#[test_case(Rule::NonCapitalizedSectionName, Path::new("sections.py"))]
#[test_case(Rule::MissingDashedUnderlineAfterSection, Path::new("sections.py"))]
#[test_case(Rule::UndocumentedParam, Path::new("canonical_google_examples.py"))]
#[test_case(Rule::UndocumentedParam, Path::new("canonical_numpy_examples.py"))]
#[test_case(Rule::UndocumentedParam, Path::new("sections.py"))]
#[test_case(Rule::MissingTrailingPeriod, Path::new("D.py"))]
#[test_case(Rule::MissingTrailingPeriod, Path::new("D400.py"))]
#[test_case(Rule::MissingTrailingPeriod, Path::new("D400_415.py"))]
#[test_case(Rule::MissingTerminalPunctuation, Path::new("D.py"))]
#[test_case(Rule::MissingTerminalPunctuation, Path::new("D400_415.py"))]
#[test_case(Rule::FirstWordUncapitalized, Path::new("D.py"))]
#[test_case(Rule::FirstWordUncapitalized, Path::new("D403.py"))]
#[test_case(Rule::UnnecessaryMultilineDocstring, Path::new("D.py"))]
#[test_case(Rule::DocstringTabIndentation, Path::new("D.py"))]
#[test_case(Rule::UndocumentedMagicMethod, Path::new("D.py"))]
#[test_case(Rule::MultiLineSummaryFirstLine, Path::new("D.py"))]
#[test_case(Rule::MultiLineSummarySecondLine, Path::new("D.py"))]
#[test_case(Rule::NewLineAfterLastParagraph, Path::new("D.py"))]
#[test_case(Rule::MissingNewLineAfterSectionName, Path::new("sections.py"))]
#[test_case(Rule::BlankLineAfterFunction, Path::new("D.py"))]
#[test_case(Rule::UnnecessaryMultilineDocstring, Path::new("D200.py"))]
#[test_case(Rule::BlankLineAfterFunction, Path::new("D202.py"))]
#[test_case(Rule::BlankLineBeforeClass, Path::new("D.py"))]
#[test_case(Rule::BlankLineBeforeFunction, Path::new("D.py"))]
#[test_case(Rule::BlankLinesBetweenHeaderAndContent, Path::new("sections.py"))]
#[test_case(Rule::BlankLinesBetweenHeaderAndContent, Path::new("sphinx.py"))]
#[test_case(Rule::OverIndentation, Path::new("D.py"))]
#[test_case(Rule::OverIndentation, Path::new("D208.py"))]
#[test_case(Rule::SignatureInDocstring, Path::new("D.py"))]
#[test_case(Rule::SignatureInDocstring, Path::new("D402.py"))]
#[test_case(Rule::SurroundingWhitespace, Path::new("D.py"))]
#[test_case(Rule::DocstringStartsWithThis, Path::new("D.py"))]
#[test_case(Rule::UnderIndentation, Path::new("D.py"))]
#[test_case(Rule::EmptyDocstring, Path::new("D.py"))]
#[test_case(Rule::EmptyDocstringSection, Path::new("sections.py"))]
#[test_case(Rule::NonImperativeMood, Path::new("D401.py"))]
#[test_case(Rule::NoBlankLineAfterSection, Path::new("D410.py"))]
#[test_case(Rule::IncorrectBlankLineAfterClass, Path::new("D.py"))]
#[test_case(Rule::IncorrectBlankLineBeforeClass, Path::new("D.py"))]
#[test_case(Rule::UndocumentedPublicClass, Path::new("D.py"))]
#[test_case(Rule::UndocumentedPublicFunction, Path::new("D.py"))]
#[test_case(Rule::UndocumentedPublicInit, Path::new("D.py"))]
#[test_case(Rule::UndocumentedPublicMethod, Path::new("D.py"))]
#[test_case(Rule::UndocumentedPublicMethod, Path::new("setter.py"))]
#[test_case(Rule::UndocumentedPublicModule, Path::new("D.py"))]
#[test_case(Rule::UndocumentedPublicModule, Path::new("D100.ipynb"))]
#[test_case(
Rule::UndocumentedPublicModule,
Path::new("_unrelated/pkg/D100_pub.py")
)]
#[test_case(
Rule::UndocumentedPublicModule,
Path::new("_unrelated/pkg/_priv/no_D100_priv.py")
)]
#[test_case(
Rule::UndocumentedPublicModule,
Path::new("_unrelated/_no_pkg_priv.py")
)]
#[test_case(Rule::UndocumentedPublicNestedClass, Path::new("D.py"))]
#[test_case(Rule::UndocumentedPublicPackage, Path::new("D.py"))]
#[test_case(Rule::UndocumentedPublicPackage, Path::new("D104/__init__.py"))]
#[test_case(Rule::MissingSectionNameColon, Path::new("D.py"))]
#[test_case(Rule::OverindentedSection, Path::new("sections.py"))]
#[test_case(Rule::OverindentedSection, Path::new("D214_module.py"))]
#[test_case(Rule::OverindentedSectionUnderline, Path::new("D215.py"))]
#[test_case(Rule::MissingSectionUnderlineAfterName, Path::new("sections.py"))]
#[test_case(Rule::MismatchedSectionUnderlineLength, Path::new("sections.py"))]
#[test_case(Rule::OverindentedSectionUnderline, Path::new("sections.py"))]
#[test_case(Rule::OverloadWithDocstring, Path::new("D.py"))]
#[test_case(Rule::EscapeSequenceInDocstring, Path::new("D.py"))]
#[test_case(Rule::EscapeSequenceInDocstring, Path::new("D301.py"))]
#[test_case(Rule::TripleSingleQuotes, Path::new("D.py"))]
#[test_case(Rule::TripleSingleQuotes, Path::new("D300.py"))]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("pydocstyle").join(path).as_path(),
&settings::LinterSettings {
pydocstyle: Settings {
ignore_decorators: ["functools.wraps".to_string()].into_iter().collect(),
property_decorators: ["gi.repository.GObject.Property".to_string()]
.into_iter()
.collect(),
..Settings::default()
},
..settings::LinterSettings::for_rule(rule_code)
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test]
fn bom() -> Result<()> {
let diagnostics = test_path(
Path::new("pydocstyle/bom.py"),
&settings::LinterSettings::for_rule(Rule::TripleSingleQuotes),
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test]
fn d417_unspecified() -> Result<()> {
let diagnostics = test_path(
Path::new("pydocstyle/D417.py"),
&settings::LinterSettings {
// When inferring the convention, we'll see a few false negatives.
// See: https://github.com/PyCQA/pydocstyle/issues/459.
pydocstyle: Settings::default(),
..settings::LinterSettings::for_rule(Rule::UndocumentedParam)
},
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test]
fn d417_unspecified_ignore_var_parameters() -> Result<()> {
let diagnostics = test_path(
Path::new("pydocstyle/D417.py"),
&settings::LinterSettings {
pydocstyle: Settings::default(),
..settings::LinterSettings::for_rule(Rule::UndocumentedParam)
},
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test]
fn d417_google() -> Result<()> {
let diagnostics = test_path(
Path::new("pydocstyle/D417.py"),
&settings::LinterSettings {
// With explicit Google convention, we should flag every function.
pydocstyle: Settings {
convention: Some(Convention::Google),
..Settings::default()
},
..settings::LinterSettings::for_rule(Rule::UndocumentedParam)
},
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test]
fn d417_google_ignore_var_parameters() -> Result<()> {
let diagnostics = test_path(
Path::new("pydocstyle/D417.py"),
&settings::LinterSettings {
pydocstyle: Settings {
convention: Some(Convention::Google),
ignore_var_parameters: true,
..Settings::default()
},
..settings::LinterSettings::for_rule(Rule::UndocumentedParam)
},
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test]
fn d417_numpy() -> Result<()> {
let diagnostics = test_path(
Path::new("pydocstyle/D417.py"),
&settings::LinterSettings {
// With explicit numpy convention, we shouldn't flag anything.
pydocstyle: Settings {
convention: Some(Convention::Numpy),
..Settings::default()
},
..settings::LinterSettings::for_rule(Rule::UndocumentedParam)
},
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test]
fn d209_d400() -> Result<()> {
let diagnostics = test_path(
Path::new("pydocstyle/D209_D400.py"),
&settings::LinterSettings::for_rules([
Rule::NewLineAfterLastParagraph,
Rule::MissingTrailingPeriod,
]),
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test]
fn all() -> Result<()> {
let diagnostics = test_path(
Path::new("pydocstyle/all.py"),
&settings::LinterSettings::for_rules([
Rule::UndocumentedPublicModule,
Rule::UndocumentedPublicClass,
Rule::UndocumentedPublicMethod,
Rule::UndocumentedPublicFunction,
Rule::UndocumentedPublicPackage,
Rule::UndocumentedMagicMethod,
Rule::UndocumentedPublicNestedClass,
Rule::UndocumentedPublicInit,
]),
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/not_empty.rs | crates/ruff_linter/src/rules/pydocstyle/rules/not_empty.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
/// ## What it does
/// Checks for empty docstrings.
///
/// ## Why is this bad?
/// An empty docstring is indicative of incomplete documentation. It should either
/// be removed or replaced with a meaningful docstring.
///
/// ## Example
/// ```python
/// def average(values: list[float]) -> float:
/// """"""
/// ```
///
/// Use instead:
/// ```python
/// def average(values: list[float]) -> float:
/// """Return the mean of the given values."""
/// ```
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.68")]
pub(crate) struct EmptyDocstring;
impl Violation for EmptyDocstring {
#[derive_message_formats]
fn message(&self) -> String {
"Docstring is empty".to_string()
}
}
/// D419
pub(crate) fn not_empty(checker: &Checker, docstring: &Docstring) -> bool {
if !docstring.body().trim().is_empty() {
return true;
}
checker.report_diagnostic_if_enabled(EmptyDocstring, docstring.range());
false
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/backslashes.rs | crates/ruff_linter/src/rules/pydocstyle/rules/backslashes.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for docstrings that include backslashes, but are not defined as
/// raw string literals.
///
/// ## Why is this bad?
/// In Python, backslashes are typically used to escape characters in strings.
/// In raw strings (those prefixed with an `r`), however, backslashes are
/// treated as literal characters.
///
/// [PEP 257](https://peps.python.org/pep-0257/#what-is-a-docstring) recommends
/// the use of raw strings (i.e., `r"""raw triple double quotes"""`) for
/// docstrings that include backslashes. The use of a raw string ensures that
/// any backslashes are treated as literal characters, and not as escape
/// sequences, which avoids confusion.
///
/// ## Example
/// ```python
/// def foobar():
/// """Docstring for foo\bar."""
///
///
/// foobar.__doc__ # "Docstring for foar."
/// ```
///
/// Use instead:
/// ```python
/// def foobar():
/// r"""Docstring for foo\bar."""
///
///
/// foobar.__doc__ # "Docstring for foo\bar."
/// ```
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [Python documentation: String and Bytes literals](https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.172")]
pub(crate) struct EscapeSequenceInDocstring;
impl Violation for EscapeSequenceInDocstring {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
r#"Use `r"""` if any backslashes in a docstring"#.to_string()
}
fn fix_title(&self) -> Option<String> {
Some(r#"Add `r` prefix"#.to_string())
}
}
/// D301
pub(crate) fn backslashes(checker: &Checker, docstring: &Docstring) {
if docstring.is_raw_string() {
return;
}
// Docstring contains at least one backslash.
let body = docstring.body();
let bytes = body.as_bytes();
let mut offset = 0;
while let Some(position) = memchr::memchr(b'\\', &bytes[offset..]) {
if position + offset + 1 >= body.len() {
break;
}
let after_escape = &body[position + offset + 1..];
// End of Docstring.
let Some(escaped_char) = &after_escape.chars().next() else {
break;
};
if matches!(escaped_char, '"' | '\'') {
// If the next three characters are equal to """, it indicates an escaped docstring pattern.
if after_escape.starts_with("\"\"\"") || after_escape.starts_with("\'\'\'") {
offset += position + 3;
continue;
}
// If the next three characters are equal to "\"\", it indicates an escaped docstring pattern.
if after_escape.starts_with("\"\\\"\\\"") || after_escape.starts_with("\'\\\'\\\'") {
offset += position + 5;
continue;
}
}
offset += position + escaped_char.len_utf8();
// Only allow continuations (backslashes followed by newlines) and Unicode escapes.
if !matches!(*escaped_char, '\r' | '\n' | 'u' | 'U' | 'N') {
let mut diagnostic =
checker.report_diagnostic(EscapeSequenceInDocstring, docstring.range());
if !docstring.is_u_string() {
diagnostic.set_fix(Fix::unsafe_edit(Edit::insertion(
"r".to_string(),
docstring.start(),
)));
}
break;
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/ends_with_period.rs | crates/ruff_linter/src/rules/pydocstyle/rules/ends_with_period.rs | use ruff_text_size::TextLen;
use strum::IntoEnumIterator;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_source_file::{UniversalNewlineIterator, UniversalNewlines};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::docstrings::sections::SectionKind;
use crate::{Edit, Fix, FixAvailability, Violation};
use crate::rules::pydocstyle::helpers::logical_line;
/// ## What it does
/// Checks for docstrings in which the first line does not end in a period.
///
/// ## Why is this bad?
/// [PEP 257] recommends that the first line of a docstring is written in the
/// form of a command, ending in a period.
///
/// This rule may not apply to all projects; its applicability is a matter of
/// convention. By default, this rule is enabled when using the `numpy` and
/// `pep257` conventions, and disabled when using the `google` convention.
///
/// ## Example
/// ```python
/// def average(values: list[float]) -> float:
/// """Return the mean of the given values"""
/// ```
///
/// Use instead:
/// ```python
/// def average(values: list[float]) -> float:
/// """Return the mean of the given values."""
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
///
/// [PEP 257]: https://peps.python.org/pep-0257/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.68")]
pub(crate) struct MissingTrailingPeriod;
impl Violation for MissingTrailingPeriod {
/// `None` in the case a fix is never available or otherwise Some
/// [`FixAvailability`] describing the available fix.
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
"First line should end with a period".to_string()
}
fn fix_title(&self) -> Option<String> {
Some("Add period".to_string())
}
}
/// D400
pub(crate) fn ends_with_period(checker: &Checker, docstring: &Docstring) {
let body = docstring.body();
if let Some(first_line) = body.trim().universal_newlines().next() {
let trimmed = first_line.trim();
// Avoid false-positives: `:param`, etc.
for prefix in [":param", ":type", ":raises", ":return", ":rtype"] {
if trimmed.starts_with(prefix) {
return;
}
}
// Avoid false-positives: `Args:`, etc.
for section_kind in SectionKind::iter() {
if let Some(suffix) = trimmed.strip_suffix(section_kind.as_str()) {
if suffix.is_empty() {
return;
}
if suffix == ":" {
return;
}
}
}
}
if let Some(index) = logical_line(body.as_str()) {
let mut lines = UniversalNewlineIterator::with_offset(&body, body.start());
let line = lines.nth(index).unwrap();
let trimmed = line.trim_end();
if trimmed.ends_with('\\') {
// Ignore the edge case whether a single quoted string is multiple lines through an
// escape (https://github.com/astral-sh/ruff/issues/7139). Single quote docstrings are
// flagged by D300.
// ```python
// "\
// "
// ```
return;
}
if !trimmed.ends_with('.') {
let mut diagnostic =
checker.report_diagnostic(MissingTrailingPeriod, docstring.range());
// Best-effort fix: avoid adding a period after other punctuation marks.
if !trimmed.ends_with([':', ';', '?', '!']) {
diagnostic.set_fix(Fix::unsafe_edit(Edit::insertion(
".".to_string(),
line.start() + trimmed.text_len(),
)));
}
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/one_liner.rs | crates/ruff_linter/src/rules/pydocstyle/rules/one_liner.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_source_file::NewlineWithTrailingNewline;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for single-line docstrings that are broken across multiple lines.
///
/// ## Why is this bad?
/// [PEP 257] recommends that docstrings that _can_ fit on one line should be
/// formatted on a single line, for consistency and readability.
///
/// ## Example
/// ```python
/// def average(values: list[float]) -> float:
/// """
/// Return the mean of the given values.
/// """
/// ```
///
/// Use instead:
/// ```python
/// def average(values: list[float]) -> float:
/// """Return the mean of the given values."""
/// ```
///
/// ## Fix safety
/// The fix is marked as unsafe because it could affect tools that parse docstrings,
/// documentation generators, or custom introspection utilities that rely on
/// specific docstring formatting.
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
///
/// [PEP 257]: https://peps.python.org/pep-0257/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.68")]
pub(crate) struct UnnecessaryMultilineDocstring;

impl Violation for UnnecessaryMultilineDocstring {
    // "Sometimes": the fix is withheld when collapsing the docstring onto one
    // line could produce an invalid quote sequence (see `one_liner`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "One-line docstring should fit on one line".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        Some("Reformat to one line".to_string())
    }
}
/// D200
pub(crate) fn one_liner(checker: &Checker, docstring: &Docstring) {
    // Count total lines and lines with actual content; stop as soon as a
    // second content line is seen, since the rule then no longer applies.
    let mut total_lines = 0usize;
    let mut content_lines = 0usize;
    for line in NewlineWithTrailingNewline::from(docstring.body().as_str()) {
        total_lines += 1;
        if !line.trim().is_empty() {
            content_lines += 1;
        }
        if content_lines > 1 {
            return;
        }
    }

    // Flag only docstrings whose single line of content is spread across
    // multiple physical lines.
    if content_lines != 1 || total_lines <= 1 {
        return;
    }

    let mut diagnostic =
        checker.report_diagnostic(UnnecessaryMultilineDocstring, docstring.range());

    // Collapsing is skipped when the content starts or ends with the quote
    // character, or ends with an odd run of backslashes: the rewritten
    // one-liner would contain an invalid string of quote characters. In that
    // case the diagnostic is reported without a fix.
    let body = docstring.body();
    let trimmed = body.trim();
    let quote_char = docstring.quote_style().as_char();
    let odd_trailing_backslashes =
        trimmed.chars().rev().take_while(|c| *c == '\\').count() % 2 == 1;
    if odd_trailing_backslashes
        || trimmed.ends_with(quote_char)
        || trimmed.starts_with(quote_char)
    {
        return;
    }

    diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
        format!(
            "{leading}{trimmed}{trailing}",
            leading = docstring.opener(),
            trailing = docstring.closer()
        ),
        docstring.range(),
    )));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/indent.rs | crates/ruff_linter/src/rules/pydocstyle/rules/indent.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::docstrings::{clean_space, leading_space};
use ruff_source_file::{Line, NewlineWithTrailingNewline};
use ruff_text_size::{Ranged, TextSize};
use ruff_text_size::{TextLen, TextRange};
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::registry::Rule;
use crate::{AlwaysFixableViolation, Violation};
use crate::{Edit, Fix};
#[expect(clippy::tabs_in_doc_comments)]
/// ## What it does
/// Checks for docstrings that are indented with tabs.
///
/// ## Why is this bad?
/// [PEP 8] recommends using spaces over tabs for indentation.
///
/// ## Example
/// ```python
/// def sort_list(l: list[int]) -> list[int]:
/// """Return a sorted copy of the list.
///
/// Sort the list in ascending order and return a copy of the result using the bubble
/// sort algorithm.
/// """
/// ```
///
/// Use instead:
/// ```python
/// def sort_list(l: list[int]) -> list[int]:
/// """Return a sorted copy of the list.
///
/// Sort the list in ascending order and return a copy of the result using the bubble
/// sort algorithm.
/// """
/// ```
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent indentation, making the rule redundant.
///
/// The rule is also incompatible with the [formatter] when using
/// `format.indent-style="tab"`.
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
///
/// [PEP 8]: https://peps.python.org/pep-0008/#tabs-or-spaces
/// [formatter]: https://docs.astral.sh/ruff/formatter
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.75")]
pub(crate) struct DocstringTabIndentation;

impl Violation for DocstringTabIndentation {
    // Reported at most once per docstring, spanning the docstring's full
    // range (see `indent`); no automatic fix is offered.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Docstring should be indented with spaces, not tabs".to_string()
    }
}
/// ## What it does
/// Checks for under-indented docstrings.
///
/// ## Why is this bad?
/// [PEP 257] recommends that docstrings be indented to the same level as their
/// opening quotes. Avoid under-indenting docstrings, for consistency.
///
/// ## Example
/// ```python
/// def sort_list(l: list[int]) -> list[int]:
/// """Return a sorted copy of the list.
///
/// Sort the list in ascending order and return a copy of the result using the bubble sort
/// algorithm.
/// """
/// ```
///
/// Use instead:
/// ```python
/// def sort_list(l: list[int]) -> list[int]:
/// """Return a sorted copy of the list.
///
/// Sort the list in ascending order and return a copy of the result using the bubble
/// sort algorithm.
/// """
/// ```
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent indentation, making the rule redundant.
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
///
/// [PEP 257]: https://peps.python.org/pep-0257/
/// [formatter]: https://docs.astral.sh/ruff/formatter/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.75")]
pub(crate) struct UnderIndentation;

impl AlwaysFixableViolation for UnderIndentation {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Docstring is under-indented".to_string()
    }

    // The fix replaces the line's leading whitespace with the docstring's
    // own indentation (see `indent`).
    fn fix_title(&self) -> String {
        "Increase indentation".to_string()
    }
}
/// ## What it does
/// Checks for over-indented docstrings.
///
/// ## Why is this bad?
/// [PEP 257] recommends that docstrings be indented to the same level as their
/// opening quotes. Avoid over-indenting docstrings, for consistency.
///
/// ## Example
/// ```python
/// def sort_list(l: list[int]) -> list[int]:
/// """Return a sorted copy of the list.
///
/// Sort the list in ascending order and return a copy of the result using the
/// bubble sort algorithm.
/// """
/// ```
///
/// Use instead:
/// ```python
/// def sort_list(l: list[int]) -> list[int]:
/// """Return a sorted copy of the list.
///
/// Sort the list in ascending order and return a copy of the result using the bubble
/// sort algorithm.
/// """
/// ```
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent indentation, making the rule redundant.
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
///
/// [PEP 257]: https://peps.python.org/pep-0257/
/// [formatter]: https://docs.astral.sh/ruff/formatter/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.75")]
pub(crate) struct OverIndentation;

impl AlwaysFixableViolation for OverIndentation {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Docstring is over-indented".to_string()
    }

    // The fix strips the over-indentation common to the docstring's lines
    // (see `indent`).
    fn fix_title(&self) -> String {
        "Remove over-indentation".to_string()
    }
}
/// D206, D207, D208
///
/// Walks every line of a multi-line docstring and compares its indentation
/// against the indentation of the docstring's opening quotes, reporting tab
/// indentation (once per docstring), under-indentation (per line, fixable),
/// and over-indentation (per line, fixable).
pub(crate) fn indent(checker: &Checker, docstring: &Docstring) {
    let body = docstring.body();

    // Split the docstring into lines.
    let mut lines = NewlineWithTrailingNewline::with_offset(&body, body.start()).peekable();

    // The current line being processed
    let mut current: Option<Line> = lines.next();

    // A single-line docstring has no interior lines whose indentation could
    // be checked.
    if lines.peek().is_none() {
        return;
    }

    let docstring_indentation = docstring.compute_indentation();
    let mut has_seen_tab = docstring_indentation.contains('\t');
    // Indent widths are compared in characters, not bytes, so that multi-byte
    // whitespace is counted once.
    let docstring_indent_size = docstring_indentation.chars().count();

    // Lines, other than the last, that are over indented.
    let mut over_indented_lines = vec![];

    // The smallest over indent that all docstring lines have in common. None if any line isn't over indented.
    let mut smallest_over_indent_size = Some(usize::MAX);

    // The last processed line
    let mut last = None;

    while let Some(line) = current {
        // First lines and continuations don't need any indentation.
        if last.is_none()
            || last
                .as_deref()
                .is_some_and(|last: &str| last.ends_with('\\'))
        {
            last = Some(line);
            current = lines.next();
            continue;
        }

        let is_last = lines.peek().is_none();

        // Omit empty lines, except for the last line, which is non-empty by way of
        // containing the closing quotation marks.
        let is_blank = line.trim().is_empty();
        if !is_last && is_blank {
            last = Some(line);
            current = lines.next();
            continue;
        }

        let line_indent = leading_space(&line);
        let line_indent_size = line_indent.chars().count();

        // We only report tab indentation once, so only check if we haven't seen a tab
        // yet.
        has_seen_tab = has_seen_tab || line_indent.contains('\t');

        if checker.is_rule_enabled(Rule::UnderIndentation) {
            // We report under-indentation on every line. This isn't great, but enables
            // fix.
            if (is_last || !is_blank) && line_indent_size < docstring_indent_size {
                let mut diagnostic =
                    checker.report_diagnostic(UnderIndentation, TextRange::empty(line.start()));
                diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                    clean_space(docstring_indentation),
                    TextRange::at(line.start(), line_indent.text_len()),
                )));
            }
        }

        // Only true when the last line is indentation only followed by the closing quotes.
        // False when it is not the last line or the last line contains content other than the closing quotes.
        // The last line only has special handling when it contains no other content.
        let is_last_closing_quotes_only = is_last && is_blank;

        // Like pydocstyle, we only report over-indentation if either: (1) every line
        // (except, optionally, the last line) is over-indented, or (2) the last line
        // (which contains the closing quotation marks) is
        // over-indented. We can't know if we've achieved that condition
        // until we've viewed all the lines, so for now, just track
        // the over-indentation status of every line.
        if !is_last_closing_quotes_only {
            smallest_over_indent_size =
                smallest_over_indent_size.and_then(|smallest_over_indent_size| {
                    let over_indent_size = line_indent_size.saturating_sub(docstring_indent_size);
                    // `docstring_indent_size < line_indent_size`
                    if over_indent_size > 0 {
                        over_indented_lines.push(line.clone());
                        // Track the _smallest_ offset we see, in terms of characters.
                        Some(smallest_over_indent_size.min(over_indent_size))
                    } else {
                        // One line that isn't over-indented disqualifies the
                        // "every line over-indented" condition for good.
                        None
                    }
                });
        }

        last = Some(line);
        current = lines.next();
    }

    if checker.is_rule_enabled(Rule::DocstringTabIndentation) {
        if has_seen_tab {
            checker.report_diagnostic(DocstringTabIndentation, docstring.range());
        }
    }

    if checker.is_rule_enabled(Rule::OverIndentation) {
        // If every line (except the last) is over-indented...
        if let Some(smallest_over_indent_size) = smallest_over_indent_size {
            for line in over_indented_lines {
                let line_indent = leading_space(&line);
                let indent = clean_space(docstring_indentation);
                // We report over-indentation on every line. This isn't great, but
                // enables the fix capability.
                let mut diagnostic =
                    checker.report_diagnostic(OverIndentation, TextRange::empty(line.start()));
                let edit = if indent.is_empty() {
                    // Delete the entire indent.
                    Edit::range_deletion(TextRange::at(line.start(), line_indent.text_len()))
                } else {
                    // Convert the character count to an offset within the source.
                    // Example, where `[]` is a 2 byte non-breaking space:
                    // ```
                    // def f():
                    //     """ Docstring header
                    //     ^^^^ Real indentation is 4 chars
                    //       docstring body, over-indented
                    //     ^^^^^^ Over-indentation is 6 - 4 = 2 chars due to this line
                    //     [] [] docstring body 2, further indented
                    //     ^^^^^ We take these 4 chars/5 bytes to match the docstring ...
                    //          ^^^ ... and these 2 chars/3 bytes to remove the `over_indented_size` ...
                    //              ^^ ... but preserve this real indent
                    // ```
                    let offset = checker
                        .locator()
                        .after(line.start())
                        .chars()
                        .take(docstring_indent_size + smallest_over_indent_size)
                        .map(TextLen::text_len)
                        .sum::<TextSize>();
                    let range = TextRange::at(line.start(), offset);
                    Edit::range_replacement(indent, range)
                };
                diagnostic.set_fix(Fix::safe_edit(edit));
            }
        }

        // If the last line is over-indented...
        if let Some(last) = last {
            let line_indent = leading_space(&last);
            let line_indent_size = line_indent.chars().count();
            let last_line_over_indent = line_indent_size.saturating_sub(docstring_indent_size);
            // Only the closing-quotes-only case is handled here; a last line
            // with content was already handled in the loop above.
            let is_indent_only = line_indent.len() == last.len();
            if last_line_over_indent > 0 && is_indent_only {
                let mut diagnostic =
                    checker.report_diagnostic(OverIndentation, TextRange::empty(last.start()));
                let indent = clean_space(docstring_indentation);
                let range = TextRange::at(last.start(), line_indent.text_len());
                let edit = if indent.is_empty() {
                    Edit::range_deletion(range)
                } else {
                    Edit::range_replacement(indent, range)
                };
                diagnostic.set_fix(Fix::safe_edit(edit));
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/starts_with_this.rs | crates/ruff_linter/src/rules/pydocstyle/rules/starts_with_this.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::rules::pydocstyle::helpers::normalize_word;
/// ## What it does
/// Checks for docstrings that start with `This`.
///
/// ## Why is this bad?
/// [PEP 257] recommends that the first line of a docstring be written in the
/// imperative mood, for consistency.
///
/// Hint: to rewrite the docstring in the imperative, phrase the first line as
/// if it were a command.
///
/// This rule may not apply to all projects; its applicability is a matter of
/// convention. By default, this rule is enabled when using the `numpy`
/// convention, and disabled when using the `google` and `pep257` conventions.
///
/// ## Example
/// ```python
/// def average(values: list[float]) -> float:
/// """This function returns the mean of the given values."""
/// ```
///
/// Use instead:
/// ```python
/// def average(values: list[float]) -> float:
/// """Return the mean of the given values."""
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
///
/// [PEP 257]: https://peps.python.org/pep-0257/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.71")]
pub(crate) struct DocstringStartsWithThis;

impl Violation for DocstringStartsWithThis {
    // No automatic fix is offered for this rule.
    #[derive_message_formats]
    fn message(&self) -> String {
        r#"First word of the docstring should not be "This""#.to_string()
    }
}
/// D404
pub(crate) fn starts_with_this(checker: &Checker, docstring: &Docstring) {
    let body = docstring.body();
    let content = body.trim();

    // An empty docstring has no first word to inspect.
    if content.is_empty() {
        return;
    }

    let first_word = match content.split(' ').next() {
        Some(word) => word,
        None => return,
    };

    // Flag only when the normalized first word is exactly "this".
    if normalize_word(first_word) == "this" {
        checker.report_diagnostic(DocstringStartsWithThis, docstring.range());
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/no_signature.rs | crates/ruff_linter/src/rules/pydocstyle/rules/no_signature.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_source_file::UniversalNewlines;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
/// ## What it does
/// Checks for function docstrings that include the function's signature in
/// the summary line.
///
/// ## Why is this bad?
/// [PEP 257] recommends against including a function's signature in its
/// docstring. Instead, consider using type annotations as a form of
/// documentation for the function's parameters and return value.
///
/// This rule may not apply to all projects; its applicability is a matter of
/// convention. By default, this rule is enabled when using the `google` and
/// `pep257` conventions, and disabled when using the `numpy` convention.
///
/// ## Example
/// ```python
/// def foo(a, b):
/// """foo(a: int, b: int) -> list[int]"""
/// ```
///
/// Use instead:
/// ```python
/// def foo(a: int, b: int) -> list[int]:
/// """Return a list of a and b."""
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
///
/// [PEP 257]: https://peps.python.org/pep-0257/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.70")]
pub(crate) struct SignatureInDocstring;

impl Violation for SignatureInDocstring {
    // No automatic fix is offered for this rule.
    #[derive_message_formats]
    fn message(&self) -> String {
        "First line should not be the function's signature".to_string()
    }
}
/// D402
pub(crate) fn no_signature(checker: &Checker, docstring: &Docstring) {
    let Some(function) = docstring.definition.as_function_def() else {
        return;
    };

    let body = docstring.body();
    let Some(first_line) = body.trim().universal_newlines().next() else {
        return;
    };

    let name = function.name.as_str();

    // The summary line "contains the signature" when the function's name
    // appears followed immediately by `(`, and is preceded by a word
    // boundary (start of line, whitespace, `;`, or `,`).
    let mentions_signature = first_line.match_indices(name).any(|(index, _)| {
        let boundary_before = first_line[..index]
            .chars()
            .next_back()
            .is_none_or(|c| matches!(c, ' ' | '\t' | ';' | ','));
        boundary_before && first_line[index + name.len()..].starts_with('(')
    });

    if mentions_signature {
        checker.report_diagnostic(SignatureInDocstring, docstring.range());
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/blank_before_after_function.rs | crates/ruff_linter/src/rules/pydocstyle/rules/blank_before_after_function.rs | use regex::Regex;
use std::sync::LazyLock;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_trivia::PythonWhitespace;
use ruff_source_file::{UniversalNewlineIterator, UniversalNewlines};
use ruff_text_size::Ranged;
use ruff_text_size::TextRange;
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::registry::Rule;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for docstrings on functions that are separated by one or more blank
/// lines from the function definition.
///
/// ## Why is this bad?
/// Remove any blank lines between the function definition and its docstring,
/// for consistency.
///
/// ## Example
/// ```python
/// def average(values: list[float]) -> float:
///
/// """Return the mean of the given values."""
/// ```
///
/// Use instead:
/// ```python
/// def average(values: list[float]) -> float:
/// """Return the mean of the given values."""
/// ```
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.70")]
pub(crate) struct BlankLineBeforeFunction {
    // Number of blank lines found between the function header and the
    // docstring.
    num_lines: usize,
}

impl Violation for BlankLineBeforeFunction {
    // "Sometimes": the fix is withheld when the line preceding the blank
    // run ends in a `\` continuation, where deleting the blank line would
    // produce a syntax error (see `blank_before_after_function`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let BlankLineBeforeFunction { num_lines } = self;
        format!("No blank lines allowed before function docstring (found {num_lines})")
    }

    fn fix_title(&self) -> Option<String> {
        Some("Remove blank line(s) before function docstring".to_string())
    }
}
/// ## What it does
/// Checks for docstrings on functions that are separated by one or more blank
/// lines from the function body.
///
/// ## Why is this bad?
/// Remove any blank lines between the function body and the function
/// docstring, for consistency.
///
/// ## Example
/// ```python
/// def average(values: list[float]) -> float:
/// """Return the mean of the given values."""
///
/// return sum(values) / len(values)
/// ```
///
/// Use instead:
/// ```python
/// def average(values: list[float]) -> float:
/// """Return the mean of the given values."""
/// return sum(values) / len(values)
/// ```
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.70")]
pub(crate) struct BlankLineAfterFunction {
    // Number of blank lines found between the docstring and the function
    // body.
    num_lines: usize,
}

impl Violation for BlankLineAfterFunction {
    // "Sometimes": the fix is withheld when the text on the docstring's
    // final line ends in a `\` continuation, where deleting the blank line
    // would produce a syntax error (see `blank_before_after_function`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let BlankLineAfterFunction { num_lines } = self;
        format!("No blank lines allowed after function docstring (found {num_lines})")
    }

    fn fix_title(&self) -> Option<String> {
        Some("Remove blank line(s) after function docstring".to_string())
    }
}
// Matches a line that begins an indented nested definition: an indented
// `class`, `def`, or `async def` statement, or an indented decorator (`@`).
static INNER_FUNCTION_OR_CLASS_REGEX: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"^\s+(?:(?:class|def|async def)\s|@)").unwrap());
/// D201, D202
///
/// Checks for blank lines between a function header and its docstring
/// (D201) and between the docstring and the function body (D202), offering
/// a deletion fix for each when safe.
pub(crate) fn blank_before_after_function(checker: &Checker, docstring: &Docstring) {
    let Some(function) = docstring.definition.as_function_def() else {
        return;
    };

    if checker.is_rule_enabled(Rule::BlankLineBeforeFunction) {
        let before = checker
            .locator()
            .slice(TextRange::new(function.start(), docstring.start()));

        // Walk backwards from the docstring, counting the run of blank lines
        // up to the first non-blank line.
        let mut lines = UniversalNewlineIterator::with_offset(before, function.start()).rev();
        let mut blank_lines_before = 0usize;
        let mut blank_lines_start = lines.next().map(|l| l.end()).unwrap_or_default();
        let mut start_is_line_continuation = false;
        for line in lines {
            if line.trim().is_empty() {
                blank_lines_before += 1;
                blank_lines_start = line.start();
            } else {
                start_is_line_continuation = line.ends_with('\\');
                break;
            }
        }

        if blank_lines_before != 0 {
            let mut diagnostic = checker.report_diagnostic(
                BlankLineBeforeFunction {
                    num_lines: blank_lines_before,
                },
                docstring.range(),
            );
            // Do not offer fix if a \ would cause it to be a syntax error
            if !start_is_line_continuation {
                // Delete the blank line before the docstring.
                diagnostic.set_fix(Fix::safe_edit(Edit::deletion(
                    blank_lines_start,
                    docstring.line_start(),
                )));
            }
        }
    }

    if checker.is_rule_enabled(Rule::BlankLineAfterFunction) {
        let after = checker
            .locator()
            .slice(TextRange::new(docstring.end(), function.end()));

        // If the docstring is only followed by blank and commented lines, abort.
        let all_blank_after = after.universal_newlines().skip(1).all(|line| {
            line.trim_whitespace().is_empty() || line.trim_whitespace_start().starts_with('#')
        });
        if all_blank_after {
            return;
        }

        // Count the number of blank lines after the docstring.
        let mut blank_lines_after = 0usize;
        let mut lines = UniversalNewlineIterator::with_offset(after, docstring.end()).peekable();
        // The first line is the remainder of the docstring's own line.
        let first_line = lines.next();
        let first_line_line_continuation = first_line.as_ref().is_some_and(|l| l.ends_with('\\'));
        let first_line_end = first_line.map(|l| l.end()).unwrap_or_default();
        let mut blank_lines_end = first_line_end;
        while let Some(line) = lines.peek() {
            if line.trim().is_empty() {
                blank_lines_after += 1;
                blank_lines_end = line.end();
                lines.next();
            } else {
                break;
            }
        }

        // Avoid violations for blank lines followed by inner functions or classes.
        if blank_lines_after == 1
            && lines
                .find(|line| !line.trim_whitespace_start().starts_with('#'))
                .is_some_and(|line| INNER_FUNCTION_OR_CLASS_REGEX.is_match(&line))
        {
            return;
        }

        if blank_lines_after != 0 {
            let mut diagnostic = checker.report_diagnostic(
                BlankLineAfterFunction {
                    num_lines: blank_lines_after,
                },
                docstring.range(),
            );
            // Do not offer fix if a \ would cause it to be a syntax error
            if !first_line_line_continuation {
                // Delete the blank line after the docstring.
                diagnostic.set_fix(Fix::safe_edit(Edit::deletion(
                    first_line_end,
                    blank_lines_end,
                )));
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/triple_quotes.rs | crates/ruff_linter/src/rules/pydocstyle/rules/triple_quotes.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::str::Quote;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for docstrings that use `'''triple single quotes'''` instead of
/// `"""triple double quotes"""`.
///
/// ## Why is this bad?
/// [PEP 257](https://peps.python.org/pep-0257/#what-is-a-docstring) recommends
/// the use of `"""triple double quotes"""` for docstrings, to ensure
/// consistency.
///
/// ## Example
/// ```python
/// def kos_root():
/// '''Return the pathname of the KOS root directory.'''
/// ```
///
/// Use instead:
/// ```python
/// def kos_root():
/// """Return the pathname of the KOS root directory."""
/// ```
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent quotes, making the rule redundant.
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
///
/// [formatter]: https://docs.astral.sh/ruff/formatter/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.69")]
pub(crate) struct TripleSingleQuotes {
    // The quote style the docstring should use: double quotes by default,
    // single quotes when the body itself contains `"""` (see `triple_quotes`).
    expected_quote: Quote,
}

impl Violation for TripleSingleQuotes {
    // "Sometimes": the fix is withheld when the docstring body ends with the
    // target quote character (see `triple_quotes`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        match self.expected_quote {
            Quote::Double => r#"Use triple double quotes `"""`"#.to_string(),
            Quote::Single => r"Use triple single quotes `'''`".to_string(),
        }
    }

    fn fix_title(&self) -> Option<String> {
        let title = match self.expected_quote {
            Quote::Double => "Convert to triple double quotes",
            Quote::Single => "Convert to triple single quotes",
        };
        Some(title.to_string())
    }
}
/// D300
pub(crate) fn triple_quotes(checker: &Checker, docstring: &Docstring) {
    let opener = docstring.opener();
    let prefixes = docstring.prefix_str();

    // Prefer triple double quotes, unless the body itself contains `"""`, in
    // which case triple single quotes are expected instead. If the body
    // contains both kinds, there is no valid target style; bail out.
    let expected_quote = if docstring.body().contains("\"\"\"") {
        if docstring.body().contains("\'\'\'") {
            return;
        }
        Quote::Single
    } else {
        Quote::Double
    };

    let (expected_opener, quote_char) = match expected_quote {
        Quote::Single => ("'''", '\''),
        Quote::Double => ("\"\"\"", '"'),
    };

    // Already in the expected style: nothing to report.
    if opener.ends_with(expected_opener) {
        return;
    }

    let mut diagnostic =
        checker.report_diagnostic(TripleSingleQuotes { expected_quote }, docstring.range());

    // Skip the fix when the body ends with the target quote character, since
    // the rewritten docstring would then end in four consecutive quotes.
    let body = docstring.body().as_str();
    if !body.ends_with(quote_char) {
        diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
            format!("{prefixes}{expected_opener}{body}{expected_opener}"),
            docstring.range(),
        )));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/not_missing.rs | crates/ruff_linter/src/rules/pydocstyle/rules/not_missing.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::identifier::Identifier;
use ruff_python_semantic::analyze::visibility::{
Visibility, is_call, is_init, is_magic, is_new, is_overload, is_override,
};
use ruff_python_semantic::{Definition, Member, MemberKind, Module, ModuleKind};
use ruff_text_size::TextRange;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for undocumented public module definitions.
///
/// ## Why is this bad?
/// Public modules should be documented via docstrings to outline their purpose
/// and contents.
///
/// Generally, module docstrings should describe the purpose of the module and
/// list the classes, exceptions, functions, and other objects that are exported
/// by the module, alongside a one-line summary of each.
///
/// If the module is a script, the docstring should be usable as its "usage"
/// message.
///
/// If the codebase adheres to a standard format for module docstrings, follow
/// that format for consistency.
///
/// ## Example
///
/// ```python
/// class FasterThanLightError(ZeroDivisionError): ...
///
///
/// def calculate_speed(distance: float, time: float) -> float: ...
/// ```
///
/// Use instead:
///
/// ```python
/// """Utility functions and classes for calculating speed.
///
/// This module provides:
/// - FasterThanLightError: exception when FTL speed is calculated;
/// - calculate_speed: calculate speed given distance and time.
/// """
///
///
/// class FasterThanLightError(ZeroDivisionError): ...
///
///
/// def calculate_speed(distance: float, time: float) -> float: ...
/// ```
///
/// ## Notebook behavior
/// This rule is ignored for Jupyter Notebooks.
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.70")]
pub(crate) struct UndocumentedPublicModule;

impl Violation for UndocumentedPublicModule {
    // No automatic fix is offered for this rule.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing docstring in public module".to_string()
    }
}
/// ## What it does
/// Checks for undocumented public class definitions.
///
/// ## Why is this bad?
/// Public classes should be documented via docstrings to outline their purpose
/// and behavior.
///
/// Generally, a class docstring should describe the class's purpose and list
/// its public attributes and methods.
///
/// If the codebase adheres to a standard format for class docstrings, follow
/// that format for consistency.
///
/// ## Example
/// ```python
/// class Player:
/// def __init__(self, name: str, points: int = 0) -> None:
/// self.name: str = name
/// self.points: int = points
///
/// def add_points(self, points: int) -> None:
/// self.points += points
/// ```
///
/// Use instead (in the NumPy docstring format):
/// ```python
/// class Player:
/// """A player in the game.
///
/// Attributes
/// ----------
/// name : str
/// The name of the player.
/// points : int
/// The number of points the player has.
///
/// Methods
/// -------
/// add_points(points: int) -> None
/// Add points to the player's score.
/// """
///
/// def __init__(self, name: str, points: int = 0) -> None:
/// self.name: str = name
/// self.points: int = points
///
/// def add_points(self, points: int) -> None:
/// self.points += points
/// ```
///
/// Or (in the Google docstring format):
/// ```python
/// class Player:
/// """A player in the game.
///
/// Attributes:
/// name: The name of the player.
/// points: The number of points the player has.
/// """
///
/// def __init__(self, name: str, points: int = 0) -> None:
/// self.name: str = name
/// self.points: int = points
///
/// def add_points(self, points: int) -> None:
/// self.points += points
/// ```
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.70")]
pub(crate) struct UndocumentedPublicClass;
impl Violation for UndocumentedPublicClass {
    /// User-facing diagnostic message; full rule docs are in the `///` block
    /// above the struct.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing docstring in public class".to_string()
    }
}
/// ## What it does
/// Checks for undocumented public method definitions.
///
/// ## Why is this bad?
/// Public methods should be documented via docstrings to outline their purpose
/// and behavior.
///
/// Generally, a method docstring should describe the method's behavior,
/// arguments, side effects, exceptions, return values, and any other
/// information that may be relevant to the user.
///
/// If the codebase adheres to a standard format for method docstrings, follow
/// that format for consistency.
///
/// This rule exempts methods decorated with [`@typing.override`][override],
/// since it is a common practice to document a method on a superclass but not
/// on an overriding method in a subclass.
///
/// ## Example
///
/// ```python
/// class Cat(Animal):
/// def greet(self, happy: bool = True):
/// if happy:
/// print("Meow!")
/// else:
/// raise ValueError("Tried to greet an unhappy cat.")
/// ```
///
/// Use instead (in the NumPy docstring format):
///
/// ```python
/// class Cat(Animal):
/// def greet(self, happy: bool = True):
/// """Print a greeting from the cat.
///
/// Parameters
/// ----------
/// happy : bool, optional
/// Whether the cat is happy, is True by default.
///
/// Raises
/// ------
/// ValueError
/// If the cat is not happy.
/// """
/// if happy:
/// print("Meow!")
/// else:
/// raise ValueError("Tried to greet an unhappy cat.")
/// ```
///
/// Or (in the Google docstring format):
///
/// ```python
/// class Cat(Animal):
/// def greet(self, happy: bool = True):
/// """Print a greeting from the cat.
///
/// Args:
/// happy: Whether the cat is happy, is True by default.
///
/// Raises:
/// ValueError: If the cat is not happy.
/// """
/// if happy:
/// print("Meow!")
/// else:
/// raise ValueError("Tried to greet an unhappy cat.")
/// ```
///
/// ## Options
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
///
/// [override]: https://docs.python.org/3/library/typing.html#typing.override
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.70")]
pub(crate) struct UndocumentedPublicMethod;
impl Violation for UndocumentedPublicMethod {
    /// User-facing diagnostic message. Also reported for undocumented
    /// `__new__`/`__call__` (see the dispatch in `not_missing`).
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing docstring in public method".to_string()
    }
}
/// ## What it does
/// Checks for undocumented public function definitions.
///
/// ## Why is this bad?
/// Public functions should be documented via docstrings to outline their
/// purpose and behavior.
///
/// Generally, a function docstring should describe the function's behavior,
/// arguments, side effects, exceptions, return values, and any other
/// information that may be relevant to the user.
///
/// If the codebase adheres to a standard format for function docstrings, follow
/// that format for consistency.
///
/// ## Example
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// Use instead (using the NumPy docstring format):
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns
/// -------
/// float
/// Speed as distance divided by time.
///
/// Raises
/// ------
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// Or, using the Google docstring format:
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Args:
/// distance: Distance traveled.
/// time: Time spent traveling.
///
/// Returns:
/// Speed as distance divided by time.
///
/// Raises:
/// FasterThanLightError: If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// ## Options
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Style Python Docstrings](https://google.github.io/styleguide/pyguide.html#s3.8-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.70")]
pub(crate) struct UndocumentedPublicFunction;
impl Violation for UndocumentedPublicFunction {
    /// User-facing diagnostic message; full rule docs are in the `///` block
    /// above the struct.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing docstring in public function".to_string()
    }
}
/// ## What it does
/// Checks for undocumented public package definitions.
///
/// ## Why is this bad?
/// Public packages should be documented via docstrings to outline their
/// purpose and contents.
///
/// Generally, package docstrings should list the modules and subpackages that
/// are exported by the package.
///
/// If the codebase adheres to a standard format for package docstrings, follow
/// that format for consistency.
///
/// ## Example
/// ```python
/// __all__ = ["Player", "Game"]
/// ```
///
/// Use instead:
/// ```python
/// """Game and player management package.
///
/// This package provides classes for managing players and games.
/// """
///
/// __all__ = ["Player", "Game"]
/// ```
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Style Python Docstrings](https://google.github.io/styleguide/pyguide.html#s3.8-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.70")]
pub(crate) struct UndocumentedPublicPackage;
impl Violation for UndocumentedPublicPackage {
    /// User-facing diagnostic message; full rule docs are in the `///` block
    /// above the struct.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing docstring in public package".to_string()
    }
}
/// ## What it does
/// Checks for undocumented magic method definitions.
///
/// ## Why is this bad?
/// Magic methods (methods with names that start and end with double
/// underscores) are used to implement operator overloading and other special
/// behavior. Such methods should be documented via docstrings to
/// outline their behavior.
///
/// Generally, magic method docstrings should describe the method's behavior,
/// arguments, side effects, exceptions, return values, and any other
/// information that may be relevant to the user.
///
/// If the codebase adheres to a standard format for method docstrings, follow
/// that format for consistency.
///
/// ## Example
/// ```python
/// class Cat(Animal):
/// def __str__(self) -> str:
/// return f"Cat: {self.name}"
///
///
/// cat = Cat("Dusty")
/// print(cat) # "Cat: Dusty"
/// ```
///
/// Use instead:
/// ```python
/// class Cat(Animal):
/// def __str__(self) -> str:
/// """Return a string representation of the cat."""
/// return f"Cat: {self.name}"
///
///
/// cat = Cat("Dusty")
/// print(cat) # "Cat: Dusty"
/// ```
///
/// ## Options
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Style Python Docstrings](https://google.github.io/styleguide/pyguide.html#s3.8-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.70")]
pub(crate) struct UndocumentedMagicMethod;
impl Violation for UndocumentedMagicMethod {
    /// User-facing diagnostic message. `__init__`, `__new__`, and `__call__`
    /// are excluded from this rule by the dispatch order in `not_missing`.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing docstring in magic method".to_string()
    }
}
/// ## What it does
/// Checks for undocumented public class definitions, for nested classes.
///
/// ## Why is this bad?
/// Public classes should be documented via docstrings to outline their
/// purpose and behavior.
///
/// Nested classes do not inherit the docstring of their enclosing class, so
/// they should have their own docstrings.
///
/// If the codebase adheres to a standard format for class docstrings, follow
/// that format for consistency.
///
/// ## Example
///
/// ```python
/// class Foo:
/// """Class Foo."""
///
/// class Bar: ...
///
///
/// bar = Foo.Bar()
/// bar.__doc__ # None
/// ```
///
/// Use instead:
///
/// ```python
/// class Foo:
/// """Class Foo."""
///
/// class Bar:
/// """Class Bar."""
///
///
/// bar = Foo.Bar()
/// bar.__doc__ # "Class Bar."
/// ```
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Style Python Docstrings](https://google.github.io/styleguide/pyguide.html#s3.8-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.70")]
pub(crate) struct UndocumentedPublicNestedClass;
impl Violation for UndocumentedPublicNestedClass {
    /// User-facing diagnostic message; full rule docs are in the `///` block
    /// above the struct.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing docstring in public nested class".to_string()
    }
}
/// ## What it does
/// Checks for public `__init__` method definitions that are missing
/// docstrings.
///
/// ## Why is this bad?
/// Public `__init__` methods are used to initialize objects. `__init__`
/// methods should be documented via docstrings to describe the method's
/// behavior, arguments, side effects, exceptions, and any other information
/// that may be relevant to the user.
///
/// If the codebase adheres to a standard format for `__init__` method docstrings,
/// follow that format for consistency.
///
/// ## Example
/// ```python
/// class City:
/// def __init__(self, name: str, population: int) -> None:
/// self.name: str = name
/// self.population: int = population
/// ```
///
/// Use instead:
/// ```python
/// class City:
/// def __init__(self, name: str, population: int) -> None:
/// """Initialize a city with a name and population."""
/// self.name: str = name
/// self.population: int = population
/// ```
///
/// ## Options
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Style Python Docstrings](https://google.github.io/styleguide/pyguide.html#s3.8-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.70")]
pub(crate) struct UndocumentedPublicInit;
impl Violation for UndocumentedPublicInit {
    /// User-facing diagnostic message; full rule docs are in the `///` block
    /// above the struct.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing docstring in `__init__`".to_string()
    }
}
/// D100, D101, D102, D103, D104, D105, D106, D107
///
/// Dispatches an undocumented `definition` to the matching `Undocumented*`
/// diagnostic. Stub files and private definitions are always exempt, as are
/// `@overload`ed functions/methods and `@override`n methods (the documented
/// contract lives on the implementation or superclass). Returns `false` for
/// undocumented modules, packages, classes, and functions; method branches
/// report a diagnostic but still return `true`.
pub(crate) fn not_missing(
    checker: &Checker,
    definition: &Definition,
    visibility: Visibility,
) -> bool {
    // Stub files (`.pyi`) never require docstrings.
    if checker.source_type.is_stub() {
        return true;
    }
    // Only public definitions are subject to these rules.
    if visibility.is_private() {
        return true;
    }
    match definition {
        Definition::Module(Module {
            kind: ModuleKind::Module,
            ..
        }) => {
            // Module docstrings are not required in Jupyter notebooks (see
            // the "Notebook behavior" section of the rule docs).
            if checker.source_type.is_ipynb() {
                return true;
            }
            checker.report_diagnostic_if_enabled(UndocumentedPublicModule, TextRange::default());
            false
        }
        Definition::Module(Module {
            kind: ModuleKind::Package,
            ..
        }) => {
            checker.report_diagnostic_if_enabled(UndocumentedPublicPackage, TextRange::default());
            false
        }
        Definition::Member(Member {
            kind: MemberKind::Class(class),
            ..
        }) => {
            checker.report_diagnostic_if_enabled(UndocumentedPublicClass, class.identifier());
            false
        }
        Definition::Member(Member {
            kind: MemberKind::NestedClass(function),
            ..
        }) => {
            checker
                .report_diagnostic_if_enabled(UndocumentedPublicNestedClass, function.identifier());
            false
        }
        Definition::Member(Member {
            kind: MemberKind::Function(function) | MemberKind::NestedFunction(function),
            ..
        }) => {
            // `@overload` signatures are exempt; only the implementation
            // needs a docstring.
            if is_overload(&function.decorator_list, checker.semantic()) {
                true
            } else {
                checker.report_diagnostic_if_enabled(
                    UndocumentedPublicFunction,
                    function.identifier(),
                );
                false
            }
        }
        Definition::Member(Member {
            kind: MemberKind::Method(function),
            ..
        }) => {
            // `@overload` and `@override` methods are exempt entirely.
            if is_overload(&function.decorator_list, checker.semantic())
                || is_override(&function.decorator_list, checker.semantic())
            {
                true
            } else if is_init(&function.name) {
                checker.report_diagnostic_if_enabled(UndocumentedPublicInit, function.identifier());
                true
            } else if is_new(&function.name) || is_call(&function.name) {
                // `__new__` and `__call__` are matched *before* the generic
                // magic-method check below, so they are reported via
                // `UndocumentedPublicMethod` rather than
                // `UndocumentedMagicMethod`. Do not reorder these branches.
                checker
                    .report_diagnostic_if_enabled(UndocumentedPublicMethod, function.identifier());
                true
            } else if is_magic(&function.name) {
                checker
                    .report_diagnostic_if_enabled(UndocumentedMagicMethod, function.identifier());
                true
            } else {
                checker
                    .report_diagnostic_if_enabled(UndocumentedPublicMethod, function.identifier());
                true
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/mod.rs | crates/ruff_linter/src/rules/pydocstyle/rules/mod.rs | pub(crate) use backslashes::*;
pub(crate) use blank_after_summary::*;
pub(crate) use blank_before_after_class::*;
pub(crate) use blank_before_after_function::*;
pub(crate) use capitalized::*;
pub(crate) use ends_with_period::*;
pub(crate) use ends_with_punctuation::*;
pub(crate) use if_needed::*;
pub(crate) use indent::*;
pub(crate) use multi_line_summary_start::*;
pub(crate) use newline_after_last_paragraph::*;
pub(crate) use no_signature::*;
pub(crate) use no_surrounding_whitespace::*;
pub(crate) use non_imperative_mood::*;
pub(crate) use not_empty::*;
pub(crate) use not_missing::*;
pub(crate) use one_liner::*;
pub(crate) use sections::*;
pub(crate) use starts_with_this::*;
pub(crate) use triple_quotes::*;
mod backslashes;
mod blank_after_summary;
mod blank_before_after_class;
mod blank_before_after_function;
mod capitalized;
mod ends_with_period;
mod ends_with_punctuation;
mod if_needed;
mod indent;
mod multi_line_summary_start;
mod newline_after_last_paragraph;
mod no_signature;
mod no_surrounding_whitespace;
mod non_imperative_mood;
mod not_empty;
mod not_missing;
mod one_liner;
mod sections;
mod starts_with_this;
mod triple_quotes;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/blank_after_summary.rs | crates/ruff_linter/src/rules/pydocstyle/rules/blank_after_summary.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_source_file::{UniversalNewlineIterator, UniversalNewlines};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for docstring summary lines that are not separated from the docstring
/// description by one blank line.
///
/// ## Why is this bad?
/// [PEP 257] recommends that multi-line docstrings consist of "a summary line
/// just like a one-line docstring, followed by a blank line, followed by a
/// more elaborate description."
///
/// ## Example
/// ```python
/// def sort_list(l: list[int]) -> list[int]:
/// """Return a sorted copy of the list.
/// Sort the list in ascending order and return a copy of the
/// result using the bubble sort algorithm.
/// """
/// ```
///
/// Use instead:
/// ```python
/// def sort_list(l: list[int]) -> list[int]:
/// """Return a sorted copy of the list.
///
/// Sort the list in ascending order and return a copy of the
/// result using the bubble sort algorithm.
/// """
/// ```
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
///
/// [PEP 257]: https://peps.python.org/pep-0257/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.68")]
pub(crate) struct MissingBlankLineAfterSummary {
    // Number of blank lines actually found between the summary line and the
    // description (0 means the description follows the summary immediately).
    num_lines: usize,
}
impl Violation for MissingBlankLineAfterSummary {
    /// A fix is only offered when there are extra blank lines to collapse
    /// (see `blank_after_summary`: it fixes `blanks_count > 1`, not 0).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        let MissingBlankLineAfterSummary { num_lines } = self;
        // Omit the "(found N)" suffix when no blank lines were found at all.
        if *num_lines == 0 {
            "1 blank line required between summary line and description".to_string()
        } else {
            format!(
                "1 blank line required between summary line and description (found {num_lines})"
            )
        }
    }
    fn fix_title(&self) -> Option<String> {
        Some("Insert single blank line".to_string())
    }
}
/// D205
///
/// Checks that exactly one blank line separates the summary (first) line of a
/// multi-line docstring from the rest of its body. Offers a fix only when
/// there are *too many* blank lines; a missing blank line is reported but not
/// fixed here.
pub(crate) fn blank_after_summary(checker: &Checker, docstring: &Docstring) {
    let body = docstring.body();
    // Only triple-quoted docstrings can meaningfully span multiple lines.
    if !docstring.is_triple_quoted() {
        return;
    }
    // Count the total number of lines inspected and the run of blank lines
    // immediately following the summary line, stopping at the first
    // non-blank line after the summary.
    let mut lines_count: usize = 1;
    let mut blanks_count = 0;
    for line in body.trim().universal_newlines().skip(1) {
        lines_count += 1;
        if line.trim().is_empty() {
            blanks_count += 1;
        } else {
            break;
        }
    }
    // Violation: a multi-line docstring whose summary is not followed by
    // exactly one blank line.
    if lines_count > 1 && blanks_count != 1 {
        let mut diagnostic = checker.report_diagnostic(
            MissingBlankLineAfterSummary {
                num_lines: blanks_count,
            },
            docstring.range(),
        );
        // Only the "too many blank lines" case is fixable: collapse the run
        // of blank lines down to a single line ending.
        if blanks_count > 1 {
            let mut lines = UniversalNewlineIterator::with_offset(&body, body.start());
            let mut summary_end = body.start();
            // Find the "summary" line (defined as the first non-blank line).
            for line in lines.by_ref() {
                if !line.trim().is_empty() {
                    summary_end = line.full_end();
                    break;
                }
            }
            // Find the last blank line
            let mut blank_end = summary_end;
            for line in lines {
                if !line.trim().is_empty() {
                    blank_end = line.start();
                    break;
                }
            }
            // Insert one blank line after the summary (replacing any existing lines).
            diagnostic.set_fix(Fix::safe_edit(Edit::replacement(
                checker.stylist().line_ending().to_string(),
                summary_end,
                blank_end,
            )));
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/no_surrounding_whitespace.rs | crates/ruff_linter/src/rules/pydocstyle/rules/no_surrounding_whitespace.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_source_file::NewlineWithTrailingNewline;
use ruff_text_size::Ranged;
use ruff_text_size::{TextLen, TextRange};
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::{Edit, Fix, FixAvailability, Violation};
use crate::rules::pydocstyle::helpers::ends_with_backslash;
/// ## What it does
/// Checks for surrounding whitespace in docstrings.
///
/// ## Why is this bad?
/// Extra whitespace between the docstring text and its quotes adds no
/// information and makes docstrings inconsistent with one another, so it
/// should be removed.
///
/// ## Example
/// ```python
/// def factorial(n: int) -> int:
/// """ Return the factorial of n. """
/// ```
///
/// Use instead:
/// ```python
/// def factorial(n: int) -> int:
/// """Return the factorial of n."""
/// ```
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.68")]
pub(crate) struct SurroundingWhitespace;
impl Violation for SurroundingWhitespace {
    /// The fix is withheld when trimming would leave a quote character or a
    /// trailing backslash adjacent to the enclosing quotes (see
    /// `no_surrounding_whitespace`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        "No whitespaces allowed surrounding docstring text".to_string()
    }
    fn fix_title(&self) -> Option<String> {
        Some("Trim surrounding whitespace".to_string())
    }
}
/// D210
///
/// Flags (and, when safe, trims) whitespace surrounding the text on the first
/// line of a docstring.
pub(crate) fn no_surrounding_whitespace(checker: &Checker, docstring: &Docstring) {
    let body = docstring.body();

    // Only the first line of the body is inspected.
    let first_line = match NewlineWithTrailingNewline::from(body.as_str()).next() {
        Some(line) => line,
        None => return,
    };
    let stripped = first_line.trim();

    // Nothing to report for blank lines, or lines that carry no surrounding
    // whitespace to begin with.
    if stripped.is_empty() || first_line == stripped {
        return;
    }

    let mut diagnostic = checker.report_diagnostic(SurroundingWhitespace, docstring.range());

    // Trimming must not bring a quote character (which could form an invalid
    // run with the enclosing quotes) or a trailing backslash up against the
    // closing quotes; in those cases the diagnostic is reported without a fix.
    let quote_char = docstring.quote_style().as_char();
    let fix_is_safe = !stripped.starts_with(quote_char)
        && !stripped.ends_with(quote_char)
        && !ends_with_backslash(stripped);
    if fix_is_safe {
        diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
            stripped.to_string(),
            TextRange::at(body.start(), first_line.text_len()),
        )));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/newline_after_last_paragraph.rs | crates/ruff_linter/src/rules/pydocstyle/rules/newline_after_last_paragraph.rs | use ruff_text_size::{TextLen, TextSize};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::docstrings::clean_space;
use ruff_source_file::{NewlineWithTrailingNewline, UniversalNewlines};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for multi-line docstrings whose closing quotes are not on their
/// own line.
///
/// ## Why is this bad?
/// [PEP 257] recommends that the closing quotes of a multi-line docstring be
/// on their own line, for consistency and compatibility with documentation
/// tools that may need to parse the docstring.
///
/// ## Example
/// ```python
/// def sort_list(l: list[int]) -> list[int]:
/// """Return a sorted copy of the list.
///
/// Sort the list in ascending order and return a copy of the result using the
/// bubble sort algorithm."""
/// ```
///
/// Use instead:
/// ```python
/// def sort_list(l: list[int]) -> list[int]:
/// """Return a sorted copy of the list.
///
/// Sort the list in ascending order and return a copy of the result using the bubble
/// sort algorithm.
/// """
/// ```
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
///
/// [PEP 257]: https://peps.python.org/pep-0257/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.68")]
pub(crate) struct NewLineAfterLastParagraph;
impl AlwaysFixableViolation for NewLineAfterLastParagraph {
    /// Always fixable: the closing quotes can always be moved onto their own
    /// line (see `newline_after_last_paragraph`).
    #[derive_message_formats]
    fn message(&self) -> String {
        "Multi-line docstring closing quotes should be on a separate line".to_string()
    }
    fn fix_title(&self) -> String {
        "Move closing quotes to new line".to_string()
    }
}
/// D209
///
/// Checks that the closing quotes of a multi-line docstring sit on their own
/// line, and moves them there when they do not.
pub(crate) fn newline_after_last_paragraph(checker: &Checker, docstring: &Docstring) {
    let contents = docstring.contents();
    let body = docstring.body();
    // Only triple-quoted docstrings can meaningfully span multiple lines.
    if !docstring.is_triple_quoted() {
        return;
    }
    // Count non-blank lines; once a second one is seen, the docstring is
    // multi-line, so inspect its final line and stop.
    let mut line_count = 0;
    for line in NewlineWithTrailingNewline::from(body.as_str()) {
        if !line.trim().is_empty() {
            line_count += 1;
        }
        if line_count > 1 {
            if let Some(last_line) = contents
                .universal_newlines()
                .last()
                .map(|l| l.as_str().trim())
            {
                // If the last line is exactly the closing triple quotes, they
                // are already on their own line — nothing to do.
                if last_line != "\"\"\"" && last_line != "'''" {
                    let mut diagnostic =
                        checker.report_diagnostic(NewLineAfterLastParagraph, docstring.range());
                    // Insert a newline just before the end-quote(s).
                    // Both triple-quote closers are three characters long, so
                    // either literal works to measure the closer's width.
                    let num_trailing_quotes = "'''".text_len();
                    // Measure any whitespace sitting between the paragraph
                    // text and the closing quotes: it gets replaced too.
                    let num_trailing_spaces: TextSize = last_line
                        .chars()
                        .rev()
                        .skip(usize::from(num_trailing_quotes))
                        .take_while(|c| c.is_whitespace())
                        .map(TextLen::text_len)
                        .sum();
                    // Replacement text: a line break followed by the
                    // docstring's own indentation, so the moved quotes align.
                    let content = format!(
                        "{}{}",
                        checker.stylist().line_ending().as_str(),
                        clean_space(docstring.compute_indentation())
                    );
                    diagnostic.set_fix(Fix::safe_edit(Edit::replacement(
                        content,
                        docstring.end() - num_trailing_quotes - num_trailing_spaces,
                        docstring.end() - num_trailing_quotes,
                    )));
                }
            }
            return;
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/sections.rs | crates/ruff_linter/src/rules/pydocstyle/rules/sections.rs | use itertools::Itertools;
use regex::Regex;
use rustc_hash::FxHashSet;
use std::sync::LazyLock;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::Parameter;
use ruff_python_ast::docstrings::{clean_space, leading_space};
use ruff_python_ast::helpers::map_subscript;
use ruff_python_ast::identifier::Identifier;
use ruff_python_semantic::analyze::visibility::is_staticmethod;
use ruff_python_trivia::textwrap::dedent;
use ruff_source_file::NewlineWithTrailingNewline;
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::docstrings::sections::{SectionContext, SectionContexts, SectionKind};
use crate::docstrings::styles::SectionStyle;
use crate::registry::Rule;
use crate::rules::pydocstyle::helpers::find_underline;
use crate::rules::pydocstyle::settings::Convention;
use crate::{AlwaysFixableViolation, Violation};
use crate::{Edit, Fix};
/// ## What it does
/// Checks for over-indented sections in docstrings.
///
/// ## Why is this bad?
/// This rule enforces a consistent style for docstrings with multiple
/// sections.
///
/// Multiline docstrings are typically composed of a summary line, followed by
/// a blank line, followed by a series of sections, each with a section header
/// and a section body. The convention is that all sections should use
/// consistent indentation. In each section, the header should match the
/// indentation of the docstring's opening quotes, and the body should be
/// indented one level further.
///
/// This rule is enabled when using the `numpy` and `google` conventions, and
/// disabled when using the `pep257` convention.
///
/// ## Example
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Args:
/// distance: Distance traveled.
/// time: Time spent traveling.
///
/// Returns:
/// Speed as distance divided by time.
///
/// Raises:
/// FasterThanLightError: If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// Use instead:
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Args:
/// distance: Distance traveled.
/// time: Time spent traveling.
///
/// Returns:
/// Speed as distance divided by time.
///
/// Raises:
/// FasterThanLightError: If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.73")]
pub(crate) struct OverindentedSection {
    // Name of the over-indented section header (e.g. "Args", "Returns"),
    // interpolated into the diagnostic message and fix title.
    name: String,
}
impl AlwaysFixableViolation for OverindentedSection {
    #[derive_message_formats]
    fn message(&self) -> String {
        let OverindentedSection { name } = self;
        format!("Section is over-indented (\"{name}\")")
    }
    fn fix_title(&self) -> String {
        let OverindentedSection { name } = self;
        format!("Remove over-indentation from \"{name}\"")
    }
}
/// ## What it does
/// Checks for over-indented section underlines in docstrings.
///
/// ## Why is this bad?
/// This rule enforces a consistent style for multiline numpy-style docstrings,
/// and helps prevent incorrect syntax in docstrings using reStructuredText.
///
/// Multiline numpy-style docstrings are typically composed of a summary line,
/// followed by a blank line, followed by a series of sections. Each section
/// has a section header and a section body, and there should be a series of
/// underline characters in the line following the header. The underline should
/// have the same indentation as the header.
///
/// This rule enforces a consistent style for multiline numpy-style docstrings
/// with sections. If your docstring uses reStructuredText, the rule also
/// helps protect against incorrect reStructuredText syntax, which would cause
/// errors if you tried to use a tool such as Sphinx to generate documentation
/// from the docstring.
///
/// This rule is enabled when using the `numpy` convention, and disabled when
/// using the `google` or `pep257` conventions.
///
/// ## Example
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns
/// -------
/// float
/// Speed as distance divided by time.
///
/// Raises
/// ------
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// Use instead:
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns
/// -------
/// float
/// Speed as distance divided by time.
///
/// Raises
/// ------
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.73")]
pub(crate) struct OverindentedSectionUnderline {
    /// Name of the section whose underline is over-indented, interpolated
    /// into the diagnostic message and fix title.
    name: String,
}
impl AlwaysFixableViolation for OverindentedSectionUnderline {
    /// Diagnostic message naming the section with the over-indented underline.
    #[derive_message_formats]
    fn message(&self) -> String {
        let name = &self.name;
        format!("Section underline is over-indented (\"{name}\")")
    }

    /// Title shown for the automatic fix.
    fn fix_title(&self) -> String {
        format!("Remove over-indentation from \"{}\" underline", self.name)
    }
}
/// ## What it does
/// Checks for section headers in docstrings that do not begin with capital
/// letters.
///
/// ## Why is this bad?
/// For stylistic consistency, all section headers in a docstring should be
/// capitalized.
///
/// Multiline docstrings are typically composed of a summary line, followed by
/// a blank line, followed by a series of sections. Each section typically has
/// a header and a body.
///
/// This rule is enabled when using the `numpy` and `google` conventions, and
/// disabled when using the `pep257` convention.
///
/// ## Example
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// args:
/// distance: Distance traveled.
/// time: Time spent traveling.
///
/// returns:
/// Speed as distance divided by time.
///
/// raises:
/// FasterThanLightError: If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// Use instead:
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Args:
/// distance: Distance traveled.
/// time: Time spent traveling.
///
/// Returns:
/// Speed as distance divided by time.
///
/// Raises:
/// FasterThanLightError: If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.71")]
pub(crate) struct NonCapitalizedSectionName {
    /// Name of the section header that is not capitalized (e.g. "args"),
    /// interpolated into the diagnostic message and fix title.
    name: String,
}
impl AlwaysFixableViolation for NonCapitalizedSectionName {
    /// Diagnostic message naming the uncapitalized section header.
    #[derive_message_formats]
    fn message(&self) -> String {
        let name = &self.name;
        format!("Section name should be properly capitalized (\"{name}\")")
    }

    /// Title shown for the automatic fix.
    fn fix_title(&self) -> String {
        format!("Capitalize \"{}\"", self.name)
    }
}
/// ## What it does
/// Checks for section headers in docstrings that are followed by non-newline
/// characters.
///
/// ## Why is this bad?
/// This rule enforces a consistent style for multiline numpy-style docstrings.
///
/// Multiline numpy-style docstrings are typically composed of a summary line,
/// followed by a blank line, followed by a series of sections. Each section
/// has a section header and a section body. The section header should be
/// followed by a newline, rather than by some other character (like a colon).
///
/// This rule is enabled when using the `numpy` convention, and disabled
/// when using the `google` or `pep257` conventions.
///
/// ## Example
/// ```python
/// # The `Parameters`, `Returns` and `Raises` section headers are all followed
/// # by a colon in this function's docstring:
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters:
/// -----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns:
/// --------
/// float
/// Speed as distance divided by time.
///
/// Raises:
/// -------
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// Use instead:
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns
/// -------
/// float
/// Speed as distance divided by time.
///
/// Raises
/// ------
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.71")]
pub(crate) struct MissingNewLineAfterSectionName {
    /// Name of the section header that is not followed by a newline,
    /// interpolated into the diagnostic message and fix title.
    name: String,
}
impl AlwaysFixableViolation for MissingNewLineAfterSectionName {
    /// Diagnostic message naming the section lacking a trailing newline.
    #[derive_message_formats]
    fn message(&self) -> String {
        let name = &self.name;
        format!("Section name should end with a newline (\"{name}\")")
    }

    /// Title shown for the automatic fix.
    fn fix_title(&self) -> String {
        format!("Add newline after \"{}\"", self.name)
    }
}
/// ## What it does
/// Checks for section headers in docstrings that are not followed by
/// underlines.
///
/// ## Why is this bad?
/// This rule enforces a consistent style for multiline numpy-style docstrings,
/// and helps prevent incorrect syntax in docstrings using reStructuredText.
///
/// Multiline numpy-style docstrings are typically composed of a summary line,
/// followed by a blank line, followed by a series of sections. Each section
/// has a section header and a section body, and the header should be followed
/// by a series of underline characters in the following line.
///
/// This rule enforces a consistent style for multiline numpy-style docstrings
/// with sections. If your docstring uses reStructuredText, the rule also
/// helps protect against incorrect reStructuredText syntax, which would cause
/// errors if you tried to use a tool such as Sphinx to generate documentation
/// from the docstring.
///
/// This rule is enabled when using the `numpy` convention, and disabled
/// when using the `google` or `pep257` conventions.
///
/// ## Example
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
///
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns
///
/// float
/// Speed as distance divided by time.
///
/// Raises
///
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// Use instead:
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns
/// -------
/// float
/// Speed as distance divided by time.
///
/// Raises
/// ------
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.71")]
pub(crate) struct MissingDashedUnderlineAfterSection {
    /// Name of the section header missing its dashed underline, interpolated
    /// into the diagnostic message and fix title.
    name: String,
}
impl AlwaysFixableViolation for MissingDashedUnderlineAfterSection {
    /// Diagnostic message naming the section lacking a dashed underline.
    #[derive_message_formats]
    fn message(&self) -> String {
        let name = &self.name;
        format!("Missing dashed underline after section (\"{name}\")")
    }

    /// Title shown for the automatic fix.
    fn fix_title(&self) -> String {
        format!("Add dashed line under \"{}\"", self.name)
    }
}
/// ## What it does
/// Checks for section underlines in docstrings that are not on the line
/// immediately following the section name.
///
/// ## Why is this bad?
/// This rule enforces a consistent style for multiline numpy-style docstrings,
/// and helps prevent incorrect syntax in docstrings using reStructuredText.
///
/// Multiline numpy-style docstrings are typically composed of a summary line,
/// followed by a blank line, followed by a series of sections. Each section
/// has a header and a body. There should be a series of underline characters
/// in the line immediately below the header.
///
/// This rule enforces a consistent style for multiline numpy-style docstrings
/// with sections. If your docstring uses reStructuredText, the rule also
/// helps protect against incorrect reStructuredText syntax, which would cause
/// errors if you tried to use a tool such as Sphinx to generate documentation
/// from the docstring.
///
/// This rule is enabled when using the `numpy` convention, and disabled
/// when using the `google` or `pep257` conventions.
///
/// ## Example
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
///
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns
///
/// -------
/// float
/// Speed as distance divided by time.
///
/// Raises
///
/// ------
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// Use instead:
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns
/// -------
/// float
/// Speed as distance divided by time.
///
/// Raises
/// ------
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.71")]
pub(crate) struct MissingSectionUnderlineAfterName {
    /// Name of the section whose underline is not on the line immediately
    /// after the header, interpolated into the message and fix title.
    name: String,
}
impl AlwaysFixableViolation for MissingSectionUnderlineAfterName {
    /// Diagnostic message naming the section with a misplaced underline.
    #[derive_message_formats]
    fn message(&self) -> String {
        let name = &self.name;
        format!("Section underline should be in the line following the section's name (\"{name}\")")
    }

    /// Title shown for the automatic fix.
    fn fix_title(&self) -> String {
        format!("Add underline to \"{}\"", self.name)
    }
}
/// ## What it does
/// Checks for section underlines in docstrings that do not match the length of
/// the corresponding section header.
///
/// ## Why is this bad?
/// This rule enforces a consistent style for multiline numpy-style docstrings,
/// and helps prevent incorrect syntax in docstrings using reStructuredText.
///
/// Multiline numpy-style docstrings are typically composed of a summary line,
/// followed by a blank line, followed by a series of sections. Each section
/// has a section header and a section body, and there should be a series of
/// underline characters in the line following the header. The length of the
/// underline should exactly match the length of the section header.
///
/// This rule enforces a consistent style for multiline numpy-style docstrings
/// with sections. If your docstring uses reStructuredText, the rule also
/// helps protect against incorrect reStructuredText syntax, which would cause
/// errors if you tried to use a tool such as Sphinx to generate documentation
/// from the docstring.
///
/// This rule is enabled when using the `numpy` convention, and disabled
/// when using the `google` or `pep257` conventions.
///
/// ## Example
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ---
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns
/// ---
/// float
/// Speed as distance divided by time.
///
/// Raises
/// ---
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// Use instead:
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns
/// -------
/// float
/// Speed as distance divided by time.
///
/// Raises
/// ------
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.71")]
pub(crate) struct MismatchedSectionUnderlineLength {
    /// Name of the section whose underline length does not match the header,
    /// interpolated into the diagnostic message and fix title.
    name: String,
}
impl AlwaysFixableViolation for MismatchedSectionUnderlineLength {
    /// Diagnostic message naming the section with the mismatched underline.
    #[derive_message_formats]
    fn message(&self) -> String {
        let name = &self.name;
        format!("Section underline should match the length of its name (\"{name}\")")
    }

    /// Title shown for the automatic fix.
    fn fix_title(&self) -> String {
        format!("Adjust underline length to match \"{}\"", self.name)
    }
}
/// ## What it does
/// Checks for docstring sections that are not separated by a single blank
/// line.
///
/// ## Why is this bad?
/// This rule enforces consistency in your docstrings, and helps ensure
/// compatibility with documentation tooling.
///
/// Multiline docstrings are typically composed of a summary line, followed by
/// a blank line, followed by a series of sections, each with a section header
/// and a section body. If a multiline numpy-style or Google-style docstring
/// consists of multiple sections, each section should be separated by a single
/// blank line.
///
/// This rule is enabled when using the `numpy` and `google` conventions, and
/// disabled when using the `pep257` convention.
///
/// ## Example
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
/// Returns
/// -------
/// float
/// Speed as distance divided by time.
/// Raises
/// ------
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// Use instead:
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns
/// -------
/// float
/// Speed as distance divided by time.
///
/// Raises
/// ------
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.71")]
pub(crate) struct NoBlankLineAfterSection {
    /// Name of the section that is not followed by a blank line, interpolated
    /// into the diagnostic message and fix title.
    name: String,
}
impl AlwaysFixableViolation for NoBlankLineAfterSection {
    /// Diagnostic message naming the section missing a trailing blank line.
    #[derive_message_formats]
    fn message(&self) -> String {
        let name = &self.name;
        format!("Missing blank line after section (\"{name}\")")
    }

    /// Title shown for the automatic fix.
    fn fix_title(&self) -> String {
        format!("Add blank line after \"{}\"", self.name)
    }
}
/// ## What it does
/// Checks for docstring sections that are not separated by a blank line.
///
/// ## Why is this bad?
/// This rule enforces consistency in numpy-style and Google-style docstrings,
/// and helps ensure compatibility with documentation tooling.
///
/// Multiline docstrings are typically composed of a summary line, followed by
/// a blank line, followed by a series of sections, each with a section header
/// and a section body. Sections should be separated by a single blank line.
///
/// This rule is enabled when using the `numpy` and `google` conventions, and
/// disabled when using the `pep257` convention.
///
/// ## Example
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
/// Returns
/// -------
/// float
/// Speed as distance divided by time.
/// Raises
/// ------
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// Use instead:
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns
/// -------
/// float
/// Speed as distance divided by time.
///
/// Raises
/// ------
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.71")]
pub(crate) struct NoBlankLineBeforeSection {
    /// Name of the section that is not preceded by a blank line, interpolated
    /// into the diagnostic message and fix title.
    name: String,
}
impl AlwaysFixableViolation for NoBlankLineBeforeSection {
    /// Diagnostic message naming the section missing a preceding blank line.
    #[derive_message_formats]
    fn message(&self) -> String {
        let name = &self.name;
        format!("Missing blank line before section (\"{name}\")")
    }

    /// Title shown for the automatic fix.
    fn fix_title(&self) -> String {
        format!("Add blank line before \"{}\"", self.name)
    }
}
/// ## What it does
/// Checks for missing blank lines after the last section of a multiline
/// docstring.
///
/// ## Why is this bad?
/// This rule enforces a consistent style for multiline docstrings.
///
/// Multiline docstrings are typically composed of a summary line, followed by
/// a blank line, followed by a series of sections, each with a section header
/// and a section body.
///
/// This rule may not apply to all projects; its applicability is a matter of
/// convention. By default, the rule is disabled when using the `google`,
/// `numpy`, and `pep257` conventions.
///
/// ## Example
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
/// Returns
/// -------
/// float
/// Speed as distance divided by time.
/// Raises
/// ------
/// FasterThanLightError
/// If speed is greater than the speed of light.
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// Use instead:
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns
/// -------
/// float
/// Speed as distance divided by time.
///
/// Raises
/// ------
/// FasterThanLightError
/// If speed is greater than the speed of light.
///
/// """
/// try:
/// return distance / time
/// except ZeroDivisionError as exc:
/// raise FasterThanLightError from exc
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [PEP 287 – reStructuredText Docstring Format](https://peps.python.org/pep-0287/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.71")]
pub(crate) struct MissingBlankLineAfterLastSection {
    /// Name of the final docstring section lacking a trailing blank line,
    /// interpolated into the diagnostic message and fix title.
    name: String,
}
impl AlwaysFixableViolation for MissingBlankLineAfterLastSection {
    /// Diagnostic message naming the last section missing a blank line.
    #[derive_message_formats]
    fn message(&self) -> String {
        let name = &self.name;
        format!("Missing blank line after last section (\"{name}\")")
    }

    /// Title shown for the automatic fix.
    fn fix_title(&self) -> String {
        format!("Add blank line after \"{}\"", self.name)
    }
}
/// ## What it does
/// Checks for docstrings with empty sections.
///
/// ## Why is this bad?
/// An empty section in a multiline docstring likely indicates an unfinished
/// or incomplete docstring.
///
/// Multiline docstrings are typically composed of a summary line, followed by
/// a blank line, followed by a series of sections, each with a section header
/// and a section body. Each section body should be non-empty; empty sections
/// should either have content added to them, or be removed entirely.
///
/// ## Example
/// ```python
/// def calculate_speed(distance: float, time: float) -> float:
/// """Calculate speed as distance divided by time.
///
/// Parameters
/// ----------
/// distance : float
/// Distance traveled.
/// time : float
/// Time spent traveling.
///
/// Returns
/// -------
/// float
/// Speed as distance divided by time.
///
/// Raises
/// ------
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/multi_line_summary_start.rs | crates/ruff_linter/src/rules/pydocstyle/rules/multi_line_summary_start.rs | use std::borrow::Cow;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::str::is_triple_quote;
use ruff_python_semantic::Definition;
use ruff_source_file::{LineRanges, NewlineWithTrailingNewline, UniversalNewlineIterator};
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::registry::Rule;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for docstring summary lines that are not positioned on the first
/// physical line of the docstring.
///
/// ## Why is this bad?
/// [PEP 257] recommends that multi-line docstrings consist of "a summary line
/// just like a one-line docstring, followed by a blank line, followed by a
/// more elaborate description."
///
/// The summary line should be located on the first physical line of the
/// docstring, immediately after the opening quotes.
///
/// This rule may not apply to all projects; its applicability is a matter of
/// convention. By default, this rule is enabled when using the `google`
/// convention, and disabled when using the `numpy` and `pep257` conventions.
///
/// For an alternative, see [D213].
///
/// ## Example
/// ```python
/// def sort_list(l: list[int]) -> list[int]:
/// """
/// Return a sorted copy of the list.
///
/// Sort the list in ascending order and return a copy of the result using the
/// bubble sort algorithm.
/// """
/// ```
///
/// Use instead:
/// ```python
/// def sort_list(l: list[int]) -> list[int]:
/// """Return a sorted copy of the list.
///
/// Sort the list in ascending order and return a copy of the result using the bubble
/// sort algorithm.
/// """
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
///
/// [D213]: https://docs.astral.sh/ruff/rules/multi-line-summary-second-line
/// [PEP 257]: https://peps.python.org/pep-0257
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.69")]
pub(crate) struct MultiLineSummaryFirstLine;

impl AlwaysFixableViolation for MultiLineSummaryFirstLine {
    /// Fixed diagnostic message; this rule carries no per-instance data.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Multi-line docstring summary should start at the first line".to_string()
    }

    /// Title shown for the automatic fix (deleting whitespace between the
    /// opening quotes and the summary text).
    fn fix_title(&self) -> String {
        "Remove whitespace after opening quotes".to_string()
    }
}
/// ## What it does
/// Checks for docstring summary lines that are not positioned on the second
/// physical line of the docstring.
///
/// ## Why is this bad?
/// [PEP 257] recommends that multi-line docstrings consist of "a summary line
/// just like a one-line docstring, followed by a blank line, followed by a
/// more elaborate description."
///
/// The summary line should be located on the second physical line of the
/// docstring, immediately after the opening quotes and the blank line.
///
/// This rule may not apply to all projects; its applicability is a matter of
/// convention. By default, this rule is disabled when using the `google`,
/// `numpy`, and `pep257` conventions.
///
/// For an alternative, see [D212].
///
/// ## Example
/// ```python
/// def sort_list(l: list[int]) -> list[int]:
/// """Return a sorted copy of the list.
///
/// Sort the list in ascending order and return a copy of the result using the
/// bubble sort algorithm.
/// """
/// ```
///
/// Use instead:
/// ```python
/// def sort_list(l: list[int]) -> list[int]:
/// """
/// Return a sorted copy of the list.
///
/// Sort the list in ascending order and return a copy of the result using the bubble
/// sort algorithm.
/// """
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
///
/// [D212]: https://docs.astral.sh/ruff/rules/multi-line-summary-first-line
/// [PEP 257]: https://peps.python.org/pep-0257
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.69")]
pub(crate) struct MultiLineSummarySecondLine;

impl AlwaysFixableViolation for MultiLineSummarySecondLine {
    /// Fixed diagnostic message; this rule carries no per-instance data.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Multi-line docstring summary should start at the second line".to_string()
    }

    /// Title shown for the automatic fix (inserting a line break and
    /// indentation after the opening quotes).
    fn fix_title(&self) -> String {
        "Insert line break and indentation after opening quotes".to_string()
    }
}
/// D212, D213
///
/// Reports `MultiLineSummaryFirstLine` when the docstring's first physical
/// line holds only the opening triple quote (the summary starts later), and
/// `MultiLineSummarySecondLine` when the summary starts on the same line as
/// the opening quotes. Each diagnostic gets an edit that moves the summary to
/// the line the respective rule expects.
pub(crate) fn multi_line_summary_start(checker: &Checker, docstring: &Docstring) {
    let body = docstring.body();
    // Both rules only apply to multi-line docstrings: bail out unless the
    // body has at least two lines.
    if NewlineWithTrailingNewline::from(body.as_str())
        .nth(1)
        .is_none()
    {
        return;
    }
    // Iterate over the raw docstring contents (quotes included), with offsets
    // relative to the source file so edits can use absolute positions.
    let mut content_lines =
        UniversalNewlineIterator::with_offset(docstring.contents(), docstring.start());
    let Some(first_line) = content_lines.next() else {
        return;
    };
    if is_triple_quote(&first_line) {
        // The first line is just the opening triple quote, so the summary
        // starts on a later line.
        if checker.is_rule_enabled(Rule::MultiLineSummaryFirstLine) {
            let mut diagnostic =
                checker.report_diagnostic(MultiLineSummaryFirstLine, docstring.range());
            // Delete until first non-whitespace char.
            for line in content_lines {
                if let Some(end_column) = line.find(|c: char| !c.is_whitespace()) {
                    diagnostic.set_fix(Fix::safe_edit(Edit::deletion(
                        first_line.end(),
                        line.start() + TextSize::try_from(end_column).unwrap(),
                    )));
                    break;
                }
            }
        }
    } else if first_line.as_str().ends_with('\\') {
        // Ignore the edge case whether a single quoted string is multiple lines through an
        // escape (https://github.com/astral-sh/ruff/issues/7139). Single quote docstrings are
        // flagged by D300.
        // ```python
        // "\
        // "
        // ```
    } else {
        // The summary text begins on the same line as the opening quotes.
        if checker.is_rule_enabled(Rule::MultiLineSummarySecondLine) {
            let mut diagnostic =
                checker.report_diagnostic(MultiLineSummarySecondLine, docstring.range());
            let mut indentation = Cow::Borrowed(docstring.compute_indentation());
            let mut fixable = true;
            if !indentation.chars().all(char::is_whitespace) {
                fixable = false;
                // If the docstring isn't on its own line, look at the statement indentation,
                // and add the default indentation to get the "right" level.
                if let Definition::Member(member) = &docstring.definition {
                    let stmt_line_start = checker.locator().line_start(member.start());
                    let stmt_indentation = checker
                        .locator()
                        .slice(TextRange::new(stmt_line_start, member.start()));
                    if stmt_indentation.chars().all(char::is_whitespace) {
                        // Rebuild the indentation from the statement's own
                        // indent plus one configured indent level.
                        let indentation = indentation.to_mut();
                        indentation.clear();
                        indentation.push_str(stmt_indentation);
                        indentation.push_str(checker.stylist().indentation());
                        fixable = true;
                    }
                }
            }
            if fixable {
                // Use replacement instead of insert to trim possible whitespace between leading
                // quote and text.
                let repl = format!(
                    "{}{}{}",
                    checker.stylist().line_ending().as_str(),
                    indentation,
                    first_line
                        .strip_prefix(docstring.opener())
                        .unwrap()
                        .trim_start()
                );
                diagnostic.set_fix(Fix::safe_edit(Edit::replacement(
                    repl,
                    body.start(),
                    first_line.end(),
                )));
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/blank_before_after_class.rs | crates/ruff_linter/src/rules/pydocstyle/rules/blank_before_after_class.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_trivia::{PythonWhitespace, indentation_at_offset};
use ruff_source_file::{Line, LineRanges, UniversalNewlineIterator};
use ruff_text_size::Ranged;
use ruff_text_size::TextRange;
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::registry::Rule;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for docstrings on class definitions that are not preceded by a
/// blank line.
///
/// ## Why is this bad?
/// Use a blank line to separate the docstring from the class definition, for
/// consistency.
///
/// This rule may not apply to all projects; its applicability is a matter of
/// convention. By default, this rule is disabled when using the `google`,
/// `numpy`, and `pep257` conventions.
///
/// For an alternative, see [D211].
///
/// ## Example
///
/// ```python
/// class PhotoMetadata:
/// """Metadata about a photo."""
/// ```
///
/// Use instead:
///
/// ```python
/// class PhotoMetadata:
///
/// """Metadata about a photo."""
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// [D211]: https://docs.astral.sh/ruff/rules/blank-line-before-class
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.70")]
pub(crate) struct IncorrectBlankLineBeforeClass;
// D203: exactly one blank line is required between the `class` line and the
// docstring; the fix inserts it (see `blank_before_after_class`).
impl AlwaysFixableViolation for IncorrectBlankLineBeforeClass {
    #[derive_message_formats]
    fn message(&self) -> String {
        "1 blank line required before class docstring".to_string()
    }
    /// Title shown for the auto-fix.
    fn fix_title(&self) -> String {
        "Insert 1 blank line before class docstring".to_string()
    }
}
/// ## What it does
/// Checks for class methods that are not separated from the class's docstring
/// by a blank line.
///
/// ## Why is this bad?
/// [PEP 257] recommends the use of a blank line to separate a class's
/// docstring from its methods.
///
/// This rule may not apply to all projects; its applicability is a matter of
/// convention. By default, this rule is enabled when using the `numpy` and `pep257`
/// conventions, and disabled when using the `google` convention.
///
/// ## Example
/// ```python
/// class PhotoMetadata:
/// """Metadata about a photo."""
/// def __init__(self, file: Path):
/// ...
/// ```
///
/// Use instead:
/// ```python
/// class PhotoMetadata:
/// """Metadata about a photo."""
///
/// def __init__(self, file: Path):
/// ...
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
///
/// [PEP 257]: https://peps.python.org/pep-0257/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.70")]
pub(crate) struct IncorrectBlankLineAfterClass;
// D204: exactly one blank line is required between the class docstring and the
// class body that follows it (see `blank_before_after_class`).
impl AlwaysFixableViolation for IncorrectBlankLineAfterClass {
    #[derive_message_formats]
    fn message(&self) -> String {
        "1 blank line required after class docstring".to_string()
    }
    /// Title shown for the auto-fix.
    fn fix_title(&self) -> String {
        "Insert 1 blank line after class docstring".to_string()
    }
}
/// ## What it does
/// Checks for docstrings on class definitions that are preceded by a blank
/// line.
///
/// ## Why is this bad?
/// Avoid introducing any blank lines between a class definition and its
/// docstring, for consistency.
///
/// This rule may not apply to all projects; its applicability is a matter of
/// convention. By default, this rule is enabled when using the `google`,
/// `numpy`, and `pep257` conventions.
///
/// For an alternative, see [D203].
///
/// ## Example
///
/// ```python
/// class PhotoMetadata:
///
/// """Metadata about a photo."""
/// ```
///
/// Use instead:
///
/// ```python
/// class PhotoMetadata:
/// """Metadata about a photo."""
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// [D203]: https://docs.astral.sh/ruff/rules/incorrect-blank-line-before-class
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.70")]
pub(crate) struct BlankLineBeforeClass;
// D211: no blank lines are allowed between the `class` line and the docstring;
// the fix deletes them (see `blank_before_after_class`). Mutually exclusive
// with D203 by convention.
impl AlwaysFixableViolation for BlankLineBeforeClass {
    #[derive_message_formats]
    fn message(&self) -> String {
        "No blank lines allowed before class docstring".to_string()
    }
    /// Title shown for the auto-fix.
    fn fix_title(&self) -> String {
        "Remove blank line(s) before class docstring".to_string()
    }
}
/// D203, D204, D211
///
/// Checks the blank lines surrounding a class docstring: D203/D211 govern the
/// lines between the `class` statement and the docstring, D204 the lines
/// between the docstring and the class body.
pub(crate) fn blank_before_after_class(checker: &Checker, docstring: &Docstring) {
    let Some(class) = docstring.definition.as_class_def() else {
        return;
    };
    // Special-case: the docstring is on the same line as the class. For example:
    // ```python
    // class PhotoMetadata: """Metadata about a photo."""
    // ```
    let between_range = TextRange::new(class.start(), docstring.start());
    if !checker.locator().contains_line_break(between_range) {
        return;
    }
    if checker.is_rule_enabled(Rule::IncorrectBlankLineBeforeClass)
        || checker.is_rule_enabled(Rule::BlankLineBeforeClass)
    {
        // Walk the lines between the class header and the docstring in
        // reverse, counting the run of blank lines immediately above the
        // docstring and remembering where that run starts.
        let mut lines = UniversalNewlineIterator::with_offset(
            checker.locator().slice(between_range),
            between_range.start(),
        )
        .rev();
        let mut blank_lines_before = 0usize;
        let mut blank_lines_start = lines.next().map(|line| line.start()).unwrap_or_default();
        for line in lines {
            if line.trim().is_empty() {
                blank_lines_before += 1;
                blank_lines_start = line.start();
            } else {
                break;
            }
        }
        if checker.is_rule_enabled(Rule::BlankLineBeforeClass) {
            if blank_lines_before != 0 {
                let mut diagnostic =
                    checker.report_diagnostic(BlankLineBeforeClass, docstring.range());
                // Delete the blank line(s) before the docstring (D211).
                diagnostic.set_fix(Fix::safe_edit(Edit::deletion(
                    blank_lines_start,
                    docstring.line_start(),
                )));
            }
        }
        if checker.is_rule_enabled(Rule::IncorrectBlankLineBeforeClass) {
            if blank_lines_before != 1 {
                let mut diagnostic =
                    checker.report_diagnostic(IncorrectBlankLineBeforeClass, docstring.range());
                // Normalize to exactly one blank line before the docstring (D203).
                diagnostic.set_fix(Fix::safe_edit(Edit::replacement(
                    checker.stylist().line_ending().to_string(),
                    blank_lines_start,
                    docstring.line_start(),
                )));
            }
        }
    }
    if checker.is_rule_enabled(Rule::IncorrectBlankLineAfterClass) {
        let class_after_docstring_range = TextRange::new(docstring.end(), class.end());
        let class_after_docstring = checker.locator().slice(class_after_docstring_range);
        let mut lines = UniversalNewlineIterator::with_offset(
            class_after_docstring,
            class_after_docstring_range.start(),
        );
        // If the class is empty except for comments, we don't need to insert a newline between
        // docstring and no content
        let all_blank_after = lines.clone().all(|line| {
            line.trim_whitespace().is_empty() || line.trim_whitespace_start().starts_with('#')
        });
        if all_blank_after {
            return;
        }
        let first_line = lines.next();
        let mut replacement_start = first_line.as_ref().map(Line::start).unwrap_or_default();
        // Edge case: There is trailing end-of-line content after the docstring, either a statement
        // separated by a semicolon or a comment.
        if let Some(first_line) = &first_line {
            let trailing = first_line.as_str().trim_whitespace_start();
            if let Some(next_statement) = trailing.strip_prefix(';') {
                let indentation = indentation_at_offset(docstring.start(), checker.source())
                    .expect("Own line docstring must have indentation");
                let mut diagnostic =
                    checker.report_diagnostic(IncorrectBlankLineAfterClass, docstring.range());
                let line_ending = checker.stylist().line_ending().as_str();
                // We have to trim the whitespace twice, once before the semicolon above and
                // once after the semicolon here, or we get invalid indents:
                // ```python
                // class Priority:
                //     """Has priorities"""  ; priorities=1
                // ```
                let next_statement = next_statement.trim_whitespace_start();
                // Move the trailing statement onto its own line, preceded by
                // one blank line and the docstring's indentation.
                diagnostic.set_fix(Fix::safe_edit(Edit::replacement(
                    line_ending.to_string() + line_ending + indentation + next_statement,
                    replacement_start,
                    first_line.end(),
                )));
                return;
            } else if trailing.starts_with('#') {
                // Keep the end-of-line comment, start counting empty lines after it
                replacement_start = first_line.end();
            }
        }
        // Count the run of blank lines directly after the docstring.
        let mut blank_lines_after = 0usize;
        let mut blank_lines_end = first_line.as_ref().map_or(docstring.end(), Line::end);
        for line in lines {
            if line.trim_whitespace().is_empty() {
                blank_lines_end = line.end();
                blank_lines_after += 1;
            } else {
                break;
            }
        }
        if blank_lines_after != 1 {
            let mut diagnostic =
                checker.report_diagnostic(IncorrectBlankLineAfterClass, docstring.range());
            // Normalize to exactly one blank line after the docstring,
            // replacing any existing run of blank lines (D204).
            diagnostic.set_fix(Fix::safe_edit(Edit::replacement(
                checker.stylist().line_ending().to_string(),
                replacement_start,
                blank_lines_end,
            )));
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/non_imperative_mood.rs | crates/ruff_linter/src/rules/pydocstyle/rules/non_imperative_mood.rs | use std::sync::LazyLock;
use imperative::Mood;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::analyze::visibility::{is_property, is_test};
use ruff_source_file::UniversalNewlines;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::rules::pydocstyle::helpers::normalize_word;
use crate::rules::pydocstyle::settings::Settings;
// Shared, lazily-initialized imperative-mood classifier from the `imperative` crate.
static MOOD: LazyLock<Mood> = LazyLock::new(Mood::new);
/// ## What it does
/// Checks for docstring first lines that are not in an imperative mood.
///
/// ## Why is this bad?
/// [PEP 257] recommends that the first line of a docstring be written in the
/// imperative mood, for consistency.
///
/// Hint: to rewrite the docstring in the imperative, phrase the first line as
/// if it were a command.
///
/// This rule may not apply to all projects; its applicability is a matter of
/// convention. By default, this rule is enabled when using the `numpy` and
/// `pep257` conventions, and disabled when using the `google` conventions.
///
/// ## Example
/// ```python
/// def average(values: list[float]) -> float:
/// """Returns the mean of the given values."""
/// ```
///
/// Use instead:
/// ```python
/// def average(values: list[float]) -> float:
/// """Return the mean of the given values."""
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
/// - `lint.pydocstyle.property-decorators`
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
///
/// [PEP 257]: https://peps.python.org/pep-0257/
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.228")]
pub(crate) struct NonImperativeMood {
    /// The docstring's first line, echoed verbatim in the diagnostic message.
    first_line: String,
}
// D401 reports only; no automatic fix is attached.
impl Violation for NonImperativeMood {
    #[derive_message_formats]
    fn message(&self) -> String {
        let NonImperativeMood { first_line } = self;
        format!("First line of docstring should be in imperative mood: \"{first_line}\"")
    }
}
/// D401
pub(crate) fn non_imperative_mood(checker: &Checker, docstring: &Docstring, settings: &Settings) {
    // Only function docstrings are subject to the imperative-mood check.
    let Some(function) = docstring.definition.as_function_def() else {
        return;
    };
    // Test functions and properties are exempt.
    if is_test(&function.name)
        || is_property(
            &function.decorator_list,
            settings.property_decorators(),
            checker.semantic(),
        )
    {
        return;
    }
    let body = docstring.body();
    // The summary is the first line of the docstring body, ignoring
    // surrounding whitespace.
    let Some(line) = body.trim().universal_newlines().next() else {
        return;
    };
    let summary = line.as_str().trim();
    // Normalize the leading word to lower-case before the mood lookup.
    let Some(word) = summary.split_whitespace().next() else {
        return;
    };
    let normalized = normalize_word(word);
    if normalized.is_empty() {
        return;
    }
    // Only flag words the classifier positively identifies as non-imperative;
    // an inconclusive result (`None`) is not reported.
    if MOOD.is_imperative(&normalized) == Some(false) {
        checker.report_diagnostic(
            NonImperativeMood {
                first_line: summary.to_string(),
            },
            docstring.range(),
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/capitalized.rs | crates/ruff_linter/src/rules/pydocstyle/rules/capitalized.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use ruff_text_size::{TextLen, TextRange};
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for docstrings that do not start with a capital letter.
///
/// ## Why is this bad?
/// The first non-whitespace character in a docstring should be
/// capitalized for grammatical correctness and consistency.
///
/// ## Example
/// ```python
/// def average(values: list[float]) -> float:
/// """return the mean of the given values."""
/// ```
///
/// Use instead:
/// ```python
/// def average(values: list[float]) -> float:
/// """Return the mean of the given values."""
/// ```
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.69")]
pub(crate) struct FirstWordUncapitalized {
    /// The docstring's first word, as written.
    first_word: String,
    /// The same word with its first character upper-cased.
    capitalized_word: String,
}
// D403: always fixable by replacing the first word with its capitalized form
// (see `capitalized`).
impl AlwaysFixableViolation for FirstWordUncapitalized {
    #[derive_message_formats]
    fn message(&self) -> String {
        format!(
            "First word of the docstring should be capitalized: `{}` -> `{}`",
            self.first_word, self.capitalized_word
        )
    }
    fn fix_title(&self) -> String {
        format!(
            "Capitalize `{}` to `{}`",
            self.first_word, self.capitalized_word
        )
    }
}
/// D403
pub(crate) fn capitalized(checker: &Checker, docstring: &Docstring) {
    // Only function docstrings are checked.
    if docstring.definition.as_function_def().is_none() {
        return;
    }
    let body = docstring.body();
    let stripped = body.trim_start();
    // Isolate the first whitespace-delimited word, dropping any trailing
    // sentence punctuation.
    let first_word = match stripped.find(char::is_whitespace) {
        Some(idx) => &stripped[..idx],
        None => stripped,
    }
    .trim_end_matches(['.', '!', '?']);
    let mut rest = first_word.chars();
    let Some(first_char) = rest.next() else {
        return;
    };
    // Like pydocstyle, we only support ASCII for now.
    if !first_char.is_ascii() {
        return;
    }
    let upper = first_char.to_ascii_uppercase();
    if upper == first_char {
        // Already capitalized (or not a letter at all).
        return;
    }
    // Bail out unless the remainder looks like a plain (possibly contracted)
    // ASCII word.
    if !first_word
        .chars()
        .skip(1)
        .all(|c| c.is_ascii_alphabetic() || c == '\'')
    {
        return;
    }
    let capitalized_word = format!("{upper}{}", rest.as_str());
    let leading_whitespace_len = body.text_len() - stripped.text_len();
    let mut diagnostic = checker.report_diagnostic(
        FirstWordUncapitalized {
            first_word: first_word.to_string(),
            capitalized_word: capitalized_word.clone(),
        },
        docstring.range(),
    );
    // Replace exactly the first word, offset past any leading whitespace.
    diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
        capitalized_word,
        TextRange::at(body.start() + leading_whitespace_len, first_word.text_len()),
    )));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/if_needed.rs | crates/ruff_linter/src/rules/pydocstyle/rules/if_needed.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::identifier::Identifier;
use ruff_python_semantic::analyze::visibility::is_overload;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
/// ## What it does
/// Checks for `@overload` function definitions that contain a docstring.
///
/// ## Why is this bad?
/// The `@overload` decorator is used to define multiple compatible signatures
/// for a given function, to support type-checking. A series of `@overload`
/// definitions should be followed by a single non-decorated definition that
/// contains the implementation of the function.
///
/// `@overload` function definitions should not contain a docstring; instead,
/// the docstring should be placed on the non-decorated definition that contains
/// the implementation.
///
/// ## Example
///
/// ```python
/// from typing import overload
///
///
/// @overload
/// def factorial(n: int) -> int:
/// """Return the factorial of n."""
///
///
/// @overload
/// def factorial(n: float) -> float:
/// """Return the factorial of n."""
///
///
/// def factorial(n):
/// """Return the factorial of n."""
///
///
/// factorial.__doc__ # "Return the factorial of n."
/// ```
///
/// Use instead:
///
/// ```python
/// from typing import overload
///
///
/// @overload
/// def factorial(n: int) -> int: ...
///
///
/// @overload
/// def factorial(n: float) -> float: ...
///
///
/// def factorial(n):
/// """Return the factorial of n."""
///
///
/// factorial.__doc__ # "Return the factorial of n."
/// ```
///
/// ## Options
///
/// - `lint.pydocstyle.ignore-decorators`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [Python documentation: `typing.overload`](https://docs.python.org/3/library/typing.html#typing.overload)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.71")]
pub(crate) struct OverloadWithDocstring;
// D418 reports only; no automatic fix is attached — the docstring should be
// moved to the implementation by hand.
impl Violation for OverloadWithDocstring {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Function decorated with `@overload` shouldn't contain a docstring".to_string()
    }
}
/// D418
pub(crate) fn if_needed(checker: &Checker, docstring: &Docstring) {
    // Only function definitions can carry the `@overload` decorator.
    if let Some(function) = docstring.definition.as_function_def() {
        if is_overload(&function.decorator_list, checker.semantic()) {
            checker.report_diagnostic(OverloadWithDocstring, function.identifier());
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pydocstyle/rules/ends_with_punctuation.rs | crates/ruff_linter/src/rules/pydocstyle/rules/ends_with_punctuation.rs | use ruff_text_size::TextLen;
use strum::IntoEnumIterator;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_source_file::{UniversalNewlineIterator, UniversalNewlines};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::docstrings::Docstring;
use crate::docstrings::sections::SectionKind;
use crate::{Edit, Fix, FixAvailability, Violation};
use crate::rules::pydocstyle::helpers::logical_line;
/// ## What it does
/// Checks for docstrings in which the first line does not end in a punctuation
/// mark, such as a period, question mark, or exclamation point.
///
/// ## Why is this bad?
/// The first line of a docstring should end with a period, question mark, or
/// exclamation point, for grammatical correctness and consistency.
///
/// This rule may not apply to all projects; its applicability is a matter of
/// convention. By default, this rule is enabled when using the `google`
/// convention, and disabled when using the `numpy` and `pep257` conventions.
///
/// ## Example
/// ```python
/// def average(values: list[float]) -> float:
/// """Return the mean of the given values"""
/// ```
///
/// Use instead:
/// ```python
/// def average(values: list[float]) -> float:
/// """Return the mean of the given values."""
/// ```
///
/// ## Options
/// - `lint.pydocstyle.convention`
///
/// ## References
/// - [PEP 257 – Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.69")]
pub(crate) struct MissingTerminalPunctuation;
impl Violation for MissingTerminalPunctuation {
    /// The fix is only sometimes available: `ends_with_punctuation` attaches
    /// one unless the line already ends with `:` or `;`.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        "First line should end with a period, question mark, or exclamation point".to_string()
    }
    /// Title shown when the fix is offered.
    fn fix_title(&self) -> Option<String> {
        Some("Add closing punctuation".to_string())
    }
}
/// D415
///
/// Checks that the docstring's summary line ends with `.`, `!`, or `?`.
pub(crate) fn ends_with_punctuation(checker: &Checker, docstring: &Docstring) {
    let body = docstring.body();
    if let Some(first_line) = body.trim().universal_newlines().next() {
        let trimmed = first_line.trim();
        // Avoid false-positives: `:param`, etc. (Sphinx-style field lists).
        for prefix in [":param", ":type", ":raises", ":return", ":rtype"] {
            if trimmed.starts_with(prefix) {
                return;
            }
        }
        // Avoid false-positives: `Args:`, etc. Note that `strip_suffix` yields
        // the text *preceding* the section name (the `suffix` binding is a
        // misnomer): we bail out when the line is exactly the section name,
        // or a `:` immediately followed by it.
        for section_kind in SectionKind::iter() {
            if let Some(suffix) = trimmed.strip_suffix(section_kind.as_str()) {
                if suffix.is_empty() {
                    return;
                }
                if suffix == ":" {
                    return;
                }
            }
        }
    }
    // `logical_line` picks the index of the line holding the summary.
    if let Some(index) = logical_line(body.as_str()) {
        let mut lines = UniversalNewlineIterator::with_offset(&body, body.start()).skip(index);
        let line = lines.next().unwrap();
        let trimmed = line.trim_end();
        if trimmed.ends_with('\\') {
            // Ignore the edge case whether a single quoted string is multiple lines through an
            // escape (https://github.com/astral-sh/ruff/issues/7139). Single quote docstrings are
            // flagged by D300.
            // ```python
            // "\
            // "
            // ```
            return;
        }
        if !trimmed.ends_with(['.', '!', '?']) {
            let mut diagnostic =
                checker.report_diagnostic(MissingTerminalPunctuation, docstring.range());
            // Best-effort fix: avoid adding a period after other punctuation marks.
            if !trimmed.ends_with([':', ';']) {
                diagnostic.set_fix(Fix::unsafe_edit(Edit::insertion(
                    ".".to_string(),
                    line.start() + trimmed.text_len(),
                )));
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_builtins/settings.rs | crates/ruff_linter/src/rules/flake8_builtins/settings.rs | //! Settings for the `flake8-builtins` plugin.
use crate::display_settings;
use ruff_macros::CacheKey;
use std::fmt::{Display, Formatter};
/// Configuration for the `flake8-builtins` rules.
#[derive(Debug, Clone, Default, CacheKey)]
pub struct Settings {
    /// Builtin names that should not be reported as shadowed.
    pub ignorelist: Vec<String>,
    /// Module names exempt from the stdlib-module-shadowing check (A005).
    pub allowed_modules: Vec<String>,
    /// When `true`, apply the stricter variant of the module-shadowing check.
    pub strict_checking: bool,
}
impl Display for Settings {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Delegate field rendering to the shared `display_settings!` macro,
        // grouping everything under the `linter.flake8_builtins` namespace.
        display_settings! {
            formatter = f,
            namespace = "linter.flake8_builtins",
            fields = [
                self.allowed_modules | array,
                self.ignorelist | array,
                self.strict_checking,
            ]
        }
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_builtins/helpers.rs | crates/ruff_linter/src/rules/flake8_builtins/helpers.rs | use ruff_python_ast::PySourceType;
use ruff_python_ast::PythonVersion;
use ruff_python_stdlib::builtins::is_python_builtin;
/// Return `true` if `name` shadows a Python builtin (for the configured
/// Python version and source kind) and is not present in `ignorelist`.
pub(super) fn shadows_builtin(
    name: &str,
    source_type: PySourceType,
    ignorelist: &[String],
    python_version: PythonVersion,
) -> bool {
    // A name is only a shadowing violation when it matches a builtin and the
    // user has not explicitly ignored it.
    is_python_builtin(name, python_version.minor, source_type.is_ipynb())
        && !ignorelist.iter().any(|ignored| ignored == name)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_builtins/mod.rs | crates/ruff_linter/src/rules/flake8_builtins/mod.rs | //! Rules from [flake8-builtins](https://pypi.org/project/flake8-builtins/).
pub(crate) mod helpers;
pub(crate) mod rules;
pub mod settings;
#[cfg(test)]
mod tests {
    use std::path::Path;
    use anyhow::Result;
    use test_case::test_case;
    use crate::registry::Rule;
    use crate::rules::flake8_builtins;
    use crate::settings::LinterSettings;
    use crate::settings::types::PreviewMode;
    use crate::test::{test_path, test_resource_path};
    use crate::{assert_diagnostics, assert_diagnostics_diff};
    use ruff_python_ast::PythonVersion;
    // Snapshot-test each rule against its fixture, with strict checking enabled.
    #[test_case(Rule::BuiltinVariableShadowing, Path::new("A001.py"))]
    #[test_case(Rule::BuiltinArgumentShadowing, Path::new("A002.py"))]
    #[test_case(Rule::BuiltinAttributeShadowing, Path::new("A003.py"))]
    #[test_case(Rule::BuiltinImportShadowing, Path::new("A004.py"))]
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/non_builtin/__init__.py")
    )]
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/logging/__init__.py")
    )]
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/string/__init__.py")
    )]
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/package/bisect.py")
    )]
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/package/collections.pyi")
    )]
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/_abc/__init__.py")
    )]
    #[test_case(Rule::StdlibModuleShadowing, Path::new("A005/modules/package/xml.py"))]
    #[test_case(Rule::BuiltinLambdaArgumentShadowing, Path::new("A006.py"))]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("flake8_builtins").join(path).as_path(),
            &LinterSettings {
                flake8_builtins: flake8_builtins::settings::Settings {
                    strict_checking: true,
                    ..Default::default()
                },
                ..LinterSettings::for_rule(rule_code)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
    // Diff A003 diagnostics between Python 3.13 and 3.14 target versions.
    #[test_case(Rule::BuiltinAttributeShadowing, Path::new("A003.py"))]
    fn deferred_annotations_diff(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!(
            "deferred_annotations_diff_{}_{}",
            rule_code.name(),
            path.to_string_lossy()
        );
        assert_diagnostics_diff!(
            snapshot,
            Path::new("flake8_builtins").join(path).as_path(),
            &LinterSettings {
                unresolved_target_version: PythonVersion::PY313.into(),
                ..LinterSettings::for_rule(rule_code)
            },
            &LinterSettings {
                unresolved_target_version: PythonVersion::PY314.into(),
                ..LinterSettings::for_rule(rule_code)
            },
        );
        Ok(())
    }
    // Snapshot-test A003 with preview mode enabled.
    #[test_case(Rule::BuiltinAttributeShadowing, Path::new("A003.py"))]
    fn preview_rules(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!(
            "preview__{}_{}",
            rule_code.noqa_code(),
            path.to_string_lossy()
        );
        let diagnostics = test_path(
            Path::new("flake8_builtins").join(path).as_path(),
            &LinterSettings {
                preview: PreviewMode::Enabled,
                flake8_builtins: flake8_builtins::settings::Settings {
                    strict_checking: true,
                    ..Default::default()
                },
                ..LinterSettings::for_rule(rule_code)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
    // Exercise A005 with strict checking both enabled and disabled; the
    // snapshot name encodes the `strict` flag.
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/utils/logging.py"),
        true
    )]
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/utils/logging.py"),
        false
    )]
    fn non_strict_checking(rule_code: Rule, path: &Path, strict: bool) -> Result<()> {
        let snapshot = format!(
            "{}_{}_{strict}",
            rule_code.noqa_code(),
            path.to_string_lossy()
        );
        let diagnostics = test_path(
            Path::new("flake8_builtins").join(path).as_path(),
            &LinterSettings {
                flake8_builtins: flake8_builtins::settings::Settings {
                    strict_checking: strict,
                    ..Default::default()
                },
                ..LinterSettings::for_rule(rule_code)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
    /// Test that even with strict checking disabled, a module in `src` will trigger A005
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/utils/logging.py")
    )]
    fn non_strict_checking_src(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!("{}_{}_src", rule_code.noqa_code(), path.to_string_lossy());
        let src = Path::new("fixtures/flake8_builtins");
        let diagnostics = test_path(
            Path::new("flake8_builtins").join(path).as_path(),
            &LinterSettings {
                src: vec![test_resource_path(src.join(path.parent().unwrap()))],
                flake8_builtins: flake8_builtins::settings::Settings {
                    strict_checking: false,
                    ..Default::default()
                },
                ..LinterSettings::for_rule(rule_code)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
    /// Test that even with strict checking disabled, a module in the `project_root` will trigger
    /// A005
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/utils/logging.py")
    )]
    fn non_strict_checking_root(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!("{}_{}_root", rule_code.noqa_code(), path.to_string_lossy());
        let src = Path::new("fixtures/flake8_builtins");
        let diagnostics = test_path(
            Path::new("flake8_builtins").join(path).as_path(),
            &LinterSettings {
                project_root: test_resource_path(src.join(path.parent().unwrap())),
                flake8_builtins: flake8_builtins::settings::Settings {
                    strict_checking: false,
                    ..Default::default()
                },
                ..LinterSettings::for_rule(rule_code)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
    // Verify that names in `ignorelist` (here `id` and `dir`) are not flagged.
    #[test_case(Rule::BuiltinVariableShadowing, Path::new("A001.py"))]
    #[test_case(Rule::BuiltinArgumentShadowing, Path::new("A002.py"))]
    #[test_case(Rule::BuiltinAttributeShadowing, Path::new("A003.py"))]
    #[test_case(Rule::BuiltinImportShadowing, Path::new("A004.py"))]
    #[test_case(Rule::BuiltinLambdaArgumentShadowing, Path::new("A006.py"))]
    fn builtins_ignorelist(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!(
            "{}_{}_builtins_ignorelist",
            rule_code.noqa_code(),
            path.to_string_lossy()
        );
        let diagnostics = test_path(
            Path::new("flake8_builtins").join(path).as_path(),
            &LinterSettings {
                flake8_builtins: super::settings::Settings {
                    ignorelist: vec!["id".to_string(), "dir".to_string()],
                    ..Default::default()
                },
                ..LinterSettings::for_rules(vec![rule_code])
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
    // Verify that modules in `allowed_modules` (here `xml` and `logging`) are
    // exempt from A005 even with strict checking enabled.
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/non_builtin/__init__.py")
    )]
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/logging/__init__.py")
    )]
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/string/__init__.py")
    )]
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/package/bisect.py")
    )]
    #[test_case(
        Rule::StdlibModuleShadowing,
        Path::new("A005/modules/_abc/__init__.py")
    )]
    #[test_case(Rule::StdlibModuleShadowing, Path::new("A005/modules/package/xml.py"))]
    fn builtins_allowed_modules(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!(
            "{}_{}_builtins_allowed_modules",
            rule_code.noqa_code(),
            path.to_string_lossy()
        );
        let diagnostics = test_path(
            Path::new("flake8_builtins").join(path).as_path(),
            &LinterSettings {
                flake8_builtins: super::settings::Settings {
                    allowed_modules: vec!["xml".to_string(), "logging".to_string()],
                    strict_checking: true,
                    ..Default::default()
                },
                ..LinterSettings::for_rules(vec![rule_code])
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
    // NOTE(review): the function name says `py312`, but the test pins PY38 and
    // the snapshot is suffixed `_py38` — confirm which version is intended.
    #[test_case(Rule::BuiltinImportShadowing, Path::new("A004.py"))]
    fn rules_py312(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!("{}_{}_py38", rule_code.noqa_code(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("flake8_builtins").join(path).as_path(),
            &LinterSettings {
                unresolved_target_version: PythonVersion::PY38.into(),
                ..LinterSettings::for_rule(rule_code)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_builtins/rules/builtin_attribute_shadowing.rs | crates/ruff_linter/src/rules/flake8_builtins/rules/builtin_attribute_shadowing.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_semantic::{BindingKind, Scope, ScopeId};
use ruff_source_file::SourceRow;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::preview::is_a003_class_scope_shadowing_expansion_enabled;
use crate::rules::flake8_builtins::helpers::shadows_builtin;
/// ## What it does
/// Checks for class attributes and methods that use the same names as
/// Python builtins.
///
/// ## Why is this bad?
/// Reusing a builtin name for the name of an attribute increases the
/// difficulty of reading and maintaining the code, and can cause
/// non-obvious errors, as readers may mistake the attribute for the
/// builtin and vice versa.
///
/// Since methods and class attributes typically cannot be referenced directly
/// from outside the class scope, this rule only applies to those methods
/// and attributes that both shadow a builtin _and_ are referenced from within
/// the class scope, as in the following example, where the `list[int]` return
/// type annotation resolves to the `list` method, rather than the builtin:
///
/// ```python
/// class Class:
/// @staticmethod
/// def list() -> None:
/// pass
///
/// @staticmethod
/// def repeat(value: int, times: int) -> list[int]:
/// return [value] * times
/// ```
///
/// Builtins can be marked as exceptions to this rule via the
/// [`lint.flake8-builtins.ignorelist`] configuration option, or
/// converted to the appropriate dunder method. Methods decorated with
/// `@typing.override` or `@typing_extensions.override` are also
/// ignored.
///
/// ## Example
/// ```python
/// class Class:
/// @staticmethod
/// def list() -> None:
/// pass
///
/// @staticmethod
/// def repeat(value: int, times: int) -> list[int]:
/// return [value] * times
/// ```
///
/// ## Options
/// - `lint.flake8-builtins.ignorelist`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.48")]
pub(crate) struct BuiltinAttributeShadowing {
kind: Kind,
name: String,
row: SourceRow,
}
impl Violation for BuiltinAttributeShadowing {
    #[derive_message_formats]
    fn message(&self) -> String {
        let BuiltinAttributeShadowing { kind, name, row } = self;
        // Tailor the message to whether the shadowing binding is a class
        // attribute or a method; `row` locates the shadowing definition.
        match kind {
            Kind::Attribute => {
                format!("Python builtin is shadowed by class attribute `{name}` from {row}")
            }
            Kind::Method => {
                format!("Python builtin is shadowed by method `{name}` from {row}")
            }
        }
    }
}
/// A003
///
/// Reports attributes and methods of `class_def` that both shadow a Python
/// builtin and are referenced from within the class scope itself.
pub(crate) fn builtin_attribute_shadowing(
    checker: &Checker,
    scope_id: ScopeId,
    scope: &Scope,
    class_def: &ast::StmtClassDef,
) {
    for (name, binding_id) in scope.all_bindings() {
        let binding = checker.semantic().binding(binding_id);
        // We only care about methods and attributes.
        let kind = match binding.kind {
            BindingKind::Assignment | BindingKind::Annotation => Kind::Attribute,
            BindingKind::FunctionDefinition(_) => Kind::Method,
            _ => continue,
        };
        if shadows_builtin(
            name,
            checker.source_type,
            &checker.settings().flake8_builtins.ignorelist,
            checker.target_version(),
        ) {
            // Ignore explicit overrides.
            // Note: this inspects the *class's* decorator list and exits the
            // whole function on a match, skipping the remaining bindings too.
            if class_def.decorator_list.iter().any(|decorator| {
                checker
                    .semantic()
                    .match_typing_expr(&decorator.expression, "override")
            }) {
                return;
            }
            // Class scopes are special, in that you can only reference a binding defined in a
            // class scope from within the class scope itself. As such, we can safely ignore
            // methods that weren't referenced from within the class scope. In other words, we're
            // only trying to identify shadowing as in:
            // ```python
            // class Class:
            //     @staticmethod
            //     def list() -> None:
            //         pass
            //
            //     @staticmethod
            //     def repeat(value: int, times: int) -> list[int]:
            //         return [value] * times
            // ```
            // In stable, only consider references whose first non-type parent scope is the class
            // scope (e.g., decorators, default args, and attribute initializers).
            // In preview, also consider references from within the class scope.
            let consider_reference = |reference_scope_id: ScopeId| {
                if is_a003_class_scope_shadowing_expansion_enabled(checker.settings()) {
                    if reference_scope_id == scope_id {
                        return true;
                    }
                }
                checker
                    .semantic()
                    .first_non_type_parent_scope_id(reference_scope_id)
                    == Some(scope_id)
            };
            // Emit one diagnostic per qualifying reference, anchored at the
            // reference site but pointing back at the shadowing definition.
            for reference in binding
                .references
                .iter()
                .map(|reference_id| checker.semantic().reference(*reference_id))
                .filter(|reference| consider_reference(reference.scope_id()))
            {
                checker.report_diagnostic(
                    BuiltinAttributeShadowing {
                        kind,
                        name: name.to_string(),
                        row: checker.compute_source_row(binding.start()),
                    },
                    reference.range(),
                );
            }
        }
    }
}
/// Distinguishes the two binding shapes A003 reports on.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum Kind {
    Attribute,
    Method,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_builtins/rules/builtin_variable_shadowing.rs | crates/ruff_linter/src/rules/flake8_builtins/rules/builtin_variable_shadowing.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::TextRange;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_builtins::helpers::shadows_builtin;
/// ## What it does
/// Checks for variable (and function) assignments that use the same names
/// as builtins.
///
/// ## Why is this bad?
/// Reusing a builtin name for the name of a variable increases the
/// difficulty of reading and maintaining the code, and can cause
/// non-obvious errors, as readers may mistake the variable for the
/// builtin and vice versa.
///
/// Builtins can be marked as exceptions to this rule via the
/// [`lint.flake8-builtins.ignorelist`] configuration option.
///
/// ## Example
/// ```python
/// def find_max(list_of_lists):
/// max = 0
/// for flat_list in list_of_lists:
/// for value in flat_list:
/// max = max(max, value) # TypeError: 'int' object is not callable
/// return max
/// ```
///
/// Use instead:
/// ```python
/// def find_max(list_of_lists):
/// result = 0
/// for flat_list in list_of_lists:
/// for value in flat_list:
/// result = max(result, value)
/// return result
/// ```
///
/// ## Options
/// - `lint.flake8-builtins.ignorelist`
///
/// ## References
/// - [_Why is it a bad idea to name a variable `id` in Python?_](https://stackoverflow.com/questions/77552/id-is-a-bad-variable-name-in-python)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.48")]
pub(crate) struct BuiltinVariableShadowing {
name: String,
}
impl Violation for BuiltinVariableShadowing {
#[derive_message_formats]
fn message(&self) -> String {
let BuiltinVariableShadowing { name } = self;
format!("Variable `{name}` is shadowing a Python builtin")
}
}
/// A001
///
/// Reports a variable (or function) binding whose name shadows a Python
/// builtin, unless it is an exempt module dunder or is on the ignorelist.
pub(crate) fn builtin_variable_shadowing(checker: &Checker, name: &str, range: TextRange) {
    // These dunder assignments are conventional and deliberately exempt, as
    // discussed in https://github.com/astral-sh/ruff/issues/16373
    const EXEMPT_DUNDERS: &[&str] = &["__doc__", "__name__", "__loader__", "__package__", "__spec__"];
    if EXEMPT_DUNDERS.contains(&name) {
        return;
    }
    if !shadows_builtin(
        name,
        checker.source_type,
        &checker.settings().flake8_builtins.ignorelist,
        checker.target_version(),
    ) {
        return;
    }
    checker.report_diagnostic(
        BuiltinVariableShadowing {
            name: name.to_string(),
        },
        range,
    );
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_builtins/rules/builtin_import_shadowing.rs | crates/ruff_linter/src/rules/flake8_builtins/rules/builtin_import_shadowing.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::Alias;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_builtins::helpers::shadows_builtin;
/// ## What it does
/// Checks for imports that use the same names as builtins.
///
/// ## Why is this bad?
/// Reusing a builtin for the name of an import increases the difficulty
/// of reading and maintaining the code, and can cause non-obvious errors,
/// as readers may mistake the variable for the builtin and vice versa.
///
/// Builtins can be marked as exceptions to this rule via the
/// [`lint.flake8-builtins.ignorelist`] configuration option.
///
/// ## Example
/// ```python
/// from rich import print
///
/// print("Some message")
/// ```
///
/// Use instead:
/// ```python
/// from rich import print as rich_print
///
/// rich_print("Some message")
/// ```
///
/// or:
/// ```python
/// import rich
///
/// rich.print("Some message")
/// ```
///
/// ## Options
/// - `lint.flake8-builtins.ignorelist`
/// - `target-version`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.8.0")]
pub(crate) struct BuiltinImportShadowing {
name: String,
}
impl Violation for BuiltinImportShadowing {
#[derive_message_formats]
fn message(&self) -> String {
let BuiltinImportShadowing { name } = self;
format!("Import `{name}` is shadowing a Python builtin")
}
}
/// A004
///
/// Reports an import whose bound name shadows a Python builtin.
pub(crate) fn builtin_import_shadowing(checker: &Checker, alias: &Alias) {
    // The effective bound name is the `as` alias when present, otherwise the
    // imported name itself.
    let name = alias.asname.as_ref().unwrap_or(&alias.name);
    if !shadows_builtin(
        name.as_str(),
        checker.source_type,
        &checker.settings().flake8_builtins.ignorelist,
        checker.target_version(),
    ) {
        return;
    }
    let violation = BuiltinImportShadowing {
        name: name.to_string(),
    };
    checker.report_diagnostic(violation, name.range);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_builtins/rules/builtin_lambda_argument_shadowing.rs | crates/ruff_linter/src/rules/flake8_builtins/rules/builtin_lambda_argument_shadowing.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::ExprLambda;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_builtins::helpers::shadows_builtin;
/// ## What it does
/// Checks for lambda arguments that use the same names as Python builtins.
///
/// ## Why is this bad?
/// Reusing a builtin name for the name of a lambda argument increases the
/// difficulty of reading and maintaining the code and can cause
/// non-obvious errors. Readers may mistake the variable for the
/// builtin, and vice versa.
///
/// Builtins can be marked as exceptions to this rule via the
/// [`lint.flake8-builtins.ignorelist`] configuration option.
///
/// ## Options
/// - `lint.flake8-builtins.ignorelist`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.9.0")]
pub(crate) struct BuiltinLambdaArgumentShadowing {
name: String,
}
impl Violation for BuiltinLambdaArgumentShadowing {
#[derive_message_formats]
fn message(&self) -> String {
let BuiltinLambdaArgumentShadowing { name } = self;
format!("Lambda argument `{name}` is shadowing a Python builtin")
}
}
/// A006
///
/// Reports lambda parameters whose names shadow Python builtins.
pub(crate) fn builtin_lambda_argument_shadowing(checker: &Checker, lambda: &ExprLambda) {
    // A lambda with no parameter list has nothing to check.
    let Some(parameters) = lambda.parameters.as_ref() else {
        return;
    };
    let ignorelist = &checker.settings().flake8_builtins.ignorelist;
    for parameter in parameters.iter_non_variadic_params() {
        let name = parameter.name();
        if !shadows_builtin(name, checker.source_type, ignorelist, checker.target_version()) {
            continue;
        }
        checker.report_diagnostic(
            BuiltinLambdaArgumentShadowing {
                name: name.to_string(),
            },
            name.range(),
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_builtins/rules/mod.rs | crates/ruff_linter/src/rules/flake8_builtins/rules/mod.rs | pub(crate) use builtin_argument_shadowing::*;
pub(crate) use builtin_attribute_shadowing::*;
pub(crate) use builtin_import_shadowing::*;
pub(crate) use builtin_lambda_argument_shadowing::*;
pub(crate) use builtin_variable_shadowing::*;
pub(crate) use stdlib_module_shadowing::*;
mod builtin_argument_shadowing;
mod builtin_attribute_shadowing;
mod builtin_import_shadowing;
mod builtin_lambda_argument_shadowing;
mod builtin_variable_shadowing;
mod stdlib_module_shadowing;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_builtins/rules/builtin_argument_shadowing.rs | crates/ruff_linter/src/rules/flake8_builtins/rules/builtin_argument_shadowing.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{Expr, Parameter};
use ruff_python_semantic::analyze::visibility::{is_overload, is_override};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_builtins::helpers::shadows_builtin;
/// ## What it does
/// Checks for function arguments that use the same names as builtins.
///
/// ## Why is this bad?
/// Reusing a builtin name for the name of an argument increases the
/// difficulty of reading and maintaining the code, and can cause
/// non-obvious errors, as readers may mistake the argument for the
/// builtin and vice versa.
///
/// Function definitions decorated with [`@override`][override] or
/// [`@overload`][overload] are exempt from this rule by default.
/// Builtins can be marked as exceptions to this rule via the
/// [`lint.flake8-builtins.ignorelist`] configuration option.
///
/// ## Example
/// ```python
/// def remove_duplicates(list, list2):
/// result = set()
/// for value in list:
/// result.add(value)
/// for value in list2:
/// result.add(value)
/// return list(result) # TypeError: 'list' object is not callable
/// ```
///
/// Use instead:
/// ```python
/// def remove_duplicates(list1, list2):
/// result = set()
/// for value in list1:
/// result.add(value)
/// for value in list2:
/// result.add(value)
/// return list(result)
/// ```
///
/// ## Options
/// - `lint.flake8-builtins.ignorelist`
///
/// ## References
/// - [_Is it bad practice to use a built-in function name as an attribute or method identifier?_](https://stackoverflow.com/questions/9109333/is-it-bad-practice-to-use-a-built-in-function-name-as-an-attribute-or-method-ide)
/// - [_Why is it a bad idea to name a variable `id` in Python?_](https://stackoverflow.com/questions/77552/id-is-a-bad-variable-name-in-python)
///
/// [override]: https://docs.python.org/3/library/typing.html#typing.override
/// [overload]: https://docs.python.org/3/library/typing.html#typing.overload
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.48")]
pub(crate) struct BuiltinArgumentShadowing {
name: String,
}
impl Violation for BuiltinArgumentShadowing {
#[derive_message_formats]
fn message(&self) -> String {
let BuiltinArgumentShadowing { name } = self;
format!("Function argument `{name}` is shadowing a Python builtin")
}
}
/// A002
///
/// Reports function parameters whose names shadow Python builtins, skipping
/// lambda parameters (A006's domain) and `@override`/`@overload` functions.
pub(crate) fn builtin_argument_shadowing(checker: &Checker, parameter: &Parameter) {
    if !shadows_builtin(
        parameter.name(),
        checker.source_type,
        &checker.settings().flake8_builtins.ignorelist,
        checker.target_version(),
    ) {
        return;
    }
    // Ignore parameters in lambda expressions.
    // (That is the domain of A006.)
    if checker
        .semantic()
        .current_expression()
        .is_some_and(Expr::is_lambda_expr)
    {
        return;
    }
    // Ignore `@override` and `@overload` decorated functions, whose
    // signatures are dictated by the overridden/overloaded definition.
    let is_exempt_decorated = checker
        .semantic()
        .current_statement()
        .as_function_def_stmt()
        .is_some_and(|function_def| {
            is_override(&function_def.decorator_list, checker.semantic())
                || is_overload(&function_def.decorator_list, checker.semantic())
        });
    if is_exempt_decorated {
        return;
    }
    checker.report_diagnostic(
        BuiltinArgumentShadowing {
            name: parameter.name.to_string(),
        },
        parameter.name.range(),
    );
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_builtins/rules/stdlib_module_shadowing.rs | crates/ruff_linter/src/rules/flake8_builtins/rules/stdlib_module_shadowing.rs | use std::borrow::Cow;
use std::path::{Component, Path, PathBuf};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{PySourceType, PythonVersion};
use ruff_python_stdlib::path::is_module_file;
use ruff_python_stdlib::sys::is_known_standard_library;
use ruff_text_size::TextRange;
use crate::Violation;
use crate::checkers::ast::LintContext;
use crate::settings::LinterSettings;
/// ## What it does
/// Checks for modules that use the same names as Python standard-library
/// modules.
///
/// ## Why is this bad?
/// Reusing a standard-library module name for the name of a module increases
/// the difficulty of reading and maintaining the code, and can cause
/// non-obvious errors. Readers may mistake the first-party module for the
/// standard-library module and vice versa.
///
/// Standard-library modules can be marked as exceptions to this rule via the
/// [`lint.flake8-builtins.allowed-modules`] configuration option.
///
/// By default, the module path relative to the project root or [`src`] directories is considered,
/// so a top-level `logging.py` or `logging/__init__.py` will clash with the builtin `logging`
/// module, but `utils/logging.py`, for example, will not. With the
/// [`lint.flake8-builtins.strict-checking`] option set to `true`, only the last component
/// of the module name is considered, so `logging.py`, `utils/logging.py`, and
/// `utils/logging/__init__.py` will all trigger the rule.
///
/// This rule is not applied to stub files, as the name of a stub module is out
/// of the control of the author of the stub file. Instead, a stub should aim to
/// faithfully emulate the runtime module it is stubbing.
///
/// As of Python 3.13, errors from modules that use the same name as
/// standard-library modules now display a custom message.
///
/// ## Example
///
/// ```console
/// $ touch random.py
/// $ python3 -c 'from random import choice'
/// Traceback (most recent call last):
/// File "<string>", line 1, in <module>
/// from random import choice
/// ImportError: cannot import name 'choice' from 'random' (consider renaming '/random.py' since it has the same name as the standard library module named 'random' and prevents importing that standard library module)
/// ```
///
/// ## Options
/// - `lint.flake8-builtins.allowed-modules`
/// - `lint.flake8-builtins.strict-checking`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.9.0")]
pub(crate) struct StdlibModuleShadowing {
name: String,
}
impl Violation for StdlibModuleShadowing {
#[derive_message_formats]
fn message(&self) -> String {
let StdlibModuleShadowing { name } = self;
format!("Module `{name}` shadows a Python standard-library module")
}
}
/// A005
///
/// Reports a module whose name collides with a Python standard-library
/// module. In non-strict mode only top-level modules (relative to `src` /
/// project root) are checked; in strict mode only the final path component
/// matters.
pub(crate) fn stdlib_module_shadowing(
    mut path: &Path,
    settings: &LinterSettings,
    target_version: PythonVersion,
    context: &LintContext,
) {
    // Only plain `.py` files are considered (stubs are exempt by design).
    if !PySourceType::try_from_path(path).is_some_and(PySourceType::is_py_file) {
        return;
    }
    // strip src and root prefixes before converting to a fully-qualified module path
    let prefix = get_prefix(settings, path);
    if let Some(Ok(new_path)) = prefix.map(|p| path.strip_prefix(p)) {
        path = new_path;
    }
    // for modules like `modname/__init__.py`, use the parent directory name, otherwise just trim
    // the `.py` extension
    let path = if is_module_file(path) {
        let Some(parent) = path.parent() else { return };
        Cow::from(parent)
    } else {
        Cow::from(path.with_extension(""))
    };
    // convert a filesystem path like `foobar/collections/abc` to a reversed sequence of modules
    // like `["abc", "collections", "foobar"]`, stripping anything that's not a normal component
    let mut components = path
        .components()
        .filter(|c| matches!(c, Component::Normal(_)))
        .map(|c| c.as_os_str().to_string_lossy())
        .rev();
    let Some(module_name) = components.next() else {
        return;
    };
    if is_allowed_module(settings, target_version, &module_name) {
        return;
    }
    // not allowed generally, but check for a parent in non-strict mode
    // (a nested module like `utils/logging.py` only fires under strict checking)
    if !settings.flake8_builtins.strict_checking && components.next().is_some() {
        return;
    }
    // The diagnostic is attached to the start of the file rather than a token.
    context.report_diagnostic(
        StdlibModuleShadowing {
            name: module_name.to_string(),
        },
        TextRange::default(),
    );
}
/// Return the longest prefix of `path` between `settings.src` and `settings.project_root`.
fn get_prefix<'a>(settings: &'a LinterSettings, path: &Path) -> Option<&'a PathBuf> {
    // `max` over `PathBuf`'s lexicographic `Ord` selects the longest matching
    // prefix: a nested directory always compares greater than its ancestor.
    settings
        .src
        .iter()
        .chain([&settings.project_root])
        .filter(|dir| path.starts_with(dir))
        .max()
}
/// Whether `module` is exempt from A005: a private stdlib module, an entry in
/// the user's allowed-modules list, or not a stdlib module at all for the
/// given target version.
fn is_allowed_module(settings: &LinterSettings, version: PythonVersion, module: &str) -> bool {
    // Shadowing private stdlib modules (single leading underscore, not a
    // dunder) is okay. https://github.com/astral-sh/ruff/issues/12949
    let shadows_private_module = module.starts_with('_') && !module.starts_with("__");
    let explicitly_allowed = settings
        .flake8_builtins
        .allowed_modules
        .iter()
        .any(|allowed| allowed == module);
    shadows_private_module
        || explicitly_allowed
        || !is_known_standard_library(version.minor, module)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pygrep_hooks/mod.rs | crates/ruff_linter/src/rules/pygrep_hooks/mod.rs | //! Rules from [pygrep-hooks](https://github.com/pre-commit/pygrep-hooks).
pub(crate) mod rules;
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use crate::registry::Rule;
use crate::test::test_path;
use crate::{assert_diagnostics, settings};
#[test_case(Rule::BlanketTypeIgnore, Path::new("PGH003_0.py"))]
#[test_case(Rule::BlanketTypeIgnore, Path::new("PGH003_1.py"))]
#[test_case(Rule::BlanketNOQA, Path::new("PGH004_0.py"))]
#[test_case(Rule::BlanketNOQA, Path::new("PGH004_1.py"))]
#[test_case(Rule::BlanketNOQA, Path::new("PGH004_2.py"))]
#[test_case(Rule::BlanketNOQA, Path::new("PGH004_3.py"))]
#[test_case(Rule::InvalidMockAccess, Path::new("PGH005_0.py"))]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
    // Snapshot name encodes the rule code and the fixture file.
    let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
    let fixture = Path::new("pygrep_hooks").join(path);
    let settings = settings::LinterSettings::for_rule(rule_code);
    let diagnostics = test_path(fixture.as_path(), &settings)?;
    assert_diagnostics!(snapshot, diagnostics);
    Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pygrep_hooks/rules/blanket_noqa.rs | crates/ruff_linter/src/rules/pygrep_hooks/rules/blanket_noqa.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_trivia::Cursor;
use ruff_text_size::{Ranged, TextRange};
use crate::Locator;
use crate::checkers::ast::LintContext;
use crate::noqa::{self, Directive, FileNoqaDirectives, NoqaDirectives};
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Check for `noqa` annotations that suppress all diagnostics, as opposed to
/// targeting specific diagnostics.
///
/// ## Why is this bad?
/// Suppressing all diagnostics can hide issues in the code.
///
/// Blanket `noqa` annotations are also more difficult to interpret and
/// maintain, as the annotation does not clarify which diagnostics are intended
/// to be suppressed.
///
/// ## Example
/// ```python
/// from .base import * # noqa
/// ```
///
/// Use instead:
/// ```python
/// from .base import * # noqa: F403
/// ```
///
/// ## Fix safety
/// This rule will attempt to fix blanket `noqa` annotations that appear to
/// be unintentional. For example, given `# noqa F401`, the rule will suggest
/// inserting a colon, as in `# noqa: F401`.
///
/// While modifying `noqa` comments is generally safe, doing so may introduce
/// additional diagnostics.
///
/// ## References
/// - [Ruff documentation](https://docs.astral.sh/ruff/configuration/#error-suppression)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.200")]
pub(crate) struct BlanketNOQA {
missing_colon: bool,
file_exemption: bool,
}
impl Violation for BlanketNOQA {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
let BlanketNOQA {
missing_colon,
file_exemption,
} = self;
// This awkward branching is necessary to ensure that the generic message is picked up by
// `derive_message_formats`.
if !missing_colon && !file_exemption {
"Use specific rule codes when using `noqa`".to_string()
} else if *file_exemption {
"Use specific rule codes when using `ruff: noqa`".to_string()
} else {
"Use a colon when specifying `noqa` rule codes".to_string()
}
}
fn fix_title(&self) -> Option<String> {
if self.missing_colon {
Some("Add missing colon".to_string())
} else {
None
}
}
}
/// PGH004
///
/// Reports blanket `noqa` suppressions: file-level `ruff: noqa` exemptions
/// and per-line `# noqa` comments with no rule codes. A `# noqa F401`
/// (codes present but colon missing) additionally gets a colon-insertion fix.
pub(crate) fn blanket_noqa(
    context: &LintContext,
    noqa_directives: &NoqaDirectives,
    locator: &Locator,
    file_noqa_directives: &FileNoqaDirectives,
) {
    // File-level exemptions: a code-less `ruff: noqa` suppresses everything.
    for line in file_noqa_directives.lines() {
        if let Directive::All(_) = line.parsed_file_exemption {
            context.report_diagnostic(
                BlanketNOQA {
                    missing_colon: false,
                    file_exemption: true,
                },
                line.range(),
            );
        }
    }
    // Per-line directives: `Directive::All` means no codes were parsed.
    for directive_line in noqa_directives.lines() {
        if let Directive::All(all) = &directive_line.directive {
            let line = locator.slice(directive_line);
            // Offset of the end of the `noqa` token within this line's slice.
            let noqa_end = all.end() - directive_line.start();
            // Skip the `# noqa`, plus any trailing whitespace.
            let mut cursor = Cursor::new(&line[noqa_end.to_usize()..]);
            cursor.eat_while(char::is_whitespace);
            if noqa::lex_codes(cursor.chars().as_str()).is_ok_and(|codes| !codes.is_empty()) {
                // Check for a missing colon.
                // Ex) `# noqa F401`
                let start = all.end();
                let end = start + cursor.token_len();
                let mut diagnostic = context.report_diagnostic(
                    BlanketNOQA {
                        missing_colon: true,
                        file_exemption: false,
                    },
                    TextRange::new(all.start(), end),
                );
                // Unsafe fix: inserting the colon activates the codes, which
                // may surface previously-suppressed diagnostics.
                diagnostic.set_fix(Fix::unsafe_edit(Edit::insertion(':'.to_string(), start)));
            } else {
                // Otherwise, it looks like an intentional blanket `noqa` annotation.
                context.report_diagnostic(
                    BlanketNOQA {
                        missing_colon: false,
                        file_exemption: false,
                    },
                    all.range(),
                );
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pygrep_hooks/rules/no_eval.rs | crates/ruff_linter/src/rules/pygrep_hooks/rules/no_eval.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::Violation;
/// ## Removed
/// This rule is identical to [S307] which should be used instead.
///
/// ## What it does
/// Checks for uses of the builtin `eval()` function.
///
/// ## Why is this bad?
/// The `eval()` function is insecure as it enables arbitrary code execution.
///
/// ## Example
/// ```python
/// def foo():
/// x = eval(input("Enter a number: "))
/// ...
/// ```
///
/// Use instead:
/// ```python
/// def foo():
/// x = input("Enter a number: ")
/// ...
/// ```
///
/// ## References
/// - [Python documentation: `eval`](https://docs.python.org/3/library/functions.html#eval)
/// - [_Eval really is dangerous_ by Ned Batchelder](https://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html)
///
/// [S307]: https://docs.astral.sh/ruff/rules/suspicious-eval-usage/
#[derive(ViolationMetadata)]
#[violation_metadata(removed_since = "v0.2.0")]
pub(crate) struct Eval;
/// PGH001
impl Violation for Eval {
#[derive_message_formats]
fn message(&self) -> String {
"No builtin `eval()` allowed".to_string()
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pygrep_hooks/rules/blanket_type_ignore.rs | crates/ruff_linter/src/rules/pygrep_hooks/rules/blanket_type_ignore.rs | use std::sync::LazyLock;
use anyhow::{Result, anyhow};
use memchr::memchr_iter;
use regex::Regex;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_trivia::CommentRanges;
use ruff_text_size::TextSize;
use crate::Locator;
use crate::Violation;
use crate::checkers::ast::LintContext;
/// ## What it does
/// Check for `type: ignore` annotations that suppress all type warnings, as
/// opposed to targeting specific type warnings.
///
/// ## Why is this bad?
/// Suppressing all warnings can hide issues in the code.
///
/// Blanket `type: ignore` annotations are also more difficult to interpret and
/// maintain, as the annotation does not clarify which warnings are intended
/// to be suppressed.
///
/// ## Example
/// ```python
/// from foo import secrets # type: ignore
/// ```
///
/// Use instead:
/// ```python
/// from foo import secrets # type: ignore[attr-defined]
/// ```
///
/// ## References
/// Mypy supports a [built-in setting](https://mypy.readthedocs.io/en/stable/error_code_list2.html#check-that-type-ignore-include-an-error-code-ignore-without-code)
/// to enforce that all `type: ignore` annotations include an error code, akin
/// to enabling this rule:
/// ```toml
/// [tool.mypy]
/// enable_error_code = ["ignore-without-code"]
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.187")]
pub(crate) struct BlanketTypeIgnore;
impl Violation for BlanketTypeIgnore {
#[derive_message_formats]
fn message(&self) -> String {
"Use specific rule codes when ignoring type issues".to_string()
}
}
/// PGH003
///
/// Scans comments for `# type: ignore` / `# pyright: ignore` annotations and
/// reports those lacking an error-code tag (e.g. `[attr-defined]`).
pub(crate) fn blanket_type_ignore(
    context: &LintContext,
    comment_ranges: &CommentRanges,
    locator: &Locator,
) {
    for range in comment_ranges {
        let line = locator.slice(range);
        // Match, e.g., `# type: ignore` or `# type: ignore[attr-defined]`.
        // See: https://github.com/python/mypy/blob/b43e0d34247a6d1b3b9d9094d184bbfcb9808bb9/mypy/fastparse.py#L248
        // A comment range may contain several `#` characters; try each one.
        for start in memchr_iter(b'#', line.as_bytes()) {
            // Strip the `#` and any trailing whitespace.
            let comment = &line[start + 1..].trim_start();
            // Match the `type` or `pyright` prefixes (in, e.g., `# type: ignore`).
            let Some(comment) = comment
                .strip_prefix("type")
                .or_else(|| comment.strip_prefix("pyright"))
            else {
                continue;
            };
            // Next character must be a colon.
            if !comment.starts_with(':') {
                continue;
            }
            // Strip the colon and any trailing whitespace.
            let comment = &comment[1..].trim_start();
            // Match the `ignore`.
            let Some(comment) = comment.strip_prefix("ignore") else {
                continue;
            };
            // Strip any trailing whitespace.
            let comment = comment.trim_start();
            // Match the optional `[...]` tag; an empty code list means the
            // annotation is a blanket ignore, which is what we report.
            if let Ok(codes) = parse_type_ignore_tag(comment) {
                if codes.is_empty() {
                    context.report_diagnostic_if_enabled(
                        BlanketTypeIgnore,
                        range.add_start(TextSize::try_from(start).unwrap()),
                    );
                }
            }
        }
    }
}
// Match, e.g., `[attr-defined]` or `[attr-defined, misc]`.
// See: https://github.com/python/mypy/blob/b43e0d34247a6d1b3b9d9094d184bbfcb9808bb9/mypy/fastparse.py#L327
static TYPE_IGNORE_TAG_PATTERN: LazyLock<Regex> =
LazyLock::new(|| Regex::new(r"^\s*\[(?P<codes>[^]#]*)]\s*(#.*)?$").unwrap());
/// Parse the optional `[...]` tag in a `# type: ignore[...]` comment.
///
/// Returns the list of error codes to ignore; an empty list means the
/// annotation is a blanket ignore.
fn parse_type_ignore_tag(tag: &str) -> Result<Vec<&str>> {
    // See: https://github.com/python/mypy/blob/b43e0d34247a6d1b3b9d9094d184bbfcb9808bb9/mypy/fastparse.py#L316
    // No tag (or only a trailing `#` comment) -- ignore all errors.
    let trimmed = tag.trim();
    if trimmed.is_empty() || trimmed.starts_with('#') {
        return Ok(Vec::new());
    }
    // Otherwise the tag must be a bracketed, comma-separated list of codes.
    let Some(captures) = TYPE_IGNORE_TAG_PATTERN.captures(tag) else {
        return Err(anyhow!("Invalid type ignore tag: {tag}"));
    };
    let codes = captures
        .name("codes")
        .expect("`codes` group is always present on a match")
        .as_str();
    Ok(codes.split(',').map(str::trim).collect())
}
#[cfg(test)]
mod tests {
    #[test]
    fn type_ignore_tag() {
        // No tag: blanket ignore.
        let result = super::parse_type_ignore_tag("");
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), Vec::<&str>::new());

        // Single code.
        let result = super::parse_type_ignore_tag("[attr-defined]");
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), vec!["attr-defined"]);

        // Leading whitespace before the tag is allowed.
        let result = super::parse_type_ignore_tag(" [attr-defined]");
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), vec!["attr-defined"]);

        // Multiple codes are split on commas and trimmed.
        let result = super::parse_type_ignore_tag("[attr-defined, misc]");
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), vec!["attr-defined", "misc"]);

        // A trailing `#` comment after the tag is permitted by the pattern.
        let result = super::parse_type_ignore_tag("[misc] # comment");
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), vec!["misc"]);

        // An unterminated tag is rejected.
        assert!(super::parse_type_ignore_tag("[attr-defined").is_err());
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pygrep_hooks/rules/mod.rs | crates/ruff_linter/src/rules/pygrep_hooks/rules/mod.rs | pub(crate) use blanket_noqa::*;
pub(crate) use blanket_type_ignore::*;
pub(crate) use deprecated_log_warn::*;
pub(crate) use invalid_mock_access::*;
pub(crate) use no_eval::*;
mod blanket_noqa;
mod blanket_type_ignore;
mod deprecated_log_warn;
mod invalid_mock_access;
mod no_eval;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pygrep_hooks/rules/deprecated_log_warn.rs | crates/ruff_linter/src/rules/pygrep_hooks/rules/deprecated_log_warn.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::{FixAvailability, Violation};
/// ## Removed
/// This rule is identical to [G010] which should be used instead.
///
/// ## What it does
/// Check for usages of the deprecated `warn` method from the `logging` module.
///
/// ## Why is this bad?
/// The `warn` method is deprecated. Use `warning` instead.
///
/// ## Example
/// ```python
/// import logging
///
///
/// def foo():
/// logging.warn("Something happened")
/// ```
///
/// Use instead:
/// ```python
/// import logging
///
///
/// def foo():
/// logging.warning("Something happened")
/// ```
///
/// ## References
/// - [Python documentation: `logger.Logger.warning`](https://docs.python.org/3/library/logging.html#logging.Logger.warning)
///
/// [G010]: https://docs.astral.sh/ruff/rules/logging-warn/
#[derive(ViolationMetadata)]
#[violation_metadata(removed_since = "v0.2.0")]
pub(crate) struct DeprecatedLogWarn;
/// PGH002
impl Violation for DeprecatedLogWarn {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
"`warn` is deprecated in favor of `warning`".to_string()
}
fn fix_title(&self) -> Option<String> {
Some("Replace with `warning`".to_string())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pygrep_hooks/rules/invalid_mock_access.rs | crates/ruff_linter/src/rules/pygrep_hooks/rules/invalid_mock_access.rs | use ruff_python_ast::{self as ast, Expr};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
#[derive(Debug, PartialEq, Eq)]
enum Reason {
    /// An assertion method was referenced but never called
    /// (e.g., `my_mock.assert_called`).
    UncalledMethod(String),
    /// The attribute looks like a mock assertion but no such method exists
    /// (e.g., `my_mock.called_once` — the `assert_` prefix is missing).
    NonExistentMethod(String),
}
/// ## What it does
/// Checks for common mistakes when using mock objects.
///
/// ## Why is this bad?
/// The `mock` module exposes an assertion API that can be used to verify that
/// mock objects undergo expected interactions. This rule checks for common
/// mistakes when using this API.
///
/// For example, it checks for mock attribute accesses that should be replaced
/// with mock method calls.
///
/// ## Example
/// ```python
/// my_mock.assert_called
/// ```
///
/// Use instead:
/// ```python
/// my_mock.assert_called()
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.266")]
pub(crate) struct InvalidMockAccess {
    // Why the access is invalid; selects between the two messages below.
    reason: Reason,
}
impl Violation for InvalidMockAccess {
    #[derive_message_formats]
    fn message(&self) -> String {
        let InvalidMockAccess { reason } = self;
        match reason {
            Reason::UncalledMethod(name) => format!("Mock method should be called: `{name}`"),
            Reason::NonExistentMethod(name) => format!("Non-existent mock method: `{name}`"),
        }
    }
}
/// PGH005
///
/// Flags bare references to mock assertion methods (e.g., `m.assert_called`),
/// which perform no check unless actually invoked.
pub(crate) fn uncalled_mock_method(checker: &Checker, expr: &Expr) {
    // Only attribute accesses can be uncalled method references.
    let Expr::Attribute(ast::ExprAttribute { attr, .. }) = expr else {
        return;
    };

    // The union of the synchronous `Mock` and `AsyncMock` assertion methods.
    let is_assertion_method = matches!(
        attr.as_str(),
        // Synchronous assertions.
        "assert_any_call"
            | "assert_called"
            | "assert_called_once"
            | "assert_called_once_with"
            | "assert_called_with"
            | "assert_has_calls"
            | "assert_not_called"
            // Async assertions.
            | "assert_awaited"
            | "assert_awaited_once"
            | "assert_awaited_with"
            | "assert_awaited_once_with"
            | "assert_any_await"
            | "assert_has_awaits"
            | "assert_not_awaited"
    );

    if is_assertion_method {
        checker.report_diagnostic(
            InvalidMockAccess {
                reason: Reason::UncalledMethod(attr.to_string()),
            },
            expr.range(),
        );
    }
}
/// PGH005
///
/// Flags attribute accesses (or calls) whose name resembles a mock assertion
/// but is missing the `assert_` prefix (e.g., `m.called_once`).
pub(crate) fn non_existent_mock_method(checker: &Checker, test: &Expr) {
    // Accept either a bare attribute (`m.called_once`) or a call
    // (`m.called_once()`); in both cases, inspect the attribute name.
    let attr = if let Expr::Attribute(ast::ExprAttribute { attr, .. }) = test {
        attr
    } else if let Expr::Call(ast::ExprCall { func, .. }) = test {
        if let Expr::Attribute(ast::ExprAttribute { attr, .. }) = func.as_ref() {
            attr
        } else {
            return;
        }
    } else {
        return;
    };

    // The union of the sync and async "phantom" assertion names.
    let is_phantom_method = matches!(
        attr.as_str(),
        // Synchronous variants.
        "any_call"
            | "called_once"
            | "called_once_with"
            | "called_with"
            | "has_calls"
            | "not_called"
            // Async variants.
            | "awaited"
            | "awaited_once"
            | "awaited_with"
            | "awaited_once_with"
            | "any_await"
            | "has_awaits"
            | "not_awaited"
    );

    if is_phantom_method {
        checker.report_diagnostic(
            InvalidMockAccess {
                reason: Reason::NonExistentMethod(attr.to_string()),
            },
            test.range(),
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_boolean_trap/settings.rs | crates/ruff_linter/src/rules/flake8_boolean_trap/settings.rs | //! Settings for the `flake8-boolean-trap` plugin.
use std::fmt;
use ruff_macros::CacheKey;
use crate::display_settings;
#[derive(Debug, Clone, CacheKey, Default)]
pub struct Settings {
    // Additional dotted call paths (e.g., `pydantic.Field`) that are
    // permitted to take boolean positional arguments.
    pub extend_allowed_calls: Vec<String>,
}
impl fmt::Display for Settings {
    /// Renders the settings under the `linter.flake8_boolean_trap` namespace
    /// via the shared `display_settings!` macro.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        display_settings! {
            formatter = f,
            namespace = "linter.flake8_boolean_trap",
            fields = [
                self.extend_allowed_calls | array,
            ]
        }
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_boolean_trap/helpers.rs | crates/ruff_linter/src/rules/flake8_boolean_trap/helpers.rs | use ruff_db::diagnostic::Diagnostic;
use ruff_python_ast::name::QualifiedName;
use ruff_python_ast::{self as ast, Expr};
use ruff_python_semantic::SemanticModel;
use ruff_python_semantic::analyze::function_type::is_subject_to_liskov_substitution_principle;
use crate::checkers::ast::Checker;
use crate::settings::LinterSettings;
/// Returns `true` if a function call is allowed to use a boolean trap.
pub(super) fn is_allowed_func_call(name: &str) -> bool {
    // Names whose boolean positional arguments are conventional (builtins,
    // `unittest` assertions, common container/config/ORM helpers).
    const ALLOWED_CALLS: &[&str] = &[
        "__setattr__",
        "append",
        "assertEqual",
        "assertEquals",
        "assertNotEqual",
        "assertNotEquals",
        "bool",
        "bytes",
        "coalesce",
        "count",
        "failIfEqual",
        "failUnlessEqual",
        "float",
        "fromkeys",
        "get",
        "getattr",
        "getboolean",
        "getfloat",
        "getint",
        "ifnull",
        "index",
        "insert",
        "int",
        "is_",
        "is_not",
        "isnull",
        "next",
        "nvl",
        "param",
        "pop",
        "remove",
        "set_blocking",
        "set_enabled",
        "setattr",
        "setdefault",
        "str",
    ];
    ALLOWED_CALLS.contains(&name)
}
/// Returns `true` if a call is allowed by the user to use a boolean trap.
pub(super) fn is_user_allowed_func_call(
    call: &ast::ExprCall,
    semantic: &SemanticModel,
    settings: &LinterSettings,
) -> bool {
    // An unresolvable callee can never match the configured allow-list.
    let Some(qualified_name) = semantic.resolve_qualified_name(call.func.as_ref()) else {
        return false;
    };
    // Compare against each user-configured dotted path.
    settings
        .flake8_boolean_trap
        .extend_allowed_calls
        .iter()
        .any(|allowed| qualified_name == QualifiedName::from_dotted_name(allowed))
}
/// Returns `true` if a function defines a binary operator.
///
/// This only includes operators, i.e., functions that are usually not called directly.
///
/// See: <https://docs.python.org/3/library/operator.html>
pub(super) fn is_operator_method(name: &str) -> bool {
    // Dunder methods backing Python operators: membership, item access,
    // arithmetic (plain/reflected/in-place), shifts, bitwise, comparisons,
    // and the unary operators.
    const OPERATOR_DUNDERS: &[&str] = &[
        // in
        "__contains__",
        // item access ([], []=, del [])
        "__getitem__",
        "__setitem__",
        "__delitem__",
        // + / += (and reflected)
        "__add__",
        "__radd__",
        "__iadd__",
        // - / -=
        "__sub__",
        "__rsub__",
        "__isub__",
        // * / *=
        "__mul__",
        "__rmul__",
        "__imul__",
        // / and /=
        "__truediv__",
        "__rtruediv__",
        "__itruediv__",
        // // and //=
        "__floordiv__",
        "__rfloordiv__",
        "__ifloordiv__",
        // % and %=
        "__mod__",
        "__rmod__",
        "__imod__",
        // ** and **=
        "__pow__",
        "__rpow__",
        "__ipow__",
        // << and <<=
        "__lshift__",
        "__rlshift__",
        "__ilshift__",
        // >> and >>=
        "__rshift__",
        "__rrshift__",
        "__irshift__",
        // @ and @=
        "__matmul__",
        "__rmatmul__",
        "__imatmul__",
        // & and &=
        "__and__",
        "__rand__",
        "__iand__",
        // | and |=
        "__or__",
        "__ror__",
        "__ior__",
        // ^ and ^=
        "__xor__",
        "__rxor__",
        "__ixor__",
        // comparisons
        "__gt__",
        "__lt__",
        "__ge__",
        "__le__",
        "__eq__",
        "__ne__",
        // unary operators (included for completeness)
        "__pos__",
        "__neg__",
        "__invert__",
    ];
    OPERATOR_DUNDERS.contains(&name)
}
/// Returns `true` if a function definition is allowed to use a boolean trap.
///
/// This covers `__post_init__` (dataclass hook) plus every operator dunder.
pub(super) fn is_allowed_func_def(name: &str) -> bool {
    name == "__post_init__" || is_operator_method(name)
}
/// Returns `true` if an argument is allowed to use a boolean trap. To return
/// `true`, the function name must be explicitly allowed, and the argument must
/// be either the first or second argument in the call.
pub(super) fn allow_boolean_trap(call: &ast::ExprCall, checker: &Checker) -> bool {
    // Resolve the called name: either `obj.method(...)` or `function(...)`.
    let func_name = match call.func.as_ref() {
        Expr::Attribute(ast::ExprAttribute { attr, .. }) => attr.as_str(),
        Expr::Name(ast::ExprName { id, .. }) => id.as_str(),
        _ => return false,
    };

    // Allowed by the built-in list?
    if is_allowed_func_call(func_name) {
        return true;
    }

    // Single-argument setters are allowed: `foo.set(True)`,
    // `foo.set_visible(True)`, `foo.setVisible(True)`. Their argument is
    // often positional-only and third-party, so a violation would be
    // unfixable by the caller.
    if call.arguments.args.len() == 1 {
        let is_setter = func_name == "set"
            || func_name.strip_prefix("set").is_some_and(|suffix| {
                suffix.starts_with(|c: char| c == '_' || c.is_ascii_uppercase())
            });
        if is_setter {
            return true;
        }
    }

    // Finally, consult the user-configured allow-list.
    is_user_allowed_func_call(call, checker.semantic(), checker.settings())
}
/// If the function looks like it overrides a superclass method (per
/// `is_subject_to_liskov_substitution_principle`), attach a help note to the
/// diagnostic suggesting `@typing.override` instead of a signature change.
pub(super) fn add_liskov_substitution_principle_help(
    diagnostic: &mut Diagnostic,
    function_name: &str,
    decorator_list: &[ast::Decorator],
    checker: &Checker,
) {
    let semantic = checker.semantic();
    let parent_scope = semantic.current_scope();
    // Respect user-configured aliases for `classmethod`/`staticmethod`
    // decorators from the pep8-naming settings.
    let pep8_settings = &checker.settings().pep8_naming;
    if is_subject_to_liskov_substitution_principle(
        function_name,
        decorator_list,
        parent_scope,
        semantic,
        &pep8_settings.classmethod_decorators,
        &pep8_settings.staticmethod_decorators,
    ) {
        diagnostic.help(
            "Consider adding `@typing.override` if changing the function signature \
             would violate the Liskov Substitution Principle",
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_boolean_trap/mod.rs | crates/ruff_linter/src/rules/flake8_boolean_trap/mod.rs | //! Rules from [flake8-boolean-trap](https://pypi.org/project/flake8-boolean-trap/).
mod helpers;
pub(crate) mod rules;
pub mod settings;
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use crate::registry::Rule;
use crate::settings::LinterSettings;
use crate::test::test_path;
use crate::{assert_diagnostics, settings};
#[test_case(Rule::BooleanTypeHintPositionalArgument, Path::new("FBT.py"))]
#[test_case(Rule::BooleanDefaultValuePositionalArgument, Path::new("FBT.py"))]
#[test_case(Rule::BooleanPositionalValueInCall, Path::new("FBT.py"))]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_boolean_trap").join(path).as_path(),
&settings::LinterSettings::for_rule(rule_code),
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test]
fn extend_allowed_callable() -> Result<()> {
let diagnostics = test_path(
Path::new("flake8_boolean_trap/FBT.py"),
&LinterSettings {
flake8_boolean_trap: super::settings::Settings {
extend_allowed_calls: vec![
"django.db.models.Value".to_string(),
"pydantic.Field".to_string(),
],
},
..LinterSettings::for_rule(Rule::BooleanPositionalValueInCall)
},
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_boolean_trap/rules/boolean_positional_value_in_call.rs | crates/ruff_linter/src/rules/flake8_boolean_trap/rules/boolean_positional_value_in_call.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_boolean_trap::helpers::allow_boolean_trap;
/// ## What it does
/// Checks for boolean positional arguments in function calls.
///
/// Some functions are whitelisted by default. To extend the list of allowed calls
/// configure the [`lint.flake8-boolean-trap.extend-allowed-calls`] option.
///
/// ## Why is this bad?
/// Calling a function with boolean positional arguments is confusing as the
/// meaning of the boolean value is not clear to the caller, and to future
/// readers of the code.
///
/// ## Example
///
/// ```python
/// def func(flag: bool) -> None: ...
///
///
/// func(True)
/// ```
///
/// Use instead:
///
/// ```python
/// def func(flag: bool) -> None: ...
///
///
/// func(flag=True)
/// ```
///
/// ## Options
/// - `lint.flake8-boolean-trap.extend-allowed-calls`
///
/// ## References
/// - [Python documentation: Calls](https://docs.python.org/3/reference/expressions.html#calls)
/// - [_How to Avoid “The Boolean Trap”_ by Adam Johnson](https://adamj.eu/tech/2021/07/10/python-type-hints-how-to-avoid-the-boolean-trap/)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.127")]
pub(crate) struct BooleanPositionalValueInCall;
impl Violation for BooleanPositionalValueInCall {
    /// The user-facing diagnostic message; this rule offers no fix.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Boolean positional value in function call".to_string()
    }
}
/// FBT003
///
/// Reports every `True`/`False` literal passed positionally to a call that is
/// not on the (built-in or user-configured) allow-list.
pub(crate) fn boolean_positional_value_in_call(checker: &Checker, call: &ast::ExprCall) {
    // Skip calls that are explicitly allowed to take boolean positionals.
    if allow_boolean_trap(call, checker) {
        return;
    }
    // Each offending literal gets its own diagnostic at the argument's range.
    for arg in call.arguments.args.iter() {
        if arg.is_boolean_literal_expr() {
            checker.report_diagnostic(BooleanPositionalValueInCall, arg.range());
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_boolean_trap/rules/mod.rs | crates/ruff_linter/src/rules/flake8_boolean_trap/rules/mod.rs | pub(crate) use boolean_default_value_positional_argument::*;
pub(crate) use boolean_positional_value_in_call::*;
pub(crate) use boolean_type_hint_positional_argument::*;
mod boolean_default_value_positional_argument;
mod boolean_positional_value_in_call;
mod boolean_type_hint_positional_argument;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_boolean_trap/rules/boolean_type_hint_positional_argument.rs | crates/ruff_linter/src/rules/flake8_boolean_trap/rules/boolean_type_hint_positional_argument.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::identifier::Identifier;
use ruff_python_ast::name::UnqualifiedName;
use ruff_python_ast::{self as ast, Decorator, Expr, Parameters};
use ruff_python_semantic::SemanticModel;
use ruff_python_semantic::analyze::visibility;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_boolean_trap::helpers::{
add_liskov_substitution_principle_help, is_allowed_func_def,
};
/// ## What it does
/// Checks for the use of boolean positional arguments in function definitions,
/// as determined by the presence of a type hint containing `bool` as an
/// evident subtype - e.g. `bool`, `bool | int`, `typing.Optional[bool]`, etc.
///
/// ## Why is this bad?
/// Calling a function with boolean positional arguments is confusing as the
/// meaning of the boolean value is not clear to the caller and to future
/// readers of the code.
///
/// The use of a boolean will also limit the function to only two possible
/// behaviors, which makes the function difficult to extend in the future.
///
/// Instead, consider refactoring into separate implementations for the
/// `True` and `False` cases, using an `Enum`, or making the argument a
/// keyword-only argument, to force callers to be explicit when providing
/// the argument.
///
/// Dunder methods that define operators are exempt from this rule, as are
/// setters and [`@override`][override] definitions.
///
/// ## Example
///
/// ```python
/// from math import ceil, floor
///
///
/// def round_number(number: float, up: bool) -> int:
/// return ceil(number) if up else floor(number)
///
///
/// round_number(1.5, True) # What does `True` mean?
/// round_number(1.5, False) # What does `False` mean?
/// ```
///
/// Instead, refactor into separate implementations:
///
/// ```python
/// from math import ceil, floor
///
///
/// def round_up(number: float) -> int:
/// return ceil(number)
///
///
/// def round_down(number: float) -> int:
/// return floor(number)
///
///
/// round_up(1.5)
/// round_down(1.5)
/// ```
///
/// Or, refactor to use an `Enum`:
///
/// ```python
/// from enum import Enum
///
///
/// class RoundingMethod(Enum):
/// UP = 1
/// DOWN = 2
///
///
/// def round_number(value: float, method: RoundingMethod) -> float: ...
/// ```
///
/// Or, make the argument a keyword-only argument:
///
/// ```python
/// from math import ceil, floor
///
///
/// def round_number(number: float, *, up: bool) -> int:
/// return ceil(number) if up else floor(number)
///
///
/// round_number(1.5, up=True)
/// round_number(1.5, up=False)
/// ```
///
/// ## References
/// - [Python documentation: Calls](https://docs.python.org/3/reference/expressions.html#calls)
/// - [_How to Avoid “The Boolean Trap”_ by Adam Johnson](https://adamj.eu/tech/2021/07/10/python-type-hints-how-to-avoid-the-boolean-trap/)
///
/// [override]: https://docs.python.org/3/library/typing.html#typing.override
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.127")]
pub(crate) struct BooleanTypeHintPositionalArgument;
impl Violation for BooleanTypeHintPositionalArgument {
#[derive_message_formats]
fn message(&self) -> String {
"Boolean-typed positional argument in function definition".to_string()
}
}
/// FBT001
pub(crate) fn boolean_type_hint_positional_argument(
    checker: &Checker,
    name: &str,
    decorator_list: &[Decorator],
    parameters: &Parameters,
) {
    // Stub files are exempt.
    // https://github.com/astral-sh/ruff/issues/14535
    if checker.source_type.is_stub() {
        return;
    }
    // Allow Boolean type hints in explicitly-allowed functions.
    if is_allowed_func_def(name) {
        return;
    }
    // Only positional parameters (positional-only and positional-or-keyword)
    // can fall into the boolean trap; keyword-only parameters are not checked.
    for parameter in parameters.posonlyargs.iter().chain(&parameters.args) {
        let Some(annotation) = parameter.annotation() else {
            continue;
        };
        if !match_annotation_to_complex_bool(annotation, checker.semantic()) {
            continue;
        }
        // Allow Boolean type hints in setters (decorated `@<name>.setter`).
        // Note that this (and the checks below) `return` from the whole
        // function, skipping any remaining parameters as well.
        if decorator_list.iter().any(|decorator| {
            UnqualifiedName::from_expr(&decorator.expression)
                .is_some_and(|unqualified_name| unqualified_name.segments() == [name, "setter"])
        }) {
            return;
        }
        // Allow Boolean defaults in `@override` methods, since they're required to adhere to
        // the parent signature.
        if visibility::is_override(decorator_list, checker.semantic()) {
            return;
        }
        // If `bool` isn't actually a reference to the `bool` built-in, return.
        if !checker.semantic().has_builtin_binding("bool") {
            return;
        }
        // Anchor the diagnostic at the parameter's identifier, not the
        // whole annotated parameter.
        let mut diagnostic =
            checker.report_diagnostic(BooleanTypeHintPositionalArgument, parameter.identifier());
        add_liskov_substitution_principle_help(&mut diagnostic, name, decorator_list, checker);
    }
}
/// Returns `true` if the annotation is a boolean type hint (e.g., `bool`), or a type hint that
/// includes boolean as a variant (e.g., `bool | int`).
fn match_annotation_to_complex_bool(annotation: &Expr, semantic: &SemanticModel) -> bool {
    match annotation {
        // Ex) `bool`
        Expr::Name(name) => &name.id == "bool",
        // Ex) `"bool"` (a string annotation; matched textually, so qualified
        // forms inside strings are not resolved)
        Expr::StringLiteral(ast::ExprStringLiteral { value, .. }) => value == "bool",
        // Ex) `bool | int` — recurse into both sides of the PEP 604 union.
        Expr::BinOp(ast::ExprBinOp {
            left,
            op: ast::Operator::BitOr,
            right,
            ..
        }) => {
            match_annotation_to_complex_bool(left, semantic)
                || match_annotation_to_complex_bool(right, semantic)
        }
        // Ex) `typing.Union[bool, int]` or `typing.Optional[bool]`
        Expr::Subscript(ast::ExprSubscript { value, slice, .. }) => {
            // If the typing modules were never imported, we'll never match below.
            if !semantic.seen_typing() {
                return false;
            }
            let qualified_name = semantic.resolve_qualified_name(value);
            if qualified_name.as_ref().is_some_and(|qualified_name| {
                semantic.match_typing_qualified_name(qualified_name, "Union")
            }) {
                // `Union[...]` subscripts a tuple of members; recurse into each.
                if let Expr::Tuple(ast::ExprTuple { elts, .. }) = slice.as_ref() {
                    elts.iter()
                        .any(|elt| match_annotation_to_complex_bool(elt, semantic))
                } else {
                    // Union with a single type is an invalid type annotation
                    false
                }
            } else if qualified_name.as_ref().is_some_and(|qualified_name| {
                semantic.match_typing_qualified_name(qualified_name, "Optional")
            }) {
                // `Optional[T]` wraps a single type; recurse into it.
                match_annotation_to_complex_bool(slice, semantic)
            } else {
                false
            }
        }
        // Anything else (calls, literals, etc.) is not treated as boolean.
        _ => false,
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_boolean_trap/rules/boolean_default_value_positional_argument.rs | crates/ruff_linter/src/rules/flake8_boolean_trap/rules/boolean_default_value_positional_argument.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::identifier::Identifier;
use ruff_python_ast::name::UnqualifiedName;
use ruff_python_ast::{Decorator, Expr, Parameters};
use ruff_python_semantic::analyze::visibility;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_boolean_trap::helpers::{
add_liskov_substitution_principle_help, is_allowed_func_def,
};
/// ## What it does
/// Checks for the use of boolean positional arguments in function definitions,
/// as determined by the presence of a boolean default value.
///
/// ## Why is this bad?
/// Calling a function with boolean positional arguments is confusing as the
/// meaning of the boolean value is not clear to the caller and to future
/// readers of the code.
///
/// The use of a boolean will also limit the function to only two possible
/// behaviors, which makes the function difficult to extend in the future.
///
/// Instead, consider refactoring into separate implementations for the
/// `True` and `False` cases, using an `Enum`, or making the argument a
/// keyword-only argument, to force callers to be explicit when providing
/// the argument.
///
/// This rule exempts methods decorated with [`@typing.override`][override],
/// since changing the signature of a subclass method that overrides a
/// superclass method may cause type checkers to complain about a violation of
/// the Liskov Substitution Principle.
///
/// ## Example
/// ```python
/// from math import ceil, floor
///
///
/// def round_number(number, up=True):
/// return ceil(number) if up else floor(number)
///
///
/// round_number(1.5, True) # What does `True` mean?
/// round_number(1.5, False) # What does `False` mean?
/// ```
///
/// Instead, refactor into separate implementations:
/// ```python
/// from math import ceil, floor
///
///
/// def round_up(number):
/// return ceil(number)
///
///
/// def round_down(number):
/// return floor(number)
///
///
/// round_up(1.5)
/// round_down(1.5)
/// ```
///
/// Or, refactor to use an `Enum`:
/// ```python
/// from enum import Enum
///
///
/// class RoundingMethod(Enum):
/// UP = 1
/// DOWN = 2
///
///
/// def round_number(value, method):
/// return ceil(number) if method is RoundingMethod.UP else floor(number)
///
///
/// round_number(1.5, RoundingMethod.UP)
/// round_number(1.5, RoundingMethod.DOWN)
/// ```
///
/// Or, make the argument a keyword-only argument:
/// ```python
/// from math import ceil, floor
///
///
/// def round_number(number, *, up=True):
/// return ceil(number) if up else floor(number)
///
///
/// round_number(1.5, up=True)
/// round_number(1.5, up=False)
/// ```
///
/// ## References
/// - [Python documentation: Calls](https://docs.python.org/3/reference/expressions.html#calls)
/// - [_How to Avoid “The Boolean Trap”_ by Adam Johnson](https://adamj.eu/tech/2021/07/10/python-type-hints-how-to-avoid-the-boolean-trap/)
///
/// [override]: https://docs.python.org/3/library/typing.html#typing.override
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.127")]
pub(crate) struct BooleanDefaultValuePositionalArgument;
impl Violation for BooleanDefaultValuePositionalArgument {
#[derive_message_formats]
fn message(&self) -> String {
"Boolean default positional argument in function definition".to_string()
}
}
/// FBT002
pub(crate) fn boolean_default_value_positional_argument(
    checker: &Checker,
    name: &str,
    decorator_list: &[Decorator],
    parameters: &Parameters,
) {
    // Stub files are exempt.
    // https://github.com/astral-sh/ruff/issues/14535
    if checker.source_type.is_stub() {
        return;
    }
    // Allow Boolean defaults in explicitly-allowed functions.
    if is_allowed_func_def(name) {
        return;
    }
    // Only positional parameters (positional-only and positional-or-keyword)
    // are checked; keyword-only parameters are not.
    for param in parameters.posonlyargs.iter().chain(&parameters.args) {
        // Trigger only on a literal `True`/`False` default.
        if param.default().is_some_and(Expr::is_boolean_literal_expr) {
            // Allow Boolean defaults in setters (decorated `@<name>.setter`).
            // Note that this (and the check below) `return` from the whole
            // function, skipping any remaining parameters as well.
            if decorator_list.iter().any(|decorator| {
                UnqualifiedName::from_expr(&decorator.expression)
                    .is_some_and(|unqualified_name| unqualified_name.segments() == [name, "setter"])
            }) {
                return;
            }
            // Allow Boolean defaults in `@override` methods, since they're required to adhere to
            // the parent signature.
            if visibility::is_override(decorator_list, checker.semantic()) {
                return;
            }
            // Anchor the diagnostic at the parameter's identifier.
            let mut diagnostic = checker
                .report_diagnostic(BooleanDefaultValuePositionalArgument, param.identifier());
            add_liskov_substitution_principle_help(&mut diagnostic, name, decorator_list, checker);
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_debugger/types.rs | crates/ruff_linter/src/rules/flake8_debugger/types.rs | #[derive(Debug, PartialEq, Eq, Clone)]
pub(crate) enum DebuggerUsingType {
Call(String),
Import(String),
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_debugger/mod.rs | crates/ruff_linter/src/rules/flake8_debugger/mod.rs | //! Rules from [flake8-debugger](https://pypi.org/project/flake8-debugger/).
pub(crate) mod rules;
pub(crate) mod types;
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use crate::registry::Rule;
use crate::test::test_path;
use crate::{assert_diagnostics, settings};
#[test_case(Rule::Debugger, Path::new("T100.py"))]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_debugger").join(path).as_path(),
&settings::LinterSettings::for_rule(rule_code),
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_debugger/rules/mod.rs | crates/ruff_linter/src/rules/flake8_debugger/rules/mod.rs | pub(crate) use debugger::*;
mod debugger;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_debugger/rules/debugger.rs | crates/ruff_linter/src/rules/flake8_debugger/rules/debugger.rs | use ruff_python_ast::{Expr, Stmt};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::name::QualifiedName;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_debugger::types::DebuggerUsingType;
/// ## What it does
/// Checks for the presence of debugger calls and imports.
///
/// ## Why is this bad?
/// Debugger calls and imports should be used for debugging purposes only. The
/// presence of a debugger call or import in production code is likely a
/// mistake and may cause unintended behavior, such as exposing sensitive
/// information or causing the program to hang.
///
/// Instead, consider using a logging library to log information about the
/// program's state, and writing tests to verify that the program behaves
/// as expected.
///
/// ## Example
/// ```python
/// def foo():
/// breakpoint()
/// ```
///
/// ## References
/// - [Python documentation: `pdb` — The Python Debugger](https://docs.python.org/3/library/pdb.html)
/// - [Python documentation: `logging` — Logging facility for Python](https://docs.python.org/3/library/logging.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.141")]
pub(crate) struct Debugger {
    // Whether the debugger was reached via a call or an import; selects
    // between the two messages below.
    using_type: DebuggerUsingType,
}
impl Violation for Debugger {
    #[derive_message_formats]
    fn message(&self) -> String {
        let Debugger { using_type } = self;
        match using_type {
            DebuggerUsingType::Call(name) => format!("Trace found: `{name}` used"),
            DebuggerUsingType::Import(name) => format!("Import for `{name}` found"),
        }
    }
}
/// T100
/// Checks for the presence of a debugger call (e.g., `pdb.set_trace()`).
pub(crate) fn debugger_call(checker: &Checker, expr: &Expr, func: &Expr) {
    // Resolve the call target to a fully-qualified name; bail if it can't be
    // resolved or isn't a known debugger entry point.
    let Some(qualified_name) = checker.semantic().resolve_qualified_name(func) else {
        return;
    };
    if !is_debugger_call(&qualified_name) {
        return;
    }
    let using_type = DebuggerUsingType::Call(qualified_name.to_string());
    checker.report_diagnostic(Debugger { using_type }, expr.range());
}
/// T100
/// Checks for the presence of a debugger import.
pub(crate) fn debugger_import(checker: &Checker, stmt: &Stmt, module: Option<&str>, name: &str) {
    match module {
        // `from module import name`: the imported member itself must be a
        // debugger entry point (e.g., `from pdb import set_trace`).
        Some(module) => {
            let qualified_name = QualifiedName::user_defined(module).append_member(name);
            if is_debugger_call(&qualified_name) {
                checker.report_diagnostic(
                    Debugger {
                        using_type: DebuggerUsingType::Import(qualified_name.to_string()),
                    },
                    stmt.range(),
                );
            }
        }
        // `import name`: the module itself must be a debugger module
        // (e.g., `import pdb`).
        None => {
            let qualified_name = QualifiedName::user_defined(name);
            if is_debugger_import(&qualified_name) {
                checker.report_diagnostic(
                    Debugger {
                        using_type: DebuggerUsingType::Import(name.to_string()),
                    },
                    stmt.range(),
                );
            }
        }
    }
}
/// Returns `true` if the fully-qualified name is a known debugger entry point
/// (e.g., `pdb.set_trace`, `breakpoint`, `debugpy.listen`).
fn is_debugger_call(qualified_name: &QualifiedName) -> bool {
    matches!(
        qualified_name.segments(),
        ["pdb" | "pudb" | "ipdb", "set_trace"]
            // `sset_trace` (double `s`) is intentional — `ipdb` exports it
            // alongside `set_trace`.
            | ["ipdb", "sset_trace"]
            | ["IPython", "terminal", "embed", "InteractiveShellEmbed"]
            // Older IPython module layout for the same embed class.
            | [
                "IPython",
                "frontend",
                "terminal",
                "embed",
                "InteractiveShellEmbed"
            ]
            | ["celery", "contrib", "rdb", "set_trace"]
            // NOTE(review): the empty-string segment appears to cover the
            // unqualified builtin `breakpoint()` — confirm against how
            // `resolve_qualified_name` encodes builtins.
            | ["builtins" | "", "breakpoint"]
            | ["debugpy", "breakpoint" | "listen" | "wait_for_client"]
            | ["ptvsd", "break_into_debugger" | "wait_for_attach"]
            | ["sys", "breakpointhook" | "__breakpointhook__"]
    )
}
/// Returns `true` if the fully-qualified name refers to a debugger module
/// whose import alone should be flagged (e.g., `import pdb`).
fn is_debugger_import(qualified_name: &QualifiedName) -> bool {
    // Constructed by taking every pattern in `is_debugger_call`, removing the last element in
    // each pattern, and de-duplicating the values.
    // As special-cases, we omit `builtins` and `sys` to allow `import builtins` and `import sys`
    // which are far more general than (e.g.) `import celery.contrib.rdb`.
    matches!(
        qualified_name.segments(),
        ["pdb" | "pudb" | "ipdb" | "debugpy" | "ptvsd"]
            | ["IPython", "terminal", "embed"]
            // Fixed: dropped the stray trailing comma inside this slice
            // pattern, for consistency with the sibling patterns.
            | ["IPython", "frontend", "terminal", "embed"]
            | ["celery", "contrib", "rdb"]
    )
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_annotations/settings.rs | crates/ruff_linter/src/rules/flake8_annotations/settings.rs | //! Settings for the `flake-annotations` plugin.
use crate::display_settings;
use ruff_macros::CacheKey;
use std::fmt::{Display, Formatter};
// Settings for the flake8-annotations rules. Each flag mirrors the
// `lint.flake8-annotations.*` option with the same (kebab-cased) name.
#[derive(Debug, Clone, Default, CacheKey)]
#[expect(clippy::struct_excessive_bools)]
pub struct Settings {
    pub mypy_init_return: bool,
    pub suppress_dummy_args: bool,
    pub suppress_none_returning: bool,
    pub allow_star_arg_any: bool,
    pub ignore_fully_untyped: bool,
}
impl Display for Settings {
    // Render every field under the `linter.flake8_annotations` namespace via
    // the shared `display_settings!` macro (used for settings debug output).
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        display_settings! {
            formatter = f,
            namespace = "linter.flake8_annotations",
            fields = [
                self.mypy_init_return,
                self.suppress_dummy_args,
                self.suppress_none_returning,
                self.allow_star_arg_any,
                self.ignore_fully_untyped
            ]
        }
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_annotations/helpers.rs | crates/ruff_linter/src/rules/flake8_annotations/helpers.rs | use itertools::Itertools;
use rustc_hash::FxHashSet;
use ruff_python_ast::helpers::{
ReturnStatementVisitor, pep_604_union, typing_optional, typing_union,
};
use ruff_python_ast::name::Name;
use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::{self as ast, Expr, ExprContext};
use ruff_python_semantic::analyze::terminal::Terminal;
use ruff_python_semantic::analyze::type_inference::{NumberLike, PythonType, ResolvedPythonType};
use ruff_python_semantic::analyze::visibility;
use ruff_python_semantic::{Definition, SemanticModel};
use ruff_text_size::{TextRange, TextSize};
use crate::Edit;
use crate::checkers::ast::Checker;
use ruff_python_ast::PythonVersion;
/// Return the name of the function, if it's overloaded.
///
/// Returns `None` when the definition is not a function, or when the function
/// does not carry an `@overload` decorator.
pub(crate) fn overloaded_name<'a>(
    definition: &'a Definition,
    semantic: &SemanticModel,
) -> Option<&'a str> {
    let function = definition.as_function_def()?;
    visibility::is_overload(&function.decorator_list, semantic)
        .then(|| function.name.as_str())
}
/// Return `true` if the definition is the implementation for an overloaded
/// function.
///
/// The implementation shares its name with the `@overload`-decorated stubs but
/// is itself undecorated.
pub(crate) fn is_overload_impl(
    definition: &Definition,
    overloaded_name: &str,
    semantic: &SemanticModel,
) -> bool {
    definition.as_function_def().is_some_and(|function| {
        // An `@overload`-decorated definition is a stub, not the implementation.
        !visibility::is_overload(&function.decorator_list, semantic)
            && function.name.as_str() == overloaded_name
    })
}
/// Given a function, guess its return type.
///
/// Returns `None` when the function is a generator or when the return type
/// cannot be inferred from the `return` statements alone.
pub(crate) fn auto_return_type(function: &ast::StmtFunctionDef) -> Option<AutoPythonType> {
    // Collect all the `return` statements.
    let returns = {
        let mut visitor = ReturnStatementVisitor::default();
        visitor.visit_body(&function.body);
        // Ignore generators.
        if visitor.is_generator {
            return None;
        }
        visitor.returns
    };
    // Determine the terminal behavior (i.e., implicit return, no return, etc.).
    let terminal = Terminal::from_function(function);
    // If every control flow path raises an exception, return `NoReturn`.
    if terminal == Terminal::Raise {
        return Some(AutoPythonType::Never);
    }
    // Determine the return type of the first `return` statement.
    let Some((return_statement, returns)) = returns.split_first() else {
        // No `return` statements at all: the function implicitly returns `None`.
        return Some(AutoPythonType::Atom(PythonType::None));
    };
    let mut return_type = return_statement.value.as_deref().map_or(
        ResolvedPythonType::Atom(PythonType::None),
        ResolvedPythonType::from,
    );
    // Merge the return types of the remaining `return` statements.
    for return_statement in returns {
        return_type = return_type.union(return_statement.value.as_deref().map_or(
            ResolvedPythonType::Atom(PythonType::None),
            ResolvedPythonType::from,
        ));
    }
    // If the function has an implicit return, union with `None`, as in:
    // ```python
    // def func(x: int):
    //     if x > 0:
    //         return 1
    // ```
    if terminal.has_implicit_return() {
        return_type = return_type.union(ResolvedPythonType::Atom(PythonType::None));
    }
    match return_type {
        ResolvedPythonType::Atom(python_type) => Some(AutoPythonType::Atom(python_type)),
        ResolvedPythonType::Union(python_types) => Some(AutoPythonType::Union(python_types)),
        // Give up rather than suggest an unknown or contradictory annotation.
        ResolvedPythonType::Unknown => None,
        ResolvedPythonType::TypeError => None,
    }
}
// A return type inferred from a function body, used to build a fix.
#[derive(Debug)]
pub(crate) enum AutoPythonType {
    // Every control-flow path raises: rendered as `Never` / `NoReturn`.
    Never,
    // A single concrete type.
    Atom(PythonType),
    // Multiple possible types, rendered as a union.
    Union(FxHashSet<PythonType>),
}
impl AutoPythonType {
    /// Convert an [`AutoPythonType`] into an [`Expr`].
    ///
    /// If the [`Expr`] relies on importing any external symbols, those imports will be returned as
    /// additional edits.
    ///
    /// Returns `None` when the type can't be expressed (e.g., a required
    /// import is unavailable or the type has no usable annotation syntax).
    pub(crate) fn into_expression(
        self,
        checker: &Checker,
        at: TextSize,
    ) -> Option<(Expr, Vec<Edit>)> {
        let target_version = checker.target_version();
        match self {
            AutoPythonType::Never => {
                // Prefer `Never` on 3.11+; otherwise use the older `NoReturn` spelling.
                let member = if target_version >= PythonVersion::PY311 {
                    "Never"
                } else {
                    "NoReturn"
                };
                let (no_return_edit, binding) = checker
                    .typing_importer(member, PythonVersion::lowest())?
                    .import(at)
                    .ok()?;
                let expr = Expr::Name(ast::ExprName {
                    id: Name::from(binding),
                    range: TextRange::default(),
                    node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                    ctx: ExprContext::Load,
                });
                Some((expr, vec![no_return_edit]))
            }
            AutoPythonType::Atom(python_type) => type_expr(python_type, checker, at),
            AutoPythonType::Union(python_types) => {
                if target_version >= PythonVersion::PY310 {
                    // Aggregate all the individual types (e.g., `int`, `float`).
                    let mut all_edits = Vec::new();
                    let names = python_types
                        .iter()
                        .sorted_unstable()
                        .map(|python_type| {
                            let (expr, mut edits) = type_expr(*python_type, checker, at)?;
                            all_edits.append(&mut edits);
                            Some(expr)
                        })
                        .collect::<Option<Vec<_>>>()?;
                    // Wrap in a bitwise union (e.g., `int | float`).
                    let expr = pep_604_union(&names);
                    Some((expr, all_edits))
                } else {
                    // Pre-3.10: fall back to `typing.Optional` / `typing.Union`.
                    let python_types = python_types
                        .into_iter()
                        .sorted_unstable()
                        .collect::<Vec<_>>();
                    match python_types.as_slice() {
                        [python_type, PythonType::None] | [PythonType::None, python_type] => {
                            let (element, mut edits) = type_expr(*python_type, checker, at)?;
                            // Ex) `Optional[int]`
                            let (optional_edit, binding) = checker
                                .typing_importer("Optional", PythonVersion::lowest())?
                                .import(at)
                                .ok()?;
                            let expr = typing_optional(element, Name::from(binding));
                            edits.push(optional_edit);
                            Some((expr, edits))
                        }
                        _ => {
                            let mut all_edits = Vec::new();
                            let elements = python_types
                                .into_iter()
                                .map(|python_type| {
                                    let (expr, mut edits) = type_expr(python_type, checker, at)?;
                                    all_edits.append(&mut edits);
                                    Some(expr)
                                })
                                .collect::<Option<Vec<_>>>()?;
                            // Ex) `Union[int, str]`
                            let (union_edit, binding) = checker
                                .typing_importer("Union", PythonVersion::lowest())?
                                .import(at)
                                .ok()?;
                            let expr = typing_union(&elements, Name::from(binding));
                            all_edits.push(union_edit);
                            Some((expr, all_edits))
                        }
                    }
                }
            }
        }
    }
}
/// Given a [`PythonType`], return an [`Expr`] that resolves to that type.
///
/// If the [`Expr`] relies on importing any external symbols, those imports will be returned as
/// additional edits.
///
/// Returns `None` for types with no usable, unparameterized annotation
/// (e.g., `Ellipsis`, bare containers, generators).
pub(crate) fn type_expr(
    python_type: PythonType,
    checker: &Checker,
    at: TextSize,
) -> Option<(Expr, Vec<Edit>)> {
    // Build a name expression for a builtin symbol via the importer; an edit
    // is produced only when one is needed to bring the symbol into scope.
    fn name(name: &str, checker: &Checker, at: TextSize) -> Option<(Expr, Vec<Edit>)> {
        let (edit, binding) = checker
            .importer()
            .get_or_import_builtin_symbol(name, at, checker.semantic())
            .ok()?;
        let expr = Expr::Name(ast::ExprName {
            id: binding.into(),
            range: TextRange::default(),
            node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            ctx: ExprContext::Load,
        });
        Some((expr, edit.map_or_else(Vec::new, |edit| vec![edit])))
    }
    match python_type {
        PythonType::String => name("str", checker, at),
        PythonType::Bytes => name("bytes", checker, at),
        PythonType::Number(number) => {
            let symbol = match number {
                NumberLike::Integer => "int",
                NumberLike::Float => "float",
                NumberLike::Complex => "complex",
                NumberLike::Bool => "bool",
            };
            name(symbol, checker, at)
        }
        PythonType::None => {
            let expr = Expr::NoneLiteral(ast::ExprNoneLiteral::default());
            Some((expr, vec![]))
        }
        // No concrete annotation can be emitted for these.
        PythonType::Ellipsis => None,
        PythonType::Dict => None,
        PythonType::List => None,
        PythonType::Set => None,
        PythonType::Tuple => None,
        PythonType::Generator => None,
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_annotations/mod.rs | crates/ruff_linter/src/rules/flake8_annotations/mod.rs | //! Rules from [flake8-annotations](https://pypi.org/project/flake8-annotations/).
pub(crate) mod helpers;
pub(crate) mod rules;
pub mod settings;
#[cfg(test)]
mod tests {
    use std::path::Path;

    use anyhow::Result;

    use crate::assert_diagnostics;
    use crate::registry::Rule;
    use crate::settings::LinterSettings;
    use crate::test::test_path;
    use ruff_python_ast::PythonVersion;

    // Tests with no setting overrides pass `LinterSettings::for_rules(...)`
    // directly; the previous `LinterSettings { ..expr }` struct-update with no
    // overridden fields was redundant (clippy `needless_update`).

    #[test]
    fn defaults() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_annotations/annotation_presence.py"),
            &LinterSettings::for_rules(vec![
                Rule::MissingTypeFunctionArgument,
                Rule::MissingTypeArgs,
                Rule::MissingTypeKwargs,
                Rule::MissingReturnTypeUndocumentedPublicFunction,
                Rule::MissingReturnTypePrivateFunction,
                Rule::MissingReturnTypeSpecialMethod,
                Rule::MissingReturnTypeStaticMethod,
                Rule::MissingReturnTypeClassMethod,
                Rule::AnyType,
            ]),
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    #[test]
    fn ignore_fully_untyped() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_annotations/ignore_fully_untyped.py"),
            &LinterSettings {
                flake8_annotations: super::settings::Settings {
                    ignore_fully_untyped: true,
                    ..Default::default()
                },
                ..LinterSettings::for_rules(vec![
                    Rule::MissingTypeFunctionArgument,
                    Rule::MissingTypeArgs,
                    Rule::MissingTypeKwargs,
                    Rule::MissingReturnTypeUndocumentedPublicFunction,
                    Rule::MissingReturnTypePrivateFunction,
                    Rule::MissingReturnTypeSpecialMethod,
                    Rule::MissingReturnTypeStaticMethod,
                    Rule::MissingReturnTypeClassMethod,
                    Rule::AnyType,
                ])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    #[test]
    fn suppress_dummy_args() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_annotations/suppress_dummy_args.py"),
            &LinterSettings {
                flake8_annotations: super::settings::Settings {
                    suppress_dummy_args: true,
                    ..Default::default()
                },
                ..LinterSettings::for_rules(vec![
                    Rule::MissingTypeFunctionArgument,
                    Rule::MissingTypeArgs,
                    Rule::MissingTypeKwargs,
                ])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    #[test]
    fn mypy_init_return() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_annotations/mypy_init_return.py"),
            &LinterSettings {
                flake8_annotations: super::settings::Settings {
                    mypy_init_return: true,
                    ..Default::default()
                },
                ..LinterSettings::for_rules(vec![
                    Rule::MissingReturnTypeUndocumentedPublicFunction,
                    Rule::MissingReturnTypePrivateFunction,
                    Rule::MissingReturnTypeSpecialMethod,
                    Rule::MissingReturnTypeStaticMethod,
                    Rule::MissingReturnTypeClassMethod,
                ])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    #[test]
    fn auto_return_type() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_annotations/auto_return_type.py"),
            &LinterSettings::for_rules(vec![
                Rule::MissingReturnTypeUndocumentedPublicFunction,
                Rule::MissingReturnTypePrivateFunction,
                Rule::MissingReturnTypeSpecialMethod,
                Rule::MissingReturnTypeStaticMethod,
                Rule::MissingReturnTypeClassMethod,
            ]),
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    #[test]
    fn auto_return_type_py38() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_annotations/auto_return_type.py"),
            &LinterSettings {
                unresolved_target_version: PythonVersion::PY38.into(),
                ..LinterSettings::for_rules(vec![
                    Rule::MissingReturnTypeUndocumentedPublicFunction,
                    Rule::MissingReturnTypePrivateFunction,
                    Rule::MissingReturnTypeSpecialMethod,
                    Rule::MissingReturnTypeStaticMethod,
                    Rule::MissingReturnTypeClassMethod,
                ])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    #[test]
    fn suppress_none_returning() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_annotations/suppress_none_returning.py"),
            &LinterSettings {
                flake8_annotations: super::settings::Settings {
                    suppress_none_returning: true,
                    ..Default::default()
                },
                ..LinterSettings::for_rules(vec![
                    Rule::MissingTypeFunctionArgument,
                    Rule::MissingTypeArgs,
                    Rule::MissingTypeKwargs,
                    Rule::MissingReturnTypeUndocumentedPublicFunction,
                    Rule::MissingReturnTypePrivateFunction,
                    Rule::MissingReturnTypeSpecialMethod,
                    Rule::MissingReturnTypeStaticMethod,
                    Rule::MissingReturnTypeClassMethod,
                    Rule::AnyType,
                ])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    #[test]
    fn allow_star_arg_any() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_annotations/allow_star_arg_any.py"),
            &LinterSettings {
                flake8_annotations: super::settings::Settings {
                    allow_star_arg_any: true,
                    ..Default::default()
                },
                ..LinterSettings::for_rules(vec![Rule::AnyType])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    #[test]
    fn allow_overload() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_annotations/allow_overload.py"),
            &LinterSettings::for_rules(vec![
                Rule::MissingReturnTypeUndocumentedPublicFunction,
                Rule::MissingReturnTypePrivateFunction,
                Rule::MissingReturnTypeSpecialMethod,
                Rule::MissingReturnTypeStaticMethod,
                Rule::MissingReturnTypeClassMethod,
            ]),
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    #[test]
    fn allow_nested_overload() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_annotations/allow_nested_overload.py"),
            &LinterSettings::for_rules(vec![
                Rule::MissingReturnTypeUndocumentedPublicFunction,
                Rule::MissingReturnTypePrivateFunction,
                Rule::MissingReturnTypeSpecialMethod,
                Rule::MissingReturnTypeStaticMethod,
                Rule::MissingReturnTypeClassMethod,
            ]),
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    #[test]
    fn simple_magic_methods() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_annotations/simple_magic_methods.py"),
            &LinterSettings::for_rule(Rule::MissingReturnTypeSpecialMethod),
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }

    #[test]
    fn shadowed_builtins() -> Result<()> {
        let diagnostics = test_path(
            Path::new("flake8_annotations/shadowed_builtins.py"),
            &LinterSettings::for_rules(vec![
                Rule::MissingReturnTypeUndocumentedPublicFunction,
                Rule::MissingReturnTypePrivateFunction,
                Rule::MissingReturnTypeSpecialMethod,
                Rule::MissingReturnTypeStaticMethod,
                Rule::MissingReturnTypeClassMethod,
            ]),
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_annotations/rules/mod.rs | crates/ruff_linter/src/rules/flake8_annotations/rules/mod.rs | pub(crate) use definition::*;
mod definition;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_annotations/rules/definition.rs | crates/ruff_linter/src/rules/flake8_annotations/rules/definition.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::ReturnStatementVisitor;
use ruff_python_ast::identifier::Identifier;
use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::{self as ast, Expr, Stmt};
use ruff_python_semantic::Definition;
use ruff_python_semantic::analyze::type_inference::{NumberLike, PythonType};
use ruff_python_semantic::analyze::visibility;
use ruff_python_stdlib::typing::simple_magic_return_type;
use ruff_text_size::Ranged;
use crate::checkers::ast::{Checker, DiagnosticGuard};
use crate::registry::Rule;
use crate::rules::flake8_annotations::helpers::{auto_return_type, type_expr};
use crate::rules::ruff::typing::type_hint_resolves_to_any;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks that function arguments have type annotations.
///
/// ## Why is this bad?
/// Type annotations are a good way to document the types of function arguments. They also
/// help catch bugs, when used alongside a type checker, by ensuring that the types of
/// any provided arguments match expectation.
///
/// ## Example
///
/// ```python
/// def foo(x): ...
/// ```
///
/// Use instead:
///
/// ```python
/// def foo(x: int): ...
/// ```
///
/// ## Options
/// - `lint.flake8-annotations.suppress-dummy-args`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.105")]
pub(crate) struct MissingTypeFunctionArgument {
    // Name of the unannotated argument, interpolated into the message.
    name: String,
}

impl Violation for MissingTypeFunctionArgument {
    #[derive_message_formats]
    fn message(&self) -> String {
        let Self { name } = self;
        format!("Missing type annotation for function argument `{name}`")
    }
}

/// ## What it does
/// Checks that function `*args` arguments have type annotations.
///
/// ## Why is this bad?
/// Type annotations are a good way to document the types of function arguments. They also
/// help catch bugs, when used alongside a type checker, by ensuring that the types of
/// any provided arguments match expectation.
///
/// ## Example
///
/// ```python
/// def foo(*args): ...
/// ```
///
/// Use instead:
///
/// ```python
/// def foo(*args: int): ...
/// ```
///
/// ## Options
/// - `lint.flake8-annotations.suppress-dummy-args`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.105")]
pub(crate) struct MissingTypeArgs {
    // Name of the unannotated `*args` parameter, interpolated into the message.
    name: String,
}

impl Violation for MissingTypeArgs {
    #[derive_message_formats]
    fn message(&self) -> String {
        let Self { name } = self;
        format!("Missing type annotation for `*{name}`")
    }
}

/// ## What it does
/// Checks that function `**kwargs` arguments have type annotations.
///
/// ## Why is this bad?
/// Type annotations are a good way to document the types of function arguments. They also
/// help catch bugs, when used alongside a type checker, by ensuring that the types of
/// any provided arguments match expectation.
///
/// ## Example
///
/// ```python
/// def foo(**kwargs): ...
/// ```
///
/// Use instead:
///
/// ```python
/// def foo(**kwargs: int): ...
/// ```
///
/// ## Options
/// - `lint.flake8-annotations.suppress-dummy-args`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.105")]
pub(crate) struct MissingTypeKwargs {
    // Name of the unannotated `**kwargs` parameter, interpolated into the message.
    name: String,
}

impl Violation for MissingTypeKwargs {
    #[derive_message_formats]
    fn message(&self) -> String {
        let Self { name } = self;
        format!("Missing type annotation for `**{name}`")
    }
}
/// ## Removed
/// This rule has been removed because type checkers can infer this type without annotation.
///
/// ## What it does
/// Checks that instance method `self` arguments have type annotations.
///
/// ## Why is this bad?
/// Type annotations are a good way to document the types of function arguments. They also
/// help catch bugs, when used alongside a type checker, by ensuring that the types of
/// any provided arguments match expectation.
///
/// Note that many type checkers will infer the type of `self` automatically, so this
/// annotation is not strictly necessary.
///
/// ## Example
///
/// ```python
/// class Foo:
///     def bar(self): ...
/// ```
///
/// Use instead:
///
/// ```python
/// class Foo:
///     def bar(self: "Foo"): ...
/// ```
#[derive(ViolationMetadata)]
#[deprecated(note = "ANN101 has been removed")]
#[violation_metadata(removed_since = "0.8.0")]
pub(crate) struct MissingTypeSelf;

// Deprecated stub: the rule was removed, so `message` can never be reached;
// only the historical message format is retained.
#[expect(deprecated)]
impl Violation for MissingTypeSelf {
    fn message(&self) -> String {
        unreachable!("ANN101 has been removed");
    }

    fn message_formats() -> &'static [&'static str] {
        &["Missing type annotation for `{name}` in method"]
    }
}

/// ## Removed
/// This rule has been removed because type checkers can infer this type without annotation.
///
/// ## What it does
/// Checks that class method `cls` arguments have type annotations.
///
/// ## Why is this bad?
/// Type annotations are a good way to document the types of function arguments. They also
/// help catch bugs, when used alongside a type checker, by ensuring that the types of
/// any provided arguments match expectation.
///
/// Note that many type checkers will infer the type of `cls` automatically, so this
/// annotation is not strictly necessary.
///
/// ## Example
///
/// ```python
/// class Foo:
///     @classmethod
///     def bar(cls): ...
/// ```
///
/// Use instead:
///
/// ```python
/// class Foo:
///     @classmethod
///     def bar(cls: Type["Foo"]): ...
/// ```
#[derive(ViolationMetadata)]
#[deprecated(note = "ANN102 has been removed")]
#[violation_metadata(removed_since = "0.8.0")]
pub(crate) struct MissingTypeCls;

// Deprecated stub: the rule was removed, so `message` can never be reached;
// only the historical message format is retained.
#[expect(deprecated)]
impl Violation for MissingTypeCls {
    fn message(&self) -> String {
        unreachable!("ANN102 has been removed")
    }

    fn message_formats() -> &'static [&'static str] {
        &["Missing type annotation for `{name}` in classmethod"]
    }
}
/// ## What it does
/// Checks that public functions and methods have return type annotations.
///
/// ## Why is this bad?
/// Type annotations are a good way to document the return types of functions. They also
/// help catch bugs, when used alongside a type checker, by ensuring that the types of
/// any returned values, and the types expected by callers, match expectation.
///
/// ## Example
/// ```python
/// def add(a, b):
///     return a + b
/// ```
///
/// Use instead:
/// ```python
/// def add(a: int, b: int) -> int:
///     return a + b
/// ```
///
/// ## Availability
///
/// Because this rule relies on the third-party `typing_extensions` module for some Python versions,
/// its diagnostic will not be emitted, and no fix will be offered, if `typing_extensions` imports
/// have been disabled by the [`lint.typing-extensions`] linter option.
///
/// ## Options
///
/// - `lint.typing-extensions`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.105")]
pub(crate) struct MissingReturnTypeUndocumentedPublicFunction {
    // Function name, interpolated into the message.
    name: String,
    // Inferred return annotation, when one could be determined; drives the fix title.
    annotation: Option<String>,
}

impl Violation for MissingReturnTypeUndocumentedPublicFunction {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let Self { name, .. } = self;
        format!("Missing return type annotation for public function `{name}`")
    }

    fn fix_title(&self) -> Option<String> {
        // Mention the inferred annotation when one is available.
        Some(match self.annotation.as_deref() {
            Some(annotation) => format!("Add return type annotation: `{annotation}`"),
            None => "Add return type annotation".to_string(),
        })
    }
}
/// ## What it does
/// Checks that private functions and methods have return type annotations.
///
/// ## Why is this bad?
/// Type annotations are a good way to document the return types of functions. They also
/// help catch bugs, when used alongside a type checker, by ensuring that the types of
/// any returned values, and the types expected by callers, match expectation.
///
/// ## Example
/// ```python
/// def _add(a, b):
///     return a + b
/// ```
///
/// Use instead:
/// ```python
/// def _add(a: int, b: int) -> int:
///     return a + b
/// ```
///
/// ## Availability
///
/// Because this rule relies on the third-party `typing_extensions` module for some Python versions,
/// its diagnostic will not be emitted, and no fix will be offered, if `typing_extensions` imports
/// have been disabled by the [`lint.typing-extensions`] linter option.
///
/// ## Options
///
/// - `lint.typing-extensions`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.105")]
pub(crate) struct MissingReturnTypePrivateFunction {
    // Function name, interpolated into the message.
    name: String,
    // Inferred return annotation, when available; drives the fix title.
    annotation: Option<String>,
}

impl Violation for MissingReturnTypePrivateFunction {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let Self { name, .. } = self;
        format!("Missing return type annotation for private function `{name}`")
    }

    // Mention the inferred annotation in the fix title when one is available.
    fn fix_title(&self) -> Option<String> {
        let title = match &self.annotation {
            Some(annotation) => format!("Add return type annotation: `{annotation}`"),
            None => "Add return type annotation".to_string(),
        };
        Some(title)
    }
}

/// ## What it does
/// Checks that "special" methods, like `__init__`, `__new__`, and `__call__`, have
/// return type annotations.
///
/// ## Why is this bad?
/// Type annotations are a good way to document the return types of functions. They also
/// help catch bugs, when used alongside a type checker, by ensuring that the types of
/// any returned values, and the types expected by callers, match expectation.
///
/// Note that type checkers often allow you to omit the return type annotation for
/// `__init__` methods, as long as at least one argument has a type annotation. To
/// opt in to this behavior, use the `mypy-init-return` setting in your `pyproject.toml`
/// or `ruff.toml` file:
///
/// ```toml
/// [tool.ruff.lint.flake8-annotations]
/// mypy-init-return = true
/// ```
///
/// ## Example
/// ```python
/// class Foo:
///     def __init__(self, x: int):
///         self.x = x
/// ```
///
/// Use instead:
/// ```python
/// class Foo:
///     def __init__(self, x: int) -> None:
///         self.x = x
/// ```
///
/// ## Options
///
/// - `lint.flake8-annotations.mypy-init-return`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.105")]
pub(crate) struct MissingReturnTypeSpecialMethod {
    // Method name, interpolated into the message.
    name: String,
    // Inferred return annotation, when available; drives the fix title.
    annotation: Option<String>,
}

impl Violation for MissingReturnTypeSpecialMethod {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let Self { name, .. } = self;
        format!("Missing return type annotation for special method `{name}`")
    }

    // Mention the inferred annotation in the fix title when one is available.
    fn fix_title(&self) -> Option<String> {
        let title = match &self.annotation {
            Some(annotation) => format!("Add return type annotation: `{annotation}`"),
            None => "Add return type annotation".to_string(),
        };
        Some(title)
    }
}

/// ## What it does
/// Checks that static methods have return type annotations.
///
/// ## Why is this bad?
/// Type annotations are a good way to document the return types of functions. They also
/// help catch bugs, when used alongside a type checker, by ensuring that the types of
/// any returned values, and the types expected by callers, match expectation.
///
/// ## Example
/// ```python
/// class Foo:
///     @staticmethod
///     def bar():
///         return 1
/// ```
///
/// Use instead:
/// ```python
/// class Foo:
///     @staticmethod
///     def bar() -> int:
///         return 1
/// ```
///
/// ## Options
///
/// - `lint.flake8-annotations.suppress-none-returning`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.105")]
pub(crate) struct MissingReturnTypeStaticMethod {
    // Method name, interpolated into the message.
    name: String,
    // Inferred return annotation, when available; drives the fix title.
    annotation: Option<String>,
}

impl Violation for MissingReturnTypeStaticMethod {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let Self { name, .. } = self;
        format!("Missing return type annotation for staticmethod `{name}`")
    }

    // Mention the inferred annotation in the fix title when one is available.
    fn fix_title(&self) -> Option<String> {
        let title = match &self.annotation {
            Some(annotation) => format!("Add return type annotation: `{annotation}`"),
            None => "Add return type annotation".to_string(),
        };
        Some(title)
    }
}

/// ## What it does
/// Checks that class methods have return type annotations.
///
/// ## Why is this bad?
/// Type annotations are a good way to document the return types of functions. They also
/// help catch bugs, when used alongside a type checker, by ensuring that the types of
/// any returned values, and the types expected by callers, match expectation.
///
/// ## Example
/// ```python
/// class Foo:
///     @classmethod
///     def bar(cls):
///         return 1
/// ```
///
/// Use instead:
/// ```python
/// class Foo:
///     @classmethod
///     def bar(cls) -> int:
///         return 1
/// ```
///
/// ## Options
///
/// - `lint.flake8-annotations.suppress-none-returning`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.105")]
pub(crate) struct MissingReturnTypeClassMethod {
    // Method name, interpolated into the message.
    name: String,
    // Inferred return annotation, when available; drives the fix title.
    annotation: Option<String>,
}

impl Violation for MissingReturnTypeClassMethod {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let Self { name, .. } = self;
        format!("Missing return type annotation for classmethod `{name}`")
    }

    // Mention the inferred annotation in the fix title when one is available.
    fn fix_title(&self) -> Option<String> {
        let title = match &self.annotation {
            Some(annotation) => format!("Add return type annotation: `{annotation}`"),
            None => "Add return type annotation".to_string(),
        };
        Some(title)
    }
}
/// ## What it does
/// Checks that function arguments are annotated with a more specific type than
/// `Any`.
///
/// ## Why is this bad?
/// `Any` is a special type indicating an unconstrained type. When an
/// expression is annotated with type `Any`, type checkers will allow all
/// operations on it.
///
/// It's better to be explicit about the type of an expression, and to use
/// `Any` as an "escape hatch" only when it is really needed.
///
/// ## Example
///
/// ```python
/// from typing import Any
///
///
/// def foo(x: Any): ...
/// ```
///
/// Use instead:
///
/// ```python
/// def foo(x: int): ...
/// ```
///
/// ## Known problems
///
/// Type aliases are unsupported and can lead to false positives.
/// For example, the following will trigger this rule inadvertently:
///
/// ```python
/// from typing import Any
///
/// MyAny = Any
///
///
/// def foo(x: MyAny): ...
/// ```
///
/// ## Options
/// - `lint.flake8-annotations.allow-star-arg-any`
///
/// ## References
/// - [Typing spec: `Any`](https://typing.python.org/en/latest/spec/special-types.html#any)
/// - [Python documentation: `typing.Any`](https://docs.python.org/3/library/typing.html#typing.Any)
/// - [Mypy documentation: The Any type](https://mypy.readthedocs.io/en/stable/kinds_of_types.html#the-any-type)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.108")]
pub(crate) struct AnyType {
    /// Name of the annotated parameter or function reported in the message.
    name: String,
}
impl Violation for AnyType {
    #[derive_message_formats]
    fn message(&self) -> String {
        // `name` is supplied by the caller (parameter name, `*args`/`**kwargs`
        // spelling, or function name).
        let Self { name } = self;
        format!("Dynamically typed expressions (typing.Any) are disallowed in `{name}`")
    }
}
fn is_none_returning(body: &[Stmt]) -> bool {
let mut visitor = ReturnStatementVisitor::default();
visitor.visit_body(body);
for stmt in visitor.returns {
if let Some(value) = stmt.value.as_deref() {
if !value.is_none_literal_expr() {
return false;
}
}
}
true
}
/// ANN401: push a diagnostic onto `context` if `annotation` resolves to
/// `typing.Any`.
///
/// `func` lazily produces the name used in the diagnostic message (e.g., the
/// parameter or function name); it is only invoked when a diagnostic is
/// actually emitted.
fn check_dynamically_typed<'a, 'b, F>(
    checker: &'a Checker<'b>,
    annotation: &Expr,
    func: F,
    context: &mut Vec<DiagnosticGuard<'a, 'b>>,
) where
    F: FnOnce() -> String,
{
    if let Expr::StringLiteral(string_expr) = annotation {
        // Quoted annotations: analyze the parsed inner expression; skip
        // silently if the string does not parse as a type annotation.
        if let Ok(parsed_annotation) = checker.parse_type_annotation(string_expr) {
            if type_hint_resolves_to_any(
                parsed_annotation.expression(),
                checker,
                checker.target_version(),
            ) {
                context
                    .push(checker.report_diagnostic(AnyType { name: func() }, annotation.range()));
            }
        }
    } else if type_hint_resolves_to_any(annotation, checker, checker.target_version()) {
        // Unquoted annotations can be checked directly. (Collapsed the former
        // `else { if … }` into `else if` per `clippy::collapsible_else_if`.)
        context.push(checker.report_diagnostic(AnyType { name: func() }, annotation.range()));
    }
}
/// Return `true` if a function appears to be a stub.
fn is_stub_function(function_def: &ast::StmtFunctionDef, checker: &Checker) -> bool {
    /// Returns `true` if every statement in the body is `pass`, `...`, or a
    /// bare string expression (e.g., a docstring).
    fn is_empty_body(function_def: &ast::StmtFunctionDef) -> bool {
        function_def.body.iter().all(|stmt| match stmt {
            Stmt::Pass(_) => true,
            Stmt::Expr(ast::StmtExpr { value, .. }) => matches!(
                value.as_ref(),
                Expr::StringLiteral(_) | Expr::EllipsisLiteral(_)
            ),
            _ => false,
        })
    }

    // Only functions with empty bodies can be stubs.
    if !is_empty_body(function_def) {
        return false;
    }

    // An empty body is expected in stub files (`.pyi`), abstract methods, and
    // overload definitions.
    checker.source_type.is_stub()
        || visibility::is_abstract(&function_def.decorator_list, checker.semantic())
        || visibility::is_overload(&function_def.decorator_list, checker.semantic())
}
/// Generate flake8-annotation checks for a given `Definition`.
/// ANN001, ANN401
pub(crate) fn definition(
checker: &Checker,
definition: &Definition,
visibility: visibility::Visibility,
) {
let Some(function) = definition.as_function_def() else {
return;
};
let ast::StmtFunctionDef {
range: _,
node_index: _,
is_async: _,
decorator_list,
name,
type_params: _,
parameters,
returns,
body,
} = function;
let is_method = definition.is_method();
// Keep track of whether we've seen any typed arguments or return values.
let mut has_any_typed_arg = false; // Any argument has been typed?
let mut has_typed_return = false; // Return value has been typed?
// Temporary storage for diagnostics; we emit them at the end
// unless configured to suppress ANN* for declarations that are fully untyped.
let mut diagnostics = Vec::new();
let is_overridden = visibility::is_override(decorator_list, checker.semantic());
// If this is a non-static method, skip `cls` or `self`.
for parameter in parameters.iter_non_variadic_params().skip(usize::from(
is_method && !visibility::is_staticmethod(decorator_list, checker.semantic()),
)) {
// ANN401 for dynamically typed parameters
if let Some(annotation) = parameter.annotation() {
has_any_typed_arg = true;
if checker.is_rule_enabled(Rule::AnyType) && !is_overridden {
check_dynamically_typed(
checker,
annotation,
|| parameter.name().to_string(),
&mut diagnostics,
);
}
} else {
if !(checker.settings().flake8_annotations.suppress_dummy_args
&& checker
.settings()
.dummy_variable_rgx
.is_match(parameter.name()))
{
if checker.is_rule_enabled(Rule::MissingTypeFunctionArgument) {
diagnostics.push(checker.report_diagnostic(
MissingTypeFunctionArgument {
name: parameter.name().to_string(),
},
parameter.parameter.range(),
));
}
}
}
}
// ANN002, ANN401
if let Some(arg) = ¶meters.vararg {
if let Some(expr) = &arg.annotation {
has_any_typed_arg = true;
if !checker.settings().flake8_annotations.allow_star_arg_any {
if checker.is_rule_enabled(Rule::AnyType) && !is_overridden {
let name = &arg.name;
check_dynamically_typed(checker, expr, || format!("*{name}"), &mut diagnostics);
}
}
} else {
if !(checker.settings().flake8_annotations.suppress_dummy_args
&& checker.settings().dummy_variable_rgx.is_match(&arg.name))
{
if checker.is_rule_enabled(Rule::MissingTypeArgs) {
diagnostics.push(checker.report_diagnostic(
MissingTypeArgs {
name: arg.name.to_string(),
},
arg.range(),
));
}
}
}
}
// ANN003, ANN401
if let Some(arg) = ¶meters.kwarg {
if let Some(expr) = &arg.annotation {
has_any_typed_arg = true;
if !checker.settings().flake8_annotations.allow_star_arg_any {
if checker.is_rule_enabled(Rule::AnyType) && !is_overridden {
let name = &arg.name;
check_dynamically_typed(
checker,
expr,
|| format!("**{name}"),
&mut diagnostics,
);
}
}
} else {
if !(checker.settings().flake8_annotations.suppress_dummy_args
&& checker.settings().dummy_variable_rgx.is_match(&arg.name))
{
if checker.is_rule_enabled(Rule::MissingTypeKwargs) {
diagnostics.push(checker.report_diagnostic(
MissingTypeKwargs {
name: arg.name.to_string(),
},
arg.range(),
));
}
}
}
}
// ANN201, ANN202, ANN401
if let Some(expr) = &returns {
has_typed_return = true;
if checker.is_rule_enabled(Rule::AnyType) && !is_overridden {
check_dynamically_typed(checker, expr, || name.to_string(), &mut diagnostics);
}
} else if !(
// Allow omission of return annotation if the function only returns `None`
// (explicitly or implicitly).
checker
.settings()
.flake8_annotations
.suppress_none_returning
&& is_none_returning(body)
) {
// ANN206
if is_method && visibility::is_classmethod(decorator_list, checker.semantic()) {
if checker.is_rule_enabled(Rule::MissingReturnTypeClassMethod) {
let return_type = if is_stub_function(function, checker) {
None
} else {
auto_return_type(function)
.and_then(|return_type| {
return_type.into_expression(checker, function.parameters.start())
})
.map(|(return_type, edits)| (checker.generator().expr(&return_type), edits))
};
let mut diagnostic = checker.report_diagnostic(
MissingReturnTypeClassMethod {
name: name.to_string(),
annotation: return_type.clone().map(|(return_type, ..)| return_type),
},
function.identifier(),
);
if let Some((return_type, edits)) = return_type {
diagnostic.set_fix(Fix::unsafe_edits(
Edit::insertion(format!(" -> {return_type}"), function.parameters.end()),
edits,
));
}
diagnostics.push(diagnostic);
}
} else if is_method && visibility::is_staticmethod(decorator_list, checker.semantic()) {
// ANN205
if checker.is_rule_enabled(Rule::MissingReturnTypeStaticMethod) {
let return_type = if is_stub_function(function, checker) {
None
} else {
auto_return_type(function)
.and_then(|return_type| {
return_type.into_expression(checker, function.parameters.start())
})
.map(|(return_type, edits)| (checker.generator().expr(&return_type), edits))
};
let mut diagnostic = checker.report_diagnostic(
MissingReturnTypeStaticMethod {
name: name.to_string(),
annotation: return_type.clone().map(|(return_type, ..)| return_type),
},
function.identifier(),
);
if let Some((return_type, edits)) = return_type {
diagnostic.set_fix(Fix::unsafe_edits(
Edit::insertion(format!(" -> {return_type}"), function.parameters.end()),
edits,
));
}
diagnostics.push(diagnostic);
}
} else if is_method && visibility::is_init(name) {
// ANN204
// Allow omission of return annotation in `__init__` functions, as long as at
// least one argument is typed.
if checker.is_rule_enabled(Rule::MissingReturnTypeSpecialMethod) {
if !(checker.settings().flake8_annotations.mypy_init_return && has_any_typed_arg) {
let mut diagnostic = checker.report_diagnostic(
MissingReturnTypeSpecialMethod {
name: name.to_string(),
annotation: Some("None".to_string()),
},
function.identifier(),
);
diagnostic.set_fix(Fix::unsafe_edit(Edit::insertion(
" -> None".to_string(),
function.parameters.end(),
)));
diagnostics.push(diagnostic);
}
}
} else if is_method && visibility::is_magic(name) {
if checker.is_rule_enabled(Rule::MissingReturnTypeSpecialMethod) {
let return_type = simple_magic_return_type(name);
let mut diagnostic = checker.report_diagnostic(
MissingReturnTypeSpecialMethod {
name: name.to_string(),
annotation: return_type.map(ToString::to_string),
},
function.identifier(),
);
if let Some(return_type_str) = return_type {
// Convert the simple return type to a proper expression that handles shadowed builtins
let python_type = match return_type_str {
"str" => PythonType::String,
"bytes" => PythonType::Bytes,
"int" => PythonType::Number(NumberLike::Integer),
"float" => PythonType::Number(NumberLike::Float),
"complex" => PythonType::Number(NumberLike::Complex),
"bool" => PythonType::Number(NumberLike::Bool),
"None" => PythonType::None,
_ => return, // Unknown type, skip
};
if let Some((expr, edits)) =
type_expr(python_type, checker, function.parameters.start())
{
let return_type_expr = checker.generator().expr(&expr);
diagnostic.set_fix(Fix::unsafe_edits(
Edit::insertion(
format!(" -> {return_type_expr}"),
function.parameters.end(),
),
edits,
));
}
}
diagnostics.push(diagnostic);
}
} else {
match visibility {
visibility::Visibility::Public => {
if checker.is_rule_enabled(Rule::MissingReturnTypeUndocumentedPublicFunction) {
let return_type = if is_stub_function(function, checker) {
None
} else {
auto_return_type(function)
.and_then(|return_type| {
return_type
.into_expression(checker, function.parameters.start())
})
.map(|(return_type, edits)| {
(checker.generator().expr(&return_type), edits)
})
};
let mut diagnostic = checker.report_diagnostic(
MissingReturnTypeUndocumentedPublicFunction {
name: name.to_string(),
annotation: return_type
.clone()
.map(|(return_type, ..)| return_type),
},
function.identifier(),
);
if let Some((return_type, edits)) = return_type {
diagnostic.set_fix(Fix::unsafe_edits(
Edit::insertion(
format!(" -> {return_type}"),
function.parameters.end(),
),
edits,
));
}
diagnostics.push(diagnostic);
}
}
visibility::Visibility::Private => {
if checker.is_rule_enabled(Rule::MissingReturnTypePrivateFunction) {
let return_type = if is_stub_function(function, checker) {
None
} else {
auto_return_type(function)
.and_then(|return_type| {
return_type
.into_expression(checker, function.parameters.start())
})
.map(|(return_type, edits)| {
(checker.generator().expr(&return_type), edits)
})
};
let mut diagnostic = checker.report_diagnostic(
MissingReturnTypePrivateFunction {
name: name.to_string(),
annotation: return_type
.clone()
.map(|(return_type, ..)| return_type),
},
function.identifier(),
);
if let Some((return_type, edits)) = return_type {
diagnostic.set_fix(Fix::unsafe_edits(
Edit::insertion(
format!(" -> {return_type}"),
function.parameters.end(),
),
edits,
));
}
diagnostics.push(diagnostic);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flynt/helpers.rs | crates/ruff_linter/src/rules/flynt/helpers.rs | use ruff_python_ast::{self as ast, Arguments, ConversionFlag, Expr};
use ruff_text_size::TextRange;
/// Wrap an expression in an [`ast::InterpolatedStringElement::Interpolation`]
/// with no debug text, conversion, or format spec.
fn to_interpolated_string_interpolation_element(inner: &Expr) -> ast::InterpolatedStringElement {
    ast::InterpolatedStringElement::Interpolation(ast::InterpolatedElement {
        expression: Box::new(inner.clone()),
        debug_text: None,
        conversion: ConversionFlag::None,
        format_spec: None,
        // Synthetic node: no source range or index.
        range: TextRange::default(),
        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
    })
}
/// Convert a string to an [`ast::InterpolatedStringLiteralElement`].
pub(super) fn to_interpolated_string_literal_element(s: &str) -> ast::InterpolatedStringElement {
    ast::InterpolatedStringElement::Literal(ast::InterpolatedStringLiteralElement {
        value: Box::from(s),
        // Synthetic node: no source range or index.
        range: TextRange::default(),
        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
    })
}
/// Figure out if `expr` represents a "simple" call
/// (i.e. one that can be safely converted to a formatted value).
fn is_simple_call(expr: &Expr) -> bool {
    let Expr::Call(ast::ExprCall {
        func,
        arguments: Arguments { args, keywords, .. },
        ..
    }) = expr
    else {
        return false;
    };
    // A call is "simple" when it takes no arguments at all and its callee is a
    // bare name or a chain of attribute accesses.
    args.is_empty() && keywords.is_empty() && is_simple_callee(func)
}
/// Figure out if `func` represents a "simple" callee (a bare name, or a chain of simple
/// attribute accesses).
fn is_simple_callee(func: &Expr) -> bool {
    // Walk down the attribute chain iteratively instead of recursing.
    let mut current = func;
    loop {
        match current {
            Expr::Name(_) => return true,
            Expr::Attribute(ast::ExprAttribute { value, .. }) => current = value,
            _ => return false,
        }
    }
}
/// Convert an expression to an f-string or t-string element (if it looks like a good idea).
pub(super) fn to_interpolated_string_element(
    expr: &Expr,
) -> Option<ast::InterpolatedStringElement> {
    match expr {
        // String constants become literal elements, preserving the original
        // range and node index.
        Expr::StringLiteral(ast::ExprStringLiteral {
            value,
            range,
            node_index,
        }) => {
            let literal = ast::InterpolatedStringLiteralElement {
                value: value.to_string().into_boxed_str(),
                range: *range,
                node_index: node_index.clone(),
            };
            Some(ast::InterpolatedStringElement::Literal(literal))
        }
        // These should be pretty safe to wrap in a formatted value.
        Expr::NumberLiteral(_) | Expr::BooleanLiteral(_) | Expr::Name(_) | Expr::Attribute(_) => {
            Some(to_interpolated_string_interpolation_element(expr))
        }
        // Calls qualify only when they are argument-free with a simple callee.
        Expr::Call(_) => {
            is_simple_call(expr).then(|| to_interpolated_string_interpolation_element(expr))
        }
        _ => None,
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flynt/mod.rs | crates/ruff_linter/src/rules/flynt/mod.rs | //! Rules from [flynt](https://pypi.org/project/flynt/).
mod helpers;
pub(crate) mod rules;
#[cfg(test)]
mod tests {
    use std::path::Path;
    use anyhow::Result;
    use test_case::test_case;
    use crate::registry::Rule;
    use crate::test::test_path;
    use crate::{assert_diagnostics, settings};
    /// Run each (rule, fixture) pair and snapshot the resulting diagnostics.
    #[test_case(Rule::StaticJoinToFString, Path::new("FLY002.py"))]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        // Snapshot name: `<rule code>_<fixture filename>`.
        let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("flynt").join(path).as_path(),
            &settings::LinterSettings::for_rule(rule_code),
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flynt/rules/static_join_to_fstring.rs | crates/ruff_linter/src/rules/flynt/rules/static_join_to_fstring.rs | use ast::FStringFlags;
use itertools::Itertools;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, Arguments, Expr, StringFlags, str::Quote};
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::fix::edits::pad;
use crate::fix::snippet::SourceCodeSnippet;
use crate::{AlwaysFixableViolation, Edit, Fix};
use crate::rules::flynt::helpers;
/// ## What it does
/// Checks for `str.join` calls that can be replaced with f-strings.
///
/// ## Why is this bad?
/// f-strings are more readable and generally preferred over `str.join` calls.
///
/// ## Example
/// ```python
/// " ".join((foo, bar))
/// ```
///
/// Use instead:
/// ```python
/// f"{foo} {bar}"
/// ```
///
/// ## Fix safety
/// The fix is always marked unsafe because the evaluation of the f-string
/// expressions will default to calling the `__format__` method of each
/// object, whereas `str.join` expects each object to be an instance of
/// `str` and uses the corresponding string. Therefore it is possible for
/// the values of the resulting strings to differ, or for one expression
/// to raise an exception while the other does not.
///
/// ## References
/// - [Python documentation: f-strings](https://docs.python.org/3/reference/lexical_analysis.html#f-strings)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.266")]
pub(crate) struct StaticJoinToFString {
    /// The suggested replacement expression, rendered as source code.
    expression: SourceCodeSnippet,
}
impl AlwaysFixableViolation for StaticJoinToFString {
    #[derive_message_formats]
    fn message(&self) -> String {
        let StaticJoinToFString { expression } = self;
        // Fall back to a generic message when the snippet can't be shown in full.
        if let Some(expression) = expression.full_display() {
            format!("Consider `{expression}` instead of string join")
        } else {
            "Consider f-string instead of string join".to_string()
        }
    }
    fn fix_title(&self) -> String {
        let StaticJoinToFString { expression } = self;
        // Same fallback behavior as `message`.
        if let Some(expression) = expression.full_display() {
            format!("Replace with `{expression}`")
        } else {
            "Replace with f-string".to_string()
        }
    }
}
fn is_static_length(elts: &[Expr]) -> bool {
elts.iter().all(|e| !e.is_starred_expr())
}
/// Build an f-string consisting of `joinees` joined by `joiner` with `flags`.
///
/// Returns `None` when the conversion can't be expressed safely (an element
/// is already an f-string, an element isn't convertible, or no escape-free
/// quoting exists).
fn build_fstring(joiner: &str, joinees: &[Expr], flags: FStringFlags) -> Option<Expr> {
    // If all elements are string constants, join them into a single string.
    if joinees.iter().all(Expr::is_string_literal_expr) {
        let mut flags: Option<ast::StringLiteralFlags> = None;
        let content = joinees
            .iter()
            .filter_map(|expr| {
                if let Expr::StringLiteral(ast::ExprStringLiteral { value, .. }) = expr {
                    if flags.is_none() {
                        // Take the flags from the first Expr
                        flags = Some(value.first_literal_flags());
                    }
                    Some(value.to_str())
                } else {
                    None
                }
            })
            .join(joiner);
        // `flags` is `None` only when `joinees` is empty; bail in that case.
        let mut flags = flags?;
        // If the result is a raw string and contains a newline, use triple quotes.
        if flags.prefix().is_raw() && content.contains(['\n', '\r']) {
            flags = flags.with_triple_quotes(ruff_python_ast::str::TripleQuotes::Yes);
            // Prefer a delimiter that doesn't occur in the content; if both occur, bail.
            if content.contains(flags.quote_str()) {
                flags = flags.with_quote_style(flags.quote_style().opposite());
                if content.contains(flags.quote_str()) {
                    // Both "'''" and "\"\"\"" are present in content; avoid emitting
                    // an invalid raw triple-quoted literal (or escaping). Bail on the fix.
                    return None;
                }
            }
        }
        let node = ast::StringLiteral {
            value: content.into_boxed_str(),
            flags,
            range: TextRange::default(),
            node_index: ruff_python_ast::AtomicNodeIndex::NONE,
        };
        return Some(node.into());
    }
    // Otherwise, interleave the joinees with literal `joiner` separators.
    let mut f_string_elements = Vec::with_capacity(joinees.len() * 2);
    // Track which quote characters appear in any literal part so we can pick a
    // quote style that needs no escaping.
    let mut has_single_quote = joiner.contains('\'');
    let mut has_double_quote = joiner.contains('"');
    let mut first = true;
    for expr in joinees {
        if expr.is_f_string_expr() {
            // Oops, already an f-string. We don't know how to handle those
            // gracefully right now.
            return None;
        }
        // Insert the joiner between elements (but not before the first one).
        if !std::mem::take(&mut first) {
            f_string_elements.push(helpers::to_interpolated_string_literal_element(joiner));
        }
        let element = helpers::to_interpolated_string_element(expr)?;
        if let ast::InterpolatedStringElement::Literal(ast::InterpolatedStringLiteralElement {
            value,
            ..
        }) = &element
        {
            has_single_quote |= value.contains('\'');
            has_double_quote |= value.contains('"');
        }
        f_string_elements.push(element);
    }
    // Flip the quote style if only the current style's quote appears in the
    // content; if both quote characters appear, bail rather than escape.
    let quote = flags.quote_style();
    let adjusted_quote = match quote {
        Quote::Single if has_single_quote && !has_double_quote => quote.opposite(),
        Quote::Double if has_double_quote && !has_single_quote => quote.opposite(),
        _ if has_double_quote && has_single_quote => return None,
        _ => quote,
    };
    let node = ast::FString {
        elements: f_string_elements.into(),
        range: TextRange::default(),
        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
        flags: flags.with_quote_style(adjusted_quote),
    };
    Some(node.into())
}
/// FLY002
pub(crate) fn static_join_to_fstring(checker: &Checker, expr: &Expr, joiner: &str) {
    let Expr::Call(ast::ExprCall {
        arguments: Arguments { args, keywords, .. },
        ..
    }) = expr
    else {
        return;
    };

    // Keyword arguments, or any arity other than exactly one positional
    // argument, indicate some non-standard string join call.
    if !keywords.is_empty() {
        return;
    }
    let [arg] = &**args else {
        return;
    };

    // Only fixed-length lists and tuples are candidates; skip (e.g.)
    // generators, sets, and sequences containing starred elements.
    let joinees = match arg {
        Expr::List(ast::ExprList { elts, .. }) | Expr::Tuple(ast::ExprTuple { elts, .. })
            if is_static_length(elts) =>
        {
            elts
        }
        _ => return,
    };

    // Attempt the conversion; `build_fstring` internally validates that every
    // element is representable as an f-string element.
    let Some(new_expr) = build_fstring(joiner, joinees, checker.default_fstring_flags()) else {
        return;
    };

    let contents = checker.generator().expr(&new_expr);
    let mut diagnostic = checker.report_diagnostic(
        StaticJoinToFString {
            expression: SourceCodeSnippet::new(contents.clone()),
        },
        expr.range(),
    );
    diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
        pad(contents, expr.range(), checker.locator()),
        expr.range(),
    )));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flynt/rules/mod.rs | crates/ruff_linter/src/rules/flynt/rules/mod.rs | pub(crate) use static_join_to_fstring::*;
mod static_join_to_fstring;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_no_pep420/mod.rs | crates/ruff_linter/src/rules/flake8_no_pep420/mod.rs | //! Rules from [flake8-no-pep420](https://pypi.org/project/flake8-no-pep420/).
pub(crate) mod rules;
#[cfg(test)]
mod tests {
    use std::path::{Path, PathBuf};
    use anyhow::Result;
    use test_case::test_case;
    use crate::registry::Rule;
    use crate::assert_diagnostics;
    use crate::settings::LinterSettings;
    use crate::test::{test_path, test_resource_path};
    /// Each case is a fixture directory plus the file inside it to lint.
    #[test_case(Path::new("test_fail_empty"), Path::new("example.py"))]
    #[test_case(Path::new("test_fail_nonempty"), Path::new("example.py"))]
    #[test_case(Path::new("test_ignored"), Path::new("example.py"))]
    #[test_case(Path::new("test_pass_init"), Path::new("example.py"))]
    #[test_case(Path::new("test_pass_namespace_package"), Path::new("example.py"))]
    #[test_case(Path::new("test_pass_pep723"), Path::new("script.py"))]
    #[test_case(Path::new("test_pass_pyi"), Path::new("example.pyi"))]
    #[test_case(Path::new("test_pass_script"), Path::new("script"))]
    #[test_case(Path::new("test_pass_shebang"), Path::new("example.py"))]
    fn default(path: &Path, filename: &Path) -> Result<()> {
        let snapshot = format!("{}", path.to_string_lossy());
        let p = PathBuf::from(format!(
            "flake8_no_pep420/{}/{}",
            path.display(),
            filename.display()
        ));
        let diagnostics = test_path(
            p.as_path(),
            &LinterSettings {
                // One fixture is declared as a namespace package to exercise
                // the `namespace-packages` setting.
                namespace_packages: vec![test_resource_path(
                    "fixtures/flake8_no_pep420/test_pass_namespace_package",
                )],
                ..LinterSettings::for_rule(Rule::ImplicitNamespacePackage)
            },
        )?;
        // Normalize Windows path separators so snapshots are platform-independent.
        insta::with_settings!({filters => vec![(r"\\", "/")]}, {
            assert_diagnostics!(snapshot, diagnostics);
        });
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_no_pep420/rules/mod.rs | crates/ruff_linter/src/rules/flake8_no_pep420/rules/mod.rs | pub(crate) use implicit_namespace_package::*;
mod implicit_namespace_package;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_no_pep420/rules/implicit_namespace_package.rs | crates/ruff_linter/src/rules/flake8_no_pep420/rules/implicit_namespace_package.rs | use std::path::{Path, PathBuf};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::PySourceType;
use ruff_python_ast::script::ScriptTag;
use ruff_python_trivia::CommentRanges;
use ruff_text_size::{TextRange, TextSize};
use crate::Locator;
use crate::Violation;
use crate::checkers::ast::LintContext;
use crate::comments::shebang::ShebangDirective;
use crate::fs;
use crate::package::PackageRoot;
/// ## What it does
/// Checks for packages that are missing an `__init__.py` file.
///
/// ## Why is this bad?
/// Python packages are directories that contain a file named `__init__.py`.
/// The existence of this file indicates that the directory is a Python
/// package, and so it can be imported the same way a module can be
/// imported.
///
/// Directories that lack an `__init__.py` file can still be imported, but
/// they're indicative of a special kind of package, known as a "namespace
/// package" (see: [PEP 420](https://peps.python.org/pep-0420/)).
/// Namespace packages are less widely used, so a package that lacks an
/// `__init__.py` file is typically meant to be a regular package, and
/// the absence of the `__init__.py` file is probably an oversight.
///
/// ## Options
/// - `namespace-packages`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.225")]
pub(crate) struct ImplicitNamespacePackage {
    /// Relativized path of the offending file.
    filename: String,
    /// For nested package roots: the ancestor directory missing an `__init__.py`.
    parent: Option<String>,
}
impl Violation for ImplicitNamespacePackage {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Fix: the messages previously contained the literal text `(unknown)`
        // where the offending file's path belongs, leaving the destructured
        // `filename` binding unused. Interpolate `filename` (and `parent`)
        // into the user-facing message instead.
        let ImplicitNamespacePackage { filename, parent } = self;
        match parent {
            None => {
                format!(
                    "File `{filename}` is part of an implicit namespace package. Add an `__init__.py`."
                )
            }
            Some(parent) => {
                format!(
                    "File `{filename}` declares a package, but is nested under an implicit namespace package. Add an `__init__.py` to `{parent}`."
                )
            }
        }
    }
}
/// INP001
///
/// Flag `path` when it lives in an implicit namespace package (a directory on
/// the import path without an `__init__.py`), subject to the exemptions below.
#[expect(clippy::too_many_arguments)]
pub(crate) fn implicit_namespace_package(
    path: &Path,
    package: Option<PackageRoot<'_>>,
    locator: &Locator,
    comment_ranges: &CommentRanges,
    project_root: &Path,
    src: &[PathBuf],
    allow_nested_roots: bool,
    context: &LintContext,
) {
    // `package` is `None` when no enclosing package root was detected for `path`.
    if package.is_none()
        // Ignore non-`.py` files, which don't require an `__init__.py`.
        && PySourceType::try_from_path(path).is_some_and(PySourceType::is_py_file)
        // Ignore `.pyw` files that are also PySourceType::Python but aren't importable namespaces
        && path.extension().is_some_and(|ext| ext == "py")
        // Ignore any files that are direct children of the project root.
        && path
            .parent()
            .is_none_or( |parent| parent != project_root)
        // Ignore any files that are direct children of a source directory (e.g., `src/manage.py`).
        && !path
            .parent()
            .is_some_and( |parent| src.iter().any(|src| src == parent))
        // Ignore files that contain a shebang.
        // (Only a comment starting at offset 0 can be a shebang.)
        && comment_ranges
            .first().filter(|range| range.start() == TextSize::from(0))
            .is_none_or(|range| ShebangDirective::try_extract(locator.slice(*range)).is_none())
        // Ignore PEP 723 scripts.
        && ScriptTag::parse(locator.contents().as_bytes()).is_none()
    {
        context.report_diagnostic(
            ImplicitNamespacePackage {
                filename: fs::relativize_path(path),
                parent: None,
            },
            TextRange::default(),
        );
    } else if allow_nested_roots {
        // With nested roots allowed, an `__init__.py` whose package is nested
        // under a namespace package is also flagged, pointing at the ancestor
        // that lacks its own `__init__.py`.
        if let Some(PackageRoot::Nested { path: root }) = package.as_ref() {
            if path.ends_with("__init__.py") {
                // Identify the intermediary package that's missing the `__init__.py` file.
                if let Some(parent) = root
                    .ancestors()
                    .find(|parent| !parent.join("__init__.py").exists())
                {
                    context.report_diagnostic(
                        ImplicitNamespacePackage {
                            filename: fs::relativize_path(path),
                            parent: Some(fs::relativize_path(parent)),
                        },
                        TextRange::default(),
                    );
                }
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/airflow/helpers.rs | crates/ruff_linter/src/rules/airflow/helpers.rs | use crate::checkers::ast::Checker;
use crate::fix::edits::remove_unused_imports;
use crate::importer::ImportRequest;
use crate::rules::numpy::helpers::{AttributeSearcher, ImportSearcher};
use ruff_diagnostics::{Edit, Fix};
use ruff_python_ast::name::QualifiedNameBuilder;
use ruff_python_ast::statement_visitor::StatementVisitor;
use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::{Expr, ExprAttribute, ExprName, StmtTry};
use ruff_python_semantic::Exceptions;
use ruff_python_semantic::SemanticModel;
use ruff_python_semantic::{MemberNameImport, NameImport};
use ruff_text_size::Ranged;
use ruff_text_size::TextRange;
/// How a deprecated Airflow symbol should be replaced (or why it can't be).
#[derive(Clone, Debug, Eq, PartialEq)]
pub(crate) enum Replacement {
    // There's no replacement or suggestion other than removal
    None,
    // Additional information. Used when there's no direct mapping replacement.
    Message(&'static str),
    // The attribute name of a class has been changed.
    AttrName(&'static str),
    // Symbols updated in Airflow 3 with replacement
    // e.g., `airflow.datasets.Dataset` to `airflow.sdk.Asset`
    Rename {
        module: &'static str,
        name: &'static str,
    },
    // Symbols updated in Airflow 3 with only module changed. Used when we want to match multiple names.
    // e.g., `airflow.configuration.as_dict | get` to `airflow.configuration.conf.as_dict | get`
    SourceModuleMoved {
        module: &'static str,
        name: String,
    },
}
/// How a symbol that moved into an Airflow provider distribution should be
/// replaced; `provider`/`version` identify where the replacement is available.
#[derive(Clone, Debug, Eq, PartialEq)]
pub(crate) enum ProviderReplacement {
    // The symbol was renamed (and moved) into a provider package.
    Rename {
        module: &'static str,
        name: &'static str,
        provider: &'static str,
        version: &'static str,
    },
    // The symbol kept its name, but its module moved to a provider package.
    SourceModuleMovedToProvider {
        module: &'static str,
        name: String,
        provider: &'static str,
        version: &'static str,
    },
}
/// Return `true` if the deprecated `module.name` usage in `expr` is guarded by
/// a `try`/`except` whose `try` suite already uses the symbol from its
/// non-deprecated location (i.e., the deprecated path is only a fallback).
pub(crate) fn is_guarded_by_try_except(
    expr: &Expr,
    module: &str,
    name: &str,
    semantic: &SemanticModel,
) -> bool {
    match expr {
        Expr::Attribute(_) => {
            // Attribute access: the usage must appear inside an exception handler...
            if !semantic.in_exception_handler() {
                return false;
            }
            let Some(try_node) = semantic
                .current_statements()
                .find_map(|stmt| stmt.as_try_stmt())
            else {
                return false;
            };
            // ...whose `try` suppresses `AttributeError`...
            let suspended_exceptions = Exceptions::from_try_stmt(try_node, semantic);
            if !suspended_exceptions.contains(Exceptions::ATTRIBUTE_ERROR) {
                return false;
            }
            // ...and whose `try` suite accesses the non-deprecated location.
            try_block_contains_undeprecated_attribute(try_node, module, name, semantic)
        }
        Expr::Name(ExprName { id, .. }) => {
            let Some(binding_id) = semantic.lookup_symbol(id.as_str()) else {
                return false;
            };
            let binding = semantic.binding(binding_id);
            // Only external bindings qualify.
            if !binding.is_external() {
                return false;
            }
            // The binding itself must be created inside an exception handler...
            if !binding.in_exception_handler() {
                return false;
            }
            let Some(try_node) = binding.source.and_then(|import_id| {
                semantic
                    .statements(import_id)
                    .find_map(|stmt| stmt.as_try_stmt())
            }) else {
                return false;
            };
            // ...whose `try` suppresses import failures...
            let suspended_exceptions = Exceptions::from_try_stmt(try_node, semantic);
            if !suspended_exceptions
                .intersects(Exceptions::IMPORT_ERROR | Exceptions::MODULE_NOT_FOUND_ERROR)
            {
                return false;
            }
            // ...and whose `try` suite imports from the non-deprecated location.
            try_block_contains_undeprecated_import(try_node, module, name)
        }
        _ => false,
    }
}
/// Whether the `try` suite of `try_node` contains an attribute expression
/// resolving to `module.name` — i.e., the airflow member is also accessed
/// from its non-deprecated location.
fn try_block_contains_undeprecated_attribute(
    try_node: &StmtTry,
    module: &str,
    name: &str,
    semantic: &SemanticModel,
) -> bool {
    // Assemble the qualified name `module.name` segment by segment.
    let mut builder = QualifiedNameBuilder::default();
    for segment in module.split('.') {
        builder.push(segment);
    }
    builder.push(name);
    let qualified_name = builder.build();

    let mut searcher = AttributeSearcher::new(qualified_name, semantic);
    searcher.visit_body(&try_node.body);
    searcher.found_attribute
}
/// Whether the `try` suite of `try_node` contains a `from module import name`
/// statement — i.e., the airflow member is also imported from its
/// non-deprecated location.
fn try_block_contains_undeprecated_import(try_node: &StmtTry, module: &str, name: &str) -> bool {
    let mut searcher = ImportSearcher::new(module, name);
    searcher.visit_body(&try_node.body);
    searcher.found_import
}
/// Check whether the segments corresponding to the fully qualified name points to a symbol that's
/// either a builtin or coming from one of the providers in Airflow.
///
/// The pattern it looks for are:
/// - `airflow.providers.**.<module>.**.*<symbol_suffix>` for providers
/// - `airflow.<module>.**.*<symbol_suffix>` for builtins
///
/// where `**` is one or more segments separated by a dot, and `*` is one or more characters.
///
/// Examples for the above patterns:
/// - `airflow.providers.google.cloud.secrets.secret_manager.CloudSecretManagerBackend` (provider)
/// - `airflow.secrets.base_secrets.BaseSecretsBackend` (builtin)
pub(crate) fn is_airflow_builtin_or_provider(
    segments: &[&str],
    module: &str,
    symbol_suffix: &str,
) -> bool {
    match segments {
        ["airflow", "providers", rest @ ..] => {
            let Some(pos) = rest.iter().position(|&segment| segment == module) else {
                return false;
            };
            let Some(last) = rest.last() else {
                return false;
            };
            // The module must not be the last element: at least one symbol
            // segment (ending in `symbol_suffix`) must follow it.
            pos + 1 < rest.len() && last.ends_with(symbol_suffix)
        }
        ["airflow", first, rest @ ..] => rest
            .last()
            .is_some_and(|last| *first == module && last.ends_with(symbol_suffix)),
        _ => false,
    }
}
/// Return the [`ast::ExprName`] at the head of the expression, if any.
pub(crate) fn match_head(value: &Expr) -> Option<&ExprName> {
    if let Expr::Name(name) = value {
        return Some(name);
    }
    if let Expr::Attribute(ExprAttribute { value, .. }) = value {
        // e.g. for `foo.bar`, the head is `foo` (if it is a plain name).
        return value.as_name_expr();
    }
    None
}
/// Return the [`Fix`] that imports the new name and updates where the import is referenced.
/// This is used for cases that member name has changed.
/// (e.g., `airflow.datasets.Dataset` to `airflow.sdk.Asset`)
pub(crate) fn generate_import_edit(
    expr: &Expr,
    checker: &Checker,
    module: &str,
    name: &str,
    ranged: TextRange,
) -> Option<Fix> {
    // Ensure `name` is importable at this position, adding the import if needed…
    let request = ImportRequest::import_from(module, name);
    let (import_edit, _) = checker
        .importer()
        .get_or_import_symbol(&request, expr.start(), checker.semantic())
        .ok()?;
    // …then rewrite the deprecated reference to the new name.
    let replacement_edit = Edit::range_replacement(name.to_string(), ranged.range());
    Some(Fix::safe_edits(import_edit, [replacement_edit]))
}
/// Return the [`Fix`] that remove the original import and import the same name with new path.
/// This is used for cases that member name has not changed.
/// (e.g., `airflow.operators.pig_operator.PigOperator` to `airflow.providers.apache.pig.hooks.pig.PigCliHook`)
pub(crate) fn generate_remove_and_runtime_import_edit(
    expr: &Expr,
    checker: &Checker,
    module: &str,
    name: &str,
) -> Option<Fix> {
    let head = match_head(expr)?;
    let semantic = checker.semantic();
    // Resolve the binding for the head name, falling back to a plain symbol
    // lookup when name resolution fails.
    let binding = semantic
        .resolve_name(head)
        .or_else(|| semantic.lookup_symbol(&head.id))
        .map(|id| semantic.binding(id))?;
    let stmt = binding.statement(semantic)?;
    // Remove the deprecated import…
    let remove_edit = remove_unused_imports(
        std::iter::once(name),
        stmt,
        None,
        checker.locator(),
        checker.stylist(),
        checker.indexer(),
    )
    .ok()?;
    // …and re-import the same member from its new module.
    let import_edit = checker.importer().add_import(
        &NameImport::ImportFrom(MemberNameImport::member(
            module.to_string(),
            name.to_string(),
        )),
        expr.start(),
    );
    // Unsafe: removing the old import and adding a new one can change behavior
    // if the old module had import-time side effects.
    Some(Fix::unsafe_edits(remove_edit, [import_edit]))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/airflow/mod.rs | crates/ruff_linter/src/rules/airflow/mod.rs | //! Airflow-specific rules.
pub(crate) mod helpers;
pub(crate) mod rules;
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use crate::registry::Rule;
use crate::test::test_path;
use crate::{assert_diagnostics, settings};
#[test_case(Rule::AirflowVariableNameTaskIdMismatch, Path::new("AIR001.py"))]
#[test_case(Rule::AirflowDagNoScheduleArgument, Path::new("AIR002.py"))]
#[test_case(Rule::Airflow3Removal, Path::new("AIR301_args.py"))]
#[test_case(Rule::Airflow3Removal, Path::new("AIR301_names.py"))]
#[test_case(Rule::Airflow3Removal, Path::new("AIR301_names_fix.py"))]
#[test_case(Rule::Airflow3Removal, Path::new("AIR301_provider_names_fix.py"))]
#[test_case(Rule::Airflow3Removal, Path::new("AIR301_names_try.py"))]
#[test_case(Rule::Airflow3Removal, Path::new("AIR301_class_attribute.py"))]
#[test_case(Rule::Airflow3Removal, Path::new("AIR301_airflow_plugin.py"))]
#[test_case(Rule::Airflow3Removal, Path::new("AIR301_context.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_amazon.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_celery.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_common_sql.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_daskexecutor.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_druid.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_fab.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_hdfs.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_hive.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_http.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_jdbc.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_kubernetes.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_mysql.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_oracle.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_papermill.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_pig.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_postgres.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_presto.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_samba.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_slack.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_smtp.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_sqlite.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_zendesk.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_standard.py"))]
#[test_case(Rule::Airflow3MovedToProvider, Path::new("AIR302_try.py"))]
#[test_case(Rule::Airflow3IncompatibleFunctionSignature, Path::new("AIR303.py"))]
#[test_case(Rule::Airflow3SuggestedUpdate, Path::new("AIR311_args.py"))]
#[test_case(Rule::Airflow3SuggestedUpdate, Path::new("AIR311_names.py"))]
#[test_case(Rule::Airflow3SuggestedUpdate, Path::new("AIR311_try.py"))]
#[test_case(Rule::Airflow3SuggestedToMoveToProvider, Path::new("AIR312.py"))]
#[test_case(Rule::Airflow3SuggestedToMoveToProvider, Path::new("AIR312_try.py"))]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("airflow").join(path).as_path(),
&settings::LinterSettings::for_rule(rule_code),
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/airflow/rules/suggested_to_move_to_provider_in_3.rs | crates/ruff_linter/src/rules/airflow/rules/suggested_to_move_to_provider_in_3.rs | use crate::checkers::ast::Checker;
use crate::rules::airflow::helpers::{
ProviderReplacement, generate_import_edit, generate_remove_and_runtime_import_edit,
is_guarded_by_try_except,
};
use crate::{FixAvailability, Violation};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::name::QualifiedName;
use ruff_python_ast::{Expr, ExprAttribute};
use ruff_python_semantic::Modules;
use ruff_text_size::Ranged;
use ruff_text_size::TextRange;
/// ## What it does
/// Checks for uses of Airflow functions and values that have been moved to its providers
/// but still have a compatibility layer (e.g., `apache-airflow-providers-standard`).
///
/// ## Why is this bad?
/// Airflow 3.0 moved various deprecated functions, members, and other
/// values to its providers. Even though these symbols still work fine on Airflow 3.0,
/// they are expected to be removed in a future version. The user is suggested to install
/// the corresponding provider and replace the original usage with the one in the provider.
///
/// ## Example
/// ```python
/// from airflow.operators.python import PythonOperator
///
///
/// def print_context(ds=None, **kwargs):
/// print(kwargs)
/// print(ds)
///
///
/// print_the_context = PythonOperator(
/// task_id="print_the_context", python_callable=print_context
/// )
/// ```
///
/// Use instead:
/// ```python
/// from airflow.providers.standard.operators.python import PythonOperator
///
///
/// def print_context(ds=None, **kwargs):
/// print(kwargs)
/// print(ds)
///
///
/// print_the_context = PythonOperator(
/// task_id="print_the_context", python_callable=print_context
/// )
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.13.0")]
pub(crate) struct Airflow3SuggestedToMoveToProvider<'a> {
/// The deprecated fully qualified name, as referenced in the user's code.
deprecated: QualifiedName<'a>,
/// The provider package and module path that now hosts the symbol.
replacement: ProviderReplacement,
}
impl Violation for Airflow3SuggestedToMoveToProvider<'_> {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let Airflow3SuggestedToMoveToProvider {
            deprecated,
            replacement,
        } = self;
        // Both variants carry the provider name; that's all the message needs.
        match replacement {
            ProviderReplacement::Rename { provider, .. }
            | ProviderReplacement::SourceModuleMovedToProvider { provider, .. } => {
                format!(
                    "`{deprecated}` is deprecated and moved into `{provider}` provider in Airflow 3.0; \
                    It still works in Airflow 3.0 but is expected to be removed in a future version."
                )
            }
        }
    }

    fn fix_title(&self) -> Option<String> {
        let Airflow3SuggestedToMoveToProvider { replacement, .. } = self;
        // Both variants produce the same fix title; destructure once instead of
        // duplicating the format string per arm. (The variants only differ in
        // whether `name` is owned or borrowed.)
        let (module, name, provider, version) = match replacement {
            ProviderReplacement::Rename {
                module,
                name,
                provider,
                version,
            } => (module, *name, provider, version),
            ProviderReplacement::SourceModuleMovedToProvider {
                module,
                name,
                provider,
                version,
            } => (module, name.as_str(), provider, version),
        };
        Some(format!(
            "Install `apache-airflow-providers-{provider}>={version}` and use `{name}` from `{module}` instead."
        ))
    }
}
// AIR312
pub(crate) fn suggested_to_move_to_provider_in_3(checker: &Checker, expr: &Expr) {
    // Bail out early unless `airflow` was imported somewhere in this module.
    if !checker.semantic().seen_module(Modules::AIRFLOW) {
        return;
    }
    // Attribute accesses are reported on the attribute itself; bare names on
    // the whole expression. Anything else is not a candidate.
    let range = match expr {
        Expr::Attribute(ExprAttribute { attr, .. }) => attr.range(),
        Expr::Name(_) => expr.range(),
        _ => return,
    };
    check_names_moved_to_provider(checker, expr, range);
}
/// Report AIR312 when `expr` resolves to a symbol that has a compatibility
/// shim but now lives in the `apache-airflow-providers-standard` package, and
/// attach an import-rewriting fix when one can be generated.
fn check_names_moved_to_provider(checker: &Checker, expr: &Expr, ranged: TextRange) {
    let Some(qualified_name) = checker.semantic().resolve_qualified_name(expr) else {
        return;
    };
    let replacement = match qualified_name.segments() {
        // apache-airflow-providers-standard
        ["airflow", "hooks", "filesystem", "FSHook"] => ProviderReplacement::Rename {
            module: "airflow.providers.standard.hooks.filesystem",
            name: "FSHook",
            provider: "standard",
            version: "0.0.1",
        },
        ["airflow", "hooks", "package_index", "PackageIndexHook"] => ProviderReplacement::Rename {
            module: "airflow.providers.standard.hooks.package_index",
            name: "PackageIndexHook",
            provider: "standard",
            version: "0.0.1",
        },
        ["airflow", "hooks", "subprocess", "SubprocessHook"] => ProviderReplacement::Rename {
            module: "airflow.providers.standard.hooks.subprocess",
            name: "SubprocessHook",
            provider: "standard",
            version: "0.0.3",
        },
        ["airflow", "operators", "bash", "BashOperator"] => ProviderReplacement::Rename {
            module: "airflow.providers.standard.operators.bash",
            name: "BashOperator",
            provider: "standard",
            version: "0.0.1",
        },
        ["airflow", "operators", "datetime", "BranchDateTimeOperator"] => {
            ProviderReplacement::Rename {
                module: "airflow.providers.standard.operators.datetime",
                name: "BranchDateTimeOperator",
                provider: "standard",
                version: "0.0.1",
            }
        }
        [
            "airflow",
            "operators",
            "trigger_dagrun",
            "TriggerDagRunOperator",
        ] => ProviderReplacement::Rename {
            module: "airflow.providers.standard.operators.trigger_dagrun",
            name: "TriggerDagRunOperator",
            provider: "standard",
            version: "0.0.2",
        },
        ["airflow", "operators", "empty", "EmptyOperator"] => ProviderReplacement::Rename {
            module: "airflow.providers.standard.operators.empty",
            name: "EmptyOperator",
            provider: "standard",
            version: "0.0.2",
        },
        ["airflow", "operators", "latest_only", "LatestOnlyOperator"] => {
            ProviderReplacement::Rename {
                module: "airflow.providers.standard.operators.latest_only",
                name: "LatestOnlyOperator",
                provider: "standard",
                version: "0.0.3",
            }
        }
        [
            "airflow",
            "operators",
            "python",
            rest @ ("BranchPythonOperator"
            | "PythonOperator"
            | "PythonVirtualenvOperator"
            | "ShortCircuitOperator"),
        ] => ProviderReplacement::SourceModuleMovedToProvider {
            name: (*rest).to_string(),
            module: "airflow.providers.standard.operators.python",
            provider: "standard",
            version: "0.0.1",
        },
        ["airflow", "operators", "weekday", "BranchDayOfWeekOperator"] => {
            ProviderReplacement::Rename {
                module: "airflow.providers.standard.operators.weekday",
                name: "BranchDayOfWeekOperator",
                provider: "standard",
                version: "0.0.1",
            }
        }
        // Fixed: the provider module is `sensors` (plural), matching every
        // other sensors entry; `sensor.bash` does not exist in the provider.
        ["airflow", "sensors", "bash", "BashSensor"] => ProviderReplacement::Rename {
            module: "airflow.providers.standard.sensors.bash",
            name: "BashSensor",
            provider: "standard",
            version: "0.0.1",
        },
        [
            "airflow",
            "sensors",
            "date_time",
            rest @ ("DateTimeSensor" | "DateTimeSensorAsync"),
        ] => ProviderReplacement::SourceModuleMovedToProvider {
            name: (*rest).to_string(),
            module: "airflow.providers.standard.sensors.date_time",
            provider: "standard",
            version: "0.0.1",
        },
        [
            "airflow",
            "sensors",
            "external_task",
            rest @ ("ExternalTaskMarker" | "ExternalTaskSensor" | "ExternalTaskSensorLink"),
        ] => ProviderReplacement::SourceModuleMovedToProvider {
            name: (*rest).to_string(),
            module: "airflow.providers.standard.sensors.external_task",
            provider: "standard",
            version: "0.0.3",
        },
        ["airflow", "sensors", "filesystem", "FileSensor"] => ProviderReplacement::Rename {
            module: "airflow.providers.standard.sensors.filesystem",
            name: "FileSensor",
            provider: "standard",
            version: "0.0.2",
        },
        ["airflow", "sensors", "python", "PythonSensor"] => ProviderReplacement::Rename {
            module: "airflow.providers.standard.sensors.python",
            name: "PythonSensor",
            provider: "standard",
            version: "0.0.1",
        },
        [
            "airflow",
            "sensors",
            "time_sensor",
            rest @ ("TimeSensor" | "TimeSensorAsync"),
        ] => ProviderReplacement::SourceModuleMovedToProvider {
            name: (*rest).to_string(),
            module: "airflow.providers.standard.sensors.time",
            provider: "standard",
            version: "0.0.1",
        },
        [
            "airflow",
            "sensors",
            "time_delta",
            rest @ ("TimeDeltaSensor" | "TimeDeltaSensorAsync"),
        ] => ProviderReplacement::SourceModuleMovedToProvider {
            name: (*rest).to_string(),
            module: "airflow.providers.standard.sensors.time_delta",
            provider: "standard",
            version: "0.0.1",
        },
        ["airflow", "sensors", "weekday", "DayOfWeekSensor"] => ProviderReplacement::Rename {
            module: "airflow.providers.standard.sensors.weekday",
            name: "DayOfWeekSensor",
            provider: "standard",
            version: "0.0.1",
        },
        [
            "airflow",
            "triggers",
            "external_task",
            rest @ ("DagStateTrigger" | "WorkflowTrigger"),
        ] => ProviderReplacement::SourceModuleMovedToProvider {
            name: (*rest).to_string(),
            module: "airflow.providers.standard.triggers.external_task",
            provider: "standard",
            version: "0.0.3",
        },
        ["airflow", "triggers", "file", "FileTrigger"] => ProviderReplacement::Rename {
            module: "airflow.providers.standard.triggers.file",
            name: "FileTrigger",
            provider: "standard",
            version: "0.0.3",
        },
        [
            "airflow",
            "triggers",
            "temporal",
            rest @ ("DateTimeTrigger" | "TimeDeltaTrigger"),
        ] => ProviderReplacement::SourceModuleMovedToProvider {
            name: (*rest).to_string(),
            module: "airflow.providers.standard.triggers.temporal",
            provider: "standard",
            version: "0.0.3",
        },
        _ => return,
    };
    let (module, name) = match &replacement {
        ProviderReplacement::Rename { module, name, .. } => (module, *name),
        ProviderReplacement::SourceModuleMovedToProvider { module, name, .. } => {
            (module, name.as_str())
        }
    };
    // `try: from new import X / except ImportError: from old import X` is a
    // deliberate compatibility pattern; don't flag it.
    if is_guarded_by_try_except(expr, module, name, checker.semantic()) {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(
        Airflow3SuggestedToMoveToProvider {
            deprecated: qualified_name,
            replacement: replacement.clone(),
        },
        ranged,
    );
    // Prefer rewriting the reference in place; fall back to replacing the
    // import statement itself.
    if let Some(fix) = generate_import_edit(expr, checker, module, name, ranged)
        .or_else(|| generate_remove_and_runtime_import_edit(expr, checker, module, name))
    {
        diagnostic.set_fix(fix);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/airflow/rules/dag_schedule_argument.rs | crates/ruff_linter/src/rules/airflow/rules/dag_schedule_argument.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::Expr;
use ruff_python_ast::{self as ast};
use ruff_python_semantic::Modules;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for a `DAG()` class or `@dag()` decorator without an explicit
/// `schedule` (or `schedule_interval` for Airflow 1) parameter.
///
/// ## Why is this bad?
/// The default value of the `schedule` parameter on Airflow 2 and
/// `schedule_interval` on Airflow 1 is `timedelta(days=1)`, which is almost
/// never what a user is looking for. Airflow 3 changed the default value to `None`,
/// and would break existing dags using the implicit default.
///
/// If your dag does not have an explicit `schedule` / `schedule_interval` argument,
/// Airflow 2 schedules a run for it every day (at the time determined by `start_date`).
/// Such a dag will no longer be scheduled on Airflow 3 at all, without any
/// exceptions or other messages visible to the user.
///
/// ## Example
/// ```python
/// from airflow import DAG
///
///
/// # Using the implicit default schedule.
/// dag = DAG(dag_id="my_dag")
/// ```
///
/// Use instead:
/// ```python
/// from datetime import timedelta
///
/// from airflow import DAG
///
///
/// dag = DAG(dag_id="my_dag", schedule=timedelta(days=1))
/// ```
// Unit struct: the diagnostic message is fixed, so no data needs to be carried.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.13.0")]
pub(crate) struct AirflowDagNoScheduleArgument;
impl Violation for AirflowDagNoScheduleArgument {
#[derive_message_formats]
fn message(&self) -> String {
"`DAG` or `@dag` should have an explicit `schedule` argument".to_string()
}
}
/// AIR002
pub(crate) fn dag_no_schedule_argument(checker: &Checker, expr: &Expr) {
    if !checker.semantic().seen_module(Modules::AIRFLOW) {
        return;
    }
    // Only call expressions can construct a DAG.
    let Expr::Call(ast::ExprCall {
        func, arguments, ..
    }) = expr
    else {
        return;
    };
    // Restrict to Airflow's `DAG` class or `dag` decorator function.
    let is_dag = checker
        .semantic()
        .resolve_qualified_name(func)
        .is_some_and(|qualified| matches!(qualified.segments(), ["airflow", .., "DAG" | "dag"]));
    if !is_dag {
        return;
    }
    // Any of the schedule-ish keywords satisfies the rule: the canonical
    // `schedule`, plus the deprecated `timetable` and `schedule_interval`
    // (deprecated usages are covered separately by AIR301).
    if ["schedule", "schedule_interval", "timetable"]
        .into_iter()
        .any(|keyword| arguments.find_keyword(keyword).is_some())
    {
        return;
    }
    // No schedule keyword found: flag the whole call.
    checker.report_diagnostic(AirflowDagNoScheduleArgument, expr.range());
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/airflow/rules/task_variable_name.rs | crates/ruff_linter/src/rules/airflow/rules/task_variable_name.rs | use crate::Violation;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_ast::Expr;
use ruff_python_semantic::Modules;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks that the task variable name matches the `task_id` value for
/// Airflow Operators.
///
/// ## Why is this bad?
/// When initializing an Airflow Operator, for consistency, the variable
/// name should match the `task_id` value. This makes it easier to
/// follow the flow of the DAG.
///
/// ## Example
/// ```python
/// from airflow.operators import PythonOperator
///
///
/// incorrect_name = PythonOperator(task_id="my_task")
/// ```
///
/// Use instead:
/// ```python
/// from airflow.operators import PythonOperator
///
///
/// my_task = PythonOperator(task_id="my_task")
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.271")]
pub(crate) struct AirflowVariableNameTaskIdMismatch {
/// The `task_id` string the assigned variable's name should match.
task_id: String,
}
impl Violation for AirflowVariableNameTaskIdMismatch {
#[derive_message_formats]
fn message(&self) -> String {
let AirflowVariableNameTaskIdMismatch { task_id } = self;
format!("Task variable name should match the `task_id`: \"{task_id}\"")
}
}
/// AIR001
pub(crate) fn variable_name_task_id(checker: &Checker, targets: &[Expr], value: &Expr) {
    if !checker.semantic().seen_module(Modules::AIRFLOW) {
        return;
    }
    // Only single-target assignments to a plain name are checked.
    let [target] = targets else {
        return;
    };
    let Expr::Name(ast::ExprName { id, .. }) = target else {
        return;
    };
    // The right-hand side must be a call (an operator instantiation).
    let Expr::Call(ast::ExprCall {
        func, arguments, ..
    }) = value
    else {
        return;
    };
    // The callee must come from Airflow's operators — either the builtin
    // `airflow.operators.*` namespace or a provider's
    // `airflow.providers.**.operators.*` namespace.
    let Some(qualified_name) = checker.semantic().resolve_qualified_name(func) else {
        return;
    };
    let is_operator = match qualified_name.segments() {
        ["airflow", "operators", ..] => true,
        ["airflow", "providers", rest @ ..] => rest
            .iter()
            .position(|&segment| segment == "operators")
            // `operators` must not be the last segment.
            .is_some_and(|pos| pos + 1 < rest.len()),
        _ => false,
    };
    if !is_operator {
        return;
    }
    // A string `task_id` keyword argument is required for the comparison.
    let Some(keyword) = arguments.find_keyword("task_id") else {
        return;
    };
    let Some(ast::ExprStringLiteral { value: task_id, .. }) =
        keyword.value.as_string_literal_expr()
    else {
        return;
    };
    // Flag only when the variable name and the task id disagree.
    if task_id != id.as_str() {
        checker.report_diagnostic(
            AirflowVariableNameTaskIdMismatch {
                task_id: task_id.to_string(),
            },
            target.range(),
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/airflow/rules/moved_to_provider_in_3.rs | crates/ruff_linter/src/rules/airflow/rules/moved_to_provider_in_3.rs | use crate::checkers::ast::Checker;
use crate::rules::airflow::helpers::{
ProviderReplacement, generate_import_edit, generate_remove_and_runtime_import_edit,
is_guarded_by_try_except,
};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::name::QualifiedName;
use ruff_python_ast::{Expr, ExprAttribute};
use ruff_python_semantic::Modules;
use ruff_text_size::Ranged;
use ruff_text_size::TextRange;
use crate::{FixAvailability, Violation};
/// ## What it does
/// Checks for uses of Airflow functions and values that have been moved to its providers
/// (e.g., `apache-airflow-providers-fab`).
///
/// ## Why is this bad?
/// Airflow 3.0 moved various deprecated functions, members, and other
/// values to its providers. The user needs to install the corresponding provider and replace
/// the original usage with the one in the provider.
///
/// ## Example
/// ```python
/// from airflow.auth.managers.fab.fab_auth_manager import FabAuthManager
///
/// fab_auth_manager_app = FabAuthManager().get_fastapi_app()
/// ```
///
/// Use instead:
/// ```python
/// from airflow.providers.fab.auth_manager.fab_auth_manager import FabAuthManager
///
/// fab_auth_manager_app = FabAuthManager().get_fastapi_app()
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.13.0")]
pub(crate) struct Airflow3MovedToProvider<'a> {
/// The deprecated fully qualified name, as referenced in the user's code.
deprecated: QualifiedName<'a>,
/// The provider package and module path that now hosts the symbol.
replacement: ProviderReplacement,
}
impl Violation for Airflow3MovedToProvider<'_> {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let Airflow3MovedToProvider {
            deprecated,
            replacement,
        } = self;
        // Both variants carry the provider name; that's all the message needs.
        match replacement {
            ProviderReplacement::Rename { provider, .. }
            | ProviderReplacement::SourceModuleMovedToProvider { provider, .. } => {
                format!("`{deprecated}` is moved into `{provider}` provider in Airflow 3.0;")
            }
        }
    }

    fn fix_title(&self) -> Option<String> {
        let Airflow3MovedToProvider { replacement, .. } = self;
        // Every variant yields a fix title, so there is no fallible branch:
        // the previous `if let Some(..)` wrapper around an infallible match
        // (every arm returned `Some`) made the `else { None }` dead code.
        let (module, name, provider, version) = match replacement {
            ProviderReplacement::Rename {
                module,
                name,
                provider,
                version,
            } => (module, *name, provider, version),
            ProviderReplacement::SourceModuleMovedToProvider {
                module,
                name,
                provider,
                version,
            } => (module, name.as_str(), provider, version),
        };
        Some(format!(
            "Install `apache-airflow-providers-{provider}>={version}` and use `{name}` from `{module}` instead."
        ))
    }
}
/// AIR302
pub(crate) fn moved_to_provider_in_3(checker: &Checker, expr: &Expr) {
    // Bail out early unless `airflow` was imported somewhere in this module.
    if !checker.semantic().seen_module(Modules::AIRFLOW) {
        return;
    }
    // Attribute accesses are reported on the attribute itself; bare names on
    // the whole expression. Anything else is not a candidate.
    let range = match expr {
        Expr::Attribute(ExprAttribute { attr, .. }) => attr.range(),
        Expr::Name(_) => expr.range(),
        _ => return,
    };
    check_names_moved_to_provider(checker, expr, range);
}
fn check_names_moved_to_provider(checker: &Checker, expr: &Expr, ranged: TextRange) {
let Some(qualified_name) = checker.semantic().resolve_qualified_name(expr) else {
return;
};
let replacement = match qualified_name.segments() {
// apache-airflow-providers-amazon
[
"airflow",
"hooks",
"S3_hook",
rest @ ("S3Hook" | "provide_bucket_name"),
] => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.amazon.aws.hooks.s3",
provider: "amazon",
version: "1.0.0",
},
["airflow", "operators", "gcs_to_s3", "GCSToS3Operator"] => ProviderReplacement::Rename {
module: "airflow.providers.amazon.aws.transfers.gcs_to_s3",
name: "GCSToS3Operator",
provider: "amazon",
version: "1.0.0",
},
[
"airflow",
"operators",
"google_api_to_s3_transfer",
"GoogleApiToS3Operator" | "GoogleApiToS3Transfer",
] => ProviderReplacement::Rename {
module: "airflow.providers.amazon.aws.transfers.google_api_to_s3",
name: "GoogleApiToS3Operator",
provider: "amazon",
version: "1.0.0",
},
[
"airflow",
"operators",
"redshift_to_s3_operator",
"RedshiftToS3Operator" | "RedshiftToS3Transfer",
] => ProviderReplacement::Rename {
module: "airflow.providers.amazon.aws.transfers.redshift_to_s3",
name: "RedshiftToS3Operator",
provider: "amazon",
version: "1.0.0",
},
[
"airflow",
"operators",
"s3_file_transform_operator",
"S3FileTransformOperator",
] => ProviderReplacement::Rename {
module: "airflow.providers.amazon.aws.operators.s3",
name: "S3FileTransformOperator",
provider: "amazon",
version: "3.0.0",
},
[
"airflow",
"operators",
"s3_to_redshift_operator",
"S3ToRedshiftOperator" | "S3ToRedshiftTransfer",
] => ProviderReplacement::Rename {
module: "airflow.providers.amazon.aws.transfers.s3_to_redshift",
name: "S3ToRedshiftOperator",
provider: "amazon",
version: "1.0.0",
},
["airflow", "sensors", "s3_key_sensor", "S3KeySensor"] => ProviderReplacement::Rename {
module: "airflow.providers.amazon.aws.sensors.s3",
name: "S3KeySensor",
provider: "amazon",
version: "1.0.0",
},
// apache-airflow-providers-celery
[
"airflow",
"config_templates",
"default_celery",
"DEFAULT_CELERY_CONFIG",
] => ProviderReplacement::Rename {
module: "airflow.providers.celery.executors.default_celery",
name: "DEFAULT_CELERY_CONFIG",
provider: "celery",
version: "3.3.0",
},
["airflow", "executors", "celery_executor", rest] => match *rest {
"app" => ProviderReplacement::Rename {
module: "airflow.providers.celery.executors.celery_executor_utils",
name: "app",
provider: "celery",
version: "3.3.0",
},
"CeleryExecutor" => ProviderReplacement::Rename {
module: "airflow.providers.celery.executors.celery_executor",
name: "CeleryExecutor",
provider: "celery",
version: "3.3.0",
},
_ => return,
},
[
"airflow",
"executors",
"celery_kubernetes_executor",
"CeleryKubernetesExecutor",
] => ProviderReplacement::Rename {
module: "airflow.providers.celery.executors.celery_kubernetes_executor",
name: "CeleryKubernetesExecutor",
provider: "celery",
version: "3.3.0",
},
// apache-airflow-providers-common-sql
[
"airflow",
"hooks",
"dbapi",
rest @ ("ConnectorProtocol" | "DbApiHook"),
] => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.common.sql.hooks.sql",
provider: "common-sql",
version: "1.0.0",
},
["airflow", "hooks", "dbapi_hook", "DbApiHook"] => ProviderReplacement::Rename {
module: "airflow.providers.common.sql.hooks.sql",
name: "DbApiHook",
provider: "common-sql",
version: "1.0.0",
},
[
"airflow",
"operators",
"check_operator" | "sql",
"SQLCheckOperator",
]
| [
"airflow",
"operators",
"check_operator" | "druid_check_operator" | "presto_check_operator",
"CheckOperator",
] => ProviderReplacement::Rename {
module: "airflow.providers.common.sql.operators.sql",
name: "SQLCheckOperator",
provider: "common-sql",
version: "1.1.0",
},
[
"airflow",
"operators",
"druid_check_operator",
"DruidCheckOperator",
]
| [
"airflow",
"operators",
"presto_check_operator",
"PrestoCheckOperator",
] => ProviderReplacement::Rename {
module: "airflow.providers.common.sql.operators.sql",
name: "SQLCheckOperator",
provider: "common-sql",
version: "1.1.0",
},
[
"airflow",
"operators",
"check_operator",
"IntervalCheckOperator" | "SQLIntervalCheckOperator",
]
| [
"airflow",
"operators",
"presto_check_operator",
"IntervalCheckOperator",
]
| ["airflow", "operators", "sql", "SQLIntervalCheckOperator"] => {
ProviderReplacement::Rename {
module: "airflow.providers.common.sql.operators.sql",
name: "SQLIntervalCheckOperator",
provider: "common-sql",
version: "1.1.0",
}
}
[
"airflow",
"operators",
"presto_check_operator",
"PrestoIntervalCheckOperator",
] => ProviderReplacement::Rename {
module: "airflow.providers.common.sql.operators.sql",
name: "SQLIntervalCheckOperator",
provider: "common-sql",
version: "1.1.0",
},
[
"airflow",
"operators",
"check_operator",
"SQLThresholdCheckOperator" | "ThresholdCheckOperator",
]
| ["airflow", "operators", "sql", "SQLThresholdCheckOperator"] => {
ProviderReplacement::Rename {
module: "airflow.providers.common.sql.operators.sql",
name: "SQLThresholdCheckOperator",
provider: "common-sql",
version: "1.1.0",
}
}
[
"airflow",
"operators",
"check_operator",
"SQLValueCheckOperator" | "ValueCheckOperator",
]
| [
"airflow",
"operators",
"presto_check_operator",
"ValueCheckOperator",
]
| ["airflow", "operators", "sql", "SQLValueCheckOperator"] => ProviderReplacement::Rename {
module: "airflow.providers.common.sql.operators.sql",
name: "SQLValueCheckOperator",
provider: "common-sql",
version: "1.1.0",
},
[
"airflow",
"operators",
"presto_check_operator",
"PrestoValueCheckOperator",
] => ProviderReplacement::Rename {
module: "airflow.providers.common.sql.operators.sql",
name: "SQLValueCheckOperator",
provider: "common-sql",
version: "1.1.0",
},
["airflow", "operators", "sql", rest] => match *rest {
"BaseSQLOperator" | "BranchSQLOperator" | "SQLTableCheckOperator" => {
ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.common.sql.operators.sql",
provider: "common-sql",
version: "1.1.0",
}
}
"SQLColumnCheckOperator" | "_convert_to_float_if_possible" | "parse_boolean" => {
ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.common.sql.operators.sql",
provider: "common-sql",
version: "1.0.0",
}
}
_ => return,
},
["airflow", "sensors", "sql" | "sql_sensor", "SqlSensor"] => ProviderReplacement::Rename {
module: "airflow.providers.common.sql.sensors.sql",
name: "SqlSensor",
provider: "common-sql",
version: "1.0.0",
},
["airflow", "operators", "jdbc_operator", "JdbcOperator"]
| ["airflow", "operators", "mssql_operator", "MsSqlOperator"]
| ["airflow", "operators", "mysql_operator", "MySqlOperator"]
| ["airflow", "operators", "oracle_operator", "OracleOperator"]
| [
"airflow",
"operators",
"postgres_operator",
"PostgresOperator",
]
| ["airflow", "operators", "sqlite_operator", "SqliteOperator"] => {
ProviderReplacement::Rename {
module: "airflow.providers.common.sql.operators.sql",
name: "SQLExecuteQueryOperator",
provider: "common-sql",
version: "1.3.0",
}
}
// apache-airflow-providers-daskexecutor
["airflow", "executors", "dask_executor", "DaskExecutor"] => ProviderReplacement::Rename {
module: "airflow.providers.daskexecutor.executors.dask_executor",
name: "DaskExecutor",
provider: "daskexecutor",
version: "1.0.0",
},
// apache-airflow-providers-docker
["airflow", "hooks", "docker_hook", "DockerHook"] => ProviderReplacement::Rename {
module: "airflow.providers.docker.hooks.docker",
name: "DockerHook",
provider: "docker",
version: "1.0.0",
},
["airflow", "operators", "docker_operator", "DockerOperator"] => {
ProviderReplacement::Rename {
module: "airflow.providers.docker.operators.docker",
name: "DockerOperator",
provider: "docker",
version: "1.0.0",
}
}
// apache-airflow-providers-apache-druid
[
"airflow",
"hooks",
"druid_hook",
rest @ ("DruidDbApiHook" | "DruidHook"),
] => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.apache.druid.hooks.druid",
provider: "apache-druid",
version: "1.0.0",
},
[
"airflow",
"operators",
"hive_to_druid",
"HiveToDruidOperator" | "HiveToDruidTransfer",
] => ProviderReplacement::Rename {
module: "airflow.providers.apache.druid.transfers.hive_to_druid",
name: "HiveToDruidOperator",
provider: "apache-druid",
version: "1.0.0",
},
// apache-airflow-providers-fab
[
"airflow",
"api",
"auth",
"backend",
"basic_auth",
rest @ ("CLIENT_AUTH" | "init_app" | "auth_current_user" | "requires_authentication"),
] => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.fab.auth_manager.api.auth.backend.basic_auth",
provider: "fab",
version: "1.0.0",
},
[
"airflow",
"api",
"auth",
"backend",
"kerberos_auth",
rest @ ("log" | "CLIENT_AUTH" | "find_user" | "init_app" | "requires_authentication"),
] => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.fab.auth_manager.api.auth.backend.kerberos_auth",
provider: "fab",
version: "1.0.0",
},
[
"airflow",
"auth",
"managers",
"fab",
"api",
"auth",
"backend",
"kerberos_auth",
rest @ ("log" | "CLIENT_AUTH" | "find_user" | "init_app" | "requires_authentication"),
] => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.fab.auth_manager.api.auth.backend.kerberos_auth",
provider: "fab",
version: "1.0.0",
},
[
"airflow",
"auth",
"managers",
"fab",
"fab_auth_manager",
"FabAuthManager",
] => ProviderReplacement::Rename {
module: "airflow.providers.fab.auth_manager.fab_auth_manager",
name: "FabAuthManager",
provider: "fab",
version: "1.0.0",
},
[
"airflow",
"auth",
"managers",
"fab",
"security_manager",
"override",
"MAX_NUM_DATABASE_USER_SESSIONS",
] => ProviderReplacement::Rename {
module: "airflow.providers.fab.auth_manager.security_manager.override",
name: "MAX_NUM_DATABASE_USER_SESSIONS",
provider: "fab",
version: "1.0.0",
},
[
"airflow",
"auth",
"managers",
"fab",
"security_manager",
"override",
"FabAirflowSecurityManagerOverride",
]
| [
"airflow",
"www",
"security",
"FabAirflowSecurityManagerOverride",
] => ProviderReplacement::Rename {
module: "airflow.providers.fab.auth_manager.security_manager.override",
name: "FabAirflowSecurityManagerOverride",
provider: "fab",
version: "1.0.0",
},
// apache-airflow-providers-apache-hdfs
["airflow", "hooks", "webhdfs_hook", "WebHDFSHook"] => ProviderReplacement::Rename {
module: "airflow.providers.apache.hdfs.hooks.webhdfs",
name: "WebHDFSHook",
provider: "apache-hdfs",
version: "1.0.0",
},
["airflow", "sensors", "web_hdfs_sensor", "WebHdfsSensor"] => ProviderReplacement::Rename {
module: "airflow.providers.apache.hdfs.sensors.web_hdfs",
name: "WebHdfsSensor",
provider: "apache-hdfs",
version: "1.0.0",
},
// apache-airflow-providers-apache-hive
[
"airflow",
"hooks",
"hive_hooks",
rest @ ("HiveCliHook"
| "HiveMetastoreHook"
| "HiveServer2Hook"
| "HIVE_QUEUE_PRIORITIES"),
] => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.apache.hive.hooks.hive",
provider: "apache-hive",
version: "1.0.0",
},
[
"airflow",
"macros",
"hive",
rest @ ("closest_ds_partition" | "max_partition"),
] => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.apache.hive.macros.hive",
provider: "apache-hive",
version: "5.1.0",
},
["airflow", "operators", "hive_operator", "HiveOperator"] => ProviderReplacement::Rename {
module: "airflow.providers.apache.hive.operators.hive",
name: "HiveOperator",
provider: "apache-hive",
version: "1.0.0",
},
[
"airflow",
"operators",
"hive_stats_operator",
"HiveStatsCollectionOperator",
] => ProviderReplacement::Rename {
module: "airflow.providers.apache.hive.operators.hive_stats",
name: "HiveStatsCollectionOperator",
provider: "apache-hive",
version: "1.0.0",
},
[
"airflow",
"operators",
"hive_to_mysql",
"HiveToMySqlOperator" | "HiveToMySqlTransfer",
] => ProviderReplacement::Rename {
module: "airflow.providers.apache.hive.transfers.hive_to_mysql",
name: "HiveToMySqlOperator",
provider: "apache-hive",
version: "1.0.0",
},
[
"airflow",
"operators",
"hive_to_samba_operator",
"HiveToSambaOperator",
] => ProviderReplacement::Rename {
module: "airflow.providers.apache.hive.transfers.hive_to_samba",
name: "HiveToSambaOperator",
provider: "apache-hive",
version: "1.0.0",
},
[
"airflow",
"operators",
"mssql_to_hive",
"MsSqlToHiveOperator" | "MsSqlToHiveTransfer",
] => ProviderReplacement::Rename {
module: "airflow.providers.apache.hive.transfers.mssql_to_hive",
name: "MsSqlToHiveOperator",
provider: "apache-hive",
version: "1.0.0",
},
[
"airflow",
"operators",
"mysql_to_hive",
"MySqlToHiveOperator" | "MySqlToHiveTransfer",
] => ProviderReplacement::Rename {
module: "airflow.providers.apache.hive.transfers.mysql_to_hive",
name: "MySqlToHiveOperator",
provider: "apache-hive",
version: "1.0.0",
},
[
"airflow",
"operators",
"s3_to_hive_operator",
"S3ToHiveOperator" | "S3ToHiveTransfer",
] => ProviderReplacement::Rename {
module: "airflow.providers.apache.hive.transfers.s3_to_hive",
name: "S3ToHiveOperator",
provider: "apache-hive",
version: "1.0.0",
},
[
"airflow",
"sensors",
"hive_partition_sensor",
"HivePartitionSensor",
] => ProviderReplacement::Rename {
module: "airflow.providers.apache.hive.sensors.hive_partition",
name: "HivePartitionSensor",
provider: "apache-hive",
version: "1.0.0",
},
[
"airflow",
"sensors",
"metastore_partition_sensor",
"MetastorePartitionSensor",
] => ProviderReplacement::Rename {
module: "airflow.providers.apache.hive.sensors.metastore_partition",
name: "MetastorePartitionSensor",
provider: "apache-hive",
version: "1.0.0",
},
[
"airflow",
"sensors",
"named_hive_partition_sensor",
"NamedHivePartitionSensor",
] => ProviderReplacement::Rename {
module: "airflow.providers.apache.hive.sensors.named_hive_partition",
name: "NamedHivePartitionSensor",
provider: "apache-hive",
version: "1.0.0",
},
// apache-airflow-providers-http
["airflow", "hooks", "http_hook", "HttpHook"] => ProviderReplacement::Rename {
module: "airflow.providers.http.hooks.http",
name: "HttpHook",
provider: "http",
version: "1.0.0",
},
[
"airflow",
"operators",
"http_operator",
"SimpleHttpOperator",
] => ProviderReplacement::Rename {
module: "airflow.providers.http.operators.http",
name: "HttpOperator",
provider: "http",
version: "5.0.0",
},
["airflow", "sensors", "http_sensor", "HttpSensor"] => ProviderReplacement::Rename {
module: "airflow.providers.http.sensors.http",
name: "HttpSensor",
provider: "http",
version: "1.0.0",
},
// apache-airflow-providers-jdbc
[
"airflow",
"hooks",
"jdbc_hook",
rest @ ("JdbcHook" | "jaydebeapi"),
] => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.jdbc.hooks.jdbc",
provider: "jdbc",
version: "1.0.0",
},
// apache-airflow-providers-cncf-kubernetes
[
"airflow",
"executors",
"kubernetes_executor_types",
rest @ ("ALL_NAMESPACES" | "POD_EXECUTOR_DONE_KEY"),
] => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.cncf.kubernetes.executors.kubernetes_executor_types",
provider: "cncf-kubernetes",
version: "7.4.0",
},
[
"airflow",
"kubernetes",
"k8s_model",
rest @ ("K8SModel" | "append_to_pod"),
] => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.cncf.kubernetes.k8s_model",
provider: "cncf-kubernetes",
version: "7.4.0",
},
[
"airflow",
"kubernetes",
"kube_client",
rest @ ("_disable_verify_ssl" | "_enable_tcp_keepalive" | "get_kube_client"),
] => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.cncf.kubernetes.kube_client",
provider: "cncf-kubernetes",
version: "7.4.0",
},
[
"airflow",
"kubernetes",
"kubernetes_helper_functions",
"add_pod_suffix",
] => ProviderReplacement::Rename {
module: "airflow.providers.cncf.kubernetes.kubernetes_helper_functions",
name: "add_unique_suffix",
provider: "cncf-kubernetes",
version: "10.0.0",
},
[
"airflow",
"kubernetes",
"kubernetes_helper_functions",
"create_pod_id",
] => ProviderReplacement::Rename {
module: "airflow.providers.cncf.kubernetes.kubernetes_helper_functions",
name: "create_unique_id",
provider: "cncf-kubernetes",
version: "10.0.0",
},
[
"airflow",
"kubernetes",
"kubernetes_helper_functions",
rest @ ("annotations_for_logging_task_metadata"
| "annotations_to_key"
| "get_logs_task_metadata"
| "rand_str"),
] => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.cncf.kubernetes.kubernetes_helper_functions",
provider: "cncf-kubernetes",
version: "7.4.0",
},
["airflow", "kubernetes", "pod", rest] => match *rest {
"Port" => ProviderReplacement::Rename {
module: "kubernetes.client.models",
name: "V1ContainerPort",
provider: "cncf-kubernetes",
version: "7.4.0",
},
"Resources" => ProviderReplacement::Rename {
module: "kubernetes.client.models",
name: "V1ResourceRequirements",
provider: "cncf-kubernetes",
version: "7.4.0",
},
_ => return,
},
["airflow", "kubernetes", "pod_generator", rest] => match *rest {
"datetime_to_label_safe_datestring"
| "extend_object_field"
| "label_safe_datestring_to_datetime"
| "make_safe_label_value"
| "merge_objects"
| "PodGenerator" => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.cncf.kubernetes.pod_generator",
provider: "cncf-kubernetes",
version: "7.4.0",
},
"PodDefaults" => ProviderReplacement::Rename {
module: "airflow.providers.cncf.kubernetes.utils.xcom_sidecar",
name: "PodDefaults",
provider: "cncf-kubernetes",
version: "7.4.0",
},
"PodGeneratorDeprecated" => ProviderReplacement::Rename {
module: "airflow.providers.cncf.kubernetes.pod_generator",
name: "PodGenerator",
provider: "cncf-kubernetes",
version: "7.4.0",
},
"add_pod_suffix" => ProviderReplacement::Rename {
module: "airflow.providers.cncf.kubernetes.kubernetes_helper_functions",
name: "add_unique_suffix",
provider: "cncf-kubernetes",
version: "10.0.0",
},
"rand_str" => ProviderReplacement::SourceModuleMovedToProvider {
module: "airflow.providers.cncf.kubernetes.kubernetes_helper_functions",
name: "rand_str".to_string(),
provider: "cncf-kubernetes",
version: "7.4.0",
},
_ => return,
},
[
"airflow",
"kubernetes",
"pod_generator_deprecated",
rest @ ("make_safe_label_value" | "PodGenerator"),
] => ProviderReplacement::SourceModuleMovedToProvider {
name: (*rest).to_string(),
module: "airflow.providers.cncf.kubernetes.pod_generator",
provider: "cncf-kubernetes",
version: "7.4.0",
},
[
"airflow",
"kubernetes",
"pod_generator_deprecated" | "pod_launcher_deprecated",
"PodDefaults",
] => ProviderReplacement::Rename {
module: "airflow.providers.cncf.kubernetes.utils.xcom_sidecar",
name: "PodDefaults",
provider: "cncf-kubernetes",
version: "7.4.0",
},
[
"airflow",
"kubernetes",
"pod_launcher_deprecated",
"get_kube_client",
] => ProviderReplacement::Rename {
module: "airflow.providers.cncf.kubernetes.kube_client",
name: "get_kube_client",
provider: "cncf-kubernetes",
version: "7.4.0",
},
[
"airflow",
"kubernetes",
"pod_launcher" | "pod_launcher_deprecated",
"PodLauncher",
] => ProviderReplacement::Rename {
module: "airflow.providers.cncf.kubernetes.utils.pod_manager",
name: "PodManager",
provider: "cncf-kubernetes",
version: "3.0.0",
},
[
"airflow",
"kubernetes",
"pod_launcher" | "pod_launcher_deprecated",
"PodStatus",
] => ProviderReplacement::Rename {
module: " airflow.providers.cncf.kubernetes.utils.pod_manager",
name: "PodPhase",
provider: "cncf-kubernetes",
version: "3.0.0",
},
[
"airflow",
"kubernetes",
"pod_runtime_info_env",
"PodRuntimeInfoEnv",
] => ProviderReplacement::Rename {
module: "kubernetes.client.models",
name: "V1EnvVar",
provider: "cncf-kubernetes",
version: "7.4.0",
},
["airflow", "kubernetes", "secret", rest] => match *rest {
"K8SModel" => ProviderReplacement::Rename {
module: "airflow.providers.cncf.kubernetes.k8s_model",
name: "K8SModel",
provider: "cncf-kubernetes",
version: "7.4.0",
},
"Secret" => ProviderReplacement::Rename {
module: "airflow.providers.cncf.kubernetes.secret",
name: "Secret",
provider: "cncf-kubernetes",
version: "7.4.0",
},
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |