repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/airflow/rules/mod.rs | crates/ruff_linter/src/rules/airflow/rules/mod.rs | pub(crate) use dag_schedule_argument::*;
pub(crate) use function_signature_change_in_3::*;
pub(crate) use moved_to_provider_in_3::*;
pub(crate) use removal_in_3::*;
pub(crate) use suggested_to_move_to_provider_in_3::*;
pub(crate) use suggested_to_update_3_0::*;
pub(crate) use task_variable_name::*;
mod dag_schedule_argument;
mod function_signature_change_in_3;
mod moved_to_provider_in_3;
mod removal_in_3;
mod suggested_to_move_to_provider_in_3;
mod suggested_to_update_3_0;
mod task_variable_name;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/airflow/rules/removal_in_3.rs | crates/ruff_linter/src/rules/airflow/rules/removal_in_3.rs | use crate::checkers::ast::Checker;
use crate::rules::airflow::helpers::{
Replacement, generate_import_edit, generate_remove_and_runtime_import_edit,
is_airflow_builtin_or_provider, is_guarded_by_try_except,
};
use crate::{Edit, Fix, FixAvailability, Violation};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::map_callable;
use ruff_python_ast::{
Arguments, Expr, ExprAttribute, ExprCall, ExprContext, ExprName, ExprStringLiteral,
ExprSubscript, Stmt, StmtClassDef, StmtFunctionDef, name::QualifiedName,
};
use ruff_python_semantic::Modules;
use ruff_python_semantic::ScopeKind;
use ruff_python_semantic::SemanticModel;
use ruff_python_semantic::analyze::typing;
use ruff_text_size::Ranged;
use ruff_text_size::TextRange;
/// ## What it does
/// Checks for uses of deprecated Airflow functions and values.
///
/// ## Why is this bad?
/// Airflow 3.0 removed various deprecated functions, members, and other
/// values. Some have more modern replacements. Others are considered too niche
/// and not worth continued maintenance in Airflow.
///
/// ## Example
/// ```python
/// from airflow.utils.dates import days_ago
///
///
/// yesterday = days_ago(today, 1)
/// ```
///
/// Use instead:
/// ```python
/// from datetime import timedelta
///
///
/// yesterday = today - timedelta(days=1)
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.13.0")]
pub(crate) struct Airflow3Removal {
    // The removed symbol (attribute, argument, or qualified name), as written.
    deprecated: String,
    // Suggested replacement, if any; drives both `fix_title` and the autofix.
    replacement: Replacement,
}
impl Violation for Airflow3Removal {
    // Some removals carry an automatic fix (attribute renames), others do not.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        let Airflow3Removal {
            deprecated,
            replacement,
        } = self;
        match replacement {
            // The primary message is the same for every replacement kind; the
            // replacement itself is surfaced via `fix_title` below.
            Replacement::None
            | Replacement::AttrName(_)
            | Replacement::Message(_)
            | Replacement::Rename { module: _, name: _ }
            | Replacement::SourceModuleMoved { module: _, name: _ } => {
                format!("`{deprecated}` is removed in Airflow 3.0")
            }
        }
    }
    // Describe the suggested replacement, if one exists.
    fn fix_title(&self) -> Option<String> {
        let Airflow3Removal { replacement, .. } = self;
        match replacement {
            Replacement::None => None,
            Replacement::AttrName(name) => Some(format!("Use `{name}` instead")),
            Replacement::Message(message) => Some((*message).to_string()),
            Replacement::Rename { module, name } => {
                Some(format!("Use `{name}` from `{module}` instead."))
            }
            Replacement::SourceModuleMoved { module, name } => {
                Some(format!("Use `{name}` from `{module}` instead."))
            }
        }
    }
}
/// AIR301 -- expression-level entry point; dispatches to the specific checks
/// based on the expression kind (call, attribute, name, subscript).
pub(crate) fn airflow_3_removal_expr(checker: &Checker, expr: &Expr) {
    // All checks are Airflow-specific; bail out unless `airflow` was imported.
    if !checker.semantic().seen_module(Modules::AIRFLOW) {
        return;
    }
    match expr {
        Expr::Call(
            call_expr @ ExprCall {
                func, arguments, ..
            },
        ) => {
            // Removed keyword arguments (requires a resolvable callee name).
            if let Some(qualified_name) = checker.semantic().resolve_qualified_name(func) {
                check_call_arguments(checker, &qualified_name, arguments);
            }
            // Removed methods, and removed context keys via `context.get(...)`.
            check_method(checker, call_expr);
            check_context_key_usage_in_call(checker, call_expr);
        }
        Expr::Attribute(attribute_expr @ ExprAttribute { range, .. }) => {
            check_name(checker, expr, *range);
            check_class_attribute(checker, attribute_expr);
        }
        Expr::Name(ExprName {
            id,
            ctx,
            range,
            node_index: _,
        }) => {
            check_name(checker, expr, *range);
            // Names bound (`Store` context) inside a class body may be Airflow
            // plugin extensions; see `check_airflow_plugin_extension`
            // (defined elsewhere in this file).
            if matches!(ctx, ExprContext::Store) {
                if let ScopeKind::Class(class_def) = checker.semantic().current_scope().kind {
                    check_airflow_plugin_extension(checker, expr, id, class_def);
                }
            }
        }
        Expr::Subscript(subscript_expr) => {
            // Removed context keys via `context["..."]`.
            check_context_key_usage_in_subscript(checker, subscript_expr);
        }
        _ => {}
    }
}
/// AIR301 -- function-definition-level entry point: flags removed task-context
/// keys that appear as function parameters.
pub(crate) fn airflow_3_removal_function_def(checker: &Checker, function_def: &StmtFunctionDef) {
    // All checks are Airflow-specific; do nothing unless `airflow` was imported.
    if checker.semantic().seen_module(Modules::AIRFLOW) {
        check_function_parameters(checker, function_def);
    }
}
/// Task-context keys removed in Airflow 3.0. Referencing any of these — as a
/// task parameter, via `context.get("...")`, or via `context["..."]` — is
/// reported by AIR301. Kept sorted alphabetically.
const REMOVED_CONTEXT_KEYS: [&str; 12] = [
    "conf",
    "execution_date",
    "next_ds",
    "next_ds_nodash",
    "next_execution_date",
    "prev_ds",
    "prev_ds_nodash",
    "prev_execution_date",
    "prev_execution_date_success",
    "tomorrow_ds",
    "yesterday_ds",
    "yesterday_ds_nodash",
];
/// Check the function parameters for removed context keys.
///
/// For example:
///
/// ```python
/// from airflow.decorators import task
///
/// @task
/// def another_task(execution_date, **kwargs):
/// # ^^^^^^^^^^^^^^
/// # 'execution_date' is removed in Airflow 3.0
/// pass
/// ```
fn check_function_parameters(checker: &Checker, function_def: &StmtFunctionDef) {
    // Only callables that Airflow invokes with the task context are relevant:
    // `@task`-decorated functions and `execute` methods of operator subclasses.
    if !is_airflow_task(function_def, checker.semantic())
        && !is_execute_method_inherits_from_airflow_operator(function_def, checker.semantic())
    {
        return;
    }
    for param in function_def.parameters.iter_non_variadic_params() {
        let param_name = param.name();
        // Fixed: the borrow here (`&param_name.as_str()`) had been corrupted to
        // the mojibake `¶m_name` (`&para;` rendered as `¶`), which does not
        // compile. `contains` on `[&str; N]` takes `&&str`, hence the `&`.
        if REMOVED_CONTEXT_KEYS.contains(&param_name.as_str()) {
            checker.report_diagnostic(
                Airflow3Removal {
                    deprecated: param_name.to_string(),
                    replacement: Replacement::None,
                },
                param_name.range(),
            );
        }
    }
}
/// Check whether a removed Airflow argument is passed.
///
/// For example:
///
/// ```python
/// from airflow import DAG
///
/// DAG(schedule_interval="@daily")
/// ```
fn check_call_arguments(checker: &Checker, qualified_name: &QualifiedName, arguments: &Arguments) {
    match qualified_name.segments() {
        // `DAG(...)` constructor or `@dag(...)` decorator.
        ["airflow", .., "DAG" | "dag"] => {
            // with replacement
            diagnostic_for_argument(checker, arguments, "concurrency", Some("max_active_tasks"));
            diagnostic_for_argument(checker, arguments, "fail_stop", Some("fail_fast"));
            diagnostic_for_argument(checker, arguments, "schedule_interval", Some("schedule"));
            diagnostic_for_argument(checker, arguments, "timetable", Some("schedule"));
            // without replacement
            diagnostic_for_argument(checker, arguments, "default_view", None);
            diagnostic_for_argument(checker, arguments, "orientation", None);
        }
        segments => {
            if is_airflow_auth_manager(segments) {
                // Auth-manager constructors take no arguments at all in 3.0, so
                // any argument list is flagged as a whole.
                if !arguments.is_empty() {
                    checker.report_diagnostic(
                        Airflow3Removal {
                            deprecated: String::from("appbuilder"),
                            replacement: Replacement::Message(
                                "The constructor takes no parameter now",
                            ),
                        },
                        arguments.range(),
                    );
                }
            } else if is_airflow_task_handler(segments) {
                diagnostic_for_argument(checker, arguments, "filename_template", None);
            } else if is_airflow_builtin_or_provider(segments, "operators", "Operator") {
                // Applies to every operator.
                diagnostic_for_argument(
                    checker,
                    arguments,
                    "task_concurrency",
                    Some("max_active_tis_per_dag"),
                );
                // Operator-specific removed arguments.
                match segments {
                    [
                        "airflow",
                        ..,
                        "operators",
                        "trigger_dagrun",
                        "TriggerDagRunOperator",
                    ] => {
                        diagnostic_for_argument(
                            checker,
                            arguments,
                            "execution_date",
                            Some("logical_date"),
                        );
                    }
                    [
                        "airflow",
                        ..,
                        "operators",
                        "datetime",
                        "BranchDateTimeOperator",
                    ]
                    | [
                        "airflow",
                        ..,
                        "operators",
                        "weekday",
                        "BranchDayOfWeekOperator",
                    ]
                    | ["airflow", .., "sensors", "weekday", "DayOfWeekSensor"] => {
                        diagnostic_for_argument(
                            checker,
                            arguments,
                            "use_task_execution_day",
                            Some("use_task_logical_date"),
                        );
                    }
                    _ => {}
                }
            }
        }
    }
}
/// Check whether a removed Airflow class attribute (including properties) is accessed.
///
/// For example:
///
/// ```python
/// from airflow.lineage.hook import DatasetLineageInfo
///
/// info = DatasetLineageInfo()
/// info.dataset
/// ```
fn check_class_attribute(checker: &Checker, attribute_expr: &ExprAttribute) {
    let ExprAttribute { value, attr, .. } = attribute_expr;
    // Resolve the receiver back to the class it was assigned from.
    let Some(qualname) = typing::resolve_assignment(value, checker.semantic()) else {
        return;
    };
    // Map (class, attribute) to its Airflow 3.0 replacement; bail out for
    // anything not in the table.
    let replacement = match *qualname.segments() {
        ["airflow", "providers_manager", "ProvidersManager"] => match attr.as_str() {
            "dataset_factories" => Replacement::AttrName("asset_factories"),
            "dataset_uri_handlers" => Replacement::AttrName("asset_uri_handlers"),
            "dataset_to_openlineage_converters" => {
                Replacement::AttrName("asset_to_openlineage_converters")
            }
            _ => return,
        },
        ["airflow", "lineage", "hook", "DatasetLineageInfo"] => match attr.as_str() {
            "dataset" => Replacement::AttrName("asset"),
            _ => return,
        },
        _ => return,
    };
    // Create the `Fix` first to avoid cloning `Replacement`.
    let fix = if let Replacement::AttrName(name) = replacement {
        Some(Fix::safe_edit(Edit::range_replacement(
            name.to_string(),
            attr.range(),
        )))
    } else {
        None
    };
    let mut diagnostic = checker.report_diagnostic(
        Airflow3Removal {
            deprecated: attr.to_string(),
            replacement,
        },
        attr.range(),
    );
    if let Some(fix) = fix {
        diagnostic.set_fix(fix);
    }
}
/// Checks whether an Airflow 3.0–removed context key is used in a function decorated with `@task`.
///
/// Specifically, it flags the following two scenarios:
///
/// 1. A removed context key accessed via `context.get("...")` where context is coming from
/// `get_current_context` function.
///
/// ```python
/// from airflow.decorators import task
/// from airflow.utils.context import get_current_context
///
///
/// @task
/// def my_task():
/// context = get_current_context()
/// context.get("conf") # 'conf' is removed in Airflow 3.0
/// ```
///
/// 2. A removed context key accessed via `context.get("...")` where context is a kwarg parameter.
///
/// ```python
/// from airflow.decorators import task
///
///
/// @task
/// def my_task(**context):
/// context.get("conf") # 'conf' is removed in Airflow 3.0
/// ```
fn check_context_key_usage_in_call(checker: &Checker, call_expr: &ExprCall) {
    if !in_airflow_task_function(checker.semantic()) {
        return;
    }
    // Only `<receiver>.get(...)` calls are of interest.
    let Expr::Attribute(ExprAttribute { value, attr, .. }) = &*call_expr.func else {
        return;
    };
    if attr.as_str() != "get" {
        return;
    }
    // The receiver must be the task context: either a `**context` parameter or a
    // variable assigned from `airflow.utils.context.get_current_context()`.
    let is_kwarg_parameter = value
        .as_name_expr()
        .is_some_and(|name| is_kwarg_parameter(checker.semantic(), name));
    let is_assigned_from_get_current_context =
        typing::resolve_assignment(value, checker.semantic()).is_some_and(|qualified_name| {
            matches!(
                qualified_name.segments(),
                ["airflow", "utils", "context", "get_current_context"]
            )
        });
    if !(is_kwarg_parameter || is_assigned_from_get_current_context) {
        return;
    }
    // Extract the first positional argument once, instead of re-resolving it on
    // every iteration of the key loop (it is loop-invariant). Only literal
    // string keys can be checked.
    let Some(Expr::StringLiteral(ExprStringLiteral {
        value,
        range,
        node_index: _,
    })) = call_expr.arguments.find_positional(0)
    else {
        return;
    };
    for removed_key in REMOVED_CONTEXT_KEYS {
        if value == removed_key {
            checker.report_diagnostic(
                Airflow3Removal {
                    deprecated: removed_key.to_string(),
                    replacement: Replacement::None,
                },
                *range,
            );
        }
    }
}
/// Check if a subscript expression accesses a removed Airflow context variable.
/// If a removed key is found, push a corresponding diagnostic.
fn check_context_key_usage_in_subscript(checker: &Checker, subscript: &ExprSubscript) {
    if !in_airflow_task_function(checker.semantic()) {
        return;
    }
    let ExprSubscript { value, slice, .. } = subscript;
    // Only literal string keys (e.g. `context["conf"]`) can be checked.
    let Some(ExprStringLiteral { value: key, .. }) = slice.as_string_literal_expr() else {
        return;
    };
    // The subscripted value must be the task context: a `**context` parameter or
    // a variable assigned from `get_current_context()`.
    let is_kwarg_parameter = value
        .as_name_expr()
        .is_some_and(|name| is_kwarg_parameter(checker.semantic(), name));
    let is_assigned_from_get_current_context =
        typing::resolve_assignment(value, checker.semantic()).is_some_and(|qualified_name| {
            matches!(
                qualified_name.segments(),
                ["airflow", "utils", "context", "get_current_context"]
            )
        });
    if !(is_kwarg_parameter || is_assigned_from_get_current_context) {
        return;
    }
    if REMOVED_CONTEXT_KEYS.contains(&key.to_str()) {
        checker.report_diagnostic(
            Airflow3Removal {
                deprecated: key.to_string(),
                replacement: Replacement::None,
            },
            slice.range(),
        );
    }
}
/// Whether `name` resolves to the `**kwargs` parameter of its enclosing
/// function definition.
fn is_kwarg_parameter(semantic: &SemanticModel, name: &ExprName) -> bool {
    // The name must have exactly one binding; otherwise we can't be sure which
    // definition it refers to.
    let Some(binding_id) = semantic.only_binding(name) else {
        return false;
    };
    let binding = semantic.binding(binding_id);
    // That binding must originate from a function definition statement.
    let Some(Stmt::FunctionDef(StmtFunctionDef { parameters, .. })) = binding.statement(semantic)
    else {
        return false;
    };
    // ...and the bound name must be that function's `**kwargs` parameter.
    match parameters.kwarg.as_deref() {
        Some(kwarg) => kwarg.name.as_str() == name.id.as_str(),
        None => false,
    }
}
/// Check whether a removed Airflow class method is called.
///
/// For example:
///
/// ```python
/// from airflow.datasets.manager import DatasetManager
///
/// manager = DatasetManager()
/// manager.register_dataset_change()
/// ```
fn check_method(checker: &Checker, call_expr: &ExprCall) {
    // Only `<receiver>.<method>(...)` calls are of interest.
    let Expr::Attribute(ExprAttribute { attr, value, .. }) = &*call_expr.func else {
        return;
    };
    // Resolve the receiver back to the class it was assigned from.
    let Some(qualname) = typing::resolve_assignment(value, checker.semantic()) else {
        return;
    };
    // Map (class, method) to its Airflow 3.0 replacement; bail out for anything
    // not in the table.
    let replacement = match qualname.segments() {
        ["airflow", "datasets", "manager", "DatasetManager"] => match attr.as_str() {
            "register_dataset_change" => Replacement::AttrName("register_asset_change"),
            "create_datasets" => Replacement::AttrName("create_assets"),
            "notify_dataset_created" => Replacement::AttrName("notify_asset_created"),
            "notify_dataset_changed" => Replacement::AttrName("notify_asset_changed"),
            "notify_dataset_alias_created" => Replacement::AttrName("notify_asset_alias_created"),
            _ => return,
        },
        ["airflow", "lineage", "hook", "HookLineageCollector"] => match attr.as_str() {
            "create_dataset" => Replacement::AttrName("create_asset"),
            "add_input_dataset" => Replacement::AttrName("add_input_asset"),
            "add_output_dataset" => Replacement::AttrName("add_output_asset"),
            "collected_datasets" => Replacement::AttrName("collected_assets"),
            _ => return,
        },
        ["airflow", "models", "dag", "DAG"] | ["airflow", "models", "DAG"] | ["airflow", "DAG"] => {
            match attr.as_str() {
                "create_dagrun" => Replacement::None,
                _ => return,
            }
        }
        ["airflow", "providers_manager", "ProvidersManager"] => match attr.as_str() {
            "initialize_providers_dataset_uri_resources" => {
                Replacement::AttrName("initialize_providers_asset_uri_resources")
            }
            _ => return,
        },
        [
            "airflow",
            "secrets",
            "local_filesystem",
            "LocalFilesystemBackend",
        ] => match attr.as_str() {
            "get_connections" => Replacement::AttrName("get_connection"),
            _ => return,
        },
        ["airflow", "datasets", ..] | ["airflow", "Dataset"] => match attr.as_str() {
            "iter_datasets" => Replacement::AttrName("iter_assets"),
            "iter_dataset_aliases" => Replacement::AttrName("iter_asset_aliases"),
            _ => return,
        },
        // Classes matched by category rather than exact name.
        segments => {
            if is_airflow_secret_backend(segments) {
                match attr.as_str() {
                    "get_conn_uri" => Replacement::AttrName("get_conn_value"),
                    "get_connections" => Replacement::AttrName("get_connection"),
                    _ => return,
                }
            } else if is_airflow_hook(segments) {
                match attr.as_str() {
                    "get_connections" => Replacement::AttrName("get_connection"),
                    _ => return,
                }
            } else if is_airflow_auth_manager(segments) {
                if attr.as_str() == "is_authorized_dataset" {
                    Replacement::AttrName("is_authorized_asset")
                } else {
                    return;
                }
            } else {
                return;
            }
        }
    };
    // Create the `Fix` first to avoid cloning `Replacement`.
    let fix = if let Replacement::AttrName(name) = replacement {
        Some(Fix::safe_edit(Edit::range_replacement(
            name.to_string(),
            attr.range(),
        )))
    } else {
        None
    };
    let mut diagnostic = checker.report_diagnostic(
        Airflow3Removal {
            deprecated: attr.to_string(),
            replacement,
        },
        attr.range(),
    );
    if let Some(fix) = fix {
        diagnostic.set_fix(fix);
    }
}
/// Check whether a removed Airflow name is used.
///
/// For example:
///
/// ```python
/// from airflow.operators import subdag
/// from airflow.operators.subdag import SubDagOperator
///
/// # Accessing via attribute
/// subdag.SubDagOperator()
///
/// # Or, directly
/// SubDagOperator()
/// ```
fn check_name(checker: &Checker, expr: &Expr, range: TextRange) {
let semantic = checker.semantic();
let Some(qualified_name) = semantic.resolve_qualified_name(expr) else {
return;
};
let replacement = match qualified_name.segments() {
// airflow.PY\d{1,2}
[
"airflow",
"PY36" | "PY37" | "PY38" | "PY39" | "PY310" | "PY311" | "PY312",
] => Replacement::Message("Use `sys.version_info` instead"),
// airflow.api_connexion.security
["airflow", "api_connexion", "security", "requires_access"] => Replacement::Message(
"Use `airflow.api_fastapi.core_api.security.requires_access_*` instead",
),
[
"airflow",
"api_connexion",
"security",
"requires_access_dataset",
] => Replacement::Rename {
module: "airflow.api_fastapi.core_api.security",
name: "requires_access_asset",
},
// airflow.auth.managers
[
"airflow",
"auth",
"managers",
"base_auth_manager",
"BaseAuthManager",
] => Replacement::Rename {
module: "airflow.api_fastapi.auth.managers.base_auth_manager",
name: "BaseAuthManager",
},
[
"airflow",
"auth",
"managers",
"models",
"resource_details",
"DatasetDetails",
] => Replacement::Rename {
module: "airflow.api_fastapi.auth.managers.models.resource_details",
name: "AssetDetails",
},
// airflow.configuration
[
"airflow",
"configuration",
rest @ ("as_dict" | "get" | "getboolean" | "getfloat" | "getint" | "has_option"
| "remove_option" | "set"),
] => Replacement::SourceModuleMoved {
module: "airflow.configuration",
name: format!("conf.{rest}"),
},
// airflow.contrib.*
["airflow", "contrib", ..] => {
Replacement::Message("The whole `airflow.contrib` module has been removed.")
}
// airflow.datasets.manager
["airflow", "datasets", "manager", rest] => match *rest {
"DatasetManager" => Replacement::Rename {
module: "airflow.assets.manager",
name: "AssetManager",
},
"dataset_manager" => Replacement::Rename {
module: "airflow.assets.manager",
name: "asset_manager",
},
"resolve_dataset_manager" => Replacement::Rename {
module: "airflow.assets.manager",
name: "resolve_asset_manager",
},
_ => return,
},
// airflow.datasets
["airflow", "datasets", "DatasetAliasEvent"] => Replacement::None,
["airflow", "datasets", "DatasetEvent"] => Replacement::Message(
"`DatasetEvent` has been made private in Airflow 3. \
Use `dict[str, Any]` for the time being. \
An `AssetEvent` type will be added to the apache-airflow-task-sdk in a future version.",
),
// airflow.hooks
["airflow", "hooks", "base_hook", "BaseHook"] => Replacement::Rename {
module: "airflow.hooks.base",
name: "BaseHook",
},
// airflow.lineage.hook
["airflow", "lineage", "hook", "DatasetLineageInfo"] => Replacement::Rename {
module: "airflow.lineage.hook",
name: "AssetLineageInfo",
},
// airflow.listeners.spec
["airflow", "listeners", "spec", "dataset", rest] => match *rest {
"on_dataset_created" => Replacement::Rename {
module: "airflow.listeners.spec.asset",
name: "on_asset_created",
},
"on_dataset_changed" => Replacement::Rename {
module: "airflow.listeners.spec.asset",
name: "on_asset_changed",
},
_ => return,
},
// airflow.metrics.validators
["airflow", "metrics", "validators", rest] => match *rest {
"AllowListValidator" => Replacement::Rename {
module: "airflow.metrics.validators",
name: "PatternAllowListValidator",
},
"BlockListValidator" => Replacement::Rename {
module: "airflow.metrics.validators",
name: "PatternBlockListValidator",
},
_ => return,
},
// airflow.notifications
["airflow", "notifications", "basenotifier", "BaseNotifier"] => Replacement::Rename {
module: "airflow.sdk.bases.notifier",
name: "BaseNotifier",
},
// airflow.operators
["airflow", "operators", "subdag", ..] => {
Replacement::Message("The whole `airflow.subdag` module has been removed.")
}
["airflow", "operators", "postgres_operator", "Mapping"] => Replacement::None,
["airflow", "operators", "python", "get_current_context"] => Replacement::Rename {
module: "airflow.sdk",
name: "get_current_context",
},
// airflow.secrets
["airflow", "secrets", "cache", "SecretCache"] => Replacement::Rename {
module: "airflow.sdk",
name: "SecretCache",
},
["airflow", "secrets", "local_filesystem", "load_connections"] => Replacement::Rename {
module: "airflow.secrets.local_filesystem",
name: "load_connections_dict",
},
// airflow.security
["airflow", "security", "permissions", "RESOURCE_DATASET"] => Replacement::Rename {
module: "airflow.security.permissions",
name: "RESOURCE_ASSET",
},
// airflow.sensors
[
"airflow",
"sensors",
"base_sensor_operator",
"BaseSensorOperator",
] => Replacement::Rename {
module: "airflow.sdk.bases.sensor",
name: "BaseSensorOperator",
},
// airflow.timetables
[
"airflow",
"timetables",
"simple",
"DatasetTriggeredTimetable",
] => Replacement::Rename {
module: "airflow.timetables.simple",
name: "AssetTriggeredTimetable",
},
// airflow.triggers
["airflow", "triggers", "external_task", "TaskStateTrigger"] => Replacement::None,
// airflow.utils
["airflow", "utils", rest @ ..] => match &rest {
// airflow.utils.dag_cycle_tester
["dag_cycle_tester", "test_cycle"] => Replacement::None,
// airflow.utils.db
["db", "create_session"] => Replacement::None,
// airflow.utils.decorators
["decorators", "apply_defaults"] => Replacement::Message(
"`apply_defaults` is now unconditionally done and can be safely removed.",
),
// airflow.utils.dates
["dates", "date_range"] => Replacement::None,
["dates", "days_ago"] => {
Replacement::Message("Use `pendulum.today('UTC').add(days=-N, ...)` instead")
}
[
"dates",
"parse_execution_date" | "round_time" | "scale_time_units" | "infer_time_unit",
] => Replacement::None,
// airflow.utils.file
["file", "TemporaryDirectory"] => Replacement::Rename {
module: "tempfile",
name: "TemporaryDirectory",
},
["file", "mkdirs"] => Replacement::Message("Use `pathlib.Path({path}).mkdir` instead"),
// airflow.utils.helpers
["helpers", "chain"] => Replacement::Rename {
module: "airflow.sdk",
name: "chain",
},
["helpers", "cross_downstream"] => Replacement::Rename {
module: "airflow.sdk",
name: "cross_downstream",
},
// TODO: update it as SourceModuleMoved
// airflow.utils.log.secrets_masker
["log", "secrets_masker"] => Replacement::Rename {
module: "airflow.sdk.execution_time",
name: "secrets_masker",
},
// airflow.utils.state
["state", "SHUTDOWN" | "terminating_states"] => Replacement::None,
// airflow.utils.trigger_rule
[
"trigger_rule",
"TriggerRule",
"DUMMY" | "NONE_FAILED_OR_SKIPPED",
] => Replacement::None,
_ => return,
},
// airflow.www
[
"airflow",
"www",
"auth",
"has_access" | "has_access_dataset",
] => Replacement::None,
[
"airflow",
"www",
"utils",
"get_sensitive_variables_fields" | "should_hide_value_for_key",
] => Replacement::None,
// airflow.providers.amazon
[
"airflow",
"providers",
"amazon",
"aws",
"datasets",
"s3",
rest,
] => match *rest {
"create_dataset" => Replacement::Rename {
module: "airflow.providers.amazon.aws.assets.s3",
name: "create_asset",
},
"convert_dataset_to_openlineage" => Replacement::Rename {
module: "airflow.providers.amazon.aws.assets.s3",
name: "convert_asset_to_openlineage",
},
"sanitize_uri" => Replacement::Rename {
module: "airflow.providers.amazon.aws.assets.s3",
name: "sanitize_uri",
},
_ => return,
},
[
"airflow",
"providers",
"amazon",
"aws",
"auth_manager",
"avp",
"entities",
"AvpEntities",
"DATASET",
] => Replacement::Rename {
module: "airflow.providers.amazon.aws.auth_manager.avp.entities",
name: "AvpEntities.ASSET",
},
// airflow.providers.common.io
// airflow.providers.common.io.datasets.file
[
"airflow",
"providers",
"common",
"io",
"datasets",
"file",
rest,
] => match *rest {
"create_dataset" => Replacement::Rename {
module: "airflow.providers.common.io.assets.file",
name: "create_asset",
},
"convert_dataset_to_openlineage" => Replacement::Rename {
module: "airflow.providers.common.io.assets.file",
name: "convert_asset_to_openlineage",
},
"sanitize_uri" => Replacement::Rename {
module: "airflow.providers.common.io.assets.file",
name: "sanitize_uri",
},
_ => return,
},
// airflow.providers.google
// airflow.providers.google.datasets
["airflow", "providers", "google", "datasets", rest @ ..] => match &rest {
["bigquery", "create_dataset"] => Replacement::Rename {
module: "airflow.providers.google.assets.bigquery",
name: "create_asset",
},
["gcs", "create_dataset"] => Replacement::Rename {
module: "airflow.providers.google.assets.gcs",
name: "create_asset",
},
["gcs", "convert_dataset_to_openlineage"] => Replacement::Rename {
module: "airflow.providers.google.assets.gcs",
name: "convert_asset_to_openlineage",
},
["gcs", "sanitize_uri"] => Replacement::Rename {
module: "airflow.providers.google.assets.gcs",
name: "sanitize_uri",
},
_ => return,
},
// airflow.providers.mysql
[
"airflow",
"providers",
"mysql",
"datasets",
"mysql",
"sanitize_uri",
] => Replacement::Rename {
module: "airflow.providers.mysql.assets.mysql",
name: "sanitize_uri",
},
// airflow.providers.postgres
[
"airflow",
"providers",
"postgres",
"datasets",
"postgres",
"sanitize_uri",
] => Replacement::Rename {
module: "airflow.providers.postgres.assets.postgres",
name: "sanitize_uri",
},
// airflow.providers.openlineage
// airflow.providers.openlineage.utils.utils
[
"airflow",
"providers",
"openlineage",
"utils",
"utils",
rest,
] => match *rest {
"DatasetInfo" => Replacement::Rename {
module: "airflow.providers.openlineage.utils.utils",
name: "AssetInfo",
},
"translate_airflow_dataset" => Replacement::Rename {
module: "airflow.providers.openlineage.utils.utils",
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/airflow/rules/function_signature_change_in_3.rs | crates/ruff_linter/src/rules/airflow/rules/function_signature_change_in_3.rs | use crate::checkers::ast::Checker;
use crate::{FixAvailability, Violation};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::name::QualifiedName;
use ruff_python_ast::{Arguments, Expr, ExprAttribute, ExprCall, Identifier};
use ruff_python_semantic::Modules;
use ruff_python_semantic::analyze::typing;
use ruff_text_size::Ranged;
/// ## What it does
/// Checks for Airflow function calls that will raise a runtime error in Airflow 3.0
/// due to function signature changes, such as functions that changed to accept only
/// keyword arguments, parameter reordering, or parameter type changes.
///
/// ## Why is this bad?
/// Airflow 3.0 introduces changes to function signatures. Code that
/// worked in Airflow 2.x will raise a runtime error if not updated in Airflow
/// 3.0.
///
/// ## Example
/// ```python
/// from airflow.lineage.hook import HookLineageCollector
///
/// collector = HookLineageCollector()
/// # Passing positional arguments will raise a runtime error in Airflow 3.0
/// collector.create_asset("s3://bucket/key")
/// ```
///
/// Use instead:
/// ```python
/// from airflow.lineage.hook import HookLineageCollector
///
/// collector = HookLineageCollector()
/// # Passing arguments as keyword arguments instead of positional arguments
/// collector.create_asset(uri="s3://bucket/key")
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.14.11")]
pub(crate) struct Airflow3IncompatibleFunctionSignature {
    // Name of the method whose signature changed (e.g. `create_asset`).
    function_name: String,
    // Kind of signature change; carries the user-facing remediation hint.
    change_type: FunctionSignatureChangeType,
}
impl Violation for Airflow3IncompatibleFunctionSignature {
    // This rule offers no automatic fix.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::None;
    #[derive_message_formats]
    fn message(&self) -> String {
        let Airflow3IncompatibleFunctionSignature {
            function_name,
            change_type,
        } = self;
        match change_type {
            FunctionSignatureChangeType::KeywordOnly { .. } => {
                format!("`{function_name}` signature is changed in Airflow 3.0")
            }
        }
    }
    // Surface the remediation hint carried by the change type.
    fn fix_title(&self) -> Option<String> {
        let Airflow3IncompatibleFunctionSignature { change_type, .. } = self;
        match change_type {
            FunctionSignatureChangeType::KeywordOnly { message } => Some(message.to_string()),
        }
    }
}
/// AIR303 -- entry point: flags method calls whose signature changed in
/// Airflow 3.0 (currently keyword-only argument changes).
pub(crate) fn airflow_3_incompatible_function_signature(checker: &Checker, expr: &Expr) {
    // All checks are Airflow-specific; bail out unless `airflow` was imported.
    if !checker.semantic().seen_module(Modules::AIRFLOW) {
        return;
    }
    // Only method calls of the form `<receiver>.<method>(...)` are of interest.
    let Expr::Call(ExprCall {
        func, arguments, ..
    }) = expr
    else {
        return;
    };
    let Expr::Attribute(ExprAttribute { attr, value, .. }) = func.as_ref() else {
        return;
    };
    // Resolve the qualified name: try variable assignments first, then fall back to direct
    // constructor calls.
    let qualified_name = typing::resolve_assignment(value, checker.semantic()).or_else(|| {
        value
            .as_call_expr()
            .and_then(|call| checker.semantic().resolve_qualified_name(&call.func))
    });
    let Some(qualified_name) = qualified_name else {
        return;
    };
    check_keyword_only_method(checker, &qualified_name, attr, arguments);
}
/// Flag `HookLineageCollector.create_asset(...)` calls that pass positional
/// arguments; the method accepts only keyword arguments in Airflow 3.0.
fn check_keyword_only_method(
    checker: &Checker,
    qualified_name: &QualifiedName,
    attr: &Identifier,
    arguments: &Arguments,
) {
    // A positional argument is either a value at index 0 or any `*args` splat.
    let has_positional_args =
        arguments.find_positional(0).is_some() || arguments.args.iter().any(Expr::is_starred_expr);
    // Guard clauses: nothing to report unless the right method is called with
    // positional arguments.
    if !has_positional_args || attr.as_str() != "create_asset" {
        return;
    }
    if matches!(
        qualified_name.segments(),
        ["airflow", "lineage", "hook", "HookLineageCollector"]
    ) {
        checker.report_diagnostic(
            Airflow3IncompatibleFunctionSignature {
                function_name: attr.to_string(),
                change_type: FunctionSignatureChangeType::KeywordOnly {
                    message: "Pass positional arguments as keyword arguments (e.g., `create_asset(uri=...)`)",
                },
            },
            attr.range(),
        );
    }
}
/// Kinds of signature changes detected by AIR303.
#[derive(Clone, Debug, Eq, PartialEq)]
pub(crate) enum FunctionSignatureChangeType {
    /// Function signature changed to only accept keyword arguments.
    /// `message` is the user-facing remediation hint shown as the fix title.
    KeywordOnly { message: &'static str },
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/airflow/rules/suggested_to_update_3_0.rs | crates/ruff_linter/src/rules/airflow/rules/suggested_to_update_3_0.rs | use crate::checkers::ast::Checker;
use crate::rules::airflow::helpers::{Replacement, is_airflow_builtin_or_provider};
use crate::rules::airflow::helpers::{
generate_import_edit, generate_remove_and_runtime_import_edit, is_guarded_by_try_except,
};
use crate::{Edit, Fix, FixAvailability, Violation};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{Arguments, Expr, ExprAttribute, ExprCall, ExprName, name::QualifiedName};
use ruff_python_semantic::Modules;
use ruff_text_size::Ranged;
use ruff_text_size::TextRange;
/// ## What it does
/// Checks for uses of deprecated Airflow functions and values that still have
/// a compatibility layer.
///
/// ## Why is this bad?
/// Airflow 3.0 removed various deprecated functions, members, and other
/// values. Some have more modern replacements. Others are considered too niche
/// and not worth continued maintenance in Airflow.
/// Even though these symbols still work fine on Airflow 3.0, they are expected to be removed in a future version.
/// Where available, users should replace the removed functionality with the new alternatives.
///
/// ## Example
/// ```python
/// from airflow import Dataset
///
///
/// Dataset(uri="test://test/")
/// ```
///
/// Use instead:
/// ```python
/// from airflow.sdk import Asset
///
///
/// Asset(uri="test://test/")
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.13.0")]
pub(crate) struct Airflow3SuggestedUpdate {
    // The deprecated symbol or argument, as written in the user's code.
    deprecated: String,
    // Suggested replacement, if any; drives both `fix_title` and the autofix.
    replacement: Replacement,
}
impl Violation for Airflow3SuggestedUpdate {
    // Some deprecations carry an automatic fix (renames), others do not.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        let Airflow3SuggestedUpdate {
            deprecated,
            replacement,
        } = self;
        match replacement {
            // The primary message is the same for every replacement kind; the
            // replacement itself is surfaced via `fix_title` below.
            Replacement::None
            | Replacement::AttrName(_)
            | Replacement::Message(_)
            | Replacement::Rename { module: _, name: _ }
            | Replacement::SourceModuleMoved { module: _, name: _ } => {
                format!(
                    "`{deprecated}` is removed in Airflow 3.0; \
                    It still works in Airflow 3.0 but is expected to be removed in a future version."
                )
            }
        }
    }
    // Describe the suggested replacement, if one exists.
    fn fix_title(&self) -> Option<String> {
        let Airflow3SuggestedUpdate { replacement, .. } = self;
        match replacement {
            Replacement::None => None,
            Replacement::AttrName(name) => Some(format!("Use `{name}` instead")),
            Replacement::Message(message) => Some((*message).to_string()),
            Replacement::Rename { module, name } => {
                Some(format!("Use `{name}` from `{module}` instead."))
            }
            Replacement::SourceModuleMoved { module, name } => {
                Some(format!("Use `{name}` from `{module}` instead."))
            }
        }
    }
}
/// AIR311
pub(crate) fn airflow_3_0_suggested_update_expr(checker: &Checker, expr: &Expr) {
    // Bail out early unless the module references `airflow` at all.
    if !checker.semantic().seen_module(Modules::AIRFLOW) {
        return;
    }

    match expr {
        // `SomeDeprecated(...)`: inspect the call's arguments.
        Expr::Call(ExprCall {
            func, arguments, ..
        }) => {
            let Some(qualified_name) = checker.semantic().resolve_qualified_name(func) else {
                return;
            };
            check_call_arguments(checker, &qualified_name, arguments);
        }
        // `module.SomeDeprecated`: report on the attribute segment only.
        Expr::Attribute(ExprAttribute { attr, .. }) => {
            check_name(checker, expr, attr.range());
        }
        // Bare `SomeDeprecated`: report on the whole name.
        Expr::Name(name) => {
            check_name(checker, expr, name.range());
        }
        _ => {}
    }
}
/// Check if the `deprecated` keyword argument is being used and create a diagnostic if so along
/// with a possible `replacement`.
fn diagnostic_for_argument(
    checker: &Checker,
    arguments: &Arguments,
    deprecated: &str,
    replacement: Option<&'static str>,
) {
    let Some(keyword) = arguments.find_keyword(deprecated) else {
        return;
    };

    // Point at the keyword name when present; otherwise at the whole keyword node.
    let range = match keyword.arg.as_ref() {
        Some(arg) => arg.range(),
        None => keyword.range(),
    };

    let violation = Airflow3SuggestedUpdate {
        deprecated: deprecated.to_string(),
        replacement: replacement.map_or(Replacement::None, Replacement::AttrName),
    };
    let mut diagnostic = checker.report_diagnostic(violation, range);

    // When a replacement keyword is known, offer to swap the argument name in place.
    if let Some(name) = replacement {
        diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
            name.to_string(),
            range,
        )));
    }
}
/// Check whether a removed Airflow argument is passed.
///
/// For example:
///
/// ```python
/// from airflow import DAG
///
/// DAG(sla="@daily")
/// ```
fn check_call_arguments(checker: &Checker, qualified_name: &QualifiedName, arguments: &Arguments) {
    let segments = qualified_name.segments();
    match segments {
        // `DAG(...)` / `@dag(...)`: `sla_miss_callback` has no replacement.
        ["airflow", .., "DAG" | "dag"] => {
            diagnostic_for_argument(checker, arguments, "sla_miss_callback", None);
        }
        // `DatasetOrTimeSchedule(datasets=...)` should become `assets=...`.
        ["airflow", "timetables", "datasets", "DatasetOrTimeSchedule"] => {
            diagnostic_for_argument(checker, arguments, "datasets", Some("assets"));
        }
        // Operators (built-in or provider-supplied) no longer accept `sla`.
        _ => {
            if is_airflow_builtin_or_provider(segments, "operators", "Operator") {
                diagnostic_for_argument(checker, arguments, "sla", None);
            }
        }
    }
}
/// Check whether a removed Airflow name is used.
///
/// For example:
///
/// ```python
/// from airflow import Dataset
/// from airflow import datasets
///
/// # Accessing via attribute
/// datasets.Dataset()
///
/// # Or, directly
/// Dataset()
/// ```
fn check_name(checker: &Checker, expr: &Expr, range: TextRange) {
    let semantic = checker.semantic();

    let Some(qualified_name) = semantic.resolve_qualified_name(expr) else {
        return;
    };

    // Map the deprecated qualified name to its suggested replacement. Arms are
    // ordered from most to least specific; unknown names return early.
    let replacement = match qualified_name.segments() {
        // airflow.datasets.metadata
        ["airflow", "datasets", "metadata", "Metadata"] => Replacement::Rename {
            module: "airflow.sdk",
            name: "Metadata",
        },
        // airflow.datasets
        ["airflow", "Dataset"] | ["airflow", "datasets", "Dataset"] => Replacement::Rename {
            module: "airflow.sdk",
            name: "Asset",
        },
        ["airflow", "datasets", rest] => match *rest {
            // No replacement exists for `DatasetAliasEvent`.
            "DatasetAliasEvent" => Replacement::None,
            "DatasetAlias" => Replacement::Rename {
                module: "airflow.sdk",
                name: "AssetAlias",
            },
            "DatasetAll" => Replacement::Rename {
                module: "airflow.sdk",
                name: "AssetAll",
            },
            "DatasetAny" => Replacement::Rename {
                module: "airflow.sdk",
                name: "AssetAny",
            },
            "expand_alias_to_datasets" => Replacement::Rename {
                module: "airflow.models.asset",
                name: "expand_alias_to_assets",
            },
            _ => return,
        },
        // airflow.decorators
        [
            "airflow",
            "decorators",
            rest @ ("dag" | "task" | "task_group" | "setup" | "teardown"),
        ] => Replacement::SourceModuleMoved {
            module: "airflow.sdk",
            name: (*rest).to_string(),
        },
        [
            "airflow",
            "decorators",
            "base",
            rest @ ("DecoratedMappedOperator"
            | "DecoratedOperator"
            | "TaskDecorator"
            | "get_unique_task_id"
            | "task_decorator_factory"),
        ] => Replacement::SourceModuleMoved {
            module: "airflow.sdk.bases.decorator",
            name: (*rest).to_string(),
        },
        // airflow.io
        ["airflow", "io", "path", "ObjectStoragePath"] => Replacement::SourceModuleMoved {
            module: "airflow.sdk",
            name: "ObjectStoragePath".to_string(),
        },
        ["airflow", "io", "store", "attach"] => Replacement::SourceModuleMoved {
            module: "airflow.sdk.io",
            name: "attach".to_string(),
        },
        // airflow.models
        ["airflow", "models", rest @ ("Connection" | "Variable")] => {
            Replacement::SourceModuleMoved {
                module: "airflow.sdk",
                name: (*rest).to_string(),
            }
        }
        [
            "airflow",
            "models",
            ..,
            rest @ ("Param" | "ParamsDict" | "DagParam"),
        ] => Replacement::SourceModuleMoved {
            module: "airflow.sdk.definitions.param",
            name: (*rest).to_string(),
        },
        // airflow.models.baseoperator
        [
            "airflow",
            "models",
            "baseoperator",
            rest @ ("chain" | "chain_linear" | "cross_downstream"),
        ] => Replacement::SourceModuleMoved {
            module: "airflow.sdk",
            name: (*rest).to_string(),
        },
        ["airflow", "models", "baseoperatorlink", "BaseOperatorLink"] => Replacement::Rename {
            module: "airflow.sdk",
            name: "BaseOperatorLink",
        },
        // airflow.model..DAG
        ["airflow", "models", "dag", "DAG"] | ["airflow", "models", "DAG"] | ["airflow", "DAG"] => {
            Replacement::SourceModuleMoved {
                module: "airflow.sdk",
                name: "DAG".to_string(),
            }
        }
        // airflow.sensors.base
        [
            "airflow",
            "sensors",
            "base",
            rest @ ("BaseSensorOperator" | "PokeReturnValue" | "poke_mode_only"),
        ] => Replacement::SourceModuleMoved {
            module: "airflow.sdk",
            name: (*rest).to_string(),
        },
        // airflow.timetables
        ["airflow", "timetables", "datasets", "DatasetOrTimeSchedule"] => Replacement::Rename {
            module: "airflow.timetables.assets",
            name: "AssetOrTimeSchedule",
        },
        // airflow.utils
        [
            "airflow",
            "utils",
            "dag_parsing_context",
            "get_parsing_context",
        ] => Replacement::Rename {
            module: "airflow.sdk",
            name: "get_parsing_context",
        },
        _ => return,
    };

    // For renames and module moves we know the target module and symbol, so a
    // fix can be attempted; any other variant is reported without a fix.
    let (module, name) = match &replacement {
        Replacement::Rename { module, name } => (module, *name),
        Replacement::SourceModuleMoved { module, name } => (module, name.as_str()),
        _ => {
            checker.report_diagnostic(
                Airflow3SuggestedUpdate {
                    deprecated: qualified_name.to_string(),
                    replacement: replacement.clone(),
                },
                range,
            );
            return;
        }
    };

    // Respect `try: ... except ImportError:` compatibility shims.
    if is_guarded_by_try_except(expr, module, name, checker.semantic()) {
        return;
    }

    let mut diagnostic = checker.report_diagnostic(
        Airflow3SuggestedUpdate {
            deprecated: qualified_name.to_string(),
            replacement: replacement.clone(),
        },
        range,
    );
    // Prefer rewriting an existing import; otherwise fall back to the
    // remove-and-reimport edit (presumably removing the old import and adding a
    // runtime one — confirm against the helper).
    if let Some(fix) = generate_import_edit(expr, checker, module, name, range)
        .or_else(|| generate_remove_and_runtime_import_edit(expr, checker, module, name))
    {
        diagnostic.set_fix(fix);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_raise/mod.rs | crates/ruff_linter/src/rules/flake8_raise/mod.rs | //! Rules from [flake8-raise](https://pypi.org/project/flake8-raise/).
pub(crate) mod rules;
#[cfg(test)]
mod tests {
    use std::path::Path;

    use anyhow::Result;
    use test_case::test_case;

    use crate::registry::Rule;
    use crate::test::test_path;
    use crate::{assert_diagnostics, settings};

    /// Run each rule against its fixture file and compare the diagnostics
    /// against the stored snapshot.
    #[test_case(Rule::UnnecessaryParenOnRaiseException, Path::new("RSE102.py"))]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        // Snapshot name: `<rule_name>_<fixture_file>`.
        let snapshot = format!("{}_{}", rule_code.name(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("flake8_raise").join(path).as_path(),
            &settings::LinterSettings::for_rule(rule_code),
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_raise/rules/mod.rs | crates/ruff_linter/src/rules/flake8_raise/rules/mod.rs | pub(crate) use unnecessary_paren_on_raise_exception::*;
mod unnecessary_paren_on_raise_exception;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_raise/rules/unnecessary_paren_on_raise_exception.rs | crates/ruff_linter/src/rules/flake8_raise/rules/unnecessary_paren_on_raise_exception.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, Expr};
use ruff_python_semantic::BindingKind;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Applicability, Edit, Fix};
/// ## What it does
/// Checks for unnecessary parentheses on raised exceptions.
///
/// ## Why is this bad?
/// If an exception is raised without any arguments, parentheses are not
/// required, as the `raise` statement accepts either an exception instance
/// or an exception class (which is then implicitly instantiated).
///
/// Removing the parentheses makes the code more concise.
///
/// ## Known problems
/// Parentheses can only be omitted if the exception is a class, as opposed to
/// a function call. This rule isn't always capable of distinguishing between
/// the two.
///
/// For example, if you import a function `module.get_exception` from another
/// module, and `module.get_exception` returns an exception object, this rule will
/// incorrectly mark the parentheses in `raise module.get_exception()` as
/// unnecessary.
///
/// ## Example
/// ```python
/// raise TypeError()
/// ```
///
/// Use instead:
/// ```python
/// raise TypeError
/// ```
///
/// ## Fix Safety
/// This rule's fix is marked as unsafe if removing the parentheses would also remove comments
/// or if it’s unclear whether the expression is a class or a function call.
///
/// ## References
/// - [Python documentation: The `raise` statement](https://docs.python.org/3/reference/simple_stmts.html#the-raise-statement)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.239")]
pub(crate) struct UnnecessaryParenOnRaiseException;

// The parentheses can always be deleted (or replaced by a space), so this
// violation is always fixable.
impl AlwaysFixableViolation for UnnecessaryParenOnRaiseException {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Unnecessary parentheses on raised exception".to_string()
    }

    fn fix_title(&self) -> String {
        "Remove unnecessary parentheses".to_string()
    }
}
/// RSE102
pub(crate) fn unnecessary_paren_on_raise_exception(checker: &Checker, expr: &Expr) {
    // Only `raise <callee>()` — i.e., a call expression — is of interest.
    let Expr::Call(ast::ExprCall {
        func,
        arguments,
        range: _,
        node_index: _,
    }) = expr
    else {
        return;
    };

    if arguments.is_empty() {
        // `raise func()` still requires parentheses; only `raise Class()` does not.
        let exception_type = if let Some(id) = checker.semantic().lookup_attribute(func) {
            match checker.semantic().binding(id).kind {
                // Definitely a function: the parentheses are required.
                BindingKind::FunctionDefinition(_) => return,
                BindingKind::ClassDefinition(_) => Some(ExceptionType::Class),
                BindingKind::Builtin => Some(ExceptionType::Builtin),
                // Unknown binding kind: fall through to the heuristics below.
                _ => None,
            }
        } else {
            None
        };

        if exception_type.is_none() {
            // If the method name doesn't _look_ like a class (i.e., it's lowercase), it's
            // probably a function call, not a class.
            let identifier = match func.as_ref() {
                Expr::Name(ast::ExprName { id, .. }) => Some(id.as_str()),
                Expr::Attribute(ast::ExprAttribute { attr, .. }) => Some(attr.as_str()),
                _ => None,
            };
            // Strip a single leading underscore (e.g., `_PrivateError`) before
            // checking the case of the first character.
            if identifier.is_some_and(|identifier| {
                identifier
                    .strip_prefix('_')
                    .unwrap_or(identifier)
                    .chars()
                    .next()
                    .is_some_and(char::is_lowercase)
            }) {
                return;
            }

            // `ctypes.WinError()` is a function, not a class. It's part of the standard library, so
            // we might as well get it right.
            if checker
                .semantic()
                .resolve_qualified_name(func)
                .is_some_and(|qualified_name| {
                    matches!(qualified_name.segments(), ["ctypes", "WinError"])
                })
            {
                return;
            }
        }

        let mut diagnostic =
            checker.report_diagnostic(UnnecessaryParenOnRaiseException, arguments.range());

        // If the arguments are immediately followed by a `from`, insert whitespace to avoid
        // a syntax error, as in:
        // ```python
        // raise IndexError()from ZeroDivisionError
        // ```
        if checker
            .locator()
            .after(arguments.end())
            .chars()
            .next()
            .is_some_and(char::is_alphanumeric)
        {
            diagnostic.set_fix(Fix::applicable_edit(
                Edit::range_replacement(" ".to_string(), arguments.range()),
                // Only safe when the callee is known to be a class or builtin.
                if exception_type.is_some() {
                    Applicability::Safe
                } else {
                    Applicability::Unsafe
                },
            ));
        } else {
            // Deleting the parentheses is unsafe if it would also delete
            // comments, or if the callee might actually be a function.
            let applicability = if exception_type.is_some()
                && !checker.comment_ranges().intersects(arguments.range())
            {
                Applicability::Safe
            } else {
                Applicability::Unsafe
            };
            diagnostic.set_fix(Fix::applicable_edit(
                Edit::range_deletion(arguments.range()),
                applicability,
            ));
        }
    }
}
/// The kind of callee a raised call resolved to, used to decide whether
/// removing the parentheses is provably safe.
#[derive(Debug, is_macro::Is)]
enum ExceptionType {
    /// The callee resolved to a class definition.
    Class,
    /// The callee resolved to a builtin binding (e.g., `TypeError`).
    Builtin,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_copyright/settings.rs | crates/ruff_linter/src/rules/flake8_copyright/settings.rs | //! Settings for the `flake8-copyright` plugin.
use std::fmt::{Display, Formatter};
use std::sync::LazyLock;
use regex::Regex;
use ruff_macros::CacheKey;
use crate::display_settings;
#[derive(Debug, Clone, CacheKey)]
pub struct Settings {
    /// Regex used to detect the copyright notice.
    pub notice_rgx: Regex,
    /// If set, the notice must be immediately followed by this author string.
    pub author: Option<String>,
    /// Files smaller than this (in bytes) are exempt from the check.
    pub min_file_size: usize,
}

/// Default notice pattern: case-insensitive `Copyright`, optionally `(C)` or
/// `©`, followed by one or more four-digit years (ranges via `-`, lists via
/// `, `).
pub static COPYRIGHT: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"(?i)Copyright\s+((?:\(C\)|©)\s+)?\d{4}((-|,\s)\d{4})*").unwrap());

impl Default for Settings {
    fn default() -> Self {
        Self {
            notice_rgx: COPYRIGHT.clone(),
            author: None,
            min_file_size: 0,
        }
    }
}

impl Display for Settings {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        display_settings! {
            formatter = f,
            namespace = "linter.flake8_copyright",
            fields = [
                self.notice_rgx,
                self.author | optional,
                self.min_file_size,
            ]
        }
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_copyright/mod.rs | crates/ruff_linter/src/rules/flake8_copyright/mod.rs | //! Rules from [flake8-copyright](https://pypi.org/project/flake8-copyright/).
pub(crate) mod rules;
pub mod settings;
#[cfg(test)]
mod tests {
    use crate::registry::Rule;
    use crate::test::test_snippet;
    use crate::{assert_diagnostics, settings};

    // Notice detection: these snippets exercise the default `COPYRIGHT` regex.

    #[test]
    fn notice() {
        let diagnostics = test_snippet(
            r"
# Copyright 2023
import os
"
            .trim(),
            &settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice]),
        );
        assert_diagnostics!(diagnostics);
    }

    #[test]
    fn notice_with_c() {
        let diagnostics = test_snippet(
            r"
# Copyright (C) 2023
import os
"
            .trim(),
            &settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice]),
        );
        assert_diagnostics!(diagnostics);
    }

    #[test]
    fn notice_with_unicode_c() {
        let diagnostics = test_snippet(
            r"
# Copyright © 2023
import os
"
            .trim(),
            &settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice]),
        );
        assert_diagnostics!(diagnostics);
    }

    #[test]
    fn notice_with_caps() {
        let diagnostics = test_snippet(
            r"
# COPYRIGHT (C) 2023
import os
"
            .trim(),
            &settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice]),
        );
        assert_diagnostics!(diagnostics);
    }

    #[test]
    fn notice_with_range() {
        let diagnostics = test_snippet(
            r"
# Copyright (C) 2021-2023
import os
"
            .trim(),
            &settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice]),
        );
        assert_diagnostics!(diagnostics);
    }

    #[test]
    fn notice_with_comma() {
        let diagnostics = test_snippet(
            r"
# Copyright (C) 2021, 2022
import os
"
            .trim(),
            &settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice]),
        );
        assert_diagnostics!(diagnostics);
    }

    // Author matching: these configure `flake8_copyright.author = "Ruff"`.

    #[test]
    fn valid_author() {
        let diagnostics = test_snippet(
            r"
# Copyright (C) 2023 Ruff
import os
"
            .trim(),
            &settings::LinterSettings {
                flake8_copyright: super::settings::Settings {
                    author: Some("Ruff".to_string()),
                    ..super::settings::Settings::default()
                },
                ..settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice])
            },
        );
        assert_diagnostics!(diagnostics);
    }

    #[test]
    fn valid_author_with_dash() {
        let diagnostics = test_snippet(
            r"
# Copyright (C) 2022-2023 Ruff
import os
"
            .trim(),
            &settings::LinterSettings {
                flake8_copyright: super::settings::Settings {
                    author: Some("Ruff".to_string()),
                    ..super::settings::Settings::default()
                },
                ..settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice])
            },
        );
        assert_diagnostics!(diagnostics);
    }

    #[test]
    fn valid_author_with_dash_invalid_space() {
        let diagnostics = test_snippet(
            r"
# Copyright (C) 2022- 2023 Ruff
import os
"
            .trim(),
            &settings::LinterSettings {
                flake8_copyright: super::settings::Settings {
                    author: Some("Ruff".to_string()),
                    ..super::settings::Settings::default()
                },
                ..settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice])
            },
        );
        assert_diagnostics!(diagnostics);
    }

    #[test]
    fn valid_author_with_dash_invalid_spaces() {
        let diagnostics = test_snippet(
            r"
# Copyright (C) 2022 - 2023 Ruff
import os
"
            .trim(),
            &settings::LinterSettings {
                flake8_copyright: super::settings::Settings {
                    author: Some("Ruff".to_string()),
                    ..super::settings::Settings::default()
                },
                ..settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice])
            },
        );
        assert_diagnostics!(diagnostics);
    }

    #[test]
    fn valid_author_with_comma_invalid_no_space() {
        let diagnostics = test_snippet(
            r"
# Copyright (C) 2022,2023 Ruff
import os
"
            .trim(),
            &settings::LinterSettings {
                flake8_copyright: super::settings::Settings {
                    author: Some("Ruff".to_string()),
                    ..super::settings::Settings::default()
                },
                ..settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice])
            },
        );
        assert_diagnostics!(diagnostics);
    }

    #[test]
    fn valid_author_with_comma_invalid_spaces() {
        let diagnostics = test_snippet(
            r"
# Copyright (C) 2022 , 2023 Ruff
import os
"
            .trim(),
            &settings::LinterSettings {
                flake8_copyright: super::settings::Settings {
                    author: Some("Ruff".to_string()),
                    ..super::settings::Settings::default()
                },
                ..settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice])
            },
        );
        assert_diagnostics!(diagnostics);
    }

    #[test]
    fn valid_author_with_comma_valid_space() {
        let diagnostics = test_snippet(
            r"
# Copyright (C) 2022, 2023 Ruff
import os
"
            .trim(),
            &settings::LinterSettings {
                flake8_copyright: super::settings::Settings {
                    author: Some("Ruff".to_string()),
                    ..super::settings::Settings::default()
                },
                ..settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice])
            },
        );
        assert_diagnostics!(diagnostics);
    }

    #[test]
    fn invalid_author() {
        let diagnostics = test_snippet(
            r"
# Copyright (C) 2023 Some Author
import os
"
            .trim(),
            &settings::LinterSettings {
                flake8_copyright: super::settings::Settings {
                    author: Some("Ruff".to_string()),
                    ..super::settings::Settings::default()
                },
                ..settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice])
            },
        );
        assert_diagnostics!(diagnostics);
    }

    // Size and position edge cases.

    #[test]
    fn small_file() {
        let diagnostics = test_snippet(
            r"
import os
"
            .trim(),
            &settings::LinterSettings {
                flake8_copyright: super::settings::Settings {
                    min_file_size: 256,
                    ..super::settings::Settings::default()
                },
                ..settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice])
            },
        );
        assert_diagnostics!(diagnostics);
    }

    // A notice past the first 4096 bytes should not be detected.
    #[test]
    fn late_notice() {
        let diagnostics = test_snippet(
            r"
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Content Content Content Content Content Content Content Content Content Content
# Copyright 2023
"
            .trim(),
            &settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice]),
        );
        assert_diagnostics!(diagnostics);
    }

    // The 4096-byte cutoff must not split a multi-byte character.
    #[test]
    fn char_boundary() {
        let diagnostics = test_snippet(
            r"কককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককককক
"
            .trim(),
            &settings::LinterSettings::for_rules(vec![Rule::MissingCopyrightNotice]),
        );
        assert_diagnostics!(diagnostics);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_copyright/rules/missing_copyright_notice.rs | crates/ruff_linter/src/rules/flake8_copyright/rules/missing_copyright_notice.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::{TextRange, TextSize};
use crate::Locator;
use crate::Violation;
use crate::checkers::ast::LintContext;
use crate::settings::LinterSettings;
/// ## What it does
/// Checks for the absence of copyright notices within Python files.
///
/// Note that this check only searches within the first 4096 bytes of the file.
///
/// ## Why is this bad?
/// In some codebases, it's common to have a license header at the top of every
/// file. This rule ensures that the license header is present.
///
/// ## Options
/// - `lint.flake8-copyright.author`
/// - `lint.flake8-copyright.min-file-size`
/// - `lint.flake8-copyright.notice-rgx`
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.273")]
pub(crate) struct MissingCopyrightNotice;

// No autofix is offered: the correct notice text is project-specific.
impl Violation for MissingCopyrightNotice {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing copyright notice at top of file".to_string()
    }
}
/// CPY001
pub(crate) fn missing_copyright_notice(
    locator: &Locator,
    settings: &LinterSettings,
    context: &LintContext,
) {
    let copyright = &settings.flake8_copyright;

    // Ignore files that are too small to contain a copyright notice.
    if locator.len() < copyright.min_file_size {
        return;
    }

    // Only search the first 4096 bytes in the file (clamped to a character
    // boundary so multi-byte characters are never split).
    let contents = locator.up_to(locator.floor_char_boundary(TextSize::new(4096)));

    // Locate the copyright notice; the file is compliant when a notice is found
    // and (if configured) is immediately followed by the expected author.
    if let Some(found) = copyright.notice_rgx.find(contents) {
        let author_matches = match &copyright.author {
            Some(author) => contents[found.end()..].trim_start().starts_with(author.as_str()),
            // No author configured: any notice satisfies the rule.
            None => true,
        };
        if author_matches {
            return;
        }
    }

    context.report_diagnostic(MissingCopyrightNotice, TextRange::default());
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_copyright/rules/mod.rs | crates/ruff_linter/src/rules/flake8_copyright/rules/mod.rs | pub(crate) use missing_copyright_notice::*;
mod missing_copyright_notice;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/settings.rs | crates/ruff_linter/src/rules/pycodestyle/settings.rs | //! Settings for the `pycodestyle` plugin.
use crate::display_settings;
use ruff_macros::CacheKey;
use std::fmt;
use crate::line_width::LineLength;
#[derive(Debug, Clone, Default, CacheKey)]
pub struct Settings {
    /// Maximum allowed line width.
    pub max_line_length: LineLength,
    /// Separate maximum for documentation lines, when configured.
    pub max_doc_length: Option<LineLength>,
    /// If `true`, overlong task comments (e.g., `# TODO`) are not flagged
    /// (enforced by the line-length checks, not here).
    pub ignore_overlong_task_comments: bool,
}

impl fmt::Display for Settings {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        display_settings! {
            formatter = f,
            namespace = "linter.pycodestyle",
            fields = [
                self.max_line_length,
                self.max_doc_length | optional,
                self.ignore_overlong_task_comments,
            ]
        }
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/overlong.rs | crates/ruff_linter/src/rules/pycodestyle/overlong.rs | use std::ops::Deref;
use ruff_python_trivia::{CommentRanges, is_pragma_comment};
use ruff_source_file::Line;
use ruff_text_size::{TextLen, TextRange};
use crate::line_width::{IndentWidth, LineLength, LineWidthBuilder};
#[derive(Debug)]
pub(super) struct Overlong {
    /// The overlong portion of the line (from the limit to the end of the line).
    range: TextRange,
    /// The measured width of the line (excluding a stripped trailing pragma comment).
    width: usize,
}

impl Overlong {
    /// Returns an [`Overlong`] if the measured line exceeds the configured line length, or `None`
    /// otherwise.
    pub(super) fn try_from_line(
        line: &Line,
        comment_ranges: &CommentRanges,
        limit: LineLength,
        task_tags: &[String],
        tab_size: IndentWidth,
    ) -> Option<Self> {
        // The maximum width of the line is the number of bytes multiplied by the tab size (the
        // worst-case scenario is that the line is all tabs). If the maximum width is less than the
        // limit, then the line is not overlong.
        let max_width = line.len() * tab_size.as_usize();
        if max_width < limit.value() as usize {
            return None;
        }

        // Measure the line. If it's already below the limit, exit early.
        let width = measure(line.as_str(), tab_size);
        if width <= limit {
            return None;
        }

        // Strip trailing comments and re-measure the line, if needed.
        let line = StrippedLine::from_line(line, comment_ranges, task_tags);
        let width = match &line {
            StrippedLine::WithoutPragma(line) => {
                // A pragma was stripped: the shortened line may now fit.
                let width = measure(line.as_str(), tab_size);
                if width <= limit {
                    return None;
                }
                width
            }
            StrippedLine::Unchanged(_) => width,
        };

        let mut chunks = line.split_whitespace();
        let (Some(first_chunk), Some(second_chunk)) = (chunks.next(), chunks.next()) else {
            // Single word / no printable chars - no way to make the line shorter.
            return None;
        };

        // Do not enforce the line length for lines that end with a URL, as long as the URL
        // begins before the limit.
        let last_chunk = chunks.last().unwrap_or(second_chunk);
        if last_chunk.contains("://") {
            // Width of the line up to (roughly) the start of the trailing URL chunk.
            if width.get() - measure(last_chunk, tab_size).get() <= limit.value() as usize {
                return None;
            }
        }

        // Do not enforce the line length limit for SPDX license headers, which are machine-readable
        // and explicitly _not_ recommended to wrap over multiple lines.
        if matches!(
            (first_chunk, second_chunk),
            ("#", "SPDX-License-Identifier:" | "SPDX-FileCopyrightText:")
        ) {
            return None;
        }

        // Obtain the start offset of the part of the line that exceeds the limit.
        let mut start_offset = line.start();
        let mut start_width = LineWidthBuilder::new(tab_size);
        for c in line.chars() {
            if start_width < limit {
                start_offset += c.text_len();
                start_width = start_width.add_char(c);
            } else {
                break;
            }
        }

        Some(Self {
            range: TextRange::new(start_offset, line.end()),
            width: width.get(),
        })
    }

    /// Return the range of the overlong portion of the line.
    pub(super) const fn range(&self) -> TextRange {
        self.range
    }

    /// Return the measured width of the line, without any trailing pragma comments.
    pub(super) const fn width(&self) -> usize {
        self.width
    }
}
/// A [`Line`] that may have trailing pragma comments stripped.
#[derive(Debug)]
enum StrippedLine<'a> {
/// The [`Line`] was unchanged.
Unchanged(&'a Line<'a>),
/// The [`Line`] was changed such that a trailing pragma comment (e.g., `# type: ignore`) was
/// removed. The stored [`Line`] consists of the portion of the original line that precedes the
/// pragma comment.
WithoutPragma(Line<'a>),
}
impl<'a> StrippedLine<'a> {
    /// Strip trailing comments from a [`Line`], if the line ends with a pragma comment (like
    /// `# type: ignore`) or, if necessary, a task comment (like `# TODO`).
    fn from_line(line: &'a Line<'a>, comment_ranges: &CommentRanges, task_tags: &[String]) -> Self {
        // Only lines containing exactly one comment are eligible for stripping.
        let &[absolute_range] = comment_ranges.comments_in_range(line.range()) else {
            return Self::Unchanged(line);
        };

        // Translate the comment range from file offsets to line-local offsets.
        let relative_range = absolute_range - line.start();
        let text = line.as_str();
        let comment = &text[relative_range];

        // Rebuilds the line with the trailing comment (and any whitespace
        // preceding it) removed.
        let without_comment = || {
            let prefix = text[..usize::from(relative_range.start())].trim_end();
            Self::WithoutPragma(Line::new(prefix, line.start()))
        };

        // Ex) `# type: ignore`
        if is_pragma_comment(comment) {
            return without_comment();
        }

        // Ex) `# TODO(charlie): ...`
        if !task_tags.is_empty() {
            let Some(body) = comment.strip_prefix('#') else {
                return Self::Unchanged(line);
            };
            let body = body.trim_start();
            if task_tags.iter().any(|tag| body.starts_with(tag)) {
                return without_comment();
            }
        }

        Self::Unchanged(line)
    }
}
impl<'a> Deref for StrippedLine<'a> {
    type Target = Line<'a>;

    fn deref(&self) -> &Self::Target {
        // Either variant wraps a `Line`; expose it so callers can use a
        // stripped line wherever a plain `Line` is expected.
        match self {
            Self::Unchanged(line) => line,
            Self::WithoutPragma(line) => line,
        }
    }
}
/// Returns the width of a given string, accounting for the tab size.
fn measure(s: &str, tab_size: IndentWidth) -> LineWidthBuilder {
    LineWidthBuilder::new(tab_size).add_str(s)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/helpers.rs | crates/ruff_linter/src/rules/pycodestyle/helpers.rs | use ruff_python_ast::token::TokenKind;
/// Returns `true` if the name should be considered "ambiguous".
///
/// The single-character names `l` (lowercase el), `I` (uppercase eye), and
/// `O` (uppercase oh) are flagged because they are easily confused with the
/// digits one and zero in many fonts.
pub(super) fn is_ambiguous_name(name: &str) -> bool {
    // `matches!` expresses the fixed set of ambiguous names more directly
    // than a chain of `==` comparisons.
    matches!(name, "l" | "I" | "O")
}
/// Returns `true` if the given `token` is a non-logical token.
///
/// Unlike [`TokenKind::is_trivia`], this function also considers the indent, dedent and newline
/// tokens.
pub(super) const fn is_non_logical_token(token: TokenKind) -> bool {
    match token {
        // Structural tokens that carry no logical content of their own.
        TokenKind::Newline | TokenKind::Indent | TokenKind::Dedent => true,
        // Everything else defers to the trivia classification.
        _ => token.is_trivia(),
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/mod.rs | crates/ruff_linter/src/rules/pycodestyle/mod.rs | //! Rules from [pycodestyle](https://pypi.org/project/pycodestyle/).
pub(crate) mod rules;
pub mod settings;
pub(crate) mod helpers;
pub(super) mod overlong;
#[cfg(test)]
mod tests {
use std::num::NonZeroU8;
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use crate::line_width::LineLength;
use crate::registry::Rule;
use crate::rules::{isort, pycodestyle};
use crate::settings::types::PreviewMode;
use crate::test::test_path;
use crate::{assert_diagnostics, settings};
use super::settings::Settings;
#[test_case(Rule::AmbiguousClassName, Path::new("E742.py"))]
#[test_case(Rule::AmbiguousFunctionName, Path::new("E743.py"))]
#[test_case(Rule::AmbiguousVariableName, Path::new("E741.py"))]
// E741 is disapplied for `.pyi` files (see #13119 for rationale);
// this fixture tests that we emit no errors there
#[test_case(Rule::AmbiguousVariableName, Path::new("E741.pyi"))]
#[test_case(Rule::LambdaAssignment, Path::new("E731.py"))]
#[test_case(Rule::BareExcept, Path::new("E722.py"))]
#[test_case(Rule::BlankLineWithWhitespace, Path::new("W29.py"))]
#[test_case(Rule::BlankLineWithWhitespace, Path::new("W293.py"))]
#[test_case(Rule::InvalidEscapeSequence, Path::new("W605_0.py"))]
#[test_case(Rule::InvalidEscapeSequence, Path::new("W605_1.py"))]
#[test_case(Rule::LineTooLong, Path::new("E501.py"))]
#[test_case(Rule::LineTooLong, Path::new("E501_3.py"))]
#[test_case(Rule::LineTooLong, Path::new("E501_4.py"))]
#[test_case(Rule::MixedSpacesAndTabs, Path::new("E101.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E40.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E402_0.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E402_1.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E402_2.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E402_3.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E402_4.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E402_5.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E402.ipynb"))]
#[test_case(Rule::MultipleImportsOnOneLine, Path::new("E40.py"))]
#[test_case(Rule::MultipleStatementsOnOneLineColon, Path::new("E70.py"))]
#[test_case(Rule::MultipleStatementsOnOneLineSemicolon, Path::new("E70.py"))]
#[test_case(Rule::MissingNewlineAtEndOfFile, Path::new("W292_0.py"))]
#[test_case(Rule::MissingNewlineAtEndOfFile, Path::new("W292_1.py"))]
#[test_case(Rule::MissingNewlineAtEndOfFile, Path::new("W292_2.py"))]
#[test_case(Rule::MissingNewlineAtEndOfFile, Path::new("W292_3.py"))]
#[test_case(Rule::NoneComparison, Path::new("E711.py"))]
#[test_case(Rule::NotInTest, Path::new("E713.py"))]
#[test_case(Rule::NotIsTest, Path::new("E714.py"))]
#[test_case(Rule::TabIndentation, Path::new("W19.py"))]
#[test_case(Rule::TrailingWhitespace, Path::new("W29.py"))]
#[test_case(Rule::TrailingWhitespace, Path::new("W291.py"))]
#[test_case(Rule::TrueFalseComparison, Path::new("E712.py"))]
#[test_case(Rule::TypeComparison, Path::new("E721.py"))]
#[test_case(Rule::UselessSemicolon, Path::new("E70.py"))]
#[test_case(Rule::UselessSemicolon, Path::new("E703.ipynb"))]
#[test_case(Rule::WhitespaceAfterDecorator, Path::new("E204.py"))]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("pycodestyle").join(path).as_path(),
&settings::LinterSettings::for_rule(rule_code),
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Rule::RedundantBackslash, Path::new("E502.py"))]
#[test_case(Rule::TooManyNewlinesAtEndOfFile, Path::new("W391_0.py"))]
#[test_case(Rule::TooManyNewlinesAtEndOfFile, Path::new("W391_1.py"))]
#[test_case(Rule::TooManyNewlinesAtEndOfFile, Path::new("W391_2.py"))]
#[test_case(Rule::TooManyNewlinesAtEndOfFile, Path::new("W391_3.py"))]
#[test_case(Rule::TooManyNewlinesAtEndOfFile, Path::new("W391_4.py"))]
#[test_case(Rule::TooManyNewlinesAtEndOfFile, Path::new("W391.ipynb"))]
fn preview_rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!(
"preview__{}_{}",
rule_code.noqa_code(),
path.to_string_lossy()
);
let diagnostics = test_path(
Path::new("pycodestyle").join(path).as_path(),
&settings::LinterSettings {
preview: PreviewMode::Enabled,
..settings::LinterSettings::for_rule(rule_code)
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test]
fn w292_4() -> Result<()> {
let diagnostics = test_path(
Path::new("pycodestyle/W292_4.py"),
&settings::LinterSettings::for_rule(Rule::MissingNewlineAtEndOfFile),
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test_case(Rule::IndentationWithInvalidMultiple, Path::new("E11.py"))]
#[test_case(Rule::IndentationWithInvalidMultipleComment, Path::new("E11.py"))]
#[test_case(Rule::MultipleLeadingHashesForBlockComment, Path::new("E26.py"))]
#[test_case(Rule::MultipleSpacesAfterComma, Path::new("E24.py"))]
#[test_case(Rule::MultipleSpacesAfterKeyword, Path::new("E27.py"))]
#[test_case(Rule::MultipleSpacesAfterOperator, Path::new("E22.py"))]
#[test_case(Rule::MultipleSpacesBeforeKeyword, Path::new("E27.py"))]
#[test_case(Rule::MissingWhitespaceAfterKeyword, Path::new("E27.py"))]
#[test_case(Rule::MultipleSpacesBeforeOperator, Path::new("E22.py"))]
#[test_case(Rule::NoIndentedBlock, Path::new("E11.py"))]
#[test_case(Rule::NoIndentedBlockComment, Path::new("E11.py"))]
#[test_case(Rule::NoSpaceAfterBlockComment, Path::new("E26.py"))]
#[test_case(Rule::NoSpaceAfterInlineComment, Path::new("E26.py"))]
#[test_case(Rule::OverIndented, Path::new("E11.py"))]
#[test_case(Rule::TabAfterComma, Path::new("E24.py"))]
#[test_case(Rule::TabAfterKeyword, Path::new("E27.py"))]
#[test_case(Rule::TabAfterOperator, Path::new("E22.py"))]
#[test_case(Rule::TabBeforeKeyword, Path::new("E27.py"))]
#[test_case(Rule::TabBeforeOperator, Path::new("E22.py"))]
#[test_case(Rule::MissingWhitespaceAroundOperator, Path::new("E22.py"))]
#[test_case(Rule::MissingWhitespaceAroundArithmeticOperator, Path::new("E22.py"))]
#[test_case(
Rule::MissingWhitespaceAroundBitwiseOrShiftOperator,
Path::new("E22.py")
)]
#[test_case(Rule::MissingWhitespaceAroundModuloOperator, Path::new("E22.py"))]
#[test_case(Rule::MissingWhitespace, Path::new("E23.py"))]
#[test_case(Rule::TooFewSpacesBeforeInlineComment, Path::new("E26.py"))]
#[test_case(Rule::UnexpectedIndentation, Path::new("E11.py"))]
#[test_case(Rule::UnexpectedIndentationComment, Path::new("E11.py"))]
#[test_case(Rule::WhitespaceAfterOpenBracket, Path::new("E20.py"))]
#[test_case(Rule::WhitespaceBeforeCloseBracket, Path::new("E20.py"))]
#[test_case(Rule::WhitespaceBeforePunctuation, Path::new("E20.py"))]
#[test_case(Rule::WhitespaceBeforeParameters, Path::new("E21.py"))]
#[test_case(
Rule::UnexpectedSpacesAroundKeywordParameterEquals,
Path::new("E25.py")
)]
#[test_case(Rule::MissingWhitespaceAroundParameterEquals, Path::new("E25.py"))]
fn logical(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("pycodestyle").join(path).as_path(),
&settings::LinterSettings::for_rule(rule_code),
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
/// Tests the compatibility of E2 rules (E202, E225 and E275) on syntactically incorrect code.
#[test]
fn white_space_syntax_error_compatibility() -> Result<()> {
let diagnostics = test_path(
Path::new("pycodestyle").join("E2_syntax_error.py"),
&settings::LinterSettings {
..settings::LinterSettings::for_rules([
Rule::MissingWhitespaceAroundOperator,
Rule::MissingWhitespaceAfterKeyword,
Rule::WhitespaceBeforeCloseBracket,
])
},
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test_case(Rule::BlankLinesTopLevel, Path::new("E302_first_line_docstring.py"))]
#[test_case(Rule::BlankLinesTopLevel, Path::new("E302_first_line_expression.py"))]
#[test_case(Rule::BlankLinesTopLevel, Path::new("E302_first_line_function.py"))]
#[test_case(Rule::BlankLinesTopLevel, Path::new("E302_first_line_statement.py"))]
#[test_case(Rule::TooManyBlankLines, Path::new("E303_first_line_comment.py"))]
#[test_case(Rule::TooManyBlankLines, Path::new("E303_first_line_docstring.py"))]
#[test_case(Rule::TooManyBlankLines, Path::new("E303_first_line_expression.py"))]
#[test_case(Rule::TooManyBlankLines, Path::new("E303_first_line_statement.py"))]
fn blank_lines_first_line(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("pycodestyle").join(path).as_path(),
&settings::LinterSettings::for_rule(rule_code),
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Rule::BlankLineBetweenMethods, Path::new("E30.py"))]
#[test_case(Rule::BlankLinesTopLevel, Path::new("E30.py"))]
#[test_case(Rule::TooManyBlankLines, Path::new("E30.py"))]
#[test_case(Rule::BlankLineAfterDecorator, Path::new("E30.py"))]
#[test_case(Rule::BlankLinesAfterFunctionOrClass, Path::new("E30.py"))]
#[test_case(Rule::BlankLinesBeforeNestedDefinition, Path::new("E30.py"))]
#[test_case(Rule::BlankLineBetweenMethods, Path::new("E30_syntax_error.py"))]
#[test_case(Rule::BlankLinesTopLevel, Path::new("E30_syntax_error.py"))]
#[test_case(Rule::TooManyBlankLines, Path::new("E30_syntax_error.py"))]
#[test_case(Rule::BlankLinesAfterFunctionOrClass, Path::new("E30_syntax_error.py"))]
#[test_case(
Rule::BlankLinesBeforeNestedDefinition,
Path::new("E30_syntax_error.py")
)]
fn blank_lines(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("pycodestyle").join(path).as_path(),
&settings::LinterSettings::for_rule(rule_code),
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
/// Tests the compatibility of the blank line top level rule and isort.
#[test_case(-1, 0)]
#[test_case(1, 1)]
#[test_case(0, 0)]
#[test_case(4, 4)]
fn blank_lines_top_level_isort_compatibility(
lines_after_imports: isize,
lines_between_types: usize,
) -> Result<()> {
let snapshot = format!(
"blank_lines_top_level_isort_compatibility-lines-after({lines_after_imports})-between({lines_between_types})"
);
let diagnostics = test_path(
Path::new("pycodestyle").join("E30_isort.py"),
&settings::LinterSettings {
isort: isort::settings::Settings {
lines_after_imports,
lines_between_types,
..isort::settings::Settings::default()
},
..settings::LinterSettings::for_rules([
Rule::BlankLinesTopLevel,
Rule::UnsortedImports,
])
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
/// Tests the compatibility of the blank line too many lines and isort.
#[test_case(-1, 0)]
#[test_case(1, 1)]
#[test_case(0, 0)]
#[test_case(4, 4)]
fn too_many_blank_lines_isort_compatibility(
lines_after_imports: isize,
lines_between_types: usize,
) -> Result<()> {
let snapshot = format!(
"too_many_blank_lines_isort_compatibility-lines-after({lines_after_imports})-between({lines_between_types})"
);
let diagnostics = test_path(
Path::new("pycodestyle").join("E30_isort.py"),
&settings::LinterSettings {
isort: isort::settings::Settings {
lines_after_imports,
lines_between_types,
..isort::settings::Settings::default()
},
..settings::LinterSettings::for_rules([
Rule::TooManyBlankLines,
Rule::UnsortedImports,
])
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Rule::BlankLineBetweenMethods)]
#[test_case(Rule::BlankLinesTopLevel)]
#[test_case(Rule::TooManyBlankLines)]
#[test_case(Rule::BlankLineAfterDecorator)]
#[test_case(Rule::BlankLinesAfterFunctionOrClass)]
#[test_case(Rule::BlankLinesBeforeNestedDefinition)]
fn blank_lines_typing_stub(rule_code: Rule) -> Result<()> {
let snapshot = format!("blank_lines_{}_typing_stub", rule_code.noqa_code());
let diagnostics = test_path(
Path::new("pycodestyle").join("E30.pyi"),
&settings::LinterSettings::for_rule(rule_code),
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Rule::BlankLineBetweenMethods)]
#[test_case(Rule::BlankLinesTopLevel)]
#[test_case(Rule::TooManyBlankLines)]
#[test_case(Rule::BlankLineAfterDecorator)]
#[test_case(Rule::BlankLinesAfterFunctionOrClass)]
#[test_case(Rule::BlankLinesBeforeNestedDefinition)]
fn blank_lines_notebook(rule_code: Rule) -> Result<()> {
let snapshot = format!("blank_lines_{}_notebook", rule_code.noqa_code());
let diagnostics = test_path(
Path::new("pycodestyle").join("E30.ipynb"),
&settings::LinterSettings::for_rule(rule_code),
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test]
fn blank_lines_typing_stub_isort() -> Result<()> {
let diagnostics = test_path(
Path::new("pycodestyle").join("E30_isort.pyi"),
&settings::LinterSettings {
..settings::LinterSettings::for_rules([
Rule::TooManyBlankLines,
Rule::BlankLinesTopLevel,
Rule::UnsortedImports,
])
},
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test]
fn constant_literals() -> Result<()> {
let diagnostics = test_path(
Path::new("pycodestyle/constant_literals.py"),
&settings::LinterSettings::for_rules(vec![
Rule::NoneComparison,
Rule::TrueFalseComparison,
Rule::IsLiteral,
]),
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test]
fn shebang() -> Result<()> {
let diagnostics = test_path(
Path::new("pycodestyle/shebang.py"),
&settings::LinterSettings::for_rules(vec![
Rule::TooFewSpacesBeforeInlineComment,
Rule::NoSpaceAfterInlineComment,
Rule::NoSpaceAfterBlockComment,
Rule::MultipleLeadingHashesForBlockComment,
]),
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test_case(false)]
#[test_case(true)]
fn task_tags(ignore_overlong_task_comments: bool) -> Result<()> {
let snapshot = format!("task_tags_{ignore_overlong_task_comments}");
let diagnostics = test_path(
Path::new("pycodestyle/E501_1.py"),
&settings::LinterSettings {
pycodestyle: Settings {
ignore_overlong_task_comments,
..Settings::default()
},
..settings::LinterSettings::for_rule(Rule::LineTooLong)
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test]
fn max_doc_length() -> Result<()> {
let diagnostics = test_path(
Path::new("pycodestyle/W505.py"),
&settings::LinterSettings {
pycodestyle: Settings {
max_doc_length: Some(LineLength::try_from(50).unwrap()),
..Settings::default()
},
..settings::LinterSettings::for_rule(Rule::DocLineTooLong)
},
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test]
fn max_doc_length_with_utf_8() -> Result<()> {
let diagnostics = test_path(
Path::new("pycodestyle/W505_utf_8.py"),
&settings::LinterSettings {
pycodestyle: Settings {
max_doc_length: Some(LineLength::try_from(50).unwrap()),
..Settings::default()
},
..settings::LinterSettings::for_rule(Rule::DocLineTooLong)
},
)?;
assert_diagnostics!(diagnostics);
Ok(())
}
#[test_case(1)]
#[test_case(2)]
#[test_case(4)]
#[test_case(8)]
fn tab_size(tab_size: u8) -> Result<()> {
let snapshot = format!("tab_size_{tab_size}");
let diagnostics = test_path(
Path::new("pycodestyle/E501_2.py"),
&settings::LinterSettings {
tab_size: NonZeroU8::new(tab_size).unwrap().into(),
pycodestyle: pycodestyle::settings::Settings {
max_line_length: LineLength::try_from(6).unwrap(),
..pycodestyle::settings::Settings::default()
},
..settings::LinterSettings::for_rule(Rule::LineTooLong)
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/errors.rs | crates/ruff_linter/src/rules/pycodestyle/rules/errors.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::Violation;
/// ## What it does
/// This is not a regular diagnostic; instead, it's raised when a file cannot be read
/// from disk.
///
/// ## Why is this bad?
/// An `IOError` indicates an error in the development setup. For example, the user may
/// not have permissions to read a given file, or the filesystem may contain a broken
/// symlink.
///
/// ## Example
/// On Linux or macOS:
/// ```shell
/// $ echo 'print("hello world!")' > a.py
/// $ chmod 000 a.py
/// $ ruff a.py
/// a.py:1:1: E902 Permission denied (os error 13)
/// Found 1 error.
/// ```
///
/// ## References
/// - [UNIX Permissions introduction](https://mason.gmu.edu/~montecin/UNIXpermiss.htm)
/// - [Command Line Basics: Symbolic Links](https://www.digitalocean.com/community/tutorials/workflow-symbolic-links)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.28")]
pub struct IOError {
    // The underlying error message, rendered verbatim as the diagnostic text.
    pub message: String,
}
/// E902
impl Violation for IOError {
    // The format message is used by the `derive_message_formats` macro.
    #![allow(clippy::useless_format)]
    #[derive_message_formats]
    fn message(&self) -> String {
        // `format!` (rather than returning `message.clone()`) is required so
        // the macro can extract the message format string.
        let IOError { message } = self;
        format!("{message}")
    }
}
/// ## Removed
/// This rule has been removed. Syntax errors will
/// always be shown regardless of whether this rule is selected or not.
///
/// ## What it does
/// Checks for code that contains syntax errors.
///
/// ## Why is this bad?
/// Code with syntax errors cannot be executed. Such errors are likely a
/// mistake.
///
/// ## Example
/// ```python
/// x =
/// ```
///
/// Use instead:
/// ```python
/// x = 1
/// ```
///
/// ## References
/// - [Python documentation: Syntax Errors](https://docs.python.org/3/tutorial/errors.html#syntax-errors)
#[derive(ViolationMetadata)]
#[deprecated(note = "E999 has been removed")]
#[violation_metadata(removed_since = "0.8.0")]
pub(crate) struct SyntaxError;
// `#[expect(deprecated)]`: the impl must reference the deprecated struct.
#[expect(deprecated)]
impl Violation for SyntaxError {
    fn message(&self) -> String {
        // The rule can no longer be selected, so no message is ever rendered.
        unreachable!("E999 has been removed")
    }
    fn message_formats() -> &'static [&'static str] {
        // NOTE(review): presumably retained so the removed rule still reports
        // a message format to the registry — confirm before changing.
        &["SyntaxError"]
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/mixed_spaces_and_tabs.rs | crates/ruff_linter/src/rules/pycodestyle/rules/mixed_spaces_and_tabs.rs | use ruff_text_size::{TextLen, TextRange};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_trivia::leading_indentation;
use ruff_source_file::Line;
use crate::{Violation, checkers::ast::LintContext};
/// ## What it does
/// Checks for mixed tabs and spaces in indentation.
///
/// ## Why is this bad?
/// Never mix tabs and spaces.
///
/// The most popular way of indenting Python is with spaces only. The
/// second-most popular way is with tabs only. Code indented with a
/// mixture of tabs and spaces should be converted to using spaces
/// exclusively.
///
/// ## Example
/// ```python
/// if a == 0:\n    a = 1\n\tb = 1
/// ```
///
/// Use instead:
/// ```python
/// if a == 0:\n    a = 1\n    b = 1
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.229")]
pub(crate) struct MixedSpacesAndTabs;
// No autofix is offered: normalizing indentation is left to the user (or the
// formatter).
impl Violation for MixedSpacesAndTabs {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Indentation contains mixed spaces and tabs".to_string()
    }
}
/// E101
pub(crate) fn mixed_spaces_and_tabs(line: &Line, context: &LintContext) {
    // Only the leading whitespace of the line is relevant.
    let indentation = leading_indentation(line.as_str());
    let has_space = indentation.contains(' ');
    let has_tab = indentation.contains('\t');
    if has_space && has_tab {
        // Report over the indentation itself, not the whole line.
        let range = TextRange::at(line.start(), indentation.text_len());
        context.report_diagnostic(MixedSpacesAndTabs, range);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/line_too_long.rs | crates/ruff_linter/src/rules/pycodestyle/rules/line_too_long.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_trivia::CommentRanges;
use ruff_source_file::Line;
use crate::Violation;
use crate::checkers::ast::LintContext;
use crate::rules::pycodestyle::overlong::Overlong;
use crate::settings::LinterSettings;
/// ## What it does
/// Checks for lines that exceed the specified maximum character length.
///
/// ## Why is this bad?
/// Overlong lines can hurt readability. [PEP 8], for example, recommends
/// limiting lines to 79 characters. By default, this rule enforces a limit
/// of 88 characters for compatibility with Black and the Ruff formatter,
/// though that limit is configurable via the [`line-length`] setting.
///
/// In the interest of pragmatism, this rule makes a few exceptions when
/// determining whether a line is overlong. Namely, it:
///
/// 1. Ignores lines that consist of a single "word" (i.e., without any
///    whitespace between its characters).
/// 2. Ignores lines that end with a URL, as long as the URL starts before
///    the line-length threshold.
/// 3. Ignores line that end with a pragma comment (e.g., `# type: ignore`
///    or `# noqa`), as long as the pragma comment starts before the
///    line-length threshold. That is, a line will not be flagged as
///    overlong if a pragma comment _causes_ it to exceed the line length.
///    (This behavior aligns with that of the Ruff formatter.)
/// 4. Ignores SPDX license identifiers and copyright notices
///    (e.g., `# SPDX-License-Identifier: MIT`), which are machine-readable
///    and should _not_ wrap over multiple lines.
///
/// If [`lint.pycodestyle.ignore-overlong-task-comments`] is `true`, this rule will
/// also ignore comments that start with any of the specified [`lint.task-tags`]
/// (e.g., `# TODO:`).
///
/// ## Example
/// ```python
/// my_function(param1, param2, param3, param4, param5, param6, param7, param8, param9, param10)
/// ```
///
/// Use instead:
/// ```python
/// my_function(
///     param1, param2, param3, param4, param5,
///     param6, param7, param8, param9, param10
/// )
/// ```
///
/// ## Error suppression
/// Hint: when suppressing `E501` errors within multi-line strings (like
/// docstrings), the `noqa` directive should come at the end of the string
/// (after the closing triple quote), and will apply to the entire string, like
/// so:
///
/// ```python
/// """Lorem ipsum dolor sit amet.
///
/// Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor.
/// """  # noqa: E501
/// ```
///
/// ## Options
/// - `line-length`
/// - `lint.task-tags`
/// - `lint.pycodestyle.ignore-overlong-task-comments`
/// - `lint.pycodestyle.max-line-length`
///
/// [PEP 8]: https://peps.python.org/pep-0008/#maximum-line-length
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.18")]
// Fields: (measured line width, configured limit).
pub(crate) struct LineTooLong(usize, usize);
impl Violation for LineTooLong {
    #[derive_message_formats]
    fn message(&self) -> String {
        let LineTooLong(width, limit) = self;
        format!("Line too long ({width} > {limit})")
    }
}
/// E501
pub(crate) fn line_too_long(
    line: &Line,
    comment_ranges: &CommentRanges,
    settings: &LinterSettings,
    context: &LintContext,
) {
    let limit = settings.pycodestyle.max_line_length;

    // Task comments are only exempt when the corresponding setting is enabled.
    let task_tags: &[String] = if settings.pycodestyle.ignore_overlong_task_comments {
        &settings.task_tags
    } else {
        &[]
    };

    let Some(overlong) =
        Overlong::try_from_line(line, comment_ranges, limit, task_tags, settings.tab_size)
    else {
        return;
    };

    context.report_diagnostic(
        LineTooLong(overlong.width(), limit.value() as usize),
        overlong.range(),
    );
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/tab_indentation.rs | crates/ruff_linter/src/rules/pycodestyle/rules/tab_indentation.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_index::Indexer;
use ruff_source_file::LineRanges;
use ruff_text_size::{TextRange, TextSize};
use crate::Locator;
use crate::Violation;
use crate::checkers::ast::LintContext;
/// ## What it does
/// Checks for indentation that uses tabs.
///
/// ## Why is this bad?
/// According to [PEP 8], spaces are preferred over tabs (unless used to remain
/// consistent with code that is already indented with tabs).
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent indentation, making the rule redundant.
///
/// The rule is also incompatible with the [formatter] when using
/// `format.indent-style="tab"`.
///
/// [PEP 8]: https://peps.python.org/pep-0008/#tabs-or-spaces
/// [formatter]: https://docs.astral.sh/ruff/formatter
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.254")]
pub(crate) struct TabIndentation;
// No autofix: converting tabs to spaces is left to the user (or the formatter).
impl Violation for TabIndentation {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Indentation contains tabs".to_string()
    }
}
/// W191
///
/// Scans the raw source bytes for tab characters and reports a diagnostic for
/// every line whose indentation contains a tab.
pub(crate) fn tab_indentation(context: &LintContext, locator: &Locator, indexer: &Indexer) {
    let contents = locator.contents().as_bytes();
    let mut offset = 0;
    // `memchr` jumps straight to the next candidate tab, so lines without
    // tabs are skipped entirely.
    while let Some(index) = memchr::memchr(b'\t', &contents[offset..]) {
        // If we find a tab in the file, grab the entire line.
        let range = locator.full_line_range(TextSize::try_from(offset + index).unwrap());
        // Determine whether the tab is part of the line's indentation.
        if let Some(indent) = tab_indentation_at_line_start(range.start(), locator, indexer) {
            context.report_diagnostic_if_enabled(TabIndentation, indent);
        }
        // Advance to the next line.
        offset = range.end().to_usize();
    }
}
/// If a line includes tabs in its indentation, returns the range of the
/// indent.
///
/// Returns `None` for lines whose indentation is tab-free, for lines that are
/// entirely whitespace, and for indentation inside multiline ranges.
fn tab_indentation_at_line_start(
    line_start: TextSize,
    locator: &Locator,
    indexer: &Indexer,
) -> Option<TextRange> {
    let mut contains_tab = false;
    // Walk the bytes from the start of the line until the first
    // non-whitespace byte; `i` ends up being the indentation length.
    for (i, char) in locator.after(line_start).as_bytes().iter().enumerate() {
        match char {
            // If we find a tab character, report it as a violation.
            b'\t' => {
                contains_tab = true;
            }
            // If we find a space, continue. (`\x0C` is the form-feed
            // character.)
            b' ' | b'\x0C' => {}
            // If we find a non-whitespace character, stop.
            _ => {
                if contains_tab {
                    let range = TextRange::at(line_start, TextSize::try_from(i).unwrap());
                    // Skip indentation that falls within a multiline range
                    // (per the indexer), where tabs are content rather than
                    // indentation.
                    if !indexer.multiline_ranges().contains_range(range) {
                        return Some(range);
                    }
                }
                break;
            }
        }
    }
    // Whitespace-only (or empty) lines never produce a diagnostic.
    None
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/compound_statements.rs | crates/ruff_linter/src/rules/pycodestyle/rules/compound_statements.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_notebook::CellOffsets;
use ruff_python_ast::PySourceType;
use ruff_python_ast::token::{TokenIterWithContext, TokenKind, Tokens};
use ruff_python_index::Indexer;
use ruff_text_size::{Ranged, TextSize};
use crate::Locator;
use crate::checkers::ast::LintContext;
use crate::{AlwaysFixableViolation, Violation};
use crate::{Edit, Fix};
/// ## What it does
/// Checks for compound statements (multiple statements on the same line).
///
/// ## Why is this bad?
/// According to [PEP 8], "compound statements are generally discouraged".
///
/// ## Example
/// ```python
/// if foo == "blah": do_blah_thing()
/// ```
///
/// Use instead:
/// ```python
/// if foo == "blah":
///     do_blah_thing()
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#other-recommendations
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.245")]
pub(crate) struct MultipleStatementsOnOneLineColon;
// No autofix: splitting a compound statement is left to the user.
impl Violation for MultipleStatementsOnOneLineColon {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Multiple statements on one line (colon)".to_string()
    }
}
/// ## What it does
/// Checks for multiple statements on one line, separated by semicolons.
///
/// ## Why is this bad?
/// According to [PEP 8], including multi-clause statements on the same line is
/// discouraged.
///
/// ## Example
/// ```python
/// do_one(); do_two(); do_three()
/// ```
///
/// Use instead:
/// ```python
/// do_one()
/// do_two()
/// do_three()
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#other-recommendations
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.245")]
pub(crate) struct MultipleStatementsOnOneLineSemicolon;
// No autofix: splitting the statements is left to the user.
impl Violation for MultipleStatementsOnOneLineSemicolon {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Multiple statements on one line (semicolon)".to_string()
    }
}
/// ## What it does
/// Checks for statements that end with an unnecessary semicolon.
///
/// ## Why is this bad?
/// A trailing semicolon is unnecessary and should be removed.
///
/// ## Example
/// ```python
/// do_four();  # useless semicolon
/// ```
///
/// Use instead:
/// ```python
/// do_four()
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.245")]
pub(crate) struct UselessSemicolon;
// Unlike E701/E702 above, this violation always carries an automatic fix
// that deletes the semicolon.
impl AlwaysFixableViolation for UselessSemicolon {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Statement ends with an unnecessary semicolon".to_string()
    }
    fn fix_title(&self) -> String {
        "Remove unnecessary semicolon".to_string()
    }
}
/// E701, E702, E703
///
/// Performs a single pass over the token stream, tracking the most recent
/// compound-statement keyword, colon, and semicolon (plus the current
/// indentation depth), and reports:
/// - E701 (`MultipleStatementsOnOneLineColon`): code following a
///   compound-statement colon on the same line (a lone `...` body is allowed),
/// - E702 (`MultipleStatementsOnOneLineSemicolon`): code following a semicolon
///   on the same line,
/// - E703 (`UselessSemicolon`): a semicolon that ends the logical line.
pub(crate) fn compound_statements(
    context: &LintContext,
    tokens: &Tokens,
    locator: &Locator,
    indexer: &Indexer,
    source_type: PySourceType,
    cell_offsets: Option<&CellOffsets>,
) {
    // Track the last seen instance of a variety of tokens.
    let mut colon = None;
    let mut semi = None;
    let mut case = None;
    let mut class = None;
    let mut elif = None;
    let mut else_ = None;
    let mut except = None;
    let mut finally = None;
    let mut for_ = None;
    let mut if_ = None;
    let mut match_ = None;
    let mut try_ = None;
    let mut while_ = None;
    let mut with = None;

    // As a special-case, track whether we're at the first token after a colon.
    // This is used to allow `class C: ...`-style definitions in stubs.
    let mut allow_ellipsis = false;

    // Track indentation.
    let mut indent = 0u32;

    // Use an iterator to allow passing it around.
    let mut token_iter = tokens.iter_with_context();

    loop {
        let Some(token) = token_iter.next() else {
            break;
        };

        match token.kind() {
            TokenKind::Ellipsis => {
                // A lone `...` directly after a compound-statement colon is
                // consumed without reporting (stub-style bodies).
                if allow_ellipsis {
                    allow_ellipsis = false;
                    continue;
                }
            }
            TokenKind::Indent => {
                indent = indent.saturating_add(1);
            }
            TokenKind::Dedent => {
                indent = indent.saturating_sub(1);
            }
            _ => {}
        }

        // Colons and semicolons inside parentheses, brackets, or braces never
        // separate statements.
        if token_iter.in_parenthesized_context() {
            continue;
        }

        match token.kind() {
            TokenKind::Newline => {
                if let Some(range) = semi {
                    // E703: the semicolon was the last non-trivia token on the
                    // line. Exception: in notebooks, a top-level trailing
                    // semicolon at the end of a cell is kept (commonly used to
                    // suppress the cell's output).
                    if !(source_type.is_ipynb()
                        && indent == 0
                        && cell_offsets
                            .and_then(|cell_offsets| cell_offsets.containing_range(token.start()))
                            .is_some_and(|cell_range| {
                                !has_non_trivia_tokens_till(token_iter.clone(), cell_range.end())
                            }))
                    {
                        if let Some(mut diagnostic) =
                            context.report_diagnostic_if_enabled(UselessSemicolon, range)
                        {
                            // Extend the deletion backwards over any preceding
                            // line continuations so the fix stays valid.
                            diagnostic.set_fix(Fix::safe_edit(Edit::deletion(
                                indexer
                                    .preceded_by_continuations(range.start(), locator.contents())
                                    .unwrap_or(range.start()),
                                range.end(),
                            )));
                        }
                    }
                }

                // Reset.
                colon = None;
                semi = None;
                case = None;
                class = None;
                elif = None;
                else_ = None;
                except = None;
                finally = None;
                for_ = None;
                if_ = None;
                match_ = None;
                try_ = None;
                while_ = None;
                with = None;
            }
            TokenKind::Colon => {
                // Only a colon that closes a compound-statement header counts.
                if case.is_some()
                    || class.is_some()
                    || elif.is_some()
                    || else_.is_some()
                    || except.is_some()
                    || finally.is_some()
                    || for_.is_some()
                    || if_.is_some()
                    || match_.is_some()
                    || try_.is_some()
                    || while_.is_some()
                    || with.is_some()
                {
                    colon = Some(token.range());

                    // Allow `class C: ...`-style definitions.
                    allow_ellipsis = true;
                }
            }
            TokenKind::Semi => {
                semi = Some(token.range());
                allow_ellipsis = false;
            }
            TokenKind::Comment
            | TokenKind::Indent
            | TokenKind::Dedent
            | TokenKind::NonLogicalNewline => {}
            _ => {
                // E702: a non-trivia token after a pending semicolon means
                // another statement shares the line.
                if let Some(range) = semi {
                    context
                        .report_diagnostic_if_enabled(MultipleStatementsOnOneLineSemicolon, range);

                    // Reset.
                    semi = None;
                    allow_ellipsis = false;
                }

                // E701: a non-trivia token after a compound-statement colon
                // means the body shares the line.
                if let Some(range) = colon {
                    context.report_diagnostic_if_enabled(MultipleStatementsOnOneLineColon, range);

                    // Reset.
                    colon = None;
                    case = None;
                    class = None;
                    elif = None;
                    else_ = None;
                    except = None;
                    finally = None;
                    for_ = None;
                    if_ = None;
                    match_ = None;
                    try_ = None;
                    while_ = None;
                    with = None;
                    allow_ellipsis = false;
                }
            }
        }

        match token.kind() {
            TokenKind::Lambda => {
                // A lambda's colon is not a compound-statement colon: clear any
                // pending keyword so the colon isn't flagged.
                // Reset.
                colon = None;
                case = None;
                class = None;
                elif = None;
                else_ = None;
                except = None;
                finally = None;
                for_ = None;
                if_ = None;
                match_ = None;
                try_ = None;
                while_ = None;
                with = None;
            }
            TokenKind::Case => {
                case = Some(token.range());
            }
            TokenKind::If => {
                if_ = Some(token.range());
            }
            TokenKind::While => {
                while_ = Some(token.range());
            }
            TokenKind::For => {
                for_ = Some(token.range());
            }
            TokenKind::Try => {
                try_ = Some(token.range());
            }
            TokenKind::Except => {
                except = Some(token.range());
            }
            TokenKind::Finally => {
                finally = Some(token.range());
            }
            TokenKind::Elif => {
                elif = Some(token.range());
            }
            TokenKind::Else => {
                else_ = Some(token.range());
            }
            TokenKind::Class => {
                class = Some(token.range());
            }
            TokenKind::With => {
                with = Some(token.range());
            }
            TokenKind::Match => {
                match_ = Some(token.range());
            }
            _ => {}
        }
    }
}
/// Returns `true` if there are any non-trivia tokens from the given token
/// iterator till the given end offset.
fn has_non_trivia_tokens_till(token_iter: TokenIterWithContext<'_>, cell_end: TextSize) -> bool {
    // Only consider tokens before the cell boundary; anything other than a
    // newline, comment, or end-of-file marker counts as "non-trivia".
    token_iter
        .take_while(|token| token.start() < cell_end)
        .any(|token| {
            !matches!(
                token.kind(),
                TokenKind::Newline
                    | TokenKind::Comment
                    | TokenKind::EndOfFile
                    | TokenKind::NonLogicalNewline
            )
        })
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/type_comparison.rs | crates/ruff_linter/src/rules/pycodestyle/rules/type_comparison.rs | use itertools::Itertools;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, CmpOp, Expr};
use ruff_python_semantic::SemanticModel;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for object type comparisons using `==` and other comparison
/// operators.
///
/// ## Why is this bad?
/// Unlike a direct type comparison, `isinstance` will also check if an object
/// is an instance of a class or a subclass thereof.
///
/// If you want to check for an exact type match, use `is` or `is not`.
///
/// ## Known problems
/// When using libraries that override the `==` (`__eq__`) operator (such as NumPy,
/// Pandas, and SQLAlchemy), this rule may produce false positives, as converting
/// from `==` to `is` or `is not` will change the behavior of the code.
///
/// For example, the following operations are _not_ equivalent:
/// ```python
/// import numpy as np
///
/// np.array([True, False]) == False
/// # array([False, True])
///
/// np.array([True, False]) is False
/// # False
/// ```
///
/// ## Example
/// ```python
/// if type(obj) == type(1):
///     pass
///
/// if type(obj) == int:
///     pass
/// ```
///
/// Use instead:
/// ```python
/// if isinstance(obj, int):
///     pass
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.39")]
pub(crate) struct TypeComparison;

// E721: reported by `type_comparison` below.
impl Violation for TypeComparison {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance checks"
            .to_string()
    }
}
/// E721
pub(crate) fn type_comparison(checker: &Checker, compare: &ast::ExprCompare) {
    let semantic = checker.semantic();

    // Walk each adjacent `(left, right)` operand pair together with the
    // operator that sits between them.
    let operand_pairs = std::iter::once(&*compare.left)
        .chain(&compare.comparators)
        .tuple_windows();

    for ((left, right), op) in operand_pairs.zip(&compare.ops) {
        // Only `==` and `!=` comparisons are of interest.
        if !matches!(op, CmpOp::Eq | CmpOp::NotEq) {
            continue;
        }

        // At least one side must evaluate to a type...
        if !(is_type(left, semantic) || is_type(right, semantic)) {
            continue;
        }

        // ...and neither side may be a NumPy `dtype`, which overloads `==`.
        if is_dtype(left, semantic) || is_dtype(right, semantic) {
            continue;
        }

        // Disallow the comparison.
        checker.report_diagnostic(TypeComparison, compare.range());
    }
}
/// Returns `true` if the [`Expr`] is known to evaluate to a type (e.g., `int`, or `type(1)`).
fn is_type(expr: &Expr, semantic: &SemanticModel) -> bool {
    match expr {
        Expr::Call(ast::ExprCall { func, .. }) => {
            // Ex) `type(obj) == type(1)`
            semantic.match_builtin_expr(func, "type")
        }
        Expr::Name(ast::ExprName { id, .. }) => {
            // Ex) `type(obj) == int`
            //
            // The name must be one of the builtin type or exception-class
            // names below *and* still resolve to the builtin binding (i.e.,
            // not be shadowed by a local definition).
            matches!(
                id.as_str(),
                "bool"
                    | "bytearray"
                    | "bytes"
                    | "classmethod"
                    | "complex"
                    | "dict"
                    | "enumerate"
                    | "filter"
                    | "float"
                    | "frozenset"
                    | "int"
                    | "list"
                    | "map"
                    | "memoryview"
                    | "object"
                    | "property"
                    | "range"
                    | "reversed"
                    | "set"
                    | "slice"
                    | "staticmethod"
                    | "str"
                    | "super"
                    | "tuple"
                    | "type"
                    | "zip"
                    | "ArithmeticError"
                    | "AssertionError"
                    | "AttributeError"
                    | "BaseException"
                    | "BlockingIOError"
                    | "BrokenPipeError"
                    | "BufferError"
                    | "BytesWarning"
                    | "ChildProcessError"
                    | "ConnectionAbortedError"
                    | "ConnectionError"
                    | "ConnectionRefusedError"
                    | "ConnectionResetError"
                    | "DeprecationWarning"
                    | "EnvironmentError"
                    | "EOFError"
                    | "Exception"
                    | "FileExistsError"
                    | "FileNotFoundError"
                    | "FloatingPointError"
                    | "FutureWarning"
                    | "GeneratorExit"
                    | "ImportError"
                    | "ImportWarning"
                    | "IndentationError"
                    | "IndexError"
                    | "InterruptedError"
                    | "IOError"
                    | "IsADirectoryError"
                    | "KeyboardInterrupt"
                    | "KeyError"
                    | "LookupError"
                    | "MemoryError"
                    | "ModuleNotFoundError"
                    | "NameError"
                    | "NotADirectoryError"
                    | "NotImplementedError"
                    | "OSError"
                    | "OverflowError"
                    | "PendingDeprecationWarning"
                    | "PermissionError"
                    | "ProcessLookupError"
                    | "RecursionError"
                    | "ReferenceError"
                    | "ResourceWarning"
                    | "RuntimeError"
                    | "RuntimeWarning"
                    | "StopAsyncIteration"
                    | "StopIteration"
                    | "SyntaxError"
                    | "SyntaxWarning"
                    | "SystemError"
                    | "SystemExit"
                    | "TabError"
                    | "TimeoutError"
                    | "TypeError"
                    | "UnboundLocalError"
                    | "UnicodeDecodeError"
                    | "UnicodeEncodeError"
                    | "UnicodeError"
                    | "UnicodeTranslateError"
                    | "UnicodeWarning"
                    | "UserWarning"
                    | "ValueError"
                    | "Warning"
                    | "ZeroDivisionError"
            ) && semantic.has_builtin_binding(id)
        }
        _ => false,
    }
}
/// Returns `true` if the [`Expr`] appears to be a reference to a NumPy dtype, since:
/// > `dtype` are a bit of a strange beast, but definitely best thought of as instances, not
/// > classes, and they are meant to be comparable not just to their own class, but also to the
/// > corresponding scalar types (e.g., `x.dtype == np.float32`) and strings (e.g.,
/// > `x.dtype == ['i1,i4']`; basically, __eq__ always tries to do `dtype(other)`).
fn is_dtype(expr: &Expr, semantic: &SemanticModel) -> bool {
    // Ex) `obj.dtype`
    if let Expr::Attribute(ast::ExprAttribute { attr, .. }) = expr {
        return attr.as_str() == "dtype";
    }

    // Ex) `np.dtype(obj)`
    if let Expr::Call(ast::ExprCall { func, .. }) = expr {
        return semantic
            .resolve_qualified_name(func)
            .is_some_and(|name| matches!(name.segments(), ["numpy", "dtype"]));
    }

    false
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/missing_newline_at_end_of_file.rs | crates/ruff_linter/src/rules/pycodestyle/rules/missing_newline_at_end_of_file.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_codegen::Stylist;
use ruff_text_size::{TextLen, TextRange};
use crate::Locator;
use crate::checkers::ast::LintContext;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for files missing a new line at the end of the file.
///
/// ## Why is this bad?
/// Trailing blank lines in a file are superfluous.
///
/// However, the last line of the file should end with a newline.
///
/// ## Example
/// ```python
/// spam(1)
/// ```
///
/// Use instead:
/// ```python
/// spam(1)\n
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.61")]
pub(crate) struct MissingNewlineAtEndOfFile;

// W292: reported by `no_newline_at_end_of_file`; the fix inserts the
// stylist's preferred line ending at the end of the file.
impl AlwaysFixableViolation for MissingNewlineAtEndOfFile {
    #[derive_message_formats]
    fn message(&self) -> String {
        "No newline at end of file".to_string()
    }

    fn fix_title(&self) -> String {
        "Add trailing newline".to_string()
    }
}
/// W292
pub(crate) fn no_newline_at_end_of_file(
    locator: &Locator,
    stylist: &Stylist,
    context: &LintContext,
) {
    let source = locator.contents();

    // Empty files and files consisting solely of a BOM are exempt.
    if source.is_empty() || source == "\u{feff}" {
        return;
    }

    // A trailing `\n` or `\r` means the final line is properly terminated.
    if source.ends_with(['\n', '\r']) {
        return;
    }

    // Report at the very end of the file, and fix by inserting the stylist's
    // preferred line ending there.
    let range = TextRange::empty(source.text_len());
    let mut diagnostic = context.report_diagnostic(MissingNewlineAtEndOfFile, range);
    diagnostic.set_fix(Fix::safe_edit(Edit::insertion(
        stylist.line_ending().to_string(),
        range.start(),
    )));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/bare_except.rs | crates/ruff_linter/src/rules/pycodestyle/rules/bare_except.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::identifier::except;
use ruff_python_ast::{self as ast, ExceptHandler, Expr, Stmt};
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for bare `except` catches in `try`-`except` statements.
///
/// ## Why is this bad?
/// A bare `except` catches `BaseException` which includes
/// `KeyboardInterrupt`, `SystemExit`, `Exception`, and others. Catching
/// `BaseException` can make it hard to interrupt the program (e.g., with
/// Ctrl-C) and can disguise other problems.
///
/// ## Example
/// ```python
/// try:
///     raise KeyboardInterrupt("You probably don't mean to break CTRL-C.")
/// except:
///     print("But a bare `except` will ignore keyboard interrupts.")
/// ```
///
/// Use instead:
/// ```python
/// try:
///     do_something_that_might_break()
/// except MoreSpecificException as e:
///     handle_error(e)
/// ```
///
/// If you actually need to catch an unknown error, use `Exception` which will
/// catch regular program errors but not important system exceptions.
///
/// ```python
/// def run_a_function(some_other_fn):
///     try:
///         some_other_fn()
///     except Exception as e:
///         print(f"How exceptional! {e}")
/// ```
///
/// ## References
/// - [Python documentation: Exception hierarchy](https://docs.python.org/3/library/exceptions.html#exception-hierarchy)
/// - [Google Python Style Guide: "Exceptions"](https://google.github.io/styleguide/pyguide.html#24-exceptions)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.36")]
pub(crate) struct BareExcept;

// E722: reported by `bare_except`; handlers whose body contains a bare
// `raise` are exempted there.
impl Violation for BareExcept {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Do not use bare `except`".to_string()
    }
}
/// E722
pub(crate) fn bare_except(
    checker: &Checker,
    type_: Option<&Expr>,
    body: &[Stmt],
    handler: &ExceptHandler,
) {
    // Only bare handlers (no exception type) are candidates.
    if type_.is_some() {
        return;
    }

    // A bare `except` whose body re-raises via a bare `raise` is allowed.
    let reraises = body
        .iter()
        .any(|stmt| matches!(stmt, Stmt::Raise(ast::StmtRaise { exc: None, .. })));
    if reraises {
        return;
    }

    checker.report_diagnostic(BareExcept, except(handler, checker.locator().contents()));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/blank_lines.rs | crates/ruff_linter/src/rules/pycodestyle/rules/blank_lines.rs | use std::cmp::Ordering;
use std::iter::Peekable;
use std::num::NonZeroU32;
use std::slice::Iter;
use itertools::Itertools;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_notebook::CellOffsets;
use ruff_python_ast::PySourceType;
use ruff_python_ast::token::TokenIterWithContext;
use ruff_python_ast::token::TokenKind;
use ruff_python_ast::token::Tokens;
use ruff_python_codegen::Stylist;
use ruff_python_trivia::PythonWhitespace;
use ruff_source_file::{LineRanges, UniversalNewlines};
use ruff_text_size::TextRange;
use ruff_text_size::TextSize;
use crate::checkers::ast::{DiagnosticGuard, LintContext};
use crate::checkers::logical_lines::expand_indent;
use crate::line_width::IndentWidth;
use crate::rules::pycodestyle::helpers::is_non_logical_token;
use crate::{AlwaysFixableViolation, Edit, Fix, Locator, Violation};
/// Number of blank lines around top level classes and functions, per PEP 8.
const BLANK_LINES_TOP_LEVEL: u32 = 2;

/// Number of blank lines around methods and nested classes and functions, per PEP 8.
const BLANK_LINES_NESTED_LEVEL: u32 = 1;
/// ## What it does
/// Checks for missing blank lines between methods of a class.
///
/// ## Why is this bad?
/// PEP 8 recommends exactly one blank line between methods of a class.
///
/// ## Example
/// ```python
/// class MyClass(object):
///     def func1():
///         pass
///     def func2():
///         pass
/// ```
///
/// Use instead:
/// ```python
/// class MyClass(object):
///     def func1():
///         pass
///
///     def func2():
///         pass
/// ```
///
/// ## Typing stub files (`.pyi`)
/// The typing style guide recommends to not use blank lines between methods except to group
/// them. That's why this rule is not enabled in typing stub files.
///
/// ## References
/// - [PEP 8: Blank Lines](https://peps.python.org/pep-0008/#blank-lines)
/// - [Flake 8 rule](https://www.flake8rules.com/rules/E301.html)
/// - [Typing Style Guide](https://typing.python.org/en/latest/guides/writing_stubs.html#blank-lines)
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.2.2")]
pub(crate) struct BlankLineBetweenMethods;

// E301.
impl AlwaysFixableViolation for BlankLineBetweenMethods {
    #[derive_message_formats]
    fn message(&self) -> String {
        format!("Expected {BLANK_LINES_NESTED_LEVEL:?} blank line, found 0")
    }

    fn fix_title(&self) -> String {
        "Add missing blank line".to_string()
    }
}
/// ## What it does
/// Checks for missing blank lines between top level functions and classes.
///
/// ## Why is this bad?
/// PEP 8 recommends exactly two blank lines between top level functions and classes.
///
/// The rule respects the [`lint.isort.lines-after-imports`] setting when
/// determining the required number of blank lines between top-level `import`
/// statements and function or class definitions for compatibility with isort.
///
/// ## Example
/// ```python
/// def func1():
///     pass
/// def func2():
///     pass
/// ```
///
/// Use instead:
/// ```python
/// def func1():
///     pass
///
///
/// def func2():
///     pass
/// ```
///
/// ## Typing stub files (`.pyi`)
/// The typing style guide recommends to not use blank lines between classes and functions except to group
/// them. That's why this rule is not enabled in typing stub files.
///
/// ## Options
/// - `lint.isort.lines-after-imports`
///
/// ## References
/// - [PEP 8: Blank Lines](https://peps.python.org/pep-0008/#blank-lines)
/// - [Flake 8 rule](https://www.flake8rules.com/rules/E302.html)
/// - [Typing Style Guide](https://typing.python.org/en/latest/guides/writing_stubs.html#blank-lines)
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.2.2")]
pub(crate) struct BlankLinesTopLevel {
    // The number of blank lines actually found before the definition.
    actual_blank_lines: u32,
    // The number of blank lines required at this position.
    expected_blank_lines: u32,
}

// E302.
impl AlwaysFixableViolation for BlankLinesTopLevel {
    #[derive_message_formats]
    fn message(&self) -> String {
        let BlankLinesTopLevel {
            actual_blank_lines,
            expected_blank_lines,
        } = self;
        format!("Expected {expected_blank_lines:?} blank lines, found {actual_blank_lines}")
    }

    fn fix_title(&self) -> String {
        "Add missing blank line(s)".to_string()
    }
}
/// ## What it does
/// Checks for extraneous blank lines.
///
/// ## Why is this bad?
/// PEP 8 recommends using blank lines as follows:
/// - No more than two blank lines between top-level statements.
/// - No more than one blank line between non-top-level statements.
///
/// ## Example
/// ```python
/// def func1():
///     pass
///
///
///
/// def func2():
///     pass
/// ```
///
/// Use instead:
/// ```python
/// def func1():
///     pass
///
///
/// def func2():
///     pass
/// ```
///
/// ## Typing stub files (`.pyi`)
/// The rule allows at most one blank line in typing stub files in accordance to the typing style guide recommendation.
///
/// Note: The rule respects the following `isort` settings when determining the maximum number of blank lines allowed between two statements:
///
/// * [`lint.isort.lines-after-imports`]: For top-level statements directly following an import statement.
/// * [`lint.isort.lines-between-types`]: For `import` statements directly following a `from ... import ...` statement or vice versa.
///
/// ## Options
/// - `lint.isort.lines-after-imports`
/// - `lint.isort.lines-between-types`
///
/// ## References
/// - [PEP 8: Blank Lines](https://peps.python.org/pep-0008/#blank-lines)
/// - [Flake 8 rule](https://www.flake8rules.com/rules/E303.html)
/// - [Typing Style Guide](https://typing.python.org/en/latest/guides/writing_stubs.html#blank-lines)
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.2.2")]
pub(crate) struct TooManyBlankLines {
    // The number of blank lines actually found.
    actual_blank_lines: u32,
}

// E303.
impl AlwaysFixableViolation for TooManyBlankLines {
    #[derive_message_formats]
    fn message(&self) -> String {
        let TooManyBlankLines { actual_blank_lines } = self;
        format!("Too many blank lines ({actual_blank_lines})")
    }

    fn fix_title(&self) -> String {
        "Remove extraneous blank line(s)".to_string()
    }
}
/// ## What it does
/// Checks for extraneous blank line(s) after function decorators.
///
/// ## Why is this bad?
/// There should be no blank lines between a decorator and the object it is decorating.
///
/// ## Example
/// ```python
/// class User(object):
///
///     @property
///
///     def name(self):
///         pass
/// ```
///
/// Use instead:
/// ```python
/// class User(object):
///
///     @property
///     def name(self):
///         pass
/// ```
///
/// ## References
/// - [PEP 8: Blank Lines](https://peps.python.org/pep-0008/#blank-lines)
/// - [Flake 8 rule](https://www.flake8rules.com/rules/E304.html)
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.2.2")]
pub(crate) struct BlankLineAfterDecorator {
    // The number of blank lines found between the decorator and the decorated object.
    actual_blank_lines: u32,
}

// E304.
impl AlwaysFixableViolation for BlankLineAfterDecorator {
    #[derive_message_formats]
    fn message(&self) -> String {
        format!(
            "Blank lines found after function decorator ({lines})",
            lines = self.actual_blank_lines
        )
    }

    fn fix_title(&self) -> String {
        "Remove extraneous blank line(s)".to_string()
    }
}
/// ## What it does
/// Checks for missing blank lines after the end of function or class.
///
/// ## Why is this bad?
/// PEP 8 recommends using blank lines as follows:
/// - Two blank lines are expected between functions and classes
/// - One blank line is expected between methods of a class.
///
/// ## Example
/// ```python
/// class User(object):
///     pass
/// user = User()
/// ```
///
/// Use instead:
/// ```python
/// class User(object):
///     pass
///
///
/// user = User()
/// ```
///
/// ## Typing stub files (`.pyi`)
/// The typing style guide recommends to not use blank lines between statements except to group
/// them. That's why this rule is not enabled in typing stub files.
///
/// ## References
/// - [PEP 8: Blank Lines](https://peps.python.org/pep-0008/#blank-lines)
/// - [Flake 8 rule](https://www.flake8rules.com/rules/E305.html)
/// - [Typing Style Guide](https://typing.python.org/en/latest/guides/writing_stubs.html#blank-lines)
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.2.2")]
pub(crate) struct BlankLinesAfterFunctionOrClass {
    // The number of blank lines actually found after the definition.
    actual_blank_lines: u32,
}

// E305.
impl AlwaysFixableViolation for BlankLinesAfterFunctionOrClass {
    #[derive_message_formats]
    fn message(&self) -> String {
        let BlankLinesAfterFunctionOrClass {
            actual_blank_lines: blank_lines,
        } = self;
        format!("Expected 2 blank lines after class or function definition, found ({blank_lines})")
    }

    fn fix_title(&self) -> String {
        "Add missing blank line(s)".to_string()
    }
}
/// ## What it does
/// Checks for 1 blank line between nested function or class definitions.
///
/// ## Why is this bad?
/// PEP 8 recommends using blank lines as follows:
/// - Two blank lines are expected between functions and classes
/// - One blank line is expected between methods of a class.
///
/// ## Example
/// ```python
/// def outer():
///     def inner():
///         pass
///     def inner2():
///         pass
/// ```
///
/// Use instead:
/// ```python
/// def outer():
///     def inner():
///         pass
///
///     def inner2():
///         pass
/// ```
///
/// ## Typing stub files (`.pyi`)
/// The typing style guide recommends to not use blank lines between classes and functions except to group
/// them. That's why this rule is not enabled in typing stub files.
///
/// ## References
/// - [PEP 8: Blank Lines](https://peps.python.org/pep-0008/#blank-lines)
/// - [Flake 8 rule](https://www.flake8rules.com/rules/E306.html)
/// - [Typing Style Guide](https://typing.python.org/en/latest/guides/writing_stubs.html#blank-lines)
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.2.2")]
pub(crate) struct BlankLinesBeforeNestedDefinition;

// E306.
impl AlwaysFixableViolation for BlankLinesBeforeNestedDefinition {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Expected 1 blank line before a nested definition, found 0".to_string()
    }

    fn fix_title(&self) -> String {
        "Add missing blank line".to_string()
    }
}
/// Characteristics of a single logical (or comment-only) line, as produced by
/// [`LinePreprocessor`].
#[derive(Debug)]
struct LogicalLineInfo {
    kind: LogicalLineKind,
    first_token_range: TextRange,
    /// The kind of the last non-trivia token before the newline ending the logical line.
    last_token: TokenKind,
    /// The end of the logical line including the newline.
    logical_line_end: TextSize,
    /// `true` if the line is not blank but consists only of a comment (and trivia).
    is_comment_only: bool,
    /// If running on a notebook, whether the line is the first logical line (or a comment preceding it) of its cell.
    is_beginning_of_cell: bool,
    /// `true` if the line is a string only (including trivia tokens) line, which is a docstring if coming right after a class/function definition.
    is_docstring: bool,
    /// The indentation length in columns. See [`expand_indent`] for the computation of the indent.
    indent_length: usize,
    /// The number of blank lines preceding the current line.
    blank_lines: BlankLines,
    /// The maximum number of consecutive blank lines between the current line
    /// and the previous non-comment logical line.
    /// One of its main uses is to allow comments to directly precede or follow a class/function definition.
    /// As such, `preceding_blank_lines` is used for rules that cannot trigger on comments (all rules except E303),
    /// and `blank_lines` is used for the rule that can trigger on comments (E303).
    preceding_blank_lines: BlankLines,
}
/// Iterator that processes tokens until a full logical line (or comment line) is "built".
/// It then returns characteristics of that logical line (see `LogicalLineInfo`).
struct LinePreprocessor<'a> {
    tokens: TokenIterWithContext<'a>,
    locator: &'a Locator<'a>,
    indent_width: IndentWidth,
    /// The start position of the next logical line.
    line_start: TextSize,
    /// Maximum number of consecutive blank lines between the current line and the previous non-comment logical line.
    /// One of its main uses is to allow a comment to directly precede a class/function definition.
    max_preceding_blank_lines: BlankLines,
    /// The cell offsets of the notebook (if running on a notebook).
    cell_offsets: Option<Peekable<Iter<'a, TextSize>>>,
    /// If running on a notebook, whether the line is the first logical line (or a comment preceding it) of its cell.
    is_beginning_of_cell: bool,
}

impl<'a> LinePreprocessor<'a> {
    /// Creates a preprocessor over `tokens`; `cell_offsets` is `Some` when
    /// linting a notebook.
    fn new(
        tokens: &'a Tokens,
        locator: &'a Locator,
        indent_width: IndentWidth,
        cell_offsets: Option<&'a CellOffsets>,
    ) -> LinePreprocessor<'a> {
        LinePreprocessor {
            tokens: tokens.iter_with_context(),
            locator,
            line_start: TextSize::new(0),
            max_preceding_blank_lines: BlankLines::Zero,
            indent_width,
            is_beginning_of_cell: cell_offsets.is_some(),
            // Skip the first offset: the first line is already marked as the
            // beginning of a cell via `is_beginning_of_cell` above.
            cell_offsets: cell_offsets
                .map(|cell_offsets| cell_offsets.get(1..).unwrap_or_default().iter().peekable()),
        }
    }
}
impl Iterator for LinePreprocessor<'_> {
    type Item = LogicalLineInfo;

    /// Consumes tokens until a complete logical (or comment-only) line has
    /// been seen and returns its summary, or `None` once the tokens are
    /// exhausted.
    fn next(&mut self) -> Option<LogicalLineInfo> {
        let mut line_is_comment_only = true;
        let mut is_docstring = false;
        // Number of consecutive blank lines directly preceding this logical line.
        let mut blank_lines = BlankLines::Zero;
        let mut first_logical_line_token: Option<(LogicalLineKind, TextRange)> = None;
        let mut last_token = TokenKind::EndOfFile;

        while let Some(token) = self.tokens.next() {
            let (kind, range) = token.as_tuple();

            // Indent/dedent tokens carry no line content of their own.
            if matches!(kind, TokenKind::Indent | TokenKind::Dedent) {
                continue;
            }

            let (logical_line_kind, first_token_range) =
                if let Some(first_token_range) = first_logical_line_token {
                    first_token_range
                }
                // At the start of the line...
                else {
                    // Check if we are at the beginning of a cell in a notebook.
                    if let Some(ref mut cell_offsets) = self.cell_offsets {
                        if cell_offsets
                            .peek()
                            .is_some_and(|offset| offset == &&self.line_start)
                        {
                            self.is_beginning_of_cell = true;
                            cell_offsets.next();
                            // Blank-line counts don't carry across cell boundaries.
                            blank_lines = BlankLines::Zero;
                            self.max_preceding_blank_lines = BlankLines::Zero;
                        }
                    }

                    // An empty line
                    if kind == TokenKind::NonLogicalNewline {
                        blank_lines.add(range);
                        self.line_start = range.end();
                        continue;
                    }

                    is_docstring = kind == TokenKind::String;

                    // Classify the line by its first token.
                    let logical_line_kind = match kind {
                        TokenKind::Class => LogicalLineKind::Class,
                        TokenKind::Comment => LogicalLineKind::Comment,
                        TokenKind::At => LogicalLineKind::Decorator,
                        TokenKind::Def => LogicalLineKind::Function,
                        // Lookahead to distinguish `async def` from `async with`.
                        TokenKind::Async
                            if self
                                .tokens
                                .peek()
                                .is_some_and(|token| token.kind() == TokenKind::Def) =>
                        {
                            LogicalLineKind::Function
                        }
                        TokenKind::Import => LogicalLineKind::Import,
                        TokenKind::From => LogicalLineKind::FromImport,
                        _ => LogicalLineKind::Other,
                    };

                    first_logical_line_token = Some((logical_line_kind, range));

                    (logical_line_kind, range)
                };

            if !is_non_logical_token(kind) {
                line_is_comment_only = false;
            }

            // A docstring line is composed only of the docstring (TokenKind::String) and trivia tokens.
            // (If a comment follows a docstring, we still count the line as a docstring)
            if kind != TokenKind::String && !is_non_logical_token(kind) {
                is_docstring = false;
            }

            // A newline outside parentheses completes the logical line.
            if kind.is_any_newline() && !self.tokens.in_parenthesized_context() {
                let indent_range = TextRange::new(self.line_start, first_token_range.start());

                let indent_length =
                    expand_indent(self.locator.slice(indent_range), self.indent_width);

                self.max_preceding_blank_lines = self.max_preceding_blank_lines.max(blank_lines);

                let logical_line = LogicalLineInfo {
                    kind: logical_line_kind,
                    first_token_range,
                    last_token,
                    logical_line_end: range.end(),
                    is_comment_only: line_is_comment_only,
                    is_beginning_of_cell: self.is_beginning_of_cell,
                    is_docstring,
                    indent_length,
                    blank_lines,
                    preceding_blank_lines: self.max_preceding_blank_lines,
                };

                // Reset the blank lines after a non-comment only line.
                if !line_is_comment_only {
                    self.max_preceding_blank_lines = BlankLines::Zero;
                }

                // Set the start for the next logical line.
                self.line_start = range.end();

                if self.cell_offsets.is_some() && !line_is_comment_only {
                    self.is_beginning_of_cell = false;
                }

                return Some(logical_line);
            }

            // Remember the last non-trivia token seen on this line.
            if !is_non_logical_token(kind) {
                last_token = kind;
            }
        }

        None
    }
}
/// The number of consecutive blank lines preceding a line, together with the
/// source range they cover.
#[derive(Clone, Copy, Debug, Default)]
enum BlankLines {
    /// No blank lines
    #[default]
    Zero,
    /// One or more blank lines
    Many { count: NonZeroU32, range: TextRange },
}

impl BlankLines {
    /// Records one additional blank line spanning `line_range`.
    fn add(&mut self, line_range: TextRange) {
        *self = match *self {
            // First blank line: start counting and remember where it begins.
            BlankLines::Zero => BlankLines::Many {
                count: NonZeroU32::MIN,
                range: line_range,
            },
            // Subsequent blank lines: bump the count and extend the range.
            BlankLines::Many { count, range } => BlankLines::Many {
                count: count.saturating_add(1),
                range: TextRange::new(range.start(), line_range.end()),
            },
        };
    }

    /// The number of blank lines recorded.
    fn count(&self) -> u32 {
        if let BlankLines::Many { count, .. } = self {
            count.get()
        } else {
            0
        }
    }

    /// The source range covered by the blank lines, if any.
    fn range(&self) -> Option<TextRange> {
        if let BlankLines::Many { range, .. } = self {
            Some(*range)
        } else {
            None
        }
    }
}
// All `BlankLines` comparisons below consider only the blank-line count; the
// tracked source ranges are ignored.
impl PartialEq<u32> for BlankLines {
    fn eq(&self, other: &u32) -> bool {
        self.partial_cmp(other) == Some(Ordering::Equal)
    }
}

impl PartialOrd<u32> for BlankLines {
    fn partial_cmp(&self, other: &u32) -> Option<Ordering> {
        self.count().partial_cmp(other)
    }
}

impl PartialOrd for BlankLines {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for BlankLines {
    fn cmp(&self, other: &Self) -> Ordering {
        self.count().cmp(&other.count())
    }
}

impl PartialEq for BlankLines {
    fn eq(&self, other: &Self) -> bool {
        self.count() == other.count()
    }
}

impl Eq for BlankLines {}
/// The kind of statement the current logical line follows (tracked as
/// `state.follows` in `check_lines`).
#[derive(Copy, Clone, Debug, Default)]
enum Follows {
    #[default]
    Other,
    Decorator,
    Def,
    /// A function whose body is a dummy (...), if the ellipsis is on the same line as the def.
    DummyDef,
    Import,
    FromImport,
    Docstring,
}
impl Follows {
    /// Allow a function/method to follow a function/method with a dummy body.
    const fn follows_def_with_dummy_body(self) -> bool {
        matches!(self, Follows::DummyDef)
    }

    /// Returns `true` for any kind of function definition (regular or dummy-bodied).
    const fn is_any_def(self) -> bool {
        matches!(self, Follows::Def | Follows::DummyDef)
    }

    /// Returns `true` for both `import` and `from ... import ...` statements.
    const fn is_any_import(self) -> bool {
        matches!(self, Follows::Import | Follows::FromImport)
    }
}
#[derive(Copy, Clone, Debug, Default)]
enum Status {
    /// Stores the indent level where the nesting started.
    Inside(usize),
    /// Rectifies an `Inside` that would otherwise have flipped to `Outside`
    /// because of a dedented comment.
    CommentAfter(usize),
    #[default]
    Outside,
}

impl Status {
    fn update(&mut self, line: &LogicalLineInfo) {
        match *self {
            // Dedenting to (or past) the nesting level leaves the nested
            // region -- unless the line is only a comment, which might still
            // belong to a following nested statement.
            Status::Inside(nesting_indent) if line.indent_length <= nesting_indent => {
                *self = if line.is_comment_only {
                    Status::CommentAfter(nesting_indent)
                } else {
                    Status::Outside
                };
            }
            // The first non-comment line after a dedented comment decides
            // whether we were still inside the nested region all along.
            Status::CommentAfter(indent) if !line.is_comment_only => {
                *self = if line.indent_length > indent {
                    Status::Inside(indent)
                } else {
                    Status::Outside
                };
            }
            // Deeper lines while `Inside`, comments while `CommentAfter`, and
            // anything while `Outside` leave the state unchanged.
            _ => {}
        }
    }
}
/// Contains variables used for the linting of blank lines.
pub(crate) struct BlankLinesChecker<'a, 'b> {
    // Source style (e.g. line endings) used when generating fixes.
    stylist: &'a Stylist<'a>,
    // Maps byte offsets to source lines/positions.
    locator: &'a Locator<'a>,
    // Whether the file is a regular module, a stub (`.pyi`), or a notebook.
    source_type: PySourceType,
    // Notebook cell offsets, forwarded to the line preprocessor.
    cell_offsets: Option<&'a CellOffsets>,
    // Shared lint context used to emit diagnostics.
    context: &'a LintContext<'b>,
}
impl<'a, 'b> BlankLinesChecker<'a, 'b> {
pub(crate) fn new(
locator: &'a Locator<'a>,
stylist: &'a Stylist<'a>,
source_type: PySourceType,
cell_offsets: Option<&'a CellOffsets>,
context: &'a LintContext<'b>,
) -> BlankLinesChecker<'a, 'b> {
BlankLinesChecker {
stylist,
locator,
source_type,
cell_offsets,
context,
}
}
    /// Report a diagnostic if the associated rule is enabled.
    ///
    /// Returns a guard the caller can use to attach a fix, or `None` when the
    /// rule is disabled in the current settings.
    fn report_diagnostic<T: Violation>(
        &self,
        kind: T,
        range: TextRange,
    ) -> Option<DiagnosticGuard<'a, 'b>> {
        self.context.report_diagnostic_if_enabled(kind, range)
    }
/// E301, E302, E303, E304, E305, E306
pub(crate) fn check_lines(&self, tokens: &Tokens) {
let mut prev_indent_length: Option<usize> = None;
let mut prev_logical_line: Option<LogicalLineInfo> = None;
let mut state = BlankLinesState::default();
let line_preprocessor = LinePreprocessor::new(
tokens,
self.locator,
self.context.settings().tab_size,
self.cell_offsets,
);
for logical_line in line_preprocessor {
// Reset `follows` after a dedent:
// ```python
// if True:
// import test
// a = 10
// ```
// The `a` statement doesn't follow the `import` statement but the `if` statement.
if let Some(prev_indent_length) = prev_indent_length {
if prev_indent_length > logical_line.indent_length {
state.follows = Follows::Other;
}
}
// Reset the previous line end after an indent or dedent:
// ```python
// if True:
// import test
// # comment
// a = 10
// ```
// The `# comment` should be attached to the `import` statement, rather than the
// assignment.
if let Some(prev_logical_line) = prev_logical_line {
if prev_logical_line.is_comment_only {
if prev_logical_line.indent_length != logical_line.indent_length {
state.last_non_comment_line_end = prev_logical_line.logical_line_end;
}
}
}
state.class_status.update(&logical_line);
state.fn_status.update(&logical_line);
self.check_line(&logical_line, &state, prev_indent_length);
match logical_line.kind {
LogicalLineKind::Class => {
if matches!(state.class_status, Status::Outside) {
state.class_status = Status::Inside(logical_line.indent_length);
}
state.follows = Follows::Other;
}
LogicalLineKind::Decorator => {
state.follows = Follows::Decorator;
}
LogicalLineKind::Function => {
if matches!(state.fn_status, Status::Outside) {
state.fn_status = Status::Inside(logical_line.indent_length);
}
state.follows = if logical_line.last_token == TokenKind::Ellipsis {
Follows::DummyDef
} else {
Follows::Def
};
}
LogicalLineKind::Comment => {}
LogicalLineKind::Import => {
state.follows = Follows::Import;
}
LogicalLineKind::FromImport => {
state.follows = Follows::FromImport;
}
LogicalLineKind::Other => {
state.follows = Follows::Other;
}
}
if logical_line.is_docstring {
state.follows = Follows::Docstring;
}
if !logical_line.is_comment_only {
state.is_not_first_logical_line = true;
state.last_non_comment_line_end = logical_line.logical_line_end;
if logical_line.indent_length == 0 {
state.previous_unindented_line_kind = Some(logical_line.kind);
}
}
if !logical_line.is_comment_only {
prev_indent_length = Some(logical_line.indent_length);
}
prev_logical_line = Some(logical_line);
}
}
#[expect(clippy::nonminimal_bool)]
fn check_line(
&self,
line: &LogicalLineInfo,
state: &BlankLinesState,
prev_indent_length: Option<usize>,
) {
if line.preceding_blank_lines == 0
// Only applies to methods.
&& matches!(line.kind, LogicalLineKind::Function | LogicalLineKind::Decorator)
// Allow groups of one-liners.
&& !(state.follows.is_any_def() && line.last_token != TokenKind::Colon)
&& !state.follows.follows_def_with_dummy_body()
// Only for class scope: we must be inside a class block
&& matches!(state.class_status, Status::Inside(_))
// But NOT inside a function body; nested defs inside methods are handled by E306
&& matches!(state.fn_status, Status::Outside | Status::CommentAfter(_))
// The class/parent method's docstring can directly precede the def.
// Allow following a decorator (if there is an error it will be triggered on the first decorator).
&& !matches!(state.follows, Follows::Docstring | Follows::Decorator)
// Do not trigger when the def follows an if/while/etc...
&& prev_indent_length.is_some_and(|prev_indent_length| prev_indent_length >= line.indent_length)
// Blank lines in stub files are only used for grouping. Don't enforce blank lines.
&& !self.source_type.is_stub()
{
// E301
if let Some(mut diagnostic) =
self.report_diagnostic(BlankLineBetweenMethods, line.first_token_range)
{
diagnostic.set_fix(Fix::safe_edit(Edit::insertion(
self.stylist.line_ending().to_string(),
self.locator.line_start(state.last_non_comment_line_end),
)));
}
}
// Blank lines in stub files are used to group definitions. Don't enforce blank lines.
let max_lines_level = if self.source_type.is_stub() {
1
} else {
if line.indent_length == 0 {
BLANK_LINES_TOP_LEVEL
} else {
BLANK_LINES_NESTED_LEVEL
}
};
let expected_blank_lines_before_definition = if line.indent_length == 0 {
// Mimic the isort rules for the number of blank lines before classes and functions
if state.follows.is_any_import() {
// Fallback to the default if the value is too large for an u32 or if it is negative.
// A negative value means that isort should determine the blank lines automatically.
// `isort` defaults to 2 if before a class or function definition (except in stubs where it is one) and 1 otherwise.
// Defaulting to 2 (or 1 in stubs) here is correct because the variable is only used when testing the
// blank lines before a class or function definition.
u32::try_from(self.context.settings().isort.lines_after_imports)
.unwrap_or(max_lines_level)
} else {
max_lines_level
}
} else {
max_lines_level
};
if line.preceding_blank_lines < expected_blank_lines_before_definition
// Allow following a decorator (if there is an error it will be triggered on the first decorator).
&& !matches!(state.follows, Follows::Decorator)
// Allow groups of one-liners.
&& !(state.follows.is_any_def() && line.last_token != TokenKind::Colon)
&& !(state.follows.follows_def_with_dummy_body() && line.preceding_blank_lines == 0)
// Only trigger on non-indented classes and functions (for example functions within an if are ignored)
&& line.indent_length == 0
// Only apply to functions or classes.
&& line.kind.is_class_function_or_decorator()
// Blank lines in stub files are used to group definitions. Don't enforce blank lines.
&& !self.source_type.is_stub()
// Do not expect blank lines before the first logical line.
&& state.is_not_first_logical_line
// Ignore the first logical line (and any comment preceding it) of each cell in notebooks.
&& !line.is_beginning_of_cell
{
// E302
if let Some(mut diagnostic) = self.report_diagnostic(
BlankLinesTopLevel {
actual_blank_lines: line.preceding_blank_lines.count(),
expected_blank_lines: expected_blank_lines_before_definition,
},
line.first_token_range,
) {
if let Some(blank_lines_range) = line.blank_lines.range() {
diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
self.stylist
.line_ending()
.repeat(expected_blank_lines_before_definition as usize),
blank_lines_range,
)));
} else {
diagnostic.set_fix(Fix::safe_edit(Edit::insertion(
self.stylist.line_ending().repeat(
(expected_blank_lines_before_definition
- line.preceding_blank_lines.count())
as usize,
),
self.locator.line_start(state.last_non_comment_line_end),
)));
}
}
}
// If between `import` and `from .. import ..` or the other way round,
// allow up to `lines_between_types` newlines for isort compatibility.
// We let `isort` remove extra blank lines when the imports belong
// to different sections.
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/mod.rs | crates/ruff_linter/src/rules/pycodestyle/rules/mod.rs | pub(crate) use ambiguous_class_name::*;
pub(crate) use ambiguous_function_name::*;
pub(crate) use ambiguous_variable_name::*;
pub(crate) use bare_except::*;
pub(crate) use blank_lines::*;
pub(crate) use compound_statements::*;
pub(crate) use doc_line_too_long::*;
pub use errors::IOError;
pub(crate) use errors::*;
pub(crate) use invalid_escape_sequence::*;
pub(crate) use lambda_assignment::*;
pub(crate) use line_too_long::*;
pub(crate) use literal_comparisons::*;
pub(crate) use missing_newline_at_end_of_file::*;
pub(crate) use mixed_spaces_and_tabs::*;
pub(crate) use module_import_not_at_top_of_file::*;
pub(crate) use multiple_imports_on_one_line::*;
pub(crate) use not_tests::*;
pub(crate) use tab_indentation::*;
pub(crate) use too_many_newlines_at_end_of_file::*;
pub(crate) use trailing_whitespace::*;
pub(crate) use type_comparison::*;
pub(crate) use whitespace_after_decorator::*;
mod ambiguous_class_name;
mod ambiguous_function_name;
mod ambiguous_variable_name;
mod bare_except;
mod blank_lines;
mod compound_statements;
mod doc_line_too_long;
mod errors;
mod invalid_escape_sequence;
mod lambda_assignment;
mod line_too_long;
mod literal_comparisons;
pub(crate) mod logical_lines;
mod missing_newline_at_end_of_file;
mod mixed_spaces_and_tabs;
mod module_import_not_at_top_of_file;
mod multiple_imports_on_one_line;
mod not_tests;
mod tab_indentation;
mod too_many_newlines_at_end_of_file;
mod trailing_whitespace;
mod type_comparison;
mod whitespace_after_decorator;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/multiple_imports_on_one_line.rs | crates/ruff_linter/src/rules/pycodestyle/rules/multiple_imports_on_one_line.rs | use itertools::Itertools;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{Alias, Stmt};
use ruff_python_codegen::Stylist;
use ruff_python_index::Indexer;
use ruff_python_trivia::indentation_at_offset;
use ruff_source_file::LineRanges;
use ruff_text_size::{Ranged, TextRange};
use crate::Locator;
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Check for multiple imports on one line.
///
/// ## Why is this bad?
/// According to [PEP 8], "imports should usually be on separate lines."
///
/// ## Example
/// ```python
/// import sys, os
/// ```
///
/// Use instead:
/// ```python
/// import os
/// import sys
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#imports
// Violation type for E401; the fix is produced by `split_imports` below.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.191")]
pub(crate) struct MultipleImportsOnOneLine;
impl Violation for MultipleImportsOnOneLine {
    // Declared `Sometimes` even though the rule currently always sets a fix.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        "Multiple imports on one line".to_string()
    }
    fn fix_title(&self) -> Option<String> {
        Some("Split imports".to_string())
    }
}
/// E401
pub(crate) fn multiple_imports_on_one_line(checker: &Checker, stmt: &Stmt, names: &[Alias]) {
    // A statement importing a single module is fine; only flag `import a, b`.
    if names.len() < 2 {
        return;
    }
    let fix = split_imports(
        stmt,
        names,
        checker.locator(),
        checker.indexer(),
        checker.stylist(),
    );
    checker
        .report_diagnostic(MultipleImportsOnOneLine, stmt.range())
        .set_fix(fix);
}
/// Generate a [`Fix`] to split the imports across multiple statements.
///
/// The per-alias rendering was previously duplicated between the two
/// branches; it is now shared via a single closure.
fn split_imports(
    stmt: &Stmt,
    names: &[Alias],
    locator: &Locator,
    indexer: &Indexer,
    stylist: &Stylist,
) -> Fix {
    // Render a single `import` statement for one alias, prefixed with
    // `prefix` (either indentation or the empty string).
    let render = |alias: &Alias, prefix: &str| {
        let Alias {
            range: _,
            node_index: _,
            name,
            asname,
        } = alias;
        if let Some(asname) = asname {
            format!("{prefix}import {name} as {asname}")
        } else {
            format!("{prefix}import {name}")
        }
    };
    if indexer.in_multi_statement_line(stmt, locator.contents()) {
        // Ex) `x = 1; import os, sys` (convert to `x = 1; import os; import sys`)
        let replacement = names.iter().map(|alias| render(alias, "")).join("; ");
        Fix::safe_edit(Edit::range_replacement(replacement, stmt.range()))
    } else {
        // Ex) `import os, sys` (convert to `import os\nimport sys`)
        let indentation =
            indentation_at_offset(stmt.start(), locator.contents()).unwrap_or_default();
        // Generate newline-delimited imports, each starting at the original
        // statement's indentation.
        let replacement = names
            .iter()
            .map(|alias| render(alias, indentation))
            .join(stylist.line_ending().as_str());
        Fix::safe_edit(Edit::range_replacement(
            replacement,
            TextRange::new(locator.line_start(stmt.start()), stmt.end()),
        ))
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/ambiguous_function_name.rs | crates/ruff_linter/src/rules/pycodestyle/rules/ambiguous_function_name.rs | use ruff_python_ast::Identifier;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::pycodestyle::helpers::is_ambiguous_name;
/// ## What it does
/// Checks for the use of the characters 'l', 'O', or 'I' as function names.
///
/// ## Why is this bad?
/// In some fonts, these characters are indistinguishable from the
/// numerals one and zero. When tempted to use 'l', use 'L' instead.
///
/// ## Example
///
/// ```python
/// def l(x): ...
/// ```
///
/// Use instead:
///
/// ```python
/// def long_name(x): ...
/// ```
// Violation type for E743; stores the offending function name.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.35")]
pub(crate) struct AmbiguousFunctionName(pub String);
impl Violation for AmbiguousFunctionName {
    #[derive_message_formats]
    fn message(&self) -> String {
        // The wrapped `String` is the offending function name.
        let AmbiguousFunctionName(name) = self;
        format!("Ambiguous function name: `{name}`")
    }
}
/// E743
pub(crate) fn ambiguous_function_name(checker: &Checker, name: &Identifier) {
    // Only the single-character names `l`, `O`, and `I` count as ambiguous.
    if !is_ambiguous_name(name) {
        return;
    }
    checker.report_diagnostic(AmbiguousFunctionName(name.to_string()), name.range());
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/module_import_not_at_top_of_file.rs | crates/ruff_linter/src/rules/pycodestyle/rules/module_import_not_at_top_of_file.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{PySourceType, Stmt};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for imports that are not at the top of the file.
///
/// ## Why is this bad?
/// According to [PEP 8], "imports are always put at the top of the file, just after any
/// module comments and docstrings, and before module globals and constants."
///
/// This rule makes an exception for both `sys.path` modifications (allowing for
/// `sys.path.insert`, `sys.path.append`, etc.) and `os.environ` modifications
/// between imports.
///
/// ## Example
/// ```python
/// "One string"
/// "Two string"
/// a = 1
/// import os
/// from sys import x
/// ```
///
/// Use instead:
/// ```python
/// import os
/// from sys import x
///
/// "One string"
/// "Two string"
/// a = 1
/// ```
///
/// ## Notebook behavior
/// For Jupyter notebooks, this rule checks for imports that are not at the top of a *cell*.
///
/// [PEP 8]: https://peps.python.org/pep-0008/#imports
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.28")]
pub(crate) struct ModuleImportNotAtTopOfFile {
    // Selects between the "top of file" and "top of cell" messages.
    source_type: PySourceType,
}
impl Violation for ModuleImportNotAtTopOfFile {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Notebooks are checked per cell, so the wording differs.
        if self.source_type.is_ipynb() {
            "Module level import not at top of cell".to_string()
        } else {
            "Module level import not at top of file".to_string()
        }
    }
}
/// E402
pub(crate) fn module_import_not_at_top_of_file(checker: &Checker, stmt: &Stmt) {
    let semantic = checker.semantic();
    // Flag only top-level imports that appear after the import boundary (the
    // first non-import code), per `seen_import_boundary`.
    if !semantic.seen_import_boundary() || !semantic.at_top_level() {
        return;
    }
    checker.report_diagnostic(
        ModuleImportNotAtTopOfFile {
            source_type: checker.source_type,
        },
        stmt.range(),
    );
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/doc_line_too_long.rs | crates/ruff_linter/src/rules/pycodestyle/rules/doc_line_too_long.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_trivia::CommentRanges;
use ruff_source_file::Line;
use crate::Violation;
use crate::checkers::ast::LintContext;
use crate::rules::pycodestyle::overlong::Overlong;
use crate::settings::LinterSettings;
/// ## What it does
/// Checks for doc lines that exceed the specified maximum character length.
///
/// ## Why is this bad?
/// For flowing long blocks of text (docstrings or comments), overlong lines
/// can hurt readability. [PEP 8], for example, recommends that such lines be
/// limited to 72 characters, while this rule enforces the limit specified by
/// the [`lint.pycodestyle.max-doc-length`] setting. (If no value is provided, this
/// rule will be ignored, even if it's added to your `--select` list.)
///
/// In the context of this rule, a "doc line" is defined as a line consisting
/// of either a standalone comment or a standalone string, like a docstring.
///
/// In the interest of pragmatism, this rule makes a few exceptions when
/// determining whether a line is overlong. Namely, it:
///
/// 1. Ignores lines that consist of a single "word" (i.e., without any
/// whitespace between its characters).
/// 2. Ignores lines that end with a URL, as long as the URL starts before
/// the line-length threshold.
/// 3. Ignores line that end with a pragma comment (e.g., `# type: ignore`
/// or `# noqa`), as long as the pragma comment starts before the
/// line-length threshold. That is, a line will not be flagged as
/// overlong if a pragma comment _causes_ it to exceed the line length.
/// (This behavior aligns with that of the Ruff formatter.)
///
/// If [`lint.pycodestyle.ignore-overlong-task-comments`] is `true`, this rule will
/// also ignore comments that start with any of the specified [`lint.task-tags`]
/// (e.g., `# TODO:`).
///
/// ## Example
/// ```python
/// def function(x):
/// """Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis auctor purus ut ex fermentum, at maximus est hendrerit."""
/// ```
///
/// Use instead:
/// ```python
/// def function(x):
/// """
/// Lorem ipsum dolor sit amet, consectetur adipiscing elit.
/// Duis auctor purus ut ex fermentum, at maximus est hendrerit.
/// """
/// ```
///
/// ## Error suppression
/// Hint: when suppressing `W505` errors within multi-line strings (like
/// docstrings), the `noqa` directive should come at the end of the string
/// (after the closing triple quote), and will apply to the entire string, like
/// so:
///
/// ```python
/// """Lorem ipsum dolor sit amet.
///
/// Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor.
/// """ # noqa: W505
/// ```
///
/// ## Options
/// - `lint.task-tags`
/// - `lint.pycodestyle.max-doc-length`
/// - `lint.pycodestyle.ignore-overlong-task-comments`
///
/// [PEP 8]: https://peps.python.org/pep-0008/#maximum-line-length
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.219")]
pub(crate) struct DocLineTooLong(usize, usize); // (measured width, configured limit)
impl Violation for DocLineTooLong {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Tuple fields are (measured width, configured limit).
        let DocLineTooLong(width, limit) = self;
        format!("Doc line too long ({width} > {limit})")
    }
}
/// W505
pub(crate) fn doc_line_too_long(
    line: &Line,
    comment_ranges: &CommentRanges,
    settings: &LinterSettings,
    context: &LintContext,
) {
    // The rule only applies when `max-doc-length` is configured.
    let Some(limit) = settings.pycodestyle.max_doc_length else {
        return;
    };
    // Task-tag comments (e.g. `# TODO:`) are exempt only when configured.
    let task_tags: &[_] = if settings.pycodestyle.ignore_overlong_task_comments {
        &settings.task_tags
    } else {
        &[]
    };
    let Some(overlong) =
        Overlong::try_from_line(line, comment_ranges, limit, task_tags, settings.tab_size)
    else {
        return;
    };
    context.report_diagnostic(
        DocLineTooLong(overlong.width(), limit.value() as usize),
        overlong.range(),
    );
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/ambiguous_class_name.rs | crates/ruff_linter/src/rules/pycodestyle/rules/ambiguous_class_name.rs | use ruff_python_ast::Identifier;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::pycodestyle::helpers::is_ambiguous_name;
/// ## What it does
/// Checks for the use of the characters 'l', 'O', or 'I' as class names.
///
/// ## Why is this bad?
/// In some fonts, these characters are indistinguishable from the
/// numerals one and zero. When tempted to use 'l', use 'L' instead.
///
/// ## Example
///
/// ```python
/// class I(object): ...
/// ```
///
/// Use instead:
///
/// ```python
/// class Integer(object): ...
/// ```
// Violation type for E742; stores the offending class name.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.35")]
pub(crate) struct AmbiguousClassName(pub String);
impl Violation for AmbiguousClassName {
    #[derive_message_formats]
    fn message(&self) -> String {
        // The wrapped `String` is the offending class name.
        let AmbiguousClassName(name) = self;
        format!("Ambiguous class name: `{name}`")
    }
}
/// E742
pub(crate) fn ambiguous_class_name(checker: &Checker, name: &Identifier) {
    // Only the single-character names `l`, `O`, and `I` count as ambiguous.
    if !is_ambiguous_name(name) {
        return;
    }
    checker.report_diagnostic(AmbiguousClassName(name.to_string()), name.range());
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/literal_comparisons.rs | crates/ruff_linter/src/rules/pycodestyle/rules/literal_comparisons.rs | use ruff_python_ast::token::{Tokens, parenthesized_range};
use rustc_hash::FxHashMap;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::{self, generate_comparison};
use ruff_python_ast::{self as ast, CmpOp, Expr};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::fix::snippet::SourceCodeSnippet;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// The equality operators (`==`/`!=`) that E711/E712 replace with identity
/// tests.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
enum EqCmpOp {
    // `==`
    Eq,
    // `!=`
    NotEq,
}
impl EqCmpOp {
    /// Narrow a general comparison operator to `==`/`!=`; every other
    /// operator yields `None`.
    fn try_from(value: CmpOp) -> Option<EqCmpOp> {
        match value {
            CmpOp::Eq => Some(Self::Eq),
            CmpOp::NotEq => Some(Self::NotEq),
            _ => None,
        }
    }
}
/// ## What it does
/// Checks for comparisons to `None` which are not using the `is` operator.
///
/// ## Why is this bad?
/// According to [PEP 8], "Comparisons to singletons like None should always be done with
/// `is` or `is not`, never the equality operators."
///
/// ## Example
/// ```python
/// if arg != None:
/// pass
/// if None == arg:
/// pass
/// ```
///
/// Use instead:
/// ```python
/// if arg is not None:
/// pass
/// ```
///
/// ## Fix safety
///
/// This rule's fix is marked as unsafe, as it may alter runtime behavior when
/// used with libraries that override the `==`/`__eq__` or `!=`/`__ne__` operators.
/// In these cases, `is`/`is not` may not be equivalent to `==`/`!=`. For more
/// information, see [this issue].
///
/// [PEP 8]: https://peps.python.org/pep-0008/#programming-recommendations
/// [this issue]: https://github.com/astral-sh/ruff/issues/4560
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.28")]
pub(crate) struct NoneComparison(EqCmpOp); // the offending `==`/`!=` operator
impl AlwaysFixableViolation for NoneComparison {
    #[derive_message_formats]
    fn message(&self) -> String {
        // The wrapped operator records whether `==` or `!=` was used.
        let NoneComparison(op) = self;
        match op {
            EqCmpOp::Eq => "Comparison to `None` should be `cond is None`".to_string(),
            EqCmpOp::NotEq => "Comparison to `None` should be `cond is not None`".to_string(),
        }
    }
    fn fix_title(&self) -> String {
        let NoneComparison(op) = self;
        let title = match op {
            EqCmpOp::Eq => "Replace with `cond is None`",
            EqCmpOp::NotEq => "Replace with `cond is not None`",
        };
        title.to_string()
    }
}
/// ## What it does
/// Checks for equality comparisons to boolean literals.
///
/// ## Why is this bad?
/// [PEP 8] recommends against using the equality operators `==` and `!=` to
/// compare values to `True` or `False`.
///
/// Instead, use `if cond:` or `if not cond:` to check for truth values.
///
/// If you intend to check if a value is the boolean literal `True` or `False`,
/// consider using `is` or `is not` to check for identity instead.
///
/// ## Example
/// ```python
/// if foo == True:
/// ...
///
/// if bar == False:
/// ...
/// ```
///
/// Use instead:
/// ```python
/// if foo:
/// ...
///
/// if not bar:
/// ...
/// ```
///
/// ## Fix safety
///
/// This rule's fix is marked as unsafe, as it may alter runtime behavior when
/// used with libraries that override the `==`/`__eq__` or `!=`/`__ne__` operators.
/// In these cases, `is`/`is not` may not be equivalent to `==`/`!=`. For more
/// information, see [this issue].
///
/// [PEP 8]: https://peps.python.org/pep-0008/#programming-recommendations
/// [this issue]: https://github.com/astral-sh/ruff/issues/4560
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.28")]
pub(crate) struct TrueFalseComparison {
    // The boolean literal being compared against (`True` or `False`).
    value: bool,
    // The offending operator (`==` or `!=`).
    op: EqCmpOp,
    // Source text of the other operand, captured only when the comparison has
    // exactly one operator; used to suggest a replacement.
    cond: Option<SourceCodeSnippet>,
}
impl AlwaysFixableViolation for TrueFalseComparison {
    #[derive_message_formats]
    fn message(&self) -> String {
        let TrueFalseComparison { value, op, cond } = self;
        // Without the other operand's source text, fall back to a generic
        // message.
        let Some(cond) = cond else {
            return "Avoid equality comparisons to `True` or `False`".to_string();
        };
        // The message uses a truncated rendering of the operand; the fix
        // title below uses the full text.
        let cond = cond.truncated_display();
        match (value, op) {
            (true, EqCmpOp::Eq) => {
                format!("Avoid equality comparisons to `True`; use `{cond}:` for truth checks")
            }
            (true, EqCmpOp::NotEq) => {
                format!(
                    "Avoid inequality comparisons to `True`; use `not {cond}:` for false checks"
                )
            }
            (false, EqCmpOp::Eq) => {
                format!("Avoid equality comparisons to `False`; use `not {cond}:` for false checks")
            }
            (false, EqCmpOp::NotEq) => {
                format!("Avoid inequality comparisons to `False`; use `{cond}:` for truth checks")
            }
        }
    }
    fn fix_title(&self) -> String {
        let TrueFalseComparison { value, op, cond } = self;
        // `full_display` may be unavailable (e.g. for truncated snippets);
        // then the replacement can't be spelled out — TODO confirm.
        let Some(cond) = cond.as_ref().and_then(|cond| cond.full_display()) else {
            return "Replace comparison".to_string();
        };
        match (value, op) {
            (true, EqCmpOp::Eq) => format!("Replace with `{cond}`"),
            (true, EqCmpOp::NotEq) => format!("Replace with `not {cond}`"),
            (false, EqCmpOp::Eq) => format!("Replace with `not {cond}`"),
            (false, EqCmpOp::NotEq) => format!("Replace with `{cond}`"),
        }
    }
}
/// Determine whether comparing against a boolean literal with `op` reduces to
/// the truthiness (`Some(true)`) or negation (`Some(false)`) of the other
/// operand. Returns `None` when the comparator is not a boolean literal or
/// the operator is not an (in)equality or identity test.
fn is_redundant_boolean_comparison(op: CmpOp, comparator: &Expr) -> Option<bool> {
    let value = comparator.as_boolean_literal_expr()?.value;
    let truthy = match op {
        CmpOp::Is | CmpOp::Eq => value,
        CmpOp::IsNot | CmpOp::NotEq => !value,
        _ => return None,
    };
    Some(truthy)
}
/// Render the replacement for a comparison that is redundant with the
/// truthiness of `comparator`: the comparator's source text, prefixed with
/// `not ` when `kind` is `false`, and parenthesized when `needs_wrap` is set.
fn generate_redundant_comparison(
    compare: &ast::ExprCompare,
    tokens: &Tokens,
    source: &str,
    comparator: &Expr,
    kind: bool,
    needs_wrap: bool,
) -> String {
    // Preserve any parentheses that already wrap the operand in the source.
    let comparator_range = parenthesized_range(comparator.into(), compare.into(), tokens)
        .unwrap_or_else(|| comparator.range());
    let comparator_text = &source[comparator_range];
    let mut result = if kind {
        String::new()
    } else {
        String::from("not ")
    };
    result.push_str(comparator_text);
    if needs_wrap {
        format!("({result})")
    } else {
        result
    }
}
/// E711, E712
pub(crate) fn literal_comparisons(checker: &Checker, compare: &ast::ExprCompare) {
    // Mapping from (bad operator index) to (replacement operator). As we iterate
    // through the list of operators, we apply "dummy" fixes for each error,
    // then replace the entire expression at the end with one "real" fix, to
    // avoid conflicts.
    let mut bad_ops: FxHashMap<usize, CmpOp> = FxHashMap::default();
    let mut diagnostics = vec![];
    // Check `left`.
    let mut comparator = compare.left.as_ref();
    let [op, ..] = &*compare.ops else {
        return;
    };
    let [next, ..] = &*compare.comparators else {
        return;
    };
    if !helpers::is_constant_non_singleton(next) {
        if let Some(op) = EqCmpOp::try_from(*op) {
            // E711: the left operand is the `None` literal.
            if checker.is_rule_enabled(Rule::NoneComparison) && comparator.is_none_literal_expr() {
                match op {
                    EqCmpOp::Eq => {
                        let diagnostic =
                            checker.report_diagnostic(NoneComparison(op), comparator.range());
                        bad_ops.insert(0, CmpOp::Is);
                        diagnostics.push(diagnostic);
                    }
                    EqCmpOp::NotEq => {
                        let diagnostic =
                            checker.report_diagnostic(NoneComparison(op), comparator.range());
                        bad_ops.insert(0, CmpOp::IsNot);
                        diagnostics.push(diagnostic);
                    }
                }
            }
            // E712: the left operand is a boolean literal.
            if checker.is_rule_enabled(Rule::TrueFalseComparison) {
                if let Expr::BooleanLiteral(ast::ExprBooleanLiteral { value, .. }) = comparator {
                    match op {
                        EqCmpOp::Eq => {
                            // Only capture the operand's source text for
                            // single-operator comparisons.
                            let cond = if compare.ops.len() == 1 {
                                Some(SourceCodeSnippet::from_str(checker.locator().slice(next)))
                            } else {
                                None
                            };
                            let diagnostic = checker.report_diagnostic(
                                TrueFalseComparison {
                                    value: *value,
                                    op,
                                    cond,
                                },
                                compare.range(),
                            );
                            bad_ops.insert(0, CmpOp::Is);
                            diagnostics.push(diagnostic);
                        }
                        EqCmpOp::NotEq => {
                            let cond = if compare.ops.len() == 1 {
                                Some(SourceCodeSnippet::from_str(checker.locator().slice(next)))
                            } else {
                                None
                            };
                            let diagnostic = checker.report_diagnostic(
                                TrueFalseComparison {
                                    value: *value,
                                    op,
                                    cond,
                                },
                                compare.range(),
                            );
                            bad_ops.insert(0, CmpOp::IsNot);
                            diagnostics.push(diagnostic);
                        }
                    }
                }
            }
        }
    }
    // Check each comparator in order.
    for (index, (op, next)) in compare.ops.iter().zip(&compare.comparators).enumerate() {
        if helpers::is_constant_non_singleton(comparator) {
            comparator = next;
            continue;
        }
        if let Some(op) = EqCmpOp::try_from(*op) {
            if checker.is_rule_enabled(Rule::NoneComparison) && next.is_none_literal_expr() {
                match op {
                    EqCmpOp::Eq => {
                        let diagnostic =
                            checker.report_diagnostic(NoneComparison(op), next.range());
                        bad_ops.insert(index, CmpOp::Is);
                        diagnostics.push(diagnostic);
                    }
                    EqCmpOp::NotEq => {
                        let diagnostic =
                            checker.report_diagnostic(NoneComparison(op), next.range());
                        bad_ops.insert(index, CmpOp::IsNot);
                        diagnostics.push(diagnostic);
                    }
                }
            }
            if checker.is_rule_enabled(Rule::TrueFalseComparison) {
                if let Expr::BooleanLiteral(ast::ExprBooleanLiteral { value, .. }) = next {
                    match op {
                        EqCmpOp::Eq => {
                            // Skip equality between two identical boolean
                            // literals (e.g. `True == True`).
                            if let Expr::BooleanLiteral(ast::ExprBooleanLiteral {
                                value: comparator_value,
                                ..
                            }) = comparator
                            {
                                if value == comparator_value {
                                    continue;
                                }
                            }
                            let cond = if compare.ops.len() == 1 {
                                Some(SourceCodeSnippet::from_str(
                                    checker.locator().slice(comparator),
                                ))
                            } else {
                                None
                            };
                            let diagnostic = checker.report_diagnostic(
                                TrueFalseComparison {
                                    value: *value,
                                    op,
                                    cond,
                                },
                                compare.range(),
                            );
                            bad_ops.insert(index, CmpOp::Is);
                            diagnostics.push(diagnostic);
                        }
                        EqCmpOp::NotEq => {
                            let cond = if compare.ops.len() == 1 {
                                Some(SourceCodeSnippet::from_str(
                                    checker.locator().slice(comparator),
                                ))
                            } else {
                                None
                            };
                            let diagnostic = checker.report_diagnostic(
                                TrueFalseComparison {
                                    value: *value,
                                    op,
                                    cond,
                                },
                                compare.range(),
                            );
                            bad_ops.insert(index, CmpOp::IsNot);
                            diagnostics.push(diagnostic);
                        }
                    }
                }
            }
        }
        comparator = next;
    }
    // TODO(charlie): Respect `noqa` directives. If one of the operators has a
    // `noqa`, but another doesn't, both will be removed here.
    if !bad_ops.is_empty() {
        // Substitute the recorded replacement operators into the chain.
        let ops = compare
            .ops
            .iter()
            .enumerate()
            .map(|(idx, op)| bad_ops.get(&idx).unwrap_or(op))
            .copied()
            .collect::<Vec<_>>();
        let tokens = checker.tokens();
        let source = checker.source();
        let content = match (&*compare.ops, &*compare.comparators) {
            ([op], [comparator]) => {
                // A single comparison against a boolean literal can collapse
                // to (the negation of) the other operand.
                if let Some(kind) = is_redundant_boolean_comparison(*op, &compare.left) {
                    let needs_wrap = compare.left.range().start() != compare.range().start();
                    generate_redundant_comparison(
                        compare, tokens, source, comparator, kind, needs_wrap,
                    )
                } else if let Some(kind) = is_redundant_boolean_comparison(*op, comparator) {
                    let needs_wrap = comparator.range().end() != compare.range().end();
                    generate_redundant_comparison(
                        compare,
                        tokens,
                        source,
                        &compare.left,
                        kind,
                        needs_wrap,
                    )
                } else {
                    generate_comparison(
                        &compare.left,
                        &ops,
                        &compare.comparators,
                        compare.into(),
                        tokens,
                        source,
                    )
                }
            }
            _ => generate_comparison(
                &compare.left,
                &ops,
                &compare.comparators,
                compare.into(),
                tokens,
                source,
            ),
        };
        // Attach the same whole-expression replacement to every diagnostic.
        for diagnostic in &mut diagnostics {
            diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
                content.clone(),
                compare.range(),
            )));
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/trailing_whitespace.rs | crates/ruff_linter/src/rules/pycodestyle/rules/trailing_whitespace.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_index::Indexer;
use ruff_source_file::Line;
use ruff_text_size::{TextLen, TextRange, TextSize};
use crate::Locator;
use crate::checkers::ast::LintContext;
use crate::registry::Rule;
use crate::{AlwaysFixableViolation, Applicability, Edit, Fix};
/// ## What it does
/// Checks for superfluous trailing whitespace.
///
/// ## Why is this bad?
/// According to [PEP 8], "avoid trailing whitespace anywhere. Because it’s usually
/// invisible, it can be confusing"
///
/// ## Example
/// ```python
/// spam(1) \n#
/// ```
///
/// Use instead:
/// ```python
/// spam(1)\n#
/// ```
///
/// ## Fix safety
///
/// This fix is marked unsafe if the whitespace is inside a multiline string,
/// as removing it changes the string's content.
///
/// [PEP 8]: https://peps.python.org/pep-0008/#other-recommendations
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.253")]
pub(crate) struct TrailingWhitespace;
impl AlwaysFixableViolation for TrailingWhitespace {
    // Reported as W291 (trailing whitespace after line content).
    // NOTE: `#[derive_message_formats]` inspects the string literals in the
    // body, so the message must stay a plain literal expression.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Trailing whitespace".to_string()
    }
    fn fix_title(&self) -> String {
        "Remove trailing whitespace".to_string()
    }
}
/// ## What it does
/// Checks for superfluous whitespace in blank lines.
///
/// ## Why is this bad?
/// According to [PEP 8], "avoid trailing whitespace anywhere. Because it’s usually
/// invisible, it can be confusing"
///
/// ## Example
/// ```python
/// class Foo(object):\n \n bang = 12
/// ```
///
/// Use instead:
/// ```python
/// class Foo(object):\n\n bang = 12
/// ```
///
/// ## Fix safety
///
/// This fix is marked unsafe if the whitespace is inside a multiline string,
/// as removing it changes the string's content.
///
/// [PEP 8]: https://peps.python.org/pep-0008/#other-recommendations
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.253")]
pub(crate) struct BlankLineWithWhitespace;
impl AlwaysFixableViolation for BlankLineWithWhitespace {
    // Reported as W293 (a line consisting only of whitespace).
    // NOTE: `#[derive_message_formats]` inspects the string literals in the
    // body, so the message must stay a plain literal expression.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Blank line contains whitespace".to_string()
    }
    fn fix_title(&self) -> String {
        "Remove whitespace from blank line".to_string()
    }
}
/// W291, W293
///
/// Reports trailing whitespace on a single line: W293 when the whitespace
/// spans the entire line (a blank line), W291 otherwise. Both carry a
/// deletion fix, downgraded to unsafe when the whitespace lies inside a
/// multiline string (where removing it would change the string's value).
pub(crate) fn trailing_whitespace(
    line: &Line,
    locator: &Locator,
    indexer: &Indexer,
    context: &LintContext,
) {
    // Total width of the run of whitespace characters at the end of the line.
    let whitespace_len: TextSize = line
        .chars()
        .rev()
        .take_while(|c| c.is_whitespace())
        .map(TextLen::text_len)
        .sum();
    if whitespace_len > TextSize::from(0) {
        let range = TextRange::new(line.end() - whitespace_len, line.end());
        // Removing trailing whitespace is not safe inside multiline strings.
        let applicability = if indexer.multiline_ranges().contains_range(range) {
            Applicability::Unsafe
        } else {
            Applicability::Safe
        };
        // Whitespace covering the whole line means the line is blank (W293);
        // otherwise it trails actual content (W291).
        if range == line.range() {
            if context.is_rule_enabled(Rule::BlankLineWithWhitespace) {
                let mut diagnostic = context.report_diagnostic(BlankLineWithWhitespace, range);
                // Remove any preceding continuations, to avoid introducing a potential
                // syntax error.
                diagnostic.set_fix(Fix::applicable_edit(
                    Edit::range_deletion(TextRange::new(
                        indexer
                            .preceded_by_continuations(line.start(), locator.contents())
                            .unwrap_or(range.start()),
                        range.end(),
                    )),
                    applicability,
                ));
            }
        } else if context.is_rule_enabled(Rule::TrailingWhitespace) {
            let mut diagnostic = context.report_diagnostic(TrailingWhitespace, range);
            diagnostic.set_fix(Fix::applicable_edit(
                Edit::range_deletion(range),
                applicability,
            ));
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/invalid_escape_sequence.rs | crates/ruff_linter/src/rules/pycodestyle/rules/invalid_escape_sequence.rs | use memchr::memchr_iter;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{
AnyStringFlags, InterpolatedStringElement, InterpolatedStringElements, StringLike,
StringLikePart,
};
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::Locator;
use crate::checkers::ast::Checker;
use crate::fix::edits::pad_start;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for invalid escape sequences.
///
/// ## Why is this bad?
/// Invalid escape sequences are deprecated in Python 3.6.
///
/// ## Example
/// ```python
/// regex = "\.png$"
/// ```
///
/// Use instead:
/// ```python
/// regex = r"\.png$"
/// ```
///
/// Or, if the string already contains a valid escape sequence:
/// ```python
/// value = "new line\nand invalid escape \_ here"
/// ```
///
/// Use instead:
/// ```python
/// value = "new line\nand invalid escape \\_ here"
/// ```
///
/// ## References
/// - [Python documentation: String and Bytes literals](https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.85")]
pub(crate) struct InvalidEscapeSequence {
    // The character that follows the offending backslash.
    ch: char,
    // Which fix strategy was chosen for this occurrence (see `check`).
    fix_title: FixTitle,
}
impl AlwaysFixableViolation for InvalidEscapeSequence {
    // Reported as W605.
    #[derive_message_formats]
    fn message(&self) -> String {
        let InvalidEscapeSequence { ch, .. } = self;
        format!("Invalid escape sequence: `\\{ch}`")
    }
    fn fix_title(&self) -> String {
        match self.fix_title {
            FixTitle::AddBackslash => "Add backslash to escape sequence".to_string(),
            FixTitle::UseRawStringLiteral => "Use a raw string literal".to_string(),
        }
    }
}
/// W605
///
/// Scans every part of a (possibly implicitly concatenated) string-like
/// expression for invalid escape sequences, then reports and fixes any found
/// via `check`.
pub(crate) fn invalid_escape_sequence(checker: &Checker, string_like: StringLike) {
    let locator = checker.locator();
    for part in string_like.parts() {
        // Raw strings have no escape sequences, so nothing can be invalid.
        if part.flags().is_raw_string() {
            continue;
        }
        let state = match part {
            // Plain string/bytes literals: scan the whole literal's range.
            StringLikePart::String(_) | StringLikePart::Bytes(_) => {
                analyze_escape_chars(locator, part.range(), part.flags())
            }
            // f-/t-strings: scan the literal elements (including format-spec
            // literals), skipping the interpolated expressions themselves.
            StringLikePart::FString(f_string) => analyze_escape_chars_in_interpolated_string(
                AnyStringFlags::from(f_string.flags),
                &f_string.elements,
                locator,
            ),
            StringLikePart::TString(t_string) => analyze_escape_chars_in_interpolated_string(
                AnyStringFlags::from(t_string.flags),
                &t_string.elements,
                locator,
            ),
        };
        check(checker, locator, part.start(), part.flags(), state);
    }
}
/// Accumulated results of scanning one or more string regions for escapes.
#[derive(Default)]
struct EscapeCharsState {
    contains_valid_escape_sequence: bool,
    invalid_escape_chars: Vec<InvalidEscapeChar>,
}
impl EscapeCharsState {
    /// Fold the findings from `other` into `self`.
    fn update(&mut self, other: Self) {
        let Self {
            contains_valid_escape_sequence,
            invalid_escape_chars,
        } = other;
        if contains_valid_escape_sequence {
            self.contains_valid_escape_sequence = true;
        }
        self.invalid_escape_chars.extend(invalid_escape_chars);
    }
}
/// Traverses string, collects invalid escape characters, and flags if a valid
/// escape character is found.
///
/// `source_range` must lie inside a non-raw string/bytes literal (or an
/// f/t-string literal element). Validity is judged against the escape
/// sequences defined in the Python lexical-analysis reference.
fn analyze_escape_chars(
    locator: &Locator,
    // Range in the source code to perform the analysis on.
    source_range: TextRange,
    flags: AnyStringFlags,
) -> EscapeCharsState {
    let source = locator.slice(source_range);
    let mut contains_valid_escape_sequence = false;
    let mut invalid_escape_chars = Vec::new();
    // Byte index of the most recent unconsumed backslash; used to skip the
    // second backslash of an escaped backslash (`\\`).
    let mut prev = None;
    let bytes = source.as_bytes();
    for i in memchr_iter(b'\\', bytes) {
        // If the previous character was also a backslash, skip.
        if prev.is_some_and(|prev| prev == i - 1) {
            prev = None;
            continue;
        }
        prev = Some(i);
        let next_char = match source[i + 1..].chars().next() {
            Some(next_char) => next_char,
            None if flags.is_interpolated_string() => {
                // If we're at the end of a f-string middle token, the next character
                // is actually emitted as a different token. For example,
                //
                // ```python
                // f"\{1}"
                // ```
                //
                // is lexed as `FStringMiddle('\\')` and `LBrace` (ignoring irrelevant
                // tokens), so we need to check the next character in the source code.
                //
                // Now, if we're at the end of the f-string itself, the lexer wouldn't
                // have emitted the `FStringMiddle` token in the first place. For example,
                //
                // ```python
                // f"foo\"
                // ```
                //
                // Here, there won't be any `FStringMiddle` because it's an unterminated
                // f-string. This means that if there's a `FStringMiddle` token and we
                // encounter a `\` character, then the next character is always going to
                // be part of the f-string.
                if let Some(next_char) = locator.after(source_range.end()).chars().next() {
                    next_char
                } else {
                    continue;
                }
            }
            // If we're at the end of the file, skip.
            None => continue,
        };
        // If we're at the end of line, skip: a backslash before a line break
        // is a valid line continuation.
        if matches!(next_char, '\n' | '\r') {
            contains_valid_escape_sequence = true;
            continue;
        }
        // If the next character is a valid escape sequence, skip.
        // See: https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals.
        if matches!(
            next_char,
            '\n'
            | '\\'
            | '\''
            | '"'
            | 'a'
            | 'b'
            | 'f'
            | 'n'
            | 'r'
            | 't'
            | 'v'
            | '0'
            | '1'
            | '2'
            | '3'
            | '4'
            | '5'
            | '6'
            | '7'
            | 'x'
            // Escape sequences only recognized in string literals
            | 'N'
            | 'u'
            | 'U'
        ) {
            contains_valid_escape_sequence = true;
            continue;
        }
        // Record the invalid sequence: the range covers the backslash plus
        // the character that follows it.
        let location = source_range.start() + TextSize::try_from(i).unwrap();
        let range = TextRange::at(location, next_char.text_len() + TextSize::from(1));
        invalid_escape_chars.push(InvalidEscapeChar {
            ch: next_char,
            range,
        });
    }
    EscapeCharsState {
        contains_valid_escape_sequence,
        invalid_escape_chars,
    }
}
/// Scans the literal elements of an f-/t-string for escape sequences.
///
/// The fix strategy (raw-string conversion vs. backslash doubling) depends on
/// whether *any* valid escape exists anywhere in the string, so every literal
/// element — including format-spec literals — is analyzed before any
/// diagnostic is emitted.
fn analyze_escape_chars_in_interpolated_string(
    flags: AnyStringFlags,
    elements: &InterpolatedStringElements,
    locator: &Locator,
) -> EscapeCharsState {
    let mut state = EscapeCharsState::default();
    for element in elements {
        match element {
            InterpolatedStringElement::Literal(literal) => {
                state.update(analyze_escape_chars(locator, literal.range(), flags));
            }
            InterpolatedStringElement::Interpolation(interpolation) => {
                // Only the format spec (if present) contains literal text;
                // the interpolated expression itself is ordinary code.
                if let Some(format_spec) = interpolation.format_spec.as_ref() {
                    for literal in format_spec.elements.literals() {
                        state.update(analyze_escape_chars(locator, literal.range(), flags));
                    }
                }
            }
        }
    }
    state
}
/// Pushes a diagnostic and fix depending on escape characters seen so far.
///
/// If we have not seen any valid escape characters, we convert to
/// a raw string. If we have seen valid escape characters,
/// we manually add backslashes to each invalid escape character found
/// (a raw string would change the meaning of the valid escapes).
fn check(
    checker: &Checker,
    locator: &Locator,
    // Start position of the expression that contains the source range. This is used to generate
    // the fix when the source range is part of the expression like in f-string which contains
    // other f-string literal elements.
    expr_start: TextSize,
    flags: AnyStringFlags,
    escape_chars_state: EscapeCharsState,
) {
    let EscapeCharsState {
        contains_valid_escape_sequence,
        invalid_escape_chars,
    } = escape_chars_state;
    if contains_valid_escape_sequence {
        // Escape with backslash: insert a second `\` after the first, so
        // `\x` becomes `\\x`.
        for invalid_escape_char in &invalid_escape_chars {
            let mut diagnostic = checker.report_diagnostic(
                InvalidEscapeSequence {
                    ch: invalid_escape_char.ch,
                    fix_title: FixTitle::AddBackslash,
                },
                invalid_escape_char.range(),
            );
            diagnostic.set_fix(Fix::safe_edit(Edit::insertion(
                r"\".to_string(),
                invalid_escape_char.start() + TextSize::from(1),
            )));
        }
    } else {
        // Turn into raw string.
        for invalid_escape_char in &invalid_escape_chars {
            let mut diagnostic = checker.report_diagnostic(
                InvalidEscapeSequence {
                    ch: invalid_escape_char.ch,
                    fix_title: FixTitle::UseRawStringLiteral,
                },
                invalid_escape_char.range(),
            );
            if flags.is_u_string() {
                // Replace the Unicode prefix with `r` (`ur"..."` is invalid,
                // so the `u` must give way).
                diagnostic.set_fix(Fix::safe_edit(Edit::replacement(
                    "r".to_string(),
                    expr_start,
                    expr_start + TextSize::from(1),
                )));
            } else {
                // Insert the `r` prefix.
                diagnostic.set_fix(
                    // If necessary, add a space between any leading keyword (`return`, `yield`,
                    // `assert`, etc.) and the string. For example, `return"foo"` is valid, but
                    // `returnr"foo"` is not.
                    Fix::safe_edit(Edit::insertion(
                        pad_start("r".to_string(), expr_start, locator),
                        expr_start,
                    )),
                );
            }
        }
    }
}
/// Which fix strategy `check` selected for an invalid escape sequence.
#[derive(Debug, PartialEq, Eq)]
enum FixTitle {
    // Double the backslash — used when the string also contains valid
    // escapes that a raw-string conversion would break.
    AddBackslash,
    // Add an `r` prefix — used when no valid escape sequences are present.
    UseRawStringLiteral,
}
/// A single invalid escape occurrence: the backslash plus the character
/// that follows it.
#[derive(Debug)]
struct InvalidEscapeChar {
    // The character following the backslash.
    ch: char,
    // Source range covering the backslash and `ch`.
    range: TextRange,
}
impl Ranged for InvalidEscapeChar {
    fn range(&self) -> TextRange {
        self.range
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/ambiguous_variable_name.rs | crates/ruff_linter/src/rules/pycodestyle/rules/ambiguous_variable_name.rs | use ruff_text_size::TextRange;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::pycodestyle::helpers::is_ambiguous_name;
/// ## What it does
/// Checks for the use of the characters 'l', 'O', or 'I' as variable names.
///
/// Note: This rule is automatically disabled for all stub files
/// (files with `.pyi` extensions). The rule has little relevance for authors
/// of stubs: a well-written stub should aim to faithfully represent the
/// interface of the equivalent .py file as it exists at runtime, including any
/// ambiguously named variables in the runtime module.
///
/// ## Why is this bad?
/// In some fonts, these characters are indistinguishable from the
/// numerals one and zero. When tempted to use 'l', use 'L' instead.
///
/// ## Example
/// ```python
/// l = 0
/// O = 123
/// I = 42
/// ```
///
/// Use instead:
/// ```python
/// L = 0
/// o = 123
/// i = 42
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.34")]
pub(crate) struct AmbiguousVariableName(pub String);
impl Violation for AmbiguousVariableName {
    // Reported as E741; the tuple field carries the offending name so it can
    // be interpolated into the message. No fix is offered (renaming a
    // variable is not a safe mechanical edit).
    #[derive_message_formats]
    fn message(&self) -> String {
        let AmbiguousVariableName(name) = self;
        format!("Ambiguous variable name: `{name}`")
    }
}
/// E741
///
/// Reports a diagnostic when `name` is one of the ambiguous identifiers
/// (`l`, `O`, `I`). Stub files are exempt: stubs intentionally mirror the
/// runtime interface, ambiguous names included.
pub(crate) fn ambiguous_variable_name(checker: &Checker, name: &str, range: TextRange) {
    if !checker.source_type.is_stub() && is_ambiguous_name(name) {
        checker.report_diagnostic(AmbiguousVariableName(name.to_string()), range);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/not_tests.rs | crates/ruff_linter/src/rules/pycodestyle/rules/not_tests.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::generate_comparison;
use ruff_python_ast::{self as ast, CmpOp, Expr};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix::edits::pad;
use crate::registry::Rule;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for membership tests using `not {element} in {collection}`.
///
/// ## Why is this bad?
/// Testing membership with `{element} not in {collection}` is more readable.
///
/// ## Example
/// ```python
/// Z = not X in Y
/// if not X.B in Y:
/// pass
/// ```
///
/// Use instead:
/// ```python
/// Z = X not in Y
/// if X.B not in Y:
/// pass
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.28")]
pub(crate) struct NotInTest;
impl AlwaysFixableViolation for NotInTest {
    // Reported as E713; the fix rewrites `not x in y` as `x not in y`.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Test for membership should be `not in`".to_string()
    }
    fn fix_title(&self) -> String {
        "Convert to `not in`".to_string()
    }
}
/// ## What it does
/// Checks for identity comparisons using `not {foo} is {bar}`.
///
/// ## Why is this bad?
/// According to [PEP8], testing for an object's identity with `is not` is more
/// readable.
///
/// ## Example
/// ```python
/// if not X is Y:
/// pass
/// Z = not X.B is Y
/// ```
///
/// Use instead:
/// ```python
/// if X is not Y:
/// pass
/// Z = X.B is not Y
/// ```
///
/// [PEP8]: https://peps.python.org/pep-0008/#programming-recommendations
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.28")]
pub(crate) struct NotIsTest;
impl AlwaysFixableViolation for NotIsTest {
    // Reported as E714; the fix rewrites `not x is y` as `x is not y`.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Test for object identity should be `is not`".to_string()
    }
    fn fix_title(&self) -> String {
        "Convert to `is not`".to_string()
    }
}
/// E713, E714
pub(crate) fn not_tests(checker: &Checker, unary_op: &ast::ExprUnaryOp) {
if !unary_op.op.is_not() {
return;
}
let Expr::Compare(ast::ExprCompare {
left,
ops,
comparators,
range: _,
node_index: _,
}) = unary_op.operand.as_ref()
else {
return;
};
match &**ops {
[CmpOp::In] => {
if checker.is_rule_enabled(Rule::NotInTest) {
let mut diagnostic = checker.report_diagnostic(NotInTest, unary_op.operand.range());
diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
pad(
generate_comparison(
left,
&[CmpOp::NotIn],
comparators,
unary_op.into(),
checker.tokens(),
checker.source(),
),
unary_op.range(),
checker.locator(),
),
unary_op.range(),
)));
}
}
[CmpOp::Is] => {
if checker.is_rule_enabled(Rule::NotIsTest) {
let mut diagnostic = checker.report_diagnostic(NotIsTest, unary_op.operand.range());
diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
pad(
generate_comparison(
left,
&[CmpOp::IsNot],
comparators,
unary_op.into(),
checker.tokens(),
checker.source(),
),
unary_op.range(),
checker.locator(),
),
unary_op.range(),
)));
}
}
_ => {}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/whitespace_after_decorator.rs | crates/ruff_linter/src/rules/pycodestyle/rules/whitespace_after_decorator.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::Decorator;
use ruff_python_trivia::is_python_whitespace;
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for trailing whitespace after a decorator's opening `@`.
///
/// ## Why is this bad?
/// Including whitespace after the `@` symbol is not compliant with
/// [PEP 8].
///
/// ## Example
///
/// ```python
/// @ decorator
/// def func():
/// pass
/// ```
///
/// Use instead:
/// ```python
/// @decorator
/// def func():
/// pass
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#maximum-line-length
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.5.1")]
pub(crate) struct WhitespaceAfterDecorator;
impl AlwaysFixableViolation for WhitespaceAfterDecorator {
    // Reported as E204; the fix deletes the whitespace between `@` and the
    // decorator expression.
    #[derive_message_formats]
    fn message(&self) -> String {
        "Whitespace after decorator".to_string()
    }
    fn fix_title(&self) -> String {
        "Remove whitespace".to_string()
    }
}
/// E204
///
/// Reports whitespace immediately following a decorator's `@`, with a fix
/// that deletes the whole whitespace run (including escaped line breaks).
pub(crate) fn whitespace_after_decorator(checker: &Checker, decorator_list: &[Decorator]) {
    for decorator in decorator_list {
        let text = checker.locator().slice(decorator);
        // Only decorators whose `@` is directly followed by whitespace are of
        // interest.
        let Some(after_at) = text.strip_prefix('@') else {
            continue;
        };
        if !after_at.chars().next().is_some_and(is_python_whitespace) {
            continue;
        }
        // Length of the run of whitespace (and continuation characters) that
        // follows the `@`.
        let ws_len = after_at
            .chars()
            .position(|c| !(is_python_whitespace(c) || matches!(c, '\n' | '\r' | '\\')))
            .unwrap_or(after_at.len());
        let start = decorator.start() + TextSize::from(1);
        let range = TextRange::new(start, start + TextSize::try_from(ws_len).unwrap());
        checker
            .report_diagnostic(WhitespaceAfterDecorator, range)
            .set_fix(Fix::safe_edit(Edit::range_deletion(range)));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/lambda_assignment.rs | crates/ruff_linter/src/rules/pycodestyle/rules/lambda_assignment.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::parenthesized_range;
use ruff_python_ast::{
self as ast, Expr, ExprEllipsisLiteral, ExprLambda, Identifier, Parameter,
ParameterWithDefault, Parameters, Stmt,
};
use ruff_python_semantic::SemanticModel;
use ruff_python_trivia::{has_leading_content, has_trailing_content, leading_indentation};
use ruff_source_file::UniversalNewlines;
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::{Applicability, Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for lambda expressions which are assigned to a variable.
///
/// ## Why is this bad?
/// Per PEP 8, you should "Always use a def statement instead of an assignment
/// statement that binds a lambda expression directly to an identifier."
///
/// Using a `def` statement leads to better tracebacks, and the assignment
/// itself negates the primary benefit of using a `lambda` expression (i.e.,
/// that it can be embedded inside another expression).
///
/// ## Example
/// ```python
/// f = lambda x: 2 * x
/// ```
///
/// Use instead:
/// ```python
/// def f(x):
/// return 2 * x
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#programming-recommendations
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.28")]
pub(crate) struct LambdaAssignment {
    // The name the lambda is assigned to; surfaced in the fix title.
    name: String,
}
impl Violation for LambdaAssignment {
    // Reported as E731. A fix is only offered when the assignment statement
    // stands alone on its line(s) — see `lambda_assignment`.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        "Do not assign a `lambda` expression, use a `def`".to_string()
    }
    fn fix_title(&self) -> Option<String> {
        let LambdaAssignment { name } = self;
        Some(format!("Rewrite `{name}` as a `def`"))
    }
}
/// E731
///
/// Flags assignments of a `lambda` to a simple name and, when safe, attaches
/// a fix rewriting the assignment as an equivalent `def` statement (carrying
/// over any `Callable` annotation via `function`).
pub(crate) fn lambda_assignment(
    checker: &Checker,
    target: &Expr,
    value: &Expr,
    annotation: Option<&Expr>,
    stmt: &Stmt,
) {
    // Only plain-name targets qualify (e.g. `f = lambda: ...`, not
    // `obj.f = lambda: ...`).
    let Expr::Name(ast::ExprName { id, .. }) = target else {
        return;
    };
    let Expr::Lambda(lambda) = value else {
        return;
    };
    // If the assignment is a class attribute (with an annotation), ignore it.
    //
    // This is most common for, e.g., dataclasses and Pydantic models. Those libraries will
    // treat the lambda as an assignable field, and the use of a lambda is almost certainly
    // intentional.
    if annotation.is_some() && checker.semantic().current_scope().kind.is_class() {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(
        LambdaAssignment {
            name: id.to_string(),
        },
        stmt.range(),
    );
    // Only offer a fix when the statement occupies its line(s) by itself:
    // splicing a (potentially multi-line) `def` next to other code could
    // produce invalid syntax.
    if !has_leading_content(stmt.start(), checker.source())
        && !has_trailing_content(stmt.end(), checker.source())
    {
        // Re-indent every generated line to match the original statement.
        let first_line = checker.locator().line_str(stmt.start());
        let indentation = leading_indentation(first_line);
        let mut indented = String::new();
        for (idx, line) in function(id, lambda, annotation, stmt, checker)
            .universal_newlines()
            .enumerate()
        {
            if idx == 0 {
                indented.push_str(&line);
            } else {
                indented.push_str(checker.stylist().line_ending().as_str());
                indented.push_str(indentation);
                indented.push_str(&line);
            }
        }
        // If the lambda is shadowing a variable in the current scope,
        // rewriting it as a function declaration may break type-checking.
        // See: https://github.com/astral-sh/ruff/issues/5421
        let applicability = if checker
            .semantic()
            .current_scope()
            .get_all(id)
            .any(|binding_id| checker.semantic().binding(binding_id).kind.is_annotation())
        {
            Applicability::DisplayOnly
        } else {
            Applicability::Unsafe
        };
        diagnostic.set_fix(Fix::applicable_edit(
            Edit::range_replacement(indented, stmt.range()),
            applicability,
        ));
    }
}
/// Extract the argument types and return type from a `Callable` annotation.
/// The `Callable` import can be from either `collections.abc` or `typing`.
/// If an ellipsis is used for the argument types, an empty list is returned.
/// The returned values are cloned, so they can be used as-is.
///
/// Returns `None` when the annotation is not a recognized two-argument
/// `Callable[...]` subscript.
fn extract_types(annotation: &Expr, semantic: &SemanticModel) -> Option<(Vec<Expr>, Expr)> {
    let Expr::Subscript(ast::ExprSubscript { value, slice, .. }) = &annotation else {
        return None;
    };
    let Expr::Tuple(ast::ExprTuple { elts, .. }) = slice.as_ref() else {
        return None;
    };
    // `Callable` takes exactly two subscript arguments: parameters and return
    // type.
    let [param_types, return_type] = elts.as_slice() else {
        return None;
    };
    if !semantic
        .resolve_qualified_name(value)
        .is_some_and(|qualified_name| {
            matches!(
                qualified_name.segments(),
                ["collections", "abc", "Callable"]
            ) || semantic.match_typing_qualified_name(&qualified_name, "Callable")
        })
    {
        return None;
    }
    // The first argument to `Callable` must be a list of types, parameter
    // specification (e.g., a `ParamSpec`), or ellipsis.
    // For parameter specifications, we cannot assign per-parameter annotations,
    // but we can still preserve the return type annotation.
    let params = match param_types {
        Expr::List(ast::ExprList { elts, .. }) => elts.clone(),
        Expr::EllipsisLiteral(_) => vec![],
        // Treat any other form (e.g., `ParamSpec`, `Concatenate`, etc.) as a
        // parameter specification: do not annotate individual parameters, but
        // keep the return type.
        _ => vec![],
    };
    // The second argument to `Callable` must be a type.
    let return_type = return_type.clone();
    Some((params, return_type))
}
/// Generate a function definition from a `lambda` expression.
///
/// The generated `def` reuses the lambda's parameters and body text; when a
/// usable `Callable[..., R]` annotation is supplied, per-parameter
/// annotations and the return annotation are carried over as well.
fn function(
    name: &str,
    lambda: &ExprLambda,
    annotation: Option<&Expr>,
    stmt: &Stmt,
    checker: &Checker,
) -> String {
    // Use a dummy body. It gets replaced at the end with the actual body.
    // This allows preserving the source formatting for the body.
    let body = Stmt::Return(ast::StmtReturn {
        value: Some(Box::new(Expr::EllipsisLiteral(
            ExprEllipsisLiteral::default(),
        ))),
        range: TextRange::default(),
        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
    });
    let parameters = lambda.parameters.as_deref().cloned().unwrap_or_default();
    if let Some(annotation) = annotation {
        if let Some((arg_types, return_type)) = extract_types(annotation, checker.semantic()) {
            // A `lambda` expression can only have positional-only and positional-or-keyword
            // arguments. The order is always positional-only first, then positional-or-keyword.
            let new_posonlyargs = parameters
                .posonlyargs
                .iter()
                .enumerate()
                .map(|(idx, parameter)| ParameterWithDefault {
                    parameter: Parameter {
                        annotation: arg_types
                            .get(idx)
                            .map(|arg_type| Box::new(arg_type.clone())),
                        ..parameter.parameter.clone()
                    },
                    ..parameter.clone()
                })
                .collect::<Vec<_>>();
            // Positional-or-keyword parameters consume the annotation types
            // that follow the positional-only ones.
            let new_args = parameters
                .args
                .iter()
                .enumerate()
                .map(|(idx, parameter)| ParameterWithDefault {
                    parameter: Parameter {
                        annotation: arg_types
                            .get(idx + new_posonlyargs.len())
                            .map(|arg_type| Box::new(arg_type.clone())),
                        ..parameter.parameter.clone()
                    },
                    ..parameter.clone()
                })
                .collect::<Vec<_>>();
            let func = Stmt::FunctionDef(ast::StmtFunctionDef {
                is_async: false,
                name: Identifier::new(name.to_string(), TextRange::default()),
                parameters: Box::new(Parameters {
                    posonlyargs: new_posonlyargs,
                    args: new_args,
                    ..parameters
                }),
                body: vec![body],
                decorator_list: vec![],
                returns: Some(Box::new(return_type)),
                type_params: None,
                range: TextRange::default(),
                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            });
            let generated = checker.generator().stmt(&func);
            return replace_trailing_ellipsis_with_original_expr(generated, lambda, stmt, checker);
        }
    }
    // No (usable) annotation: generate an unannotated `def`.
    let function = Stmt::FunctionDef(ast::StmtFunctionDef {
        is_async: false,
        name: Identifier::new(name.to_string(), TextRange::default()),
        parameters: Box::new(parameters),
        body: vec![body],
        decorator_list: vec![],
        returns: None,
        type_params: None,
        range: TextRange::default(),
        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
    });
    let generated = checker.generator().stmt(&function);
    replace_trailing_ellipsis_with_original_expr(generated, lambda, stmt, checker)
}
/// Replace the `...` placeholder body in `generated` with the lambda's
/// original body text, as written in the source (preserving its formatting).
fn replace_trailing_ellipsis_with_original_expr(
    mut generated: String,
    lambda: &ExprLambda,
    stmt: &Stmt,
    checker: &Checker,
) -> String {
    // Use the parenthesized range of the body when available, falling back to
    // the bare body range.
    let original_expr_range =
        parenthesized_range((&lambda.body).into(), lambda.into(), checker.tokens())
            .unwrap_or(lambda.body.range());
    // This prevents the autofix of introducing a syntax error if the lambda's body is an
    // expression spanned across multiple lines. To avoid the syntax error we preserve
    // the parenthesis around the body.
    let original_expr_in_source =
        if parenthesized_range(lambda.into(), stmt.into(), checker.tokens()).is_some() {
            format!("({})", checker.locator().slice(original_expr_range))
        } else {
            checker.locator().slice(original_expr_range).to_string()
        };
    // The dummy body is the last `...` in the generated text.
    let placeholder_ellipsis_start = generated.rfind("...").unwrap();
    let placeholder_ellipsis_end = placeholder_ellipsis_start + "...".len();
    generated.replace_range(
        placeholder_ellipsis_start..placeholder_ellipsis_end,
        &original_expr_in_source,
    );
    generated
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/too_many_newlines_at_end_of_file.rs | crates/ruff_linter/src/rules/pycodestyle/rules/too_many_newlines_at_end_of_file.rs | use std::iter::Peekable;
use itertools::Itertools;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_notebook::CellOffsets;
use ruff_python_ast::token::{Token, TokenKind, Tokens};
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::{AlwaysFixableViolation, Edit, Fix, checkers::ast::LintContext};
/// ## What it does
/// Checks for files with multiple trailing blank lines.
///
/// In the case of notebooks, this check is applied to
/// each cell separately.
///
/// ## Why is this bad?
/// Trailing blank lines in a file are superfluous.
///
/// However, the last line of the file should end with a newline.
///
/// ## Example
/// ```python
/// spam(1)\n\n\n
/// ```
///
/// Use instead:
/// ```python
/// spam(1)\n
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.3.3")]
pub(crate) struct TooManyNewlinesAtEndOfFile {
    // Count of consecutive trailing newline tokens; always >= 2 when this
    // violation is reported (see `newline_diagnostic`).
    num_trailing_newlines: u32,
    // Whether the diagnostic applies to a notebook cell rather than a file.
    in_notebook: bool,
}
impl AlwaysFixableViolation for TooManyNewlinesAtEndOfFile {
    // Reported as W391.
    #[derive_message_formats]
    fn message(&self) -> String {
        let domain = if self.in_notebook { "cell" } else { "file" };
        // We expect a single trailing newline; so two trailing newlines is one too many, three
        // trailing newlines is two too many, etc.
        if self.num_trailing_newlines > 2 {
            format!("Too many newlines at end of {domain}")
        } else {
            format!("Extra newline at end of {domain}")
        }
    }
    fn fix_title(&self) -> String {
        // Pluralize to match the message: > 2 newlines means more than one is
        // removed.
        let title = if self.num_trailing_newlines > 2 {
            "Remove trailing newlines"
        } else {
            "Remove trailing newline"
        };
        title.to_string()
    }
}
/// W391
///
/// Entry point: checks for superfluous trailing newlines, per cell for
/// notebooks and once at the end of the file otherwise.
pub(crate) fn too_many_newlines_at_end_of_file(
    context: &LintContext,
    tokens: &Tokens,
    cell_offsets: Option<&CellOffsets>,
) {
    // Walk the token stream backwards, starting from the end of the file.
    let mut reversed = tokens.iter().rev().peekable();
    match cell_offsets {
        // Notebooks: check each cell's trailing newlines separately.
        Some(offsets) => notebook_newline_diagnostics(reversed, offsets, context),
        // Plain Python source: a single check at the end of the file.
        None => newline_diagnostic(&mut reversed, false, context),
    }
}
/// Collects trailing newline diagnostics for each cell
///
/// Both the token iterator and the cell offsets are traversed in reverse:
/// for each cell boundary (last cell first), tokens are skipped until the
/// boundary is reached, then that cell's trailing newlines are checked.
fn notebook_newline_diagnostics<'a>(
    mut tokens_iter: Peekable<impl Iterator<Item = &'a Token>>,
    cell_offsets: &CellOffsets,
    context: &LintContext,
) {
    let offset_iter = cell_offsets.iter().rev();
    // NB: When interpreting the below, recall that the iterators
    // have been reversed.
    for &offset in offset_iter {
        // Advance to offset (discarding tokens that belong to later cells).
        tokens_iter
            .peeking_take_while(|tok| tok.end() >= offset)
            .for_each(drop);
        newline_diagnostic(&mut tokens_iter, true, context);
    }
}
/// Possible diagnostic, with fix, for too many newlines in cell or source file.
///
/// Consumes trailing `Newline`/`NonLogicalNewline` (and interleaved `Dedent`)
/// tokens from the *reversed* iterator, counts the newlines, and reports a
/// diagnostic when more than one trailing newline is present. The fix deletes
/// everything after the earliest trailing newline, so exactly one newline
/// remains at the end of the file (or cell).
fn newline_diagnostic<'a>(
    tokens_iter: &mut Peekable<impl Iterator<Item = &'a Token>>,
    in_notebook: bool,
    context: &LintContext,
) {
    let mut num_trailing_newlines: u32 = 0;
    // Because the iterator is reversed, `newline_range_end` is fixed by the
    // first newline we see (the last one in the source), while
    // `newline_range_start` keeps moving backwards toward the earliest one.
    let mut newline_range_start: Option<TextSize> = None;
    let mut newline_range_end: Option<TextSize> = None;
    while let Some(next_token) = tokens_iter.peek() {
        match next_token.kind() {
            TokenKind::Newline | TokenKind::NonLogicalNewline => {
                if newline_range_end.is_none() {
                    newline_range_end = Some(next_token.end());
                }
                // The deletion starts *after* the earliest trailing newline,
                // which keeps exactly one newline in place.
                newline_range_start = Some(next_token.end());
                tokens_iter.next();
                num_trailing_newlines += 1;
            }
            TokenKind::Dedent => {
                // Dedent tokens may be interleaved with the trailing
                // newlines; skip them without counting.
                tokens_iter.next();
            }
            _ => {
                break;
            }
        }
    }
    // Zero or one trailing newline is fine; only report on two or more.
    if num_trailing_newlines < 2 {
        return;
    }
    let Some((start, end)) = newline_range_start.zip(newline_range_end) else {
        return;
    };
    let diagnostic_range = TextRange::new(start, end);
    if let Some(mut diagnostic) = context.report_diagnostic_if_enabled(
        TooManyNewlinesAtEndOfFile {
            num_trailing_newlines,
            in_notebook,
        },
        diagnostic_range,
    ) {
        diagnostic.set_fix(Fix::safe_edit(Edit::range_deletion(diagnostic_range)));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/whitespace_around_keywords.rs | crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/whitespace_around_keywords.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::LintContext;
use crate::{AlwaysFixableViolation, Edit, Fix};
use super::{LogicalLine, Whitespace};
/// ## What it does
/// Checks for extraneous whitespace after keywords.
///
/// ## Why is this bad?
/// [PEP 8] recommends avoiding extraneous whitespace; a keyword should be
/// separated from the following token by a single space.
///
/// ## Example
/// ```python
/// True and  False
/// ```
///
/// Use instead:
/// ```python
/// True and False
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#pet-peeves
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct MultipleSpacesAfterKeyword;
impl AlwaysFixableViolation for MultipleSpacesAfterKeyword {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Multiple spaces after keyword".to_string()
    }
    fn fix_title(&self) -> String {
        "Replace with single space".to_string()
    }
}
/// ## What it does
/// Checks for extraneous whitespace before keywords.
///
/// ## Why is this bad?
/// [PEP 8] recommends avoiding extraneous whitespace; a keyword should be
/// separated from the preceding token by a single space.
///
/// ## Example
/// ```python
/// x  and y
/// ```
///
/// Use instead:
/// ```python
/// x and y
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#pet-peeves
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct MultipleSpacesBeforeKeyword;
impl AlwaysFixableViolation for MultipleSpacesBeforeKeyword {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Multiple spaces before keyword".to_string()
    }
    fn fix_title(&self) -> String {
        "Replace with single space".to_string()
    }
}
/// ## What it does
/// Checks for extraneous tabs after keywords.
///
/// ## Why is this bad?
/// [PEP 8] recommends avoiding extraneous whitespace; a keyword should be
/// separated from the following token by a single space, not a tab.
///
/// ## Example
/// ```python
/// True and\tFalse
/// ```
///
/// Use instead:
/// ```python
/// True and False
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#pet-peeves
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct TabAfterKeyword;
impl AlwaysFixableViolation for TabAfterKeyword {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Tab after keyword".to_string()
    }
    fn fix_title(&self) -> String {
        "Replace with single space".to_string()
    }
}
/// ## What it does
/// Checks for extraneous tabs before keywords.
///
/// ## Why is this bad?
/// [PEP 8] recommends avoiding extraneous whitespace; a keyword should be
/// separated from the preceding token by a single space, not a tab.
///
/// ## Example
/// ```python
/// True\tand False
/// ```
///
/// Use instead:
/// ```python
/// True and False
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#pet-peeves
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct TabBeforeKeyword;
impl AlwaysFixableViolation for TabBeforeKeyword {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Tab before keyword".to_string()
    }
    fn fix_title(&self) -> String {
        "Replace with single space".to_string()
    }
}
/// E271, E272, E273, E274
///
/// Flags tabs and runs of multiple spaces that appear immediately before or
/// after a keyword; the fix collapses the run to a single space.
pub(crate) fn whitespace_around_keywords(line: &LogicalLine, context: &LintContext) {
    // When the previous token was itself a keyword, the gap between the two
    // has already been checked as that token's *trailing* whitespace.
    let mut after_keyword = false;
    for token in line.tokens() {
        let is_keyword = token.kind().is_keyword();
        if is_keyword {
            if !after_keyword {
                // Whitespace between the previous token and this keyword.
                let (leading, offset) = line.leading_whitespace(token);
                let range = TextRange::at(token.start() - offset, offset);
                match leading {
                    Whitespace::Tab => {
                        if let Some(mut diagnostic) =
                            context.report_diagnostic_if_enabled(TabBeforeKeyword, range)
                        {
                            diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                                " ".to_string(),
                                range,
                            )));
                        }
                    }
                    Whitespace::Many => {
                        if let Some(mut diagnostic) = context
                            .report_diagnostic_if_enabled(MultipleSpacesBeforeKeyword, range)
                        {
                            diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                                " ".to_string(),
                                range,
                            )));
                        }
                    }
                    _ => {}
                }
            }
            // Whitespace between this keyword and the next token.
            let (trailing, len) = line.trailing_whitespace(token);
            let range = TextRange::at(token.end(), len);
            match trailing {
                Whitespace::Tab => {
                    if let Some(mut diagnostic) =
                        context.report_diagnostic_if_enabled(TabAfterKeyword, range)
                    {
                        diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                            " ".to_string(),
                            range,
                        )));
                    }
                }
                Whitespace::Many => {
                    if let Some(mut diagnostic) =
                        context.report_diagnostic_if_enabled(MultipleSpacesAfterKeyword, range)
                    {
                        diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                            " ".to_string(),
                            range,
                        )));
                    }
                }
                _ => {}
            }
        }
        after_keyword = is_keyword;
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/whitespace_before_parameters.rs | crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/whitespace_before_parameters.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::TokenKind;
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::checkers::ast::LintContext;
use crate::rules::pycodestyle::rules::logical_lines::LogicalLine;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for extraneous whitespace immediately preceding an open parenthesis
/// or bracket.
///
/// ## Why is this bad?
/// According to [PEP 8], open parentheses and brackets should not be preceded
/// by any trailing whitespace.
///
/// ## Example
/// ```python
/// spam (1)
/// ```
///
/// Use instead:
/// ```python
/// spam(1)
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#pet-peeves
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct WhitespaceBeforeParameters {
    // The open-bracket kind following the whitespace; only ever `Lpar` or `Lsqb`.
    bracket: TokenKind,
}
impl WhitespaceBeforeParameters {
    /// Returns the bracket character this diagnostic was raised for.
    fn bracket_text(&self) -> char {
        match self.bracket {
            TokenKind::Lpar => '(',
            TokenKind::Lsqb => '[',
            // `bracket` is only constructed from `Lpar`/`Lsqb` in
            // `whitespace_before_parameters` below.
            _ => unreachable!(),
        }
    }
}
impl AlwaysFixableViolation for WhitespaceBeforeParameters {
    #[derive_message_formats]
    fn message(&self) -> String {
        let bracket = self.bracket_text();
        format!("Whitespace before '{bracket}'")
    }
    fn fix_title(&self) -> String {
        let bracket = self.bracket_text();
        format!("Removed whitespace before '{bracket}'")
    }
}
/// E211
///
/// Flags whitespace between a name (or a closing bracket) and a directly
/// following `(` or `[`, e.g. `spam (1)` or `d ['key']`.
pub(crate) fn whitespace_before_parameters(line: &LogicalLine, context: &LintContext) {
    // NOTE(review): assumes a logical line always has at least one token; the
    // `LogicalLines` builder drops empty lines, which appears to guarantee this.
    let previous = line.tokens().first().unwrap();
    // Kind of the token *before* the previous one, used to exempt
    // `class C (Base):`-style headers where the name is preceded by `class`.
    let mut pre_pre_kind: Option<TokenKind> = None;
    let mut prev_token = previous.kind();
    let mut prev_end = previous.end();
    for token in line.tokens() {
        let kind = token.kind();
        // A gap between `prev_end` and the bracket's start means whitespace.
        if matches!(kind, TokenKind::Lpar | TokenKind::Lsqb)
            && matches!(
                prev_token,
                TokenKind::Name | TokenKind::Rpar | TokenKind::Rsqb | TokenKind::Rbrace
            )
            && (pre_pre_kind != Some(TokenKind::Class))
            && token.start() != prev_end
        {
            let start = prev_end;
            // The bracket token is a single byte, so this lands on its start.
            let end = token.end() - TextSize::from(1);
            let kind: WhitespaceBeforeParameters = WhitespaceBeforeParameters { bracket: kind };
            if let Some(mut diagnostic) =
                context.report_diagnostic_if_enabled(kind, TextRange::new(start, end))
            {
                diagnostic.set_fix(Fix::safe_edit(Edit::deletion(start, end)));
            }
        }
        pre_pre_kind = Some(prev_token);
        prev_token = kind;
        prev_end = token.end();
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/missing_whitespace_after_keyword.rs | crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/missing_whitespace_after_keyword.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::TokenKind;
use ruff_text_size::Ranged;
use crate::checkers::ast::LintContext;
use crate::rules::pycodestyle::rules::logical_lines::LogicalLine;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for missing whitespace after keywords.
///
/// ## Why is this bad?
/// Missing whitespace after keywords makes the code harder to read.
///
/// ## Example
/// ```python
/// if(True):
///     pass
/// ```
///
/// Use instead:
/// ```python
/// if (True):
///     pass
/// ```
///
/// ## References
/// - [Python documentation: Keywords](https://docs.python.org/3/reference/lexical_analysis.html#keywords)
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct MissingWhitespaceAfterKeyword;
impl AlwaysFixableViolation for MissingWhitespaceAfterKeyword {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing whitespace after keyword".to_string()
    }
    fn fix_title(&self) -> String {
        // The fix inserts a single space directly after the keyword.
        "Added missing whitespace after keyword".to_string()
    }
}
/// E275
///
/// Flags a keyword that is immediately followed by another token with no
/// separating whitespace (e.g. `if(True):`); the fix inserts a single space.
pub(crate) fn missing_whitespace_after_keyword(line: &LogicalLine, context: &LintContext) {
    for window in line.tokens().windows(2) {
        let tok0 = &window[0];
        let tok1 = &window[1];
        let tok0_kind = tok0.kind();
        let tok1_kind = tok1.kind();
        // Only keyword tokens are relevant at all.
        if !tok0_kind.is_keyword() {
            continue;
        }
        // Pairs where no space is expected:
        // - singleton keywords (see `TokenKind::is_singleton`), `async`/`await`;
        // - `except*`;
        // - a bare `yield` directly before `)` or `,`;
        // - a keyword directly before `:`/`;`/newlines, or — in the event of a
        //   syntax error — a closing bracket, where no whitespace is added.
        let exempt = tok0_kind.is_singleton()
            || matches!(tok0_kind, TokenKind::Async | TokenKind::Await)
            || (tok0_kind == TokenKind::Except && tok1_kind == TokenKind::Star)
            || (tok0_kind == TokenKind::Yield
                && matches!(tok1_kind, TokenKind::Rpar | TokenKind::Comma))
            || matches!(
                tok1_kind,
                TokenKind::Colon
                    | TokenKind::Semi
                    | TokenKind::Newline
                    | TokenKind::NonLogicalNewline
                    | TokenKind::Rpar
                    | TokenKind::Rsqb
                    | TokenKind::Rbrace
            );
        if exempt {
            continue;
        }
        // Adjacent ranges mean there is no whitespace between the two tokens.
        if tok0.end() == tok1.start() {
            if let Some(mut diagnostic) =
                context.report_diagnostic_if_enabled(MissingWhitespaceAfterKeyword, tok0.range())
            {
                diagnostic.set_fix(Fix::safe_edit(Edit::insertion(" ".to_string(), tok0.end())));
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/extraneous_whitespace.rs | crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/extraneous_whitespace.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::TokenKind;
use ruff_text_size::{Ranged, TextRange};
use crate::AlwaysFixableViolation;
use crate::Edit;
use crate::Fix;
use crate::checkers::ast::LintContext;
use super::{LogicalLine, Whitespace};
/// ## What it does
/// Checks for the use of extraneous whitespace after "(", "[" or "{".
///
/// ## Why is this bad?
/// [PEP 8] recommends the omission of whitespace in the following cases:
/// - "Immediately inside parentheses, brackets or braces."
/// - "Immediately before a comma, semicolon, or colon."
///
/// ## Example
/// ```python
/// spam( ham[1], {eggs: 2})
/// spam(ham[ 1], {eggs: 2})
/// spam(ham[1], { eggs: 2})
/// ```
///
/// Use instead:
/// ```python
/// spam(ham[1], {eggs: 2})
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#pet-peeves
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct WhitespaceAfterOpenBracket {
    // The open-bracket character (`(`, `[`, or `{`) the whitespace follows.
    symbol: char,
}
impl AlwaysFixableViolation for WhitespaceAfterOpenBracket {
    #[derive_message_formats]
    fn message(&self) -> String {
        let WhitespaceAfterOpenBracket { symbol } = self;
        format!("Whitespace after '{symbol}'")
    }
    fn fix_title(&self) -> String {
        let WhitespaceAfterOpenBracket { symbol } = self;
        // The offending whitespace sits *after* the open bracket; the old
        // wording ("before") described the wrong side of the bracket.
        format!("Remove whitespace after '{symbol}'")
    }
}
/// ## What it does
/// Checks for the use of extraneous whitespace before ")", "]" or "}".
///
/// ## Why is this bad?
/// [PEP 8] recommends the omission of whitespace in the following cases:
/// - "Immediately inside parentheses, brackets or braces."
/// - "Immediately before a comma, semicolon, or colon."
///
/// ## Example
/// ```python
/// spam(ham[1], {eggs: 2} )
/// spam(ham[1 ], {eggs: 2})
/// spam(ham[1], {eggs: 2 })
/// ```
///
/// Use instead:
/// ```python
/// spam(ham[1], {eggs: 2})
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#pet-peeves
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct WhitespaceBeforeCloseBracket {
    // The close-bracket character (`)`, `]`, or `}`) the whitespace precedes.
    symbol: char,
}
impl AlwaysFixableViolation for WhitespaceBeforeCloseBracket {
    #[derive_message_formats]
    fn message(&self) -> String {
        let WhitespaceBeforeCloseBracket { symbol } = self;
        format!("Whitespace before '{symbol}'")
    }
    fn fix_title(&self) -> String {
        let WhitespaceBeforeCloseBracket { symbol } = self;
        format!("Remove whitespace before '{symbol}'")
    }
}
/// ## What it does
/// Checks for the use of extraneous whitespace before ",", ";" or ":".
///
/// ## Why is this bad?
/// [PEP 8] recommends the omission of whitespace in the following cases:
/// - "Immediately inside parentheses, brackets or braces."
/// - "Immediately before a comma, semicolon, or colon."
///
/// ## Example
/// ```python
/// if x == 4: print(x, y); x, y = y , x
/// ```
///
/// Use instead:
/// ```python
/// if x == 4: print(x, y); x, y = y, x
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#pet-peeves
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct WhitespaceBeforePunctuation {
    // The punctuation character (`,`, `;`, or `:`) the whitespace precedes.
    symbol: char,
}
impl AlwaysFixableViolation for WhitespaceBeforePunctuation {
    #[derive_message_formats]
    fn message(&self) -> String {
        let WhitespaceBeforePunctuation { symbol } = self;
        format!("Whitespace before '{symbol}'")
    }
    fn fix_title(&self) -> String {
        let WhitespaceBeforePunctuation { symbol } = self;
        format!("Remove whitespace before '{symbol}'")
    }
}
/// E201, E202, E203
///
/// - E201: whitespace directly after an open bracket (`spam( ham)`)
/// - E202: whitespace directly before a close bracket (`spam(ham )`)
/// - E203: whitespace before `,`, `;`, or `:` (`x , y`)
///
/// Slice colons and f-/t-string braces get special treatment below.
pub(crate) fn extraneous_whitespace(line: &LogicalLine, context: &LintContext) {
    // Nesting depth of f-/t-strings; the brace checks are suppressed inside
    // them because `{ `/` }` spacing can be significant there (see below).
    let mut interpolated_strings = 0u32;
    // Stack of the currently open `[`/`{` brackets, used to decide whether a
    // `:` is a slice colon (i.e. the innermost open bracket is a `[`).
    let mut brackets = vec![];
    let mut prev_token = None;
    let mut iter = line.tokens().iter().peekable();
    while let Some(token) = iter.next() {
        let kind = token.kind();
        match kind {
            TokenKind::FStringStart | TokenKind::TStringStart => interpolated_strings += 1,
            TokenKind::FStringEnd | TokenKind::TStringEnd => {
                interpolated_strings = interpolated_strings.saturating_sub(1);
            }
            TokenKind::Lsqb => {
                brackets.push(kind);
            }
            TokenKind::Rsqb => {
                brackets.pop();
            }
            TokenKind::Lbrace => {
                brackets.push(kind);
            }
            TokenKind::Rbrace => {
                brackets.pop();
            }
            _ => {}
        }
        if let Some(symbol) = BracketOrPunctuation::from_kind(kind) {
            // Whitespace before "{" or after "}" might be required in f-strings.
            // For example,
            //
            // ```python
            // f"{ {'a': 1} }"
            // ```
            //
            // Here, `{{` / `}} would be interpreted as a single raw `{` / `}`
            // character.
            match symbol {
                // E201: whitespace directly after the open bracket.
                BracketOrPunctuation::OpenBracket(symbol)
                    if symbol != '{' || interpolated_strings == 0 =>
                {
                    let (trailing, trailing_len) = line.trailing_whitespace(token);
                    if !matches!(trailing, Whitespace::None) {
                        let range = TextRange::at(token.end(), trailing_len);
                        if let Some(mut diagnostic) = context.report_diagnostic_if_enabled(
                            WhitespaceAfterOpenBracket { symbol },
                            range,
                        ) {
                            diagnostic.set_fix(Fix::safe_edit(Edit::range_deletion(range)));
                        }
                    }
                }
                // E202: whitespace directly before the close bracket.
                BracketOrPunctuation::CloseBracket(symbol)
                    if symbol != '}' || interpolated_strings == 0 =>
                {
                    // Exempt whitespace that follows a comma (e.g. `(a, )`).
                    if !matches!(prev_token, Some(TokenKind::Comma)) {
                        if let (Whitespace::Single | Whitespace::Many | Whitespace::Tab, offset) =
                            line.leading_whitespace(token)
                        {
                            let range = TextRange::at(token.start() - offset, offset);
                            if let Some(mut diagnostic) = context.report_diagnostic_if_enabled(
                                WhitespaceBeforeCloseBracket { symbol },
                                range,
                            ) {
                                diagnostic.set_fix(Fix::safe_edit(Edit::range_deletion(range)));
                            }
                        }
                    }
                }
                // E203: whitespace before `,`, `;`, or `:`, with slice- and
                // f-string-specific carve-outs below.
                BracketOrPunctuation::Punctuation(symbol) => {
                    if !matches!(prev_token, Some(TokenKind::Comma)) {
                        let whitespace = line.leading_whitespace(token);
                        if let (Whitespace::Single | Whitespace::Many | Whitespace::Tab, offset) =
                            whitespace
                        {
                            // If we're in a slice, and the token is a colon, and it has
                            // equivalent spacing on both sides, allow it.
                            if symbol == ':'
                                && brackets
                                    .last()
                                    .is_some_and(|kind| matches!(kind, TokenKind::Lsqb))
                            {
                                // If we're in the second half of a double colon, disallow
                                // any whitespace (e.g., `foo[1: :2]` or `foo[1 : : 2]`).
                                if matches!(prev_token, Some(TokenKind::Colon)) {
                                    let range = TextRange::at(token.start() - offset, offset);
                                    if let Some(mut diagnostic) = context
                                        .report_diagnostic_if_enabled(
                                            WhitespaceBeforePunctuation { symbol },
                                            range,
                                        )
                                    {
                                        diagnostic
                                            .set_fix(Fix::safe_edit(Edit::range_deletion(range)));
                                    }
                                } else if iter.peek().is_some_and(|token| {
                                    matches!(token.kind(), TokenKind::Rsqb | TokenKind::Comma)
                                }) {
                                    // Allow `foo[1 :]`, but not `foo[1  :]` (many spaces/tab).
                                    // Or `foo[index :, 2]`, but not `foo[index  :, 2]`.
                                    if let (Whitespace::Many | Whitespace::Tab, offset) = whitespace
                                    {
                                        let range = TextRange::at(token.start() - offset, offset);
                                        if let Some(mut diagnostic) = context
                                            .report_diagnostic_if_enabled(
                                                WhitespaceBeforePunctuation { symbol },
                                                range,
                                            )
                                        {
                                            diagnostic.set_fix(Fix::safe_edit(
                                                Edit::range_deletion(range),
                                            ));
                                        }
                                    }
                                } else if iter.peek().is_some_and(|token| {
                                    matches!(
                                        token.kind(),
                                        TokenKind::NonLogicalNewline | TokenKind::Comment
                                    )
                                }) {
                                    // Allow a single space before a colon that ends the line:
                                    //     [
                                    //         long_expression_calculating_the_index() :
                                    //     ]
                                    // but flag many spaces or a tab in that position —
                                    // distinct from the above case, because ruff format produces a
                                    // whitespace before the colon and so should the fix
                                    if let (Whitespace::Many | Whitespace::Tab, offset) = whitespace
                                    {
                                        let range = TextRange::at(token.start() - offset, offset);
                                        if let Some(mut diagnostic) = context
                                            .report_diagnostic_if_enabled(
                                                WhitespaceBeforePunctuation { symbol },
                                                range,
                                            )
                                        {
                                            // Collapse the run to exactly one space to match
                                            // the formatter's output in this position.
                                            diagnostic.set_fix(Fix::safe_edits(
                                                Edit::range_deletion(range),
                                                [Edit::insertion(
                                                    " ".into(),
                                                    token.start() - offset,
                                                )],
                                            ));
                                        }
                                    }
                                } else {
                                    // Allow, e.g., `foo[1:2]` or `foo[1 : 2]` or `foo[1 :: 2]`.
                                    let token = iter
                                        .peek()
                                        .filter(|next| matches!(next.kind(), TokenKind::Colon))
                                        .unwrap_or(&token);
                                    // Asymmetric spacing around the colon is flagged.
                                    if line.trailing_whitespace(token) != whitespace {
                                        let range = TextRange::at(token.start() - offset, offset);
                                        if let Some(mut diagnostic) = context
                                            .report_diagnostic_if_enabled(
                                                WhitespaceBeforePunctuation { symbol },
                                                range,
                                            )
                                        {
                                            diagnostic.set_fix(Fix::safe_edit(
                                                Edit::range_deletion(range),
                                            ));
                                        }
                                    }
                                }
                            } else {
                                if interpolated_strings > 0
                                    && symbol == ':'
                                    && matches!(prev_token, Some(TokenKind::Equal))
                                {
                                    // Avoid removing any whitespace for f-string debug expressions.
                                    continue;
                                }
                                let range = TextRange::at(token.start() - offset, offset);
                                if let Some(mut diagnostic) = context.report_diagnostic_if_enabled(
                                    WhitespaceBeforePunctuation { symbol },
                                    range,
                                ) {
                                    diagnostic.set_fix(Fix::safe_edit(Edit::range_deletion(range)));
                                }
                            }
                        }
                    }
                }
                _ => {}
            }
        }
        prev_token = Some(kind);
    }
}
/// Classification of a token as an open bracket, a close bracket, or a
/// punctuation mark, carrying the concrete character for diagnostics.
#[derive(Debug)]
enum BracketOrPunctuation {
    OpenBracket(char),
    CloseBracket(char),
    Punctuation(char),
}
impl BracketOrPunctuation {
    /// Maps a token kind to its bracket/punctuation classification, or
    /// `None` for any other kind of token.
    fn from_kind(kind: TokenKind) -> Option<BracketOrPunctuation> {
        use BracketOrPunctuation::{CloseBracket, OpenBracket, Punctuation};
        Some(match kind {
            TokenKind::Lbrace => OpenBracket('{'),
            TokenKind::Lpar => OpenBracket('('),
            TokenKind::Lsqb => OpenBracket('['),
            TokenKind::Rbrace => CloseBracket('}'),
            TokenKind::Rpar => CloseBracket(')'),
            TokenKind::Rsqb => CloseBracket(']'),
            TokenKind::Comma => Punctuation(','),
            TokenKind::Colon => Punctuation(':'),
            TokenKind::Semi => Punctuation(';'),
            _ => return None,
        })
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/mod.rs | crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/mod.rs | use std::fmt::{Debug, Formatter};
use std::iter::FusedIterator;
use bitflags::bitflags;
pub(crate) use extraneous_whitespace::*;
pub(crate) use indentation::*;
pub(crate) use missing_whitespace::*;
pub(crate) use missing_whitespace_after_keyword::*;
pub(crate) use missing_whitespace_around_operator::*;
pub(crate) use redundant_backslash::*;
use ruff_python_ast::token::{TokenKind, Tokens};
use ruff_python_trivia::is_python_whitespace;
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
pub(crate) use space_around_operator::*;
pub(crate) use whitespace_around_keywords::*;
pub(crate) use whitespace_around_named_parameter_equals::*;
pub(crate) use whitespace_before_comment::*;
pub(crate) use whitespace_before_parameters::*;
use crate::Locator;
use crate::rules::pycodestyle::helpers::is_non_logical_token;
mod extraneous_whitespace;
mod indentation;
mod missing_whitespace;
mod missing_whitespace_after_keyword;
mod missing_whitespace_around_operator;
mod redundant_backslash;
mod space_around_operator;
mod whitespace_around_keywords;
mod whitespace_around_named_parameter_equals;
mod whitespace_before_comment;
mod whitespace_before_parameters;
bitflags! {
    /// Properties aggregated over the tokens of a [`LogicalLine`].
    #[derive(Default, Eq, PartialEq, Clone, Copy, Debug)]
    pub(crate) struct TokenFlags: u8 {
        /// Whether the logical line contains an operator.
        const OPERATOR = 1 << 0;
        /// Whether the logical line contains a bracket.
        const BRACKET = 1 << 1;
        /// Whether the logical line contains a punctuation mark.
        const PUNCTUATION = 1 << 2;
        /// Whether the logical line contains a keyword.
        const KEYWORD = 1 << 3;
        /// Whether the logical line contains a comment.
        const COMMENT = 1 << 4;
        /// Whether the logical line contains any non trivia token (no comment, newline, or in/dedent)
        const NON_TRIVIA = 1 << 5;
    }
}
/// All logical lines of a source document, sharing a single token buffer.
#[derive(Clone)]
pub(crate) struct LogicalLines<'a> {
    // Tokens of all lines, indexed via each line's `tokens_start..tokens_end`.
    tokens: Vec<LogicalLineToken>,
    lines: Vec<Line>,
    // Used to slice the source text that a line's tokens cover.
    locator: &'a Locator<'a>,
}
impl<'a> LogicalLines<'a> {
    /// Builds the logical lines from the document's token stream.
    pub(crate) fn from_tokens(tokens: &Tokens, locator: &'a Locator<'a>) -> Self {
        // Token indices are stored as `u32` (see `Line`); guard up front.
        assert!(u32::try_from(tokens.len()).is_ok());
        let mut builder = LogicalLinesBuilder::with_capacity(tokens.len());
        let mut tokens_iter = tokens.iter_with_context();
        while let Some(token) = tokens_iter.next() {
            builder.push_token(token.kind(), token.range());
            // A newline only terminates the logical line when it occurs
            // outside of parentheses/brackets/braces.
            if token.kind().is_any_newline() && !tokens_iter.in_parenthesized_context() {
                builder.finish_line();
            }
        }
        builder.finish(locator)
    }
}
impl Debug for LogicalLines<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_list()
.entries(self.into_iter().map(DebugLogicalLine))
.finish()
}
}
impl<'a> IntoIterator for &'a LogicalLines<'a> {
    type IntoIter = LogicalLinesIter<'a>;
    type Item = LogicalLine<'a>;
    fn into_iter(self) -> Self::IntoIter {
        // Iterate the stored `Line` records, materializing a `LogicalLine`
        // view over the shared buffers for each one.
        LogicalLinesIter {
            lines: self,
            inner: self.lines.iter(),
        }
    }
}
/// A logical line spans multiple lines in the source document if the line
/// ends with a parenthesized expression (`(..)`, `[..]`, `{..}`) that contains
/// line breaks.
///
/// ## Examples
/// This expression forms one logical line because the array elements are parenthesized.
///
/// ```python
/// a = [
///     1,
///     2
/// ]
/// ```
#[derive(Debug)]
pub(crate) struct LogicalLine<'a> {
    // The collection this line belongs to; owns the shared token buffer.
    lines: &'a LogicalLines<'a>,
    // Token range and aggregated flags for this specific line.
    line: &'a Line,
}
impl<'a> LogicalLine<'a> {
    /// Returns `true` if this line is positioned at the start of the file.
    pub(crate) const fn is_start_of_file(&self) -> bool {
        self.line.tokens_start == 0
    }
    /// Returns `true` if this is a comment only line
    pub(crate) fn is_comment_only(&self) -> bool {
        self.flags() == TokenFlags::COMMENT
    }
    /// Returns logical line's text including comments, indents, dedent and trailing new lines.
    pub(crate) fn text(&self) -> &'a str {
        let tokens = self.tokens();
        match (tokens.first(), tokens.last()) {
            (Some(first), Some(last)) => self
                .lines
                .locator
                .slice(TextRange::new(first.start(), last.end())),
            // A line without tokens has no text.
            _ => "",
        }
    }
    /// Returns the text without any leading or trailing newline, comment, indent, or dedent of this line
    #[cfg(test)]
    pub(crate) fn text_trimmed(&self) -> &'a str {
        let tokens = self.tokens_trimmed();
        match (tokens.first(), tokens.last()) {
            (Some(first), Some(last)) => self
                .lines
                .locator
                .slice(TextRange::new(first.start(), last.end())),
            _ => "",
        }
    }
    /// Returns the line's tokens with leading and trailing non-logical tokens
    /// (per `is_non_logical_token`) stripped.
    pub(crate) fn tokens_trimmed(&self) -> &'a [LogicalLineToken] {
        let tokens = self.tokens();
        // Position of the first logical token; if there is none, the result
        // is empty.
        let start = tokens
            .iter()
            .position(|t| !is_non_logical_token(t.kind()))
            .unwrap_or(tokens.len());
        let tokens = &tokens[start..];
        // Exclusive end just past the last logical token.
        let end = tokens
            .iter()
            .rposition(|t| !is_non_logical_token(t.kind()))
            .map_or(0, |pos| pos + 1);
        &tokens[..end]
    }
    /// Returns the text after `token`
    #[inline]
    pub(crate) fn text_after(&self, token: &'a LogicalLineToken) -> &str {
        // SAFETY: The line must have at least one token or `token` would not belong to this line.
        let last_token = self.tokens().last().unwrap();
        self.lines
            .locator
            .slice(TextRange::new(token.end(), last_token.end()))
    }
    /// Returns the text before `token`
    #[inline]
    pub(crate) fn text_before(&self, token: &'a LogicalLineToken) -> &str {
        // SAFETY: The line must have at least one token or `token` would not belong to this line.
        let first_token = self.tokens().first().unwrap();
        self.lines
            .locator
            .slice(TextRange::new(first_token.start(), token.start()))
    }
    /// Returns the whitespace *after* the `token` with the byte length
    pub(crate) fn trailing_whitespace(
        &self,
        token: &'a LogicalLineToken,
    ) -> (Whitespace, TextSize) {
        Whitespace::leading(self.text_after(token))
    }
    /// Returns the whitespace and whitespace byte-length *before* the `token`
    pub(crate) fn leading_whitespace(&self, token: &'a LogicalLineToken) -> (Whitespace, TextSize) {
        Whitespace::trailing(self.text_before(token))
    }
    /// Returns all tokens of the line, including comments and trailing new lines.
    pub(crate) fn tokens(&self) -> &'a [LogicalLineToken] {
        &self.lines.tokens[self.line.tokens_start as usize..self.line.tokens_end as usize]
    }
    /// Returns the line's first token, or `None` for a line without tokens.
    pub(crate) fn first_token(&self) -> Option<&'a LogicalLineToken> {
        self.tokens().first()
    }
    /// Returns the line's flags
    pub(crate) const fn flags(&self) -> TokenFlags {
        self.line.flags
    }
}
/// Helper struct to pretty print [`LogicalLine`] with `dbg`
struct DebugLogicalLine<'a>(LogicalLine<'a>);
impl Debug for DebugLogicalLine<'_> {
    // Renders the wrapped line as a struct showing its text, flags, and tokens.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("LogicalLine")
            .field("text", &self.0.text())
            .field("flags", &self.0.flags())
            .field("tokens", &self.0.tokens())
            .finish()
    }
}
/// Iterator over the logical lines of a document.
pub(crate) struct LogicalLinesIter<'a> {
    lines: &'a LogicalLines<'a>,
    inner: std::slice::Iter<'a, Line>,
}
impl<'a> Iterator for LogicalLinesIter<'a> {
    type Item = LogicalLine<'a>;
    fn next(&mut self) -> Option<Self::Item> {
        // Wrap the next `Line` record in a `LogicalLine` view.
        self.inner.next().map(|line| LogicalLine {
            lines: self.lines,
            line,
        })
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
impl DoubleEndedIterator for LogicalLinesIter<'_> {
    fn next_back(&mut self) -> Option<Self::Item> {
        self.inner.next_back().map(|line| LogicalLine {
            lines: self.lines,
            line,
        })
    }
}
impl ExactSizeIterator for LogicalLinesIter<'_> {}
impl FusedIterator for LogicalLinesIter<'_> {}
/// A token of a [`LogicalLine`]
#[derive(Clone, Debug)]
pub(crate) struct LogicalLineToken {
    // The token's kind (keyword, name, operator, ...).
    kind: TokenKind,
    // The token's byte range in the source document.
    range: TextRange,
}
impl LogicalLineToken {
    /// Returns the token's kind
    #[inline]
    pub(crate) const fn kind(&self) -> TokenKind {
        self.kind
    }
}
impl Ranged for LogicalLineToken {
    /// Returns the token's byte range in the source document
    fn range(&self) -> TextRange {
        self.range
    }
}
/// Classification of a run of whitespace between two tokens.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub(crate) enum Whitespace {
    /// No whitespace (also used for indentation and before end-of-line comments).
    None,
    /// Exactly one space, no tabs.
    Single,
    /// Two or more spaces, no tabs.
    Many,
    /// At least one tab (possibly mixed with spaces).
    Tab,
}
impl Whitespace {
    /// Classifies the whitespace at the *start* of `content` and returns it
    /// together with its byte length.
    fn leading(content: &str) -> (Self, TextSize) {
        // `count` tracks spaces only; tabs extend `len` but set `has_tabs`,
        // which takes precedence in the final classification.
        let mut count = 0u32;
        let mut len = TextSize::default();
        let mut has_tabs = false;
        for c in content.chars() {
            if c == '#' {
                // Ignore leading whitespace between a token and an end-of-line comment
                return (Whitespace::None, TextSize::default());
            } else if c == '\t' {
                has_tabs = true;
                len += c.text_len();
            } else if matches!(c, '\n' | '\r') {
                // Stop at a line break; what follows is on the next line.
                break;
            } else if is_python_whitespace(c) {
                count += 1;
                len += c.text_len();
            } else {
                break;
            }
        }
        if has_tabs {
            (Whitespace::Tab, len)
        } else {
            match count {
                0 => (Whitespace::None, len),
                1 => (Whitespace::Single, len),
                _ => (Whitespace::Many, len),
            }
        }
    }
    /// Classifies the whitespace at the *end* of `content` (scanning
    /// backwards) and returns it together with its byte length. Whitespace
    /// reaching all the way to the start of the line counts as indentation
    /// and yields `None`.
    fn trailing(content: &str) -> (Self, TextSize) {
        let mut len = TextSize::default();
        let mut count = 0usize;
        let mut has_tabs = false;
        for c in content.chars().rev() {
            if c == '\t' {
                has_tabs = true;
                len += c.text_len();
            } else if matches!(c, '\n' | '\r') {
                // Indent
                return (Self::None, TextSize::default());
            } else if is_python_whitespace(c) {
                count += 1;
                len += c.text_len();
            } else {
                break;
            }
        }
        if len == content.text_len() {
            // All whitespace up to the start of the line -> Indent
            (Self::None, TextSize::default())
        } else if has_tabs {
            (Self::Tab, len)
        } else {
            match count {
                0 => (Self::None, TextSize::default()),
                1 => (Self::Single, len),
                _ => (Self::Many, len),
            }
        }
    }
}
/// State of the logical line currently being assembled by the builder.
#[derive(Debug, Default)]
struct CurrentLine {
    // Flags accumulated from the tokens pushed so far.
    flags: TokenFlags,
    // Index of the line's first token in `LogicalLinesBuilder::tokens`.
    tokens_start: u32,
}
/// Builder for [`LogicalLines`]
#[derive(Debug, Default)]
struct LogicalLinesBuilder {
    tokens: Vec<LogicalLineToken>,
    lines: Vec<Line>,
    current_line: CurrentLine,
}
impl LogicalLinesBuilder {
    /// Creates a builder with room for `tokens` tokens, avoiding
    /// reallocations while pushing.
    fn with_capacity(tokens: usize) -> Self {
        Self {
            tokens: Vec::with_capacity(tokens),
            ..Self::default()
        }
    }
    // SAFETY: `LogicalLines::from_tokens` asserts that the file has less than `u32::MAX` tokens and each tokens is at least one character long
    // Appends a token to the current line, updating the line's flags.
    fn push_token(&mut self, kind: TokenKind, range: TextRange) {
        let line = &mut self.current_line;
        if matches!(kind, TokenKind::Comment) {
            line.flags.insert(TokenFlags::COMMENT);
        } else if kind.is_operator() {
            line.flags.insert(TokenFlags::OPERATOR);
            // Brackets are a subset of operators; record them separately.
            if matches!(
                kind,
                TokenKind::Lpar
                    | TokenKind::Lsqb
                    | TokenKind::Lbrace
                    | TokenKind::Rpar
                    | TokenKind::Rsqb
                    | TokenKind::Rbrace
            ) {
                line.flags.insert(TokenFlags::BRACKET);
            }
        }
        if matches!(kind, TokenKind::Comma | TokenKind::Semi | TokenKind::Colon) {
            line.flags.insert(TokenFlags::PUNCTUATION);
        } else if kind.is_keyword() {
            line.flags.insert(TokenFlags::KEYWORD);
        }
        if !is_non_logical_token(kind) {
            line.flags.insert(TokenFlags::NON_TRIVIA);
        }
        self.tokens.push(LogicalLineToken { kind, range });
    }
    // SAFETY: `LogicalLines::from_tokens` asserts that the file has less than `u32::MAX` tokens and each tokens is at least one character long
    #[expect(clippy::cast_possible_truncation)]
    // Closes the current line and starts a new one at the current token index.
    fn finish_line(&mut self) {
        let end = self.tokens.len() as u32;
        if self.current_line.tokens_start < end {
            // Lines consisting solely of newline tokens (i.e. blank lines)
            // are dropped instead of being recorded.
            let is_empty = self.tokens[self.current_line.tokens_start as usize..end as usize]
                .iter()
                .all(|token| token.kind.is_any_newline());
            if !is_empty {
                self.lines.push(Line {
                    flags: self.current_line.flags,
                    tokens_start: self.current_line.tokens_start,
                    tokens_end: end,
                });
            }
            self.current_line = CurrentLine {
                flags: TokenFlags::default(),
                tokens_start: end,
            }
        }
    }
    // Finishes any in-progress line and assembles the final `LogicalLines`.
    fn finish<'a>(mut self, locator: &'a Locator<'a>) -> LogicalLines<'a> {
        self.finish_line();
        LogicalLines {
            tokens: self.tokens,
            lines: self.lines,
            locator,
        }
    }
}
/// Token index range of a single logical line within the shared
/// `LogicalLines::tokens` buffer, plus the flags aggregated over its tokens.
#[derive(Debug, Clone)]
struct Line {
    // Aggregated `TokenFlags` of all tokens in this line.
    flags: TokenFlags,
    // Index of the line's first token.
    tokens_start: u32,
    // Exclusive index one past the line's last token.
    tokens_end: u32,
}
/// Keeps track of whether we are currently visiting a class or function definition in a
/// [`LogicalLine`]. If we are visiting a class or function, the enum also keeps track
/// of the [type parameters] of the class/function.
///
/// Call [`DefinitionState::visit_token_kind`] on the [`TokenKind`] of each
/// successive [`LogicalLineToken`] to ensure the state remains up to date.
///
/// [type parameters]: https://docs.python.org/3/reference/compound_stmts.html#type-params
#[derive(Debug, Clone, Copy)]
enum DefinitionState {
    InClass(TypeParamsState),
    InFunction(TypeParamsState),
    InTypeAlias(TypeParamsState),
    NotInDefinition,
}
impl DefinitionState {
    /// Classifies a logical line from its leading keyword (`class`, `def`,
    /// `async def`, or `type`), skipping leading indent/dedent tokens.
    fn from_tokens<'a>(tokens: impl IntoIterator<Item = &'a LogicalLineToken>) -> Self {
        let mut token_kinds = tokens.into_iter().map(LogicalLineToken::kind);
        while let Some(token_kind) = token_kinds.next() {
            let state = match token_kind {
                TokenKind::Indent | TokenKind::Dedent => continue,
                TokenKind::Class => Self::InClass(TypeParamsState::default()),
                TokenKind::Def => Self::InFunction(TypeParamsState::default()),
                // The guard consumes the token after `async`; that is harmless
                // because every non-`continue` arm returns immediately below.
                TokenKind::Async if matches!(token_kinds.next(), Some(TokenKind::Def)) => {
                    Self::InFunction(TypeParamsState::default())
                }
                TokenKind::Type => Self::InTypeAlias(TypeParamsState::default()),
                _ => Self::NotInDefinition,
            };
            return state;
        }
        Self::NotInDefinition
    }
    const fn in_function_definition(self) -> bool {
        matches!(self, Self::InFunction(_))
    }
    const fn type_params_state(self) -> Option<TypeParamsState> {
        match self {
            Self::InClass(state) | Self::InFunction(state) | Self::InTypeAlias(state) => {
                Some(state)
            }
            Self::NotInDefinition => None,
        }
    }
    /// Whether the cursor is currently inside a definition's `[...]`
    /// type-parameter list.
    fn in_type_params(self) -> bool {
        matches!(
            self.type_params_state(),
            Some(TypeParamsState::InTypeParams { .. })
        )
    }
    /// Advances the type-parameter state machine by one token.
    fn visit_token_kind(&mut self, token_kind: TokenKind) {
        let type_params_state_mut = match self {
            Self::InClass(type_params_state)
            | Self::InFunction(type_params_state)
            | Self::InTypeAlias(type_params_state) => type_params_state,
            Self::NotInDefinition => return,
        };
        match token_kind {
            // A `(` (parameter list) or `=` (type alias value) seen before any
            // `[` means the definition has no type-parameter list.
            TokenKind::Lpar | TokenKind::Equal if type_params_state_mut.before_type_params() => {
                *type_params_state_mut = TypeParamsState::TypeParamsEnded;
            }
            TokenKind::Lsqb => match type_params_state_mut {
                TypeParamsState::TypeParamsEnded => {}
                TypeParamsState::BeforeTypeParams => {
                    *type_params_state_mut = TypeParamsState::InTypeParams {
                        inner_square_brackets: 0,
                    };
                }
                TypeParamsState::InTypeParams {
                    inner_square_brackets,
                } => *inner_square_brackets += 1,
            },
            TokenKind::Rsqb => {
                if let TypeParamsState::InTypeParams {
                    inner_square_brackets,
                } = type_params_state_mut
                {
                    if *inner_square_brackets == 0 {
                        *type_params_state_mut = TypeParamsState::TypeParamsEnded;
                    } else {
                        *inner_square_brackets -= 1;
                    }
                }
            }
            _ => {}
        }
    }
}
/// Progress through a definition's optional `[...]` type-parameter list.
#[derive(Debug, Clone, Copy, Default)]
enum TypeParamsState {
    // No `[`, `(`, or `=` seen yet; a type-parameter list may still follow.
    #[default]
    BeforeTypeParams,
    // Inside the outer `[...]`; counts nested `[` pairs so the matching `]` is found.
    InTypeParams {
        inner_square_brackets: u32,
    },
    // The list was closed, or the definition never had one.
    TypeParamsEnded,
}
impl TypeParamsState {
    const fn before_type_params(self) -> bool {
        matches!(self, Self::BeforeTypeParams)
    }
}
#[cfg(test)]
mod tests {
    use ruff_python_parser::parse_module;
    use crate::Locator;
    use super::LogicalLines;
    // Each test parses a snippet and compares the trimmed text of every
    // resulting logical line against the expected strings.
    #[test]
    fn multi_line() {
        assert_logical_lines(
            r"
x = 1
y = 2
z = x + 1"
            .trim(),
            &["x = 1", "y = 2", "z = x + 1"],
        );
    }
    #[test]
    fn indented() {
        assert_logical_lines(
            r"
x = [
    1,
    2,
    3,
]
y = 2
z = x + 1"
            .trim(),
            &["x = [\n    1,\n    2,\n    3,\n]", "y = 2", "z = x + 1"],
        );
    }
    #[test]
    fn string_assignment() {
        assert_logical_lines("x = 'abc'".trim(), &["x = 'abc'"]);
    }
    #[test]
    fn function_definition() {
        assert_logical_lines(
            r"
def f():
    x = 1
f()"
            .trim(),
            &["def f():", "x = 1", "f()"],
        );
    }
    // Docstrings and comments form their own (possibly empty) logical lines.
    #[test]
    fn trivia() {
        assert_logical_lines(
            r#"
def f():
    """Docstring goes here."""
    # Comment goes here.
    x = 1
f()"#
            .trim(),
            &[
                "def f():",
                "\"\"\"Docstring goes here.\"\"\"",
                "",
                "x = 1",
                "f()",
            ],
        );
    }
    #[test]
    fn empty_line() {
        assert_logical_lines(
            r"
if False:

    print()
"
            .trim(),
            &["if False:", "print()", ""],
        );
    }
    /// Parses `contents` and asserts that the trimmed text of each resulting
    /// logical line matches `expected`, in order.
    fn assert_logical_lines(contents: &str, expected: &[&str]) {
        let parsed = parse_module(contents).unwrap();
        let locator = Locator::new(contents);
        let actual: Vec<String> = LogicalLines::from_tokens(parsed.tokens(), &locator)
            .into_iter()
            .map(|line| line.text_trimmed())
            .map(ToString::to_string)
            .collect();
        let expected: Vec<String> = expected.iter().map(ToString::to_string).collect();
        assert_eq!(actual, expected);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/missing_whitespace.rs | crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/missing_whitespace.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::TokenKind;
use ruff_text_size::Ranged;
use crate::Edit;
use crate::checkers::ast::LintContext;
use crate::{AlwaysFixableViolation, Fix};
use super::{DefinitionState, LogicalLine};
/// ## What it does
/// Checks for missing whitespace after `,`, `;`, and `:`.
///
/// ## Why is this bad?
/// Missing whitespace after `,`, `;`, and `:` makes the code harder to read.
///
/// ## Example
/// ```python
/// a = (1,2)
/// ```
///
/// Use instead:
/// ```python
/// a = (1, 2)
/// ```
// E231
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct MissingWhitespace {
    // The offending token (`,`, `;`, or `:`), interpolated into the message.
    token: TokenKind,
}
impl AlwaysFixableViolation for MissingWhitespace {
    #[derive_message_formats]
    fn message(&self) -> String {
        format!("Missing whitespace after {}", self.token)
    }
    fn fix_title(&self) -> String {
        "Add missing whitespace".to_string()
    }
}
/// E231
pub(crate) fn missing_whitespace(line: &LogicalLine, context: &LintContext) {
    // Nesting depth of f-/t-strings; a `:` inside an interpolation is a format
    // spec delimiter, not punctuation.
    let mut interpolated_strings = 0u32;
    let mut definition_state = DefinitionState::from_tokens(line.tokens());
    // Stack of currently open `[` / `{` brackets (outside interpolated strings),
    // consulted below to recognize slice subscripts.
    let mut brackets = Vec::new();
    let mut iter = line.tokens().iter().peekable();
    while let Some(token) = iter.next() {
        let kind = token.kind();
        definition_state.visit_token_kind(kind);
        match kind {
            TokenKind::FStringStart | TokenKind::TStringStart => interpolated_strings += 1,
            TokenKind::FStringEnd | TokenKind::TStringEnd => {
                interpolated_strings = interpolated_strings.saturating_sub(1);
            }
            TokenKind::Lsqb if interpolated_strings == 0 => {
                brackets.push(kind);
            }
            TokenKind::Rsqb if interpolated_strings == 0 => {
                brackets.pop();
            }
            TokenKind::Lbrace if interpolated_strings == 0 => {
                brackets.push(kind);
            }
            TokenKind::Rbrace if interpolated_strings == 0 => {
                brackets.pop();
            }
            TokenKind::Colon if interpolated_strings > 0 => {
                // Colon in f-string, no space required. This will yield false
                // negatives for cases like the following as it's hard to
                // differentiate between the usage of a colon in a f-string.
                //
                // ```python
                // f'{ {'x':1} }'
                // f'{(lambda x:x)}'
                // ```
                continue;
            }
            TokenKind::Comma | TokenKind::Semi | TokenKind::Colon => {
                let after = line.text_after(token);
                // Flag only when the next character is neither whitespace nor a
                // line-continuation backslash.
                if after
                    .chars()
                    .next()
                    .is_some_and(|c| !(char::is_whitespace(c) || c == '\\'))
                {
                    if let Some(next_token) = iter.peek() {
                        match (kind, next_token.kind()) {
                            (TokenKind::Colon, _)
                                if matches!(brackets.last(), Some(TokenKind::Lsqb))
                                    && !(definition_state.in_type_params()
                                        && brackets.len() == 1) =>
                            {
                                continue; // Slice syntax, no space required
                            }
                            (TokenKind::Comma, TokenKind::Rpar | TokenKind::Rsqb) => {
                                continue; // Allow tuple with only one element: (3,)
                            }
                            (TokenKind::Colon, TokenKind::Equal) => {
                                continue; // Allow assignment expression
                            }
                            _ => {}
                        }
                    }
                    if let Some(mut diagnostic) = context.report_diagnostic_if_enabled(
                        MissingWhitespace { token: kind },
                        token.range(),
                    ) {
                        diagnostic.set_fix(Fix::safe_edit(Edit::insertion(
                            " ".to_string(),
                            token.end(),
                        )));
                    }
                }
            }
            _ => {}
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/whitespace_before_comment.rs | crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/whitespace_before_comment.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::TokenKind;
use ruff_python_trivia::PythonWhitespace;
use ruff_source_file::LineRanges;
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::Locator;
use crate::checkers::ast::LintContext;
use crate::rules::pycodestyle::rules::logical_lines::LogicalLine;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks if inline comments are separated by at least two spaces.
///
/// ## Why is this bad?
/// An inline comment is a comment on the same line as a statement.
///
/// Per [PEP 8], inline comments should be separated by at least two spaces from
/// the preceding statement.
///
/// ## Example
/// ```python
/// x = x + 1 # Increment x
/// ```
///
/// Use instead:
/// ```python
/// x = x + 1  # Increment x
/// x = x + 1 # Increment x
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#comments
// E261
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct TooFewSpacesBeforeInlineComment;
impl AlwaysFixableViolation for TooFewSpacesBeforeInlineComment {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Insert at least two spaces before an inline comment".to_string()
    }
    fn fix_title(&self) -> String {
        "Insert spaces".to_string()
    }
}
/// ## What it does
/// Checks if one space is used after inline comments.
///
/// ## Why is this bad?
/// An inline comment is a comment on the same line as a statement.
///
/// Per [PEP 8], inline comments should start with a # and a single space.
///
/// ## Example
/// ```python
/// x = x + 1 #Increment x
/// x = x + 1  #  Increment x
/// x = x + 1  # \xa0Increment x
/// ```
///
/// Use instead:
/// ```python
/// x = x + 1 # Increment x
/// x = x + 1 # Increment x
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#comments
// E262
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct NoSpaceAfterInlineComment;
impl AlwaysFixableViolation for NoSpaceAfterInlineComment {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Inline comment should start with `# `".to_string()
    }
    fn fix_title(&self) -> String {
        "Format space".to_string()
    }
}
/// ## What it does
/// Checks for block comments that lack a single space after the leading `#` character.
///
/// ## Why is this bad?
/// Per [PEP 8], "Block comments generally consist of one or more paragraphs built
/// out of complete sentences, with each sentence ending in a period."
///
/// Block comments should start with a `#` followed by a single space.
///
/// Shebangs (lines starting with `#!`, at the top of a file) are exempt from this
/// rule.
///
/// ## Example
/// ```python
/// #Block comment
/// ```
///
/// Use instead:
/// ```python
/// # Block comment
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#comments
// E265
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct NoSpaceAfterBlockComment;
impl AlwaysFixableViolation for NoSpaceAfterBlockComment {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Block comment should start with `# `".to_string()
    }
    fn fix_title(&self) -> String {
        "Format space".to_string()
    }
}
/// ## What it does
/// Checks for block comments that start with multiple leading `#` characters.
///
/// ## Why is this bad?
/// Per [PEP 8], "Block comments generally consist of one or more paragraphs built
/// out of complete sentences, with each sentence ending in a period."
///
/// Each line of a block comment should start with a `#` followed by a single space.
///
/// Shebangs (lines starting with `#!`, at the top of a file) are exempt from this
/// rule.
///
/// ## Example
/// ```python
/// ### Block comment
/// ```
///
/// Use instead:
/// ```python
/// # Block comment
/// ```
///
/// Alternatively, this rule makes an exception for comments that consist
/// solely of `#` characters, as in:
///
/// ```python
/// ##############
/// # Block header
/// ##############
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#comments
// E266
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct MultipleLeadingHashesForBlockComment;
impl AlwaysFixableViolation for MultipleLeadingHashesForBlockComment {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Too many leading `#` before block comment".to_string()
    }
    fn fix_title(&self) -> String {
        "Remove leading `#`".to_string()
    }
}
/// E261, E262, E265, E266
///
/// Walks a logical line and reports comment-formatting violations:
/// too little whitespace before an inline comment (E261), a malformed
/// `#` prefix on inline (E262) or block (E265) comments, and multiple
/// leading hashes on block comments (E266).
pub(crate) fn whitespace_before_comment(
    line: &LogicalLine,
    locator: &Locator,
    context: &LintContext,
) {
    // End offset of the last non-trivia token seen; the gap between it and a
    // comment token is the "whitespace before comment".
    let mut prev_end = TextSize::default();
    for token in line.tokens() {
        let kind = token.kind();
        if let TokenKind::Comment = kind {
            let range = token.range();
            // Source text between the start of the physical line and the comment.
            let line_text = locator.slice(TextRange::new(
                locator.line_start(range.start()),
                range.start(),
            ));
            let token_text = locator.slice(range);
            // A comment is "inline" when code precedes it on the same physical line.
            let is_inline_comment = !line_text.trim_whitespace().is_empty();
            if is_inline_comment {
                // E261: PEP 8 requires *at least two* spaces before an inline
                // comment, so both the threshold and the fix use two spaces.
                if range.start() - prev_end < "  ".text_len() {
                    if let Some(mut diagnostic) = context.report_diagnostic_if_enabled(
                        TooFewSpacesBeforeInlineComment,
                        TextRange::new(prev_end, range.start()),
                    ) {
                        diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                            "  ".to_string(),
                            TextRange::new(prev_end, range.start()),
                        )));
                    }
                }
            }
            // Split into the portion before and after the first space.
            let mut parts = token_text.splitn(2, ' ');
            let symbol = parts.next().unwrap_or("");
            let comment = parts.next().unwrap_or("");
            // The first character after the leading `#`s when the prefix is not
            // exactly `#` or `#:` (the latter is used by some type-comment tools).
            let bad_prefix = if symbol != "#" && symbol != "#:" {
                Some(symbol.trim_start_matches('#').chars().next().unwrap_or('#'))
            } else {
                None
            };
            if is_inline_comment {
                // E262: inline comments must start with exactly `# `.
                if bad_prefix.is_some() || comment.chars().next().is_some_and(char::is_whitespace) {
                    if let Some(mut diagnostic) =
                        context.report_diagnostic_if_enabled(NoSpaceAfterInlineComment, range)
                    {
                        diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                            format_leading_space(token_text),
                            range,
                        )));
                    }
                }
            } else if let Some(bad_prefix) = bad_prefix {
                // Shebang lines (`#!` at the start of the file) are exempt.
                if bad_prefix != '!' || !line.is_start_of_file() {
                    if bad_prefix != '#' {
                        // E265: block comment must start with `# `.
                        if let Some(mut diagnostic) =
                            context.report_diagnostic_if_enabled(NoSpaceAfterBlockComment, range)
                        {
                            diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                                format_leading_space(token_text),
                                range,
                            )));
                        }
                    } else if !comment.is_empty() {
                        // E266: multiple leading `#`s; comments made only of `#`
                        // (banner lines) have an empty `comment` and are allowed.
                        if let Some(mut diagnostic) = context.report_diagnostic_if_enabled(
                            MultipleLeadingHashesForBlockComment,
                            range,
                        ) {
                            diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                                format_leading_hashes(token_text),
                                range,
                            )));
                        }
                    }
                }
            }
        } else if !matches!(kind, TokenKind::NonLogicalNewline) {
            prev_end = token.end();
        }
    }
}
/// Normalize a comment so a single space follows its leading `#` (or `#:`).
///
/// `#:` prefixes (used by some type-comment tools) are preserved as-is;
/// any other prefix is collapsed to a plain `#` followed by one space.
fn format_leading_space(comment: &str) -> String {
    match comment.strip_prefix("#:") {
        Some(rest) => format!("#: {}", rest.trim_start()),
        None => {
            let body = comment.trim_start_matches('#').trim_start();
            format!("# {body}")
        }
    }
}
/// Collapse a run of leading `#` characters down to a single `# ` prefix.
fn format_leading_hashes(comment: &str) -> String {
    let body = comment.trim_start_matches('#').trim_start();
    format!("# {body}")
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/space_around_operator.rs | crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/space_around_operator.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::TokenKind;
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::LintContext;
use crate::{AlwaysFixableViolation, Edit, Fix};
use super::{LogicalLine, Whitespace};
/// ## What it does
/// Checks for extraneous tabs before an operator.
///
/// ## Why is this bad?
/// According to [PEP 8], operators should be surrounded by at most a single space on either
/// side.
///
/// ## Example
/// ```python
/// a = 4\t+ 5
/// ```
///
/// Use instead:
/// ```python
/// a = 4 + 5
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#whitespace-in-expressions-and-statements
// E223
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct TabBeforeOperator;
impl AlwaysFixableViolation for TabBeforeOperator {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Tab before operator".to_string()
    }
    fn fix_title(&self) -> String {
        "Replace with single space".to_string()
    }
}
/// ## What it does
/// Checks for extraneous whitespace before an operator.
///
/// ## Why is this bad?
/// According to [PEP 8], operators should be surrounded by at most a single space on either
/// side.
///
/// ## Example
/// ```python
/// a = 4  + 5
/// ```
///
/// Use instead:
/// ```python
/// a = 4 + 5
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#whitespace-in-expressions-and-statements
// E221
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct MultipleSpacesBeforeOperator;
impl AlwaysFixableViolation for MultipleSpacesBeforeOperator {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Multiple spaces before operator".to_string()
    }
    fn fix_title(&self) -> String {
        "Replace with single space".to_string()
    }
}
/// ## What it does
/// Checks for extraneous tabs after an operator.
///
/// ## Why is this bad?
/// According to [PEP 8], operators should be surrounded by at most a single space on either
/// side.
///
/// ## Example
/// ```python
/// a = 4 +\t5
/// ```
///
/// Use instead:
/// ```python
/// a = 4 + 5
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#whitespace-in-expressions-and-statements
// E224
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct TabAfterOperator;
impl AlwaysFixableViolation for TabAfterOperator {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Tab after operator".to_string()
    }
    fn fix_title(&self) -> String {
        "Replace with single space".to_string()
    }
}
/// ## What it does
/// Checks for extraneous whitespace after an operator.
///
/// ## Why is this bad?
/// According to [PEP 8], operators should be surrounded by at most a single space on either
/// side.
///
/// ## Example
/// ```python
/// a = 4 +  5
/// ```
///
/// Use instead:
/// ```python
/// a = 4 + 5
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#whitespace-in-expressions-and-statements
// E222
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct MultipleSpacesAfterOperator;
impl AlwaysFixableViolation for MultipleSpacesAfterOperator {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Multiple spaces after operator".to_string()
    }
    fn fix_title(&self) -> String {
        "Replace with single space".to_string()
    }
}
/// ## What it does
/// Checks for extraneous tabs after a comma.
///
/// ## Why is this bad?
/// Commas should be followed by one space, never tabs.
///
/// ## Example
/// ```python
/// a = 4,\t5
/// ```
///
/// Use instead:
/// ```python
/// a = 4, 5
/// ```
///
// E242
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.281")]
pub(crate) struct TabAfterComma;
impl AlwaysFixableViolation for TabAfterComma {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Tab after comma".to_string()
    }
    fn fix_title(&self) -> String {
        "Replace with single space".to_string()
    }
}
/// ## What it does
/// Checks for extraneous whitespace after a comma.
///
/// ## Why is this bad?
/// Consistency is good. This rule helps ensure you have a consistent
/// formatting style across your project.
///
/// ## Example
/// ```python
/// a = 4,  5
/// ```
///
/// Use instead:
/// ```python
/// a = 4, 5
/// ```
// E241
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.281")]
pub(crate) struct MultipleSpacesAfterComma;
impl AlwaysFixableViolation for MultipleSpacesAfterComma {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Multiple spaces after comma".to_string()
    }
    fn fix_title(&self) -> String {
        "Replace with single space".to_string()
    }
}
/// E221, E222, E223, E224
///
/// Reports tabs or runs of spaces immediately before (E223/E221) and after
/// (E224/E222) an operator token; the fix replaces the run with one space.
pub(crate) fn space_around_operator(line: &LogicalLine, context: &LintContext) {
    // `true` while the previously visited token was itself an operator:
    // between two adjacent operators only the gap after the first is checked.
    let mut prev_token_was_operator = false;
    for token in line.tokens() {
        let is_operator = is_operator_token(token.kind());
        if is_operator {
            // Whitespace between the preceding token and this operator.
            if !prev_token_was_operator {
                let (whitespace, offset) = line.leading_whitespace(token);
                let range = TextRange::at(token.start() - offset, offset);
                match whitespace {
                    Whitespace::Tab => {
                        if let Some(mut diagnostic) =
                            context.report_diagnostic_if_enabled(TabBeforeOperator, range)
                        {
                            diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                                " ".to_string(),
                                range,
                            )));
                        }
                    }
                    Whitespace::Many => {
                        if let Some(mut diagnostic) = context
                            .report_diagnostic_if_enabled(MultipleSpacesBeforeOperator, range)
                        {
                            diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                                " ".to_string(),
                                range,
                            )));
                        }
                    }
                    _ => {}
                }
            }
            // Whitespace between this operator and the following token.
            let (whitespace, len) = line.trailing_whitespace(token);
            let range = TextRange::at(token.end(), len);
            match whitespace {
                Whitespace::Tab => {
                    if let Some(mut diagnostic) =
                        context.report_diagnostic_if_enabled(TabAfterOperator, range)
                    {
                        diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                            " ".to_string(),
                            range,
                        )));
                    }
                }
                Whitespace::Many => {
                    if let Some(mut diagnostic) =
                        context.report_diagnostic_if_enabled(MultipleSpacesAfterOperator, range)
                    {
                        diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                            " ".to_string(),
                            range,
                        )));
                    }
                }
                _ => {}
            }
        }
        prev_token_was_operator = is_operator;
    }
}
/// E241, E242
///
/// Reports a tab (E242) or a run of spaces (E241) immediately following a
/// comma; the fix replaces the whitespace run with a single space.
pub(crate) fn space_after_comma(line: &LogicalLine, context: &LintContext) {
    for token in line.tokens() {
        // Only whitespace that trails a comma is policed here.
        if !matches!(token.kind(), TokenKind::Comma) {
            continue;
        }
        let (whitespace, len) = line.trailing_whitespace(token);
        let range = TextRange::at(token.end(), len);
        match whitespace {
            Whitespace::Tab => {
                if let Some(mut diagnostic) =
                    context.report_diagnostic_if_enabled(TabAfterComma, range)
                {
                    diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                        " ".to_string(),
                        range,
                    )));
                }
            }
            Whitespace::Many => {
                if let Some(mut diagnostic) =
                    context.report_diagnostic_if_enabled(MultipleSpacesAfterComma, range)
                {
                    diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                        " ".to_string(),
                        range,
                    )));
                }
            }
            _ => {}
        }
    }
}
/// Whether `token` is an operator whose surrounding whitespace E221–E224 police:
/// binary/comparison operators plus (augmented) assignment operators.
const fn is_operator_token(token: TokenKind) -> bool {
    // Plain binary, bitwise, shift, and comparison operators.
    let binary_or_comparison = matches!(
        token,
        TokenKind::Plus
            | TokenKind::Minus
            | TokenKind::Star
            | TokenKind::Slash
            | TokenKind::DoubleSlash
            | TokenKind::DoubleStar
            | TokenKind::Percent
            | TokenKind::Vbar
            | TokenKind::Amper
            | TokenKind::CircumFlex
            | TokenKind::LeftShift
            | TokenKind::RightShift
            | TokenKind::Less
            | TokenKind::Greater
            | TokenKind::LessEqual
            | TokenKind::GreaterEqual
            | TokenKind::EqEqual
            | TokenKind::NotEqual
    );
    // Assignment, walrus, and augmented-assignment operators.
    let assignment = matches!(
        token,
        TokenKind::Equal
            | TokenKind::ColonEqual
            | TokenKind::PlusEqual
            | TokenKind::MinusEqual
            | TokenKind::StarEqual
            | TokenKind::SlashEqual
            | TokenKind::DoubleSlashEqual
            | TokenKind::DoubleStarEqual
            | TokenKind::PercentEqual
            | TokenKind::AmperEqual
            | TokenKind::VbarEqual
            | TokenKind::CircumflexEqual
            | TokenKind::LeftShiftEqual
            | TokenKind::RightShiftEqual
    );
    binary_or_comparison || assignment
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/whitespace_around_named_parameter_equals.rs | crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/whitespace_around_named_parameter_equals.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::TokenKind;
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::checkers::ast::LintContext;
use crate::rules::pycodestyle::rules::logical_lines::{DefinitionState, LogicalLine};
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for missing whitespace around the equals sign in an unannotated
/// function keyword parameter.
///
/// ## Why is this bad?
/// According to [PEP 8], there should be no spaces around the equals sign in a
/// keyword parameter, if it is unannotated:
///
/// > Don’t use spaces around the = sign when used to indicate a keyword
/// > argument, or when used to indicate a default value for an unannotated
/// > function parameter.
///
/// ## Example
/// ```python
/// def add(a = 0) -> int:
/// return a + 1
/// ```
///
/// Use instead:
/// ```python
/// def add(a=0) -> int:
/// return a + 1
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#whitespace-in-expressions-and-statements
// E251
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct UnexpectedSpacesAroundKeywordParameterEquals;
impl AlwaysFixableViolation for UnexpectedSpacesAroundKeywordParameterEquals {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Unexpected spaces around keyword / parameter equals".to_string()
    }
    fn fix_title(&self) -> String {
        "Remove whitespace".to_string()
    }
}
/// ## What it does
/// Checks for missing whitespace around the equals sign in an annotated
/// function keyword parameter.
///
/// ## Why is this bad?
/// According to [PEP 8], the spaces around the equals sign in a keyword
/// parameter should only be omitted when the parameter is unannotated:
///
/// > Don’t use spaces around the = sign when used to indicate a keyword
/// > argument, or when used to indicate a default value for an unannotated
/// > function parameter.
///
/// ## Example
/// ```python
/// def add(a: int=0) -> int:
/// return a + 1
/// ```
///
/// Use instead:
/// ```python
/// def add(a: int = 0) -> int:
/// return a + 1
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#whitespace-in-expressions-and-statements
// E252
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct MissingWhitespaceAroundParameterEquals;
impl AlwaysFixableViolation for MissingWhitespaceAroundParameterEquals {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing whitespace around parameter equals".to_string()
    }
    fn fix_title(&self) -> String {
        "Add missing whitespace".to_string()
    }
}
/// E251, E252
pub(crate) fn whitespace_around_named_parameter_equals(line: &LogicalLine, context: &LintContext) {
    // Depth of open `(` / `[` pairs; function parameter defaults live at depth 1.
    let mut parens = 0u32;
    // Depth of open f-strings; `=` inside one is not a keyword/default `=`.
    let mut fstrings = 0u32;
    // Whether the parameter currently being scanned carries a `:` annotation.
    let mut annotated_func_arg = false;
    // End offset of the previous token, used to detect adjacency to `=`.
    let mut prev_end = TextSize::default();
    let mut definition_state = DefinitionState::from_tokens(line.tokens());
    let mut iter = line.tokens().iter().peekable();
    while let Some(token) = iter.next() {
        let kind = token.kind();
        definition_state.visit_token_kind(kind);
        match kind {
            TokenKind::NonLogicalNewline => continue,
            TokenKind::FStringStart => fstrings += 1,
            TokenKind::FStringEnd => fstrings = fstrings.saturating_sub(1),
            TokenKind::Lpar | TokenKind::Lsqb => {
                parens = parens.saturating_add(1);
            }
            TokenKind::Rpar | TokenKind::Rsqb => {
                parens = parens.saturating_sub(1);
                if parens == 0 {
                    annotated_func_arg = false;
                }
            }
            TokenKind::Colon if parens == 1 && definition_state.in_function_definition() => {
                annotated_func_arg = true;
            }
            TokenKind::Comma if parens == 1 => {
                annotated_func_arg = false;
            }
            TokenKind::Equal
                if definition_state.in_type_params() || (parens > 0 && fstrings == 0) =>
            {
                if definition_state.in_type_params() || (annotated_func_arg && parens == 1) {
                    // E252: an annotated default (or a type-parameter default)
                    // requires a space on both sides of `=`.
                    let start = token.start();
                    if start == prev_end && prev_end != TextSize::new(0) {
                        if let Some(mut diagnostic) = context.report_diagnostic_if_enabled(
                            MissingWhitespaceAroundParameterEquals,
                            token.range,
                        ) {
                            diagnostic.set_fix(Fix::safe_edit(Edit::insertion(
                                " ".to_string(),
                                token.start(),
                            )));
                        }
                    }
                    // Skip non-logical newlines to find the real next token.
                    while let Some(next) = iter.peek() {
                        if next.kind() == TokenKind::NonLogicalNewline {
                            iter.next();
                        } else {
                            let next_start = next.start();
                            if next_start == token.end() {
                                if let Some(mut diagnostic) = context.report_diagnostic_if_enabled(
                                    MissingWhitespaceAroundParameterEquals,
                                    token.range,
                                ) {
                                    diagnostic.set_fix(Fix::safe_edit(Edit::insertion(
                                        " ".to_string(),
                                        token.end(),
                                    )));
                                }
                            }
                            break;
                        }
                    }
                } else {
                    // E251 below: unannotated keyword/default `=` must be flush.
                    // If there's space between the preceding token and the equals sign, report it.
                    if token.start() != prev_end {
                        if let Some(mut diagnostic) = context.report_diagnostic_if_enabled(
                            UnexpectedSpacesAroundKeywordParameterEquals,
                            TextRange::new(prev_end, token.start()),
                        ) {
                            diagnostic
                                .set_fix(Fix::safe_edit(Edit::deletion(prev_end, token.start())));
                        }
                    }
                    // If there's space between the equals sign and the following token, report it.
                    while let Some(next) = iter.peek() {
                        if next.kind() == TokenKind::NonLogicalNewline {
                            iter.next();
                        } else {
                            if next.start() != token.end() {
                                if let Some(mut diagnostic) = context.report_diagnostic_if_enabled(
                                    UnexpectedSpacesAroundKeywordParameterEquals,
                                    TextRange::new(token.end(), next.start()),
                                ) {
                                    diagnostic.set_fix(Fix::safe_edit(Edit::deletion(
                                        token.end(),
                                        next.start(),
                                    )));
                                }
                            }
                            break;
                        }
                    }
                }
            }
            _ => {}
        }
        prev_end = token.end();
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/redundant_backslash.rs | crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/redundant_backslash.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::TokenKind;
use ruff_python_index::Indexer;
use ruff_source_file::LineRanges;
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::Locator;
use crate::checkers::ast::LintContext;
use crate::{AlwaysFixableViolation, Edit, Fix};
use super::LogicalLine;
/// ## What it does
/// Checks for redundant backslashes between brackets.
///
/// ## Why is this bad?
/// Explicit line joins using a backslash are redundant between brackets.
///
/// ## Example
/// ```python
/// x = (2 + \
/// 2)
/// ```
///
/// Use instead:
/// ```python
/// x = (2 +
/// 2)
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#maximum-line-length
// E502
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.3.3")]
pub(crate) struct RedundantBackslash;
impl AlwaysFixableViolation for RedundantBackslash {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Redundant backslash".to_string()
    }
    fn fix_title(&self) -> String {
        "Remove redundant backslash".to_string()
    }
}
/// E502
pub(crate) fn redundant_backslash(
    line: &LogicalLine,
    locator: &Locator,
    indexer: &Indexer,
    context: &LintContext,
) {
    // Bracket nesting depth; a `\` line join is only redundant inside brackets.
    let mut parens = 0;
    // Start offsets of the lines that follow a `\` continuation, searched with
    // `binary_search` below (so assumed sorted — TODO confirm against `Indexer`).
    let continuation_lines = indexer.continuation_line_starts();
    let mut start_index = 0;
    for token in line.tokens() {
        match token.kind() {
            TokenKind::Lpar | TokenKind::Lsqb | TokenKind::Lbrace => {
                if parens == 0 {
                    // Entering an outermost bracket pair: remember where the
                    // continuations inside it begin.
                    let start = locator.line_start(token.start());
                    start_index = continuation_lines
                        .binary_search(&start)
                        .unwrap_or_else(|err_index| err_index);
                }
                parens += 1;
            }
            TokenKind::Rpar | TokenKind::Rsqb | TokenKind::Rbrace => {
                parens -= 1;
                if parens == 0 {
                    // Leaving the outermost pair: every continuation recorded
                    // between the brackets is redundant.
                    let end = locator.line_start(token.start());
                    let end_index = continuation_lines
                        .binary_search(&end)
                        .unwrap_or_else(|err_index| err_index);
                    for continuation_line in &continuation_lines[start_index..end_index] {
                        // The backslash is the last character of the line.
                        let backslash_end = locator.line_end(*continuation_line);
                        let backslash_start = backslash_end - TextSize::new(1);
                        if let Some(mut diagnostic) = context.report_diagnostic_if_enabled(
                            RedundantBackslash,
                            TextRange::new(backslash_start, backslash_end),
                        ) {
                            diagnostic.set_fix(Fix::safe_edit(Edit::deletion(
                                backslash_start,
                                backslash_end,
                            )));
                        }
                    }
                }
            }
            _ => continue,
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/missing_whitespace_around_operator.rs | crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/missing_whitespace_around_operator.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::TokenKind;
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::LintContext;
use crate::rules::pycodestyle::helpers::is_non_logical_token;
use crate::rules::pycodestyle::rules::logical_lines::{DefinitionState, LogicalLine};
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for missing whitespace around all operators.
///
/// ## Why is this bad?
/// According to [PEP 8], there should be one space before and after all
/// assignment (`=`), augmented assignment (`+=`, `-=`, etc.), comparison,
/// and Boolean operators.
///
/// ## Example
/// ```python
/// if number==42:
///     print('you have found the meaning of life')
/// ```
///
/// Use instead:
/// ```python
/// if number == 42:
///     print('you have found the meaning of life')
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#pet-peeves
// E225
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct MissingWhitespaceAroundOperator;
impl AlwaysFixableViolation for MissingWhitespaceAroundOperator {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing whitespace around operator".to_string()
    }
    fn fix_title(&self) -> String {
        "Add missing whitespace".to_string()
    }
}
/// ## What it does
/// Checks for missing whitespace around arithmetic operators.
///
/// ## Why is this bad?
/// [PEP 8] recommends never using more than one space, and always having the
/// same amount of whitespace on both sides of a binary operator.
///
/// For consistency, this rule enforces one space before and after an
/// arithmetic operator (`+`, `-`, `/`, and `*`).
///
/// (Note that [PEP 8] suggests only adding whitespace around the operator with
/// the lowest precedence, but that authors should "use [their] own judgment".)
///
/// ## Example
/// ```python
/// number = 40+2
/// ```
///
/// Use instead:
/// ```python
/// number = 40 + 2
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#other-recommendations
// E226
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct MissingWhitespaceAroundArithmeticOperator;
impl AlwaysFixableViolation for MissingWhitespaceAroundArithmeticOperator {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing whitespace around arithmetic operator".to_string()
    }
    fn fix_title(&self) -> String {
        "Add missing whitespace".to_string()
    }
}
/// ## What it does
/// Checks for missing whitespace around bitwise and shift operators.
///
/// ## Why is this bad?
/// [PEP 8] recommends never using more than one space, and always having the
/// same amount of whitespace on both sides of a binary operator.
///
/// For consistency, this rule enforces one space before and after bitwise and
/// shift operators (`<<`, `>>`, `&`, `|`, `^`).
///
/// (Note that [PEP 8] suggests only adding whitespace around the operator with
/// the lowest precedence, but that authors should "use [their] own judgment".)
///
/// ## Example
/// ```python
/// x = 128<<1
/// ```
///
/// Use instead:
/// ```python
/// x = 128 << 1
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#other-recommendations
// E227
// NOTE: the unary tilde (`~`) is never flagged — `is_whitespace_needed`
// deliberately exempts it.
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct MissingWhitespaceAroundBitwiseOrShiftOperator;
impl AlwaysFixableViolation for MissingWhitespaceAroundBitwiseOrShiftOperator {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing whitespace around bitwise or shift operator".to_string()
    }
    fn fix_title(&self) -> String {
        "Add missing whitespace".to_string()
    }
}
/// ## What it does
/// Checks for missing whitespace around the modulo operator.
///
/// ## Why is this bad?
/// [PEP 8] recommends never using more than one space, and always having the
/// same amount of whitespace on both sides of a binary operator.
///
/// For consistency, this rule enforces one space before and after a modulo
/// operator (`%`).
///
/// (Note that [PEP 8] suggests only adding whitespace around the operator with
/// the lowest precedence, but that authors should "use [their] own judgment".)
///
/// ## Example
/// ```python
/// remainder = 10%2
/// ```
///
/// Use instead:
/// ```python
/// remainder = 10 % 2
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#other-recommendations
// E228
// The `%` token is routed to this rule by `diagnostic_kind_for_operator`.
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct MissingWhitespaceAroundModuloOperator;
impl AlwaysFixableViolation for MissingWhitespaceAroundModuloOperator {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Missing whitespace around modulo operator".to_string()
    }
    fn fix_title(&self) -> String {
        "Add missing whitespace".to_string()
    }
}
/// E225, E226, E227, E228
///
/// Walks the tokens of one logical line, decides for each operator token what
/// spacing it requires, and reports/fixes inconsistent or missing whitespace.
pub(crate) fn missing_whitespace_around_operator(line: &LogicalLine, context: &LintContext) {
    let mut definition_state = DefinitionState::from_tokens(line.tokens());
    let mut tokens = line.tokens().iter().peekable();
    // Skip leading non-logical tokens (e.g. comments) to find the first real token.
    let first_token = tokens
        .by_ref()
        .find(|token| !is_non_logical_token(token.kind()));
    let Some(mut prev_token) = first_token else {
        return;
    };
    // Bracket/lambda nesting depth, used below to exempt `=` in keyword
    // arguments and parameter defaults.
    let mut parens = u32::from(matches!(
        prev_token.kind(),
        TokenKind::Lpar | TokenKind::Lambda
    ));
    // F-string nesting depth, used to exempt the `=` in `f'{foo=}'`.
    let mut fstrings = u32::from(matches!(prev_token.kind(), TokenKind::FStringStart));
    while let Some(token) = tokens.next() {
        let kind = token.kind();
        definition_state.visit_token_kind(kind);
        if is_non_logical_token(kind) {
            continue;
        }
        match kind {
            TokenKind::FStringStart => fstrings += 1,
            TokenKind::FStringEnd => fstrings = fstrings.saturating_sub(1),
            TokenKind::Lpar | TokenKind::Lambda => parens += 1,
            TokenKind::Rpar => parens = parens.saturating_sub(1),
            _ => {}
        }
        let needs_space = if kind == TokenKind::Equal
            && (parens > 0 || fstrings > 0 || definition_state.in_type_params())
        {
            // Allow keyword args, defaults: foo(bar=None) and f-strings: f'{foo=}'
            // Also ignore `foo[T=int]`, which is handled by E251.
            NeedsSpace::No
        } else if kind == TokenKind::Slash {
            // Tolerate the "/" operator in function definition
            // For more info see PEP570
            // `def f(a, /, b):` or `def f(a, b, /):` or `f = lambda a, /:`
            //           ^                         ^                     ^
            let slash_in_func = matches!(
                tokens.peek().map(|t| t.kind()),
                Some(TokenKind::Comma | TokenKind::Rpar | TokenKind::Colon)
            );
            NeedsSpace::from(!slash_in_func)
        } else if kind.is_unary_arithmetic_operator()
            || matches!(kind, TokenKind::Star | TokenKind::DoubleStar)
        {
            let is_binary = {
                let prev_kind = prev_token.kind();
                // Check if the operator is used as a binary operator.
                // Allow unary operators: -123, -x, +1.
                // Allow argument unpacking: foo(*args, **kwargs)
                matches!(
                    prev_kind,
                    TokenKind::Rpar | TokenKind::Rsqb | TokenKind::Rbrace
                ) || !(prev_kind.is_operator() || prev_kind.is_keyword())
            };
            if is_binary {
                if kind == TokenKind::DoubleStar {
                    // Enforce consistent spacing, but don't enforce whitespaces.
                    NeedsSpace::Optional
                } else {
                    NeedsSpace::Yes
                }
            } else {
                NeedsSpace::No
            }
        } else if tokens.peek().is_some_and(|token| {
            matches!(
                token.kind(),
                TokenKind::Rpar | TokenKind::Rsqb | TokenKind::Rbrace
            )
        }) {
            // There should not be a closing bracket directly after a token, as it is a syntax
            // error. For example:
            // ```
            // 1+)
            // ```
            //
            // However, allow it in order to prevent entering an infinite loop in which E225 adds a
            // space only for E202 to remove it.
            NeedsSpace::No
        } else if is_whitespace_needed(kind) {
            NeedsSpace::Yes
        } else {
            NeedsSpace::No
        };
        if needs_space != NeedsSpace::No {
            // A gap between tokens, or an adjacent non-logical token (e.g. a
            // comment), counts as whitespace on that side.
            let has_leading_trivia =
                prev_token.end() < token.start() || is_non_logical_token(prev_token.kind());
            let has_trailing_trivia = tokens
                .peek()
                .is_none_or(|next| token.end() < next.start() || is_non_logical_token(next.kind()));
            match (has_leading_trivia, has_trailing_trivia) {
                // Operator with trailing but no leading space, enforce consistent spacing.
                (false, true) => {
                    if let Some(mut diagnostic) =
                        diagnostic_kind_for_operator(kind, token.range(), context)
                    {
                        diagnostic.set_fix(Fix::safe_edit(Edit::insertion(
                            " ".to_string(),
                            token.start(),
                        )));
                    }
                }
                // Operator with leading but no trailing space, enforce consistent spacing.
                (true, false) => {
                    if let Some(mut diagnostic) =
                        diagnostic_kind_for_operator(kind, token.range(), context)
                    {
                        diagnostic.set_fix(Fix::safe_edit(Edit::insertion(
                            " ".to_string(),
                            token.end(),
                        )));
                    }
                }
                // Operator with no space, require spaces if it is required by the operator.
                (false, false) => {
                    if needs_space == NeedsSpace::Yes {
                        if let Some(mut diagnostic) =
                            diagnostic_kind_for_operator(kind, token.range(), context)
                        {
                            diagnostic.set_fix(Fix::safe_edits(
                                Edit::insertion(" ".to_string(), token.start()),
                                [Edit::insertion(" ".to_string(), token.end())],
                            ));
                        }
                    }
                }
                (true, true) => {
                    // Operator has leading and trailing spaces, all good.
                }
            }
        }
        prev_token = token;
    }
}
/// The spacing requirement imposed on an operator token by E225–E228.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
enum NeedsSpace {
    /// The operator must carry both a leading and a trailing space.
    Yes,
    /// No spacing requirement: surrounding whitespace is ignored entirely.
    No,
    /// Spacing must merely be consistent: a space on either side requires a
    /// space on both sides.
    Optional,
}
impl From<bool> for NeedsSpace {
    fn from(value: bool) -> Self {
        match value {
            true => NeedsSpace::Yes,
            false => NeedsSpace::No,
        }
    }
}
/// Report the most specific of E225/E226/E227/E228 for the given operator,
/// returning the diagnostic guard so the caller can attach a fix.
fn diagnostic_kind_for_operator<'a>(
    operator: TokenKind,
    range: TextRange,
    context: &'a LintContext<'a>,
) -> Option<crate::checkers::ast::DiagnosticGuard<'a, 'a>> {
    // E228: the modulo operator gets its own rule.
    if operator == TokenKind::Percent {
        return context.report_diagnostic_if_enabled(MissingWhitespaceAroundModuloOperator, range);
    }
    // E227: bitwise and shift operators.
    if operator.is_bitwise_or_shift() {
        return context
            .report_diagnostic_if_enabled(MissingWhitespaceAroundBitwiseOrShiftOperator, range);
    }
    // E226: remaining arithmetic operators.
    if operator.is_arithmetic() {
        return context
            .report_diagnostic_if_enabled(MissingWhitespaceAroundArithmeticOperator, range);
    }
    // E225: everything else (assignment, comparison, boolean, arrow, ...).
    context.report_diagnostic_if_enabled(MissingWhitespaceAroundOperator, range)
}
/// Returns `true` if `kind` is an operator around which E225 requires
/// surrounding whitespace.
fn is_whitespace_needed(kind: TokenKind) -> bool {
    // Assignment, augmented assignment, comparison, boolean, walrus, and
    // arrow tokens always require whitespace.
    let always_spaced = matches!(
        kind,
        TokenKind::DoubleStarEqual
            | TokenKind::StarEqual
            | TokenKind::SlashEqual
            | TokenKind::DoubleSlashEqual
            | TokenKind::PlusEqual
            | TokenKind::MinusEqual
            | TokenKind::NotEqual
            | TokenKind::Less
            | TokenKind::Greater
            | TokenKind::PercentEqual
            | TokenKind::CircumflexEqual
            | TokenKind::AmperEqual
            | TokenKind::VbarEqual
            | TokenKind::EqEqual
            | TokenKind::LessEqual
            | TokenKind::GreaterEqual
            | TokenKind::LeftShiftEqual
            | TokenKind::RightShiftEqual
            | TokenKind::Equal
            | TokenKind::And
            | TokenKind::Or
            | TokenKind::In
            | TokenKind::Is
            | TokenKind::Rarrow
            | TokenKind::ColonEqual
            | TokenKind::Slash
            | TokenKind::Percent
    );
    // As a special-case, pycodestyle seems to ignore whitespace around the
    // tilde, so exclude it from the bitwise/shift group.
    let spaced_bitwise_or_shift =
        kind.is_bitwise_or_shift() && !matches!(kind, TokenKind::Tilde);
    always_spaced || kind.is_arithmetic() || spaced_bitwise_or_shift
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/indentation.rs | crates/ruff_linter/src/rules/pycodestyle/rules/logical_lines/indentation.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::TokenKind;
use ruff_text_size::TextRange;
use crate::Violation;
use crate::checkers::ast::LintContext;
use super::LogicalLine;
/// ## What it does
/// Checks for indentation with a non-multiple of 4 spaces.
///
/// ## Why is this bad?
/// According to [PEP 8], 4 spaces per indentation level should be preferred.
///
/// ## Example
/// ```python
/// if True:
///    a = 1
/// ```
///
/// Use instead:
/// ```python
/// if True:
///     a = 1
/// ```
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent indentation, making the rule redundant.
///
/// The rule is also incompatible with the [formatter] when using
/// `indent-width` with a value other than `4`.
///
/// ## Options
/// - `indent-width`
///
/// [PEP 8]: https://peps.python.org/pep-0008/#indentation
/// [formatter]:https://docs.astral.sh/ruff/formatter/
// E111
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct IndentationWithInvalidMultiple {
    indent_width: usize,
}
impl Violation for IndentationWithInvalidMultiple {
    #[derive_message_formats]
    fn message(&self) -> String {
        let Self { indent_width } = self;
        format!("Indentation is not a multiple of {indent_width}")
    }
}
/// ## What it does
/// Checks for indentation of comments with a non-multiple of 4 spaces.
///
/// ## Why is this bad?
/// According to [PEP 8], 4 spaces per indentation level should be preferred.
///
/// ## Example
/// ```python
/// if True:
///    # a = 1
///     ...
/// ```
///
/// Use instead:
/// ```python
/// if True:
///     # a = 1
///     ...
/// ```
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent indentation, making the rule redundant.
///
/// The rule is also incompatible with the [formatter] when using
/// `indent-width` with a value other than `4`.
///
/// ## Options
/// - `indent-width`
///
/// [PEP 8]: https://peps.python.org/pep-0008/#indentation
/// [formatter]:https://docs.astral.sh/ruff/formatter/
// E114
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct IndentationWithInvalidMultipleComment {
    indent_width: usize,
}
impl Violation for IndentationWithInvalidMultipleComment {
    #[derive_message_formats]
    fn message(&self) -> String {
        let Self { indent_width } = self;
        format!("Indentation is not a multiple of {indent_width} (comment)")
    }
}
/// ## What it does
/// Checks for indented blocks that are lacking indentation.
///
/// ## Why is this bad?
/// A statement that introduces a block (e.g. a line ending in a colon) must be
/// followed by an indented block; otherwise, the code is not valid Python
/// syntax.
///
/// ## Example
/// ```python
/// for item in items:
/// pass
/// ```
///
/// Use instead:
/// ```python
/// for item in items:
///     pass
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#indentation
// E112
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct NoIndentedBlock;
impl Violation for NoIndentedBlock {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Expected an indented block".to_string()
    }
}
/// ## What it does
/// Checks for comments in a code blocks that are lacking indentation.
///
/// ## Why is this bad?
/// Comments within an indented block should themselves be indented, to
/// indicate that they are part of the block.
///
/// ## Example
/// ```python
/// for item in items:
/// # Hi
///     pass
/// ```
///
/// Use instead:
/// ```python
/// for item in items:
///     # Hi
///     pass
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#indentation
// E115
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct NoIndentedBlockComment;
impl Violation for NoIndentedBlockComment {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Expected an indented block (comment)".to_string()
    }
}
/// ## What it does
/// Checks for unexpected indentation.
///
/// ## Why is this bad?
/// Indentation outside of a code block is not valid Python syntax.
///
/// ## Example
/// ```python
/// a = 1
///     b = 2
/// ```
///
/// Use instead:
/// ```python
/// a = 1
/// b = 2
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#indentation
// E113
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct UnexpectedIndentation;
impl Violation for UnexpectedIndentation {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Unexpected indentation".to_string()
    }
}
/// ## What it does
/// Checks for unexpected indentation of comment.
///
/// ## Why is this bad?
/// Comments should match the indentation of the containing code block.
///
/// ## Example
/// ```python
/// a = 1
///     # b = 2
/// ```
///
/// Use instead:
/// ```python
/// a = 1
/// # b = 2
/// ```
///
/// [PEP 8]: https://peps.python.org/pep-0008/#indentation
// E116
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct UnexpectedIndentationComment;
impl Violation for UnexpectedIndentationComment {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Unexpected indentation (comment)".to_string()
    }
}
/// ## What it does
/// Checks for over-indented code.
///
/// ## Why is this bad?
/// According to [PEP 8], 4 spaces per indentation level should be preferred. Increased
/// indentation can lead to inconsistent formatting, which can hurt
/// readability.
///
/// ## Example
/// ```python
/// for item in items:
///       pass
/// ```
///
/// Use instead:
/// ```python
/// for item in items:
///     pass
/// ```
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent indentation, making the rule redundant.
///
/// [PEP 8]: https://peps.python.org/pep-0008/#indentation
/// [formatter]:https://docs.astral.sh/ruff/formatter/
// E117
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.0.269")]
pub(crate) struct OverIndented {
    // `true` when the over-indented line consists only of a comment.
    is_comment: bool,
}
impl Violation for OverIndented {
    #[derive_message_formats]
    fn message(&self) -> String {
        if self.is_comment {
            "Over-indented (comment)".to_string()
        } else {
            "Over-indented".to_string()
        }
    }
}
/// E111, E112, E113, E114, E115, E116, E117
///
/// Compares the indentation of one logical line against its predecessor and
/// reports the applicable indentation rule(s).
#[expect(clippy::too_many_arguments)]
pub(crate) fn indentation(
    logical_line: &LogicalLine,
    prev_logical_line: Option<&LogicalLine>,
    indent_char: char,
    indent_level: usize,
    prev_indent_level: Option<usize>,
    indent_size: usize,
    range: TextRange,
    context: &LintContext,
) {
    // E111/E114: indentation must be a multiple of the configured indent size.
    if !indent_level.is_multiple_of(indent_size) {
        if logical_line.is_comment_only() {
            context.report_diagnostic_if_enabled(
                IndentationWithInvalidMultipleComment {
                    indent_width: indent_size,
                },
                range,
            );
        } else {
            context.report_diagnostic_if_enabled(
                IndentationWithInvalidMultiple {
                    indent_width: indent_size,
                },
                range,
            );
        }
    }
    // An indented block is expected when the previous logical line's last
    // significant token is a colon.
    let indent_expect = prev_logical_line
        .and_then(|prev_logical_line| prev_logical_line.tokens_trimmed().last())
        .is_some_and(|t| t.kind() == TokenKind::Colon);
    if indent_expect && indent_level <= prev_indent_level.unwrap_or(0) {
        // E112/E115: a block was expected, but this line is not indented deeper.
        if logical_line.is_comment_only() {
            context.report_diagnostic_if_enabled(NoIndentedBlockComment, range);
        } else {
            context.report_diagnostic_if_enabled(NoIndentedBlock, range);
        }
    } else if !indent_expect
        && prev_indent_level.is_some_and(|prev_indent_level| indent_level > prev_indent_level)
    {
        // E113/E116: indentation increased although no block was expected.
        if logical_line.is_comment_only() {
            context.report_diagnostic_if_enabled(UnexpectedIndentationComment, range);
        } else {
            context.report_diagnostic_if_enabled(UnexpectedIndentation, range);
        }
    }
    if indent_expect {
        // E117: flag blocks indented by more than one level; a tab is treated
        // as 8 columns here, a space-based level as 4.
        let expected_indent_amount = if indent_char == '\t' { 8 } else { 4 };
        let expected_indent_level = prev_indent_level.unwrap_or(0) + expected_indent_amount;
        if indent_level > expected_indent_level {
            context.report_diagnostic_if_enabled(
                OverIndented {
                    is_comment: logical_line.is_comment_only(),
                },
                range,
            );
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_datetimez/helpers.rs | crates/ruff_linter/src/rules/flake8_datetimez/helpers.rs | use ruff_python_ast::{AnyNodeRef, Expr, ExprAttribute, ExprCall};
use crate::checkers::ast::Checker;
/// The way in which a `datetime` construction fails to attach timezone
/// information, used to pick the diagnostic message and fix title.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub(super) enum DatetimeModuleAntipattern {
    /// No timezone was supplied at all (e.g. no `tz`/`tzinfo` argument, or a
    /// `strptime` format string without `%z`).
    NoTzArgumentPassed,
    /// An explicit `None` was passed where a timezone was expected
    /// (e.g. `tzinfo=None`).
    NonePassedToTzArgument,
}
/// Check if the "current expression" being visited is followed
/// in the source code by a chain of `.replace()` calls followed by `.astimezone`.
/// The function operates on the assumption that the current expression
/// is a [`datetime.datetime`][datetime] object.
///
/// For example, given the following Python source code:
///
/// ```py
/// import datetime
///
/// datetime.now().replace(hours=4).replace(minutes=46).astimezone()
/// ```
///
/// The last line will produce an AST looking something like this
/// (this is pseudocode approximating our AST):
///
/// ```rs
/// Call {
/// func: Attribute {
/// value: Call {
/// func: Attribute {
/// value: Call {
/// func: Attribute {
/// value: Call { // We are visiting this
/// func: Attribute { // expression node here
/// value: Call { //
/// func: Name { //
/// id: "datetime", //
/// }, //
/// }, //
/// attr: "now" //
/// }, //
/// }, //
/// attr: "replace"
/// },
/// },
/// attr: "replace"
/// },
/// },
/// attr: "astimezone"
/// },
/// }
/// ```
///
/// The node we are visiting as the "current expression" is deeply
/// nested inside many other expressions. As such, in order to check
/// whether the `datetime.now()` call is followed by 0-or-more `.replace()`
/// calls and then an `.astimezone()` call, we must iterate up through the
/// "parent expressions" in the semantic model, checking if they match this
/// AST pattern.
///
/// [datetime]: https://docs.python.org/3/library/datetime.html#datetime-objects
pub(super) fn followed_by_astimezone(checker: &Checker) -> bool {
    let semantic = checker.semantic();
    // The most recent `.replace` attribute node seen; each enclosing call must
    // have exactly this node as its callee for the chain to continue (and not,
    // say, a method reference passed as an argument — see below).
    let mut last = None;
    // Walk outwards through the enclosing expressions; a well-formed chain
    // alternates Attribute nodes (odd indices) and Call nodes (even indices).
    for (index, expr) in semantic.current_expressions().enumerate() {
        if index == 0 {
            // datetime.now(...).replace(...).astimezone
            // ^^^^^^^^^^^^^^^^^
            continue;
        }
        if index % 2 == 1 {
            // datetime.now(...).replace(...).astimezone
            //                   ^^^^^^^      ^^^^^^^^^^
            let Expr::Attribute(ExprAttribute { attr, .. }) = expr else {
                return false;
            };
            match attr.as_str() {
                "replace" => last = Some(AnyNodeRef::from(expr)),
                "astimezone" => return true,
                _ => return false,
            }
        } else {
            // datetime.now(...).replace(...).astimezone
            //                          ^^^^^
            let Expr::Call(ExprCall { func, .. }) = expr else {
                return false;
            };
            // Without this branch, we would fail to emit a diagnostic on code like this:
            //
            // ```py
            // foo.replace(datetime.now().replace).astimezone()
            // #           ^^^^^^^^^^^^^^ Diagnostic should be emitted here
            // #           since the `datetime.now()` call is not followed
            // #           by `.astimezone()`
            // ```
            if !last.is_some_and(|it| it.ptr_eq(AnyNodeRef::from(&**func))) {
                return false;
            }
        }
    }
    false
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_datetimez/mod.rs | crates/ruff_linter/src/rules/flake8_datetimez/mod.rs | //! Rules from [flake8-datetimez](https://pypi.org/project/flake8-datetimez/).
mod helpers;
pub(crate) mod rules;
#[cfg(test)]
mod tests {
    use std::path::Path;
    use anyhow::Result;
    use test_case::test_case;
    use crate::registry::Rule;
    use crate::test::test_path;
    use crate::{assert_diagnostics, settings};
    // One snapshot test per DTZ rule: lint the matching fixture with only that
    // rule enabled and compare the diagnostics against the stored snapshot
    // (named `<rule-code>_<fixture-file>`).
    #[test_case(Rule::CallDatetimeWithoutTzinfo, Path::new("DTZ001.py"))]
    #[test_case(Rule::CallDatetimeToday, Path::new("DTZ002.py"))]
    #[test_case(Rule::CallDatetimeUtcnow, Path::new("DTZ003.py"))]
    #[test_case(Rule::CallDatetimeUtcfromtimestamp, Path::new("DTZ004.py"))]
    #[test_case(Rule::CallDatetimeNowWithoutTzinfo, Path::new("DTZ005.py"))]
    #[test_case(Rule::CallDatetimeFromtimestamp, Path::new("DTZ006.py"))]
    #[test_case(Rule::CallDatetimeStrptimeWithoutZone, Path::new("DTZ007.py"))]
    #[test_case(Rule::CallDateToday, Path::new("DTZ011.py"))]
    #[test_case(Rule::CallDateFromtimestamp, Path::new("DTZ012.py"))]
    #[test_case(Rule::DatetimeMinMax, Path::new("DTZ901.py"))]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("flake8_datetimez").join(path).as_path(),
            &settings::LinterSettings::for_rule(rule_code),
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_datetimez/rules/call_date_fromtimestamp.rs | crates/ruff_linter/src/rules/flake8_datetimez/rules/call_date_fromtimestamp.rs | use ruff_python_ast::Expr;
use ruff_text_size::TextRange;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::Modules;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for usage of `datetime.date.fromtimestamp()`.
///
/// ## Why is this bad?
/// Python date objects are naive, that is, not timezone-aware. While an aware
/// object represents a specific moment in time, a naive object does not
/// contain enough information to unambiguously locate itself relative to other
/// datetime objects. Since this can lead to errors, it is recommended to
/// always use timezone-aware objects.
///
/// `datetime.date.fromtimestamp(ts)` returns a naive date object.
/// Instead, use `datetime.datetime.fromtimestamp(ts, tz=...).date()` to
/// create a timezone-aware datetime object and retrieve its date component.
///
/// ## Example
/// ```python
/// import datetime
///
/// datetime.date.fromtimestamp(946684800)
/// ```
///
/// Use instead:
/// ```python
/// import datetime
///
/// datetime.datetime.fromtimestamp(946684800, tz=datetime.timezone.utc).date()
/// ```
///
/// Or, for Python 3.11 and later:
/// ```python
/// import datetime
///
/// datetime.datetime.fromtimestamp(946684800, tz=datetime.UTC).date()
/// ```
///
/// ## References
/// - [Python documentation: Aware and Naive Objects](https://docs.python.org/3/library/datetime.html#aware-and-naive-objects)
// DTZ012
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.188")]
pub(crate) struct CallDateFromtimestamp;
impl Violation for CallDateFromtimestamp {
    #[derive_message_formats]
    fn message(&self) -> String {
        "`datetime.date.fromtimestamp()` used".to_string()
    }
    fn fix_title(&self) -> Option<String> {
        Some("Use `datetime.datetime.fromtimestamp(ts, tz=...).date()` instead".to_string())
    }
}
/// DTZ012
///
/// Reports any call whose callee resolves to `datetime.date.fromtimestamp`.
pub(crate) fn call_date_fromtimestamp(checker: &Checker, func: &Expr, location: TextRange) {
    // Cheap early exit: skip files that never touch the `datetime` module.
    if !checker.semantic().seen_module(Modules::DATETIME) {
        return;
    }
    let is_date_fromtimestamp = checker
        .semantic()
        .resolve_qualified_name(func)
        .is_some_and(|qualified_name| {
            matches!(
                qualified_name.segments(),
                ["datetime", "date", "fromtimestamp"]
            )
        });
    if is_date_fromtimestamp {
        checker.report_diagnostic(CallDateFromtimestamp, location);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_datetimez/rules/call_datetime_strptime_without_zone.rs | crates/ruff_linter/src/rules/flake8_datetimez/rules/call_datetime_strptime_without_zone.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, Expr};
use ruff_python_semantic::Modules;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_datetimez::helpers::DatetimeModuleAntipattern;
/// ## What it does
/// Checks for uses of `datetime.datetime.strptime()` that lead to naive
/// datetime objects.
///
/// ## Why is this bad?
/// Python datetime objects can be naive or timezone-aware. While an aware
/// object represents a specific moment in time, a naive object does not
/// contain enough information to unambiguously locate itself relative to other
/// datetime objects. Since this can lead to errors, it is recommended to
/// always use timezone-aware objects.
///
/// `datetime.datetime.strptime()` without `%z` returns a naive datetime
/// object. Follow it with `.replace(tzinfo=<timezone>)` or `.astimezone()`.
///
/// ## Example
/// ```python
/// import datetime
///
/// datetime.datetime.strptime("2022/01/31", "%Y/%m/%d")
/// ```
///
/// Instead, use `.replace(tzinfo=<timezone>)`:
/// ```python
/// import datetime
///
/// datetime.datetime.strptime("2022/01/31", "%Y/%m/%d").replace(
///     tzinfo=datetime.timezone.utc
/// )
/// ```
///
/// Or, use `.astimezone()`:
/// ```python
/// import datetime
///
/// datetime.datetime.strptime("2022/01/31", "%Y/%m/%d").astimezone(datetime.timezone.utc)
/// ```
///
/// On Python 3.11 and later, `datetime.timezone.utc` can be replaced with
/// `datetime.UTC`.
///
/// ## References
/// - [Python documentation: Aware and Naive Objects](https://docs.python.org/3/library/datetime.html#aware-and-naive-objects)
/// - [Python documentation: `strftime()` and `strptime()` Behavior](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior)
// DTZ007
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.188")]
pub(crate) struct CallDatetimeStrptimeWithoutZone(DatetimeModuleAntipattern);
impl Violation for CallDatetimeStrptimeWithoutZone {
    #[derive_message_formats]
    fn message(&self) -> String {
        let CallDatetimeStrptimeWithoutZone(antipattern) = self;
        match antipattern {
            DatetimeModuleAntipattern::NoTzArgumentPassed => {
                "Naive datetime constructed using `datetime.datetime.strptime()` without %z"
                    .to_string()
            }
            DatetimeModuleAntipattern::NonePassedToTzArgument => {
                // NOTE(review): the message says `tz=None` although the keyword
                // actually inspected is `tzinfo` — confirm the wording is intentional.
                "`datetime.datetime.strptime(...).replace(tz=None)` used".to_string()
            }
        }
    }
    fn fix_title(&self) -> Option<String> {
        let CallDatetimeStrptimeWithoutZone(antipattern) = self;
        let title = match antipattern {
            DatetimeModuleAntipattern::NoTzArgumentPassed => {
                "Call `.replace(tzinfo=<timezone>)` or `.astimezone()` \
                to convert to an aware datetime"
            }
            DatetimeModuleAntipattern::NonePassedToTzArgument => {
                "Pass a `datetime.timezone` object to the `tzinfo` parameter"
            }
        };
        Some(title.to_string())
    }
}
/// DTZ007
pub(crate) fn call_datetime_strptime_without_zone(checker: &Checker, call: &ast::ExprCall) {
if !checker.semantic().seen_module(Modules::DATETIME) {
return;
}
if !checker
.semantic()
.resolve_qualified_name(&call.func)
.is_some_and(|qualified_name| {
matches!(
qualified_name.segments(),
["datetime", "datetime", "strptime"]
)
})
{
return;
}
// Does the `strptime` call contain a format string with a timezone specifier?
if let Some(expr) = call.arguments.args.get(1) {
match expr {
Expr::StringLiteral(ast::ExprStringLiteral { value, .. }) => {
if value.to_str().contains("%z") {
return;
}
}
Expr::FString(ast::ExprFString { value, .. }) => {
for f_string_part in value {
match f_string_part {
ast::FStringPart::Literal(string) => {
if string.contains("%z") {
return;
}
}
ast::FStringPart::FString(f_string) => {
if f_string
.elements
.literals()
.any(|literal| literal.contains("%z"))
{
return;
}
}
}
}
}
_ => {}
}
}
let semantic = checker.semantic();
if let Some(antipattern) = find_antipattern(
semantic.current_expression_grandparent(),
semantic.current_expression_parent(),
) {
checker.report_diagnostic(CallDatetimeStrptimeWithoutZone(antipattern), call.range);
}
}
/// Classify how the expression surrounding a `strptime(...)` call treats
/// timezone information. `parent` is the attribute accessed on the `strptime`
/// result and `grandparent` the call invoking that attribute, as obtained from
/// the semantic model by the caller.
fn find_antipattern(
    grandparent: Option<&Expr>,
    parent: Option<&Expr>,
) -> Option<DatetimeModuleAntipattern> {
    // Unless the result is immediately the target of a method call, the
    // datetime stays naive.
    let (
        Some(Expr::Call(ast::ExprCall { arguments, .. })),
        Some(Expr::Attribute(ast::ExprAttribute { attr, .. })),
    ) = (grandparent, parent)
    else {
        return Some(DatetimeModuleAntipattern::NoTzArgumentPassed);
    };
    match attr.as_str() {
        // Ex) `datetime.strptime(...).astimezone()` — converted to aware.
        "astimezone" => None,
        "replace" => match arguments.find_keyword("tzinfo") {
            // Ex) `datetime.strptime(...).replace(tzinfo=None)`
            Some(ast::Keyword {
                value: Expr::NoneLiteral(_),
                ..
            }) => Some(DatetimeModuleAntipattern::NonePassedToTzArgument),
            // Ex) `datetime.strptime(...).replace(tzinfo=...)`
            Some(_) => None,
            // Ex) `datetime.strptime(...).replace(...)` with no `tzinfo` argument
            None => Some(DatetimeModuleAntipattern::NoTzArgumentPassed),
        },
        // Any other method call leaves the naive datetime unhandled.
        _ => Some(DatetimeModuleAntipattern::NoTzArgumentPassed),
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_datetimez/rules/datetime_min_max.rs | crates/ruff_linter/src/rules/flake8_datetimez/rules/datetime_min_max.rs | use std::fmt::{Display, Formatter};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{Expr, ExprAttribute, ExprCall};
use ruff_python_semantic::{Modules, SemanticModel};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for uses of `datetime.datetime.min` and `datetime.datetime.max`.
///
/// ## Why is this bad?
/// `datetime.datetime.min` and `datetime.datetime.max` are non-timezone-aware
/// datetime objects.
///
/// As such, operations on `datetime.min` and `datetime.max` may behave
/// unexpectedly, as in:
///
/// ```python
/// import datetime
///
/// # Timezone: UTC-14
/// datetime.datetime.min.timestamp() # ValueError: year 0 is out of range
/// datetime.datetime.max.timestamp() # ValueError: year 10000 is out of range
/// ```
///
/// ## Example
/// ```python
/// import datetime
///
/// datetime.datetime.max
/// ```
///
/// Use instead:
/// ```python
/// import datetime
///
/// datetime.datetime.max.replace(tzinfo=datetime.UTC)
/// ```
///
/// ## References
/// - [Python documentation: Aware and Naive Objects](https://docs.python.org/3/library/datetime.html#aware-and-naive-objects)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.10.0")]
pub(crate) struct DatetimeMinMax {
    // Which extreme (`min` or `max`) was referenced; used to render the message.
    min_max: MinMax,
}
impl Violation for DatetimeMinMax {
    #[derive_message_formats]
    fn message(&self) -> String {
        let DatetimeMinMax { min_max } = self;
        // `MinMax` implements `Display`, rendering as "min" or "max".
        format!("Use of `datetime.datetime.{min_max}` without timezone information")
    }
    fn fix_title(&self) -> Option<String> {
        let DatetimeMinMax { min_max } = self;
        // Human-readable suggestion accompanying the diagnostic.
        Some(format!(
            "Replace with `datetime.datetime.{min_max}.replace(tzinfo=...)`"
        ))
    }
}
/// DTZ901
pub(crate) fn datetime_min_max(checker: &Checker, expr: &Expr) {
    let semantic = checker.semantic();
    // Cheap bail-out: the rule is irrelevant unless `datetime` was imported.
    if !semantic.seen_module(Modules::DATETIME) {
        return;
    }
    let Some(qualified_name) = semantic.resolve_qualified_name(expr) else {
        return;
    };
    let min_max = match qualified_name.segments() {
        ["datetime", "datetime", "min"] => MinMax::Min,
        ["datetime", "datetime", "max"] => MinMax::Max,
        _ => return,
    };
    // Reuse the `semantic` binding taken above instead of re-fetching it from
    // the checker (the original called `checker.semantic()` a second time).
    if usage_is_safe(semantic) {
        return;
    }
    checker.report_diagnostic(DatetimeMinMax { min_max }, expr.range());
}
/// Whether the expression appears as `foo.replace(tzinfo=bar)` or `foo.time()`,
/// both of which make the naive `min`/`max` value acceptable.
fn usage_is_safe(semantic: &SemanticModel) -> bool {
    match (
        semantic.current_expression_parent(),
        semantic.current_expression_grandparent(),
    ) {
        (
            Some(Expr::Attribute(ExprAttribute { attr, .. })),
            Some(Expr::Call(ExprCall { arguments, .. })),
        ) => attr == "time" || (attr == "replace" && arguments.find_keyword("tzinfo").is_some()),
        // Anything else (no parent/grandparent, or a different shape) is unsafe.
        _ => false,
    }
}
/// Which extreme of `datetime.datetime` was referenced.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum MinMax {
    /// `datetime.datetime.min`
    Min,
    /// `datetime.datetime.max`
    Max,
}

impl Display for MinMax {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Resolve the attribute name first, then emit it in one call.
        let name = match self {
            MinMax::Min => "min",
            MinMax::Max => "max",
        };
        f.write_str(name)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_datetimez/rules/call_datetime_without_tzinfo.rs | crates/ruff_linter/src/rules/flake8_datetimez/rules/call_datetime_without_tzinfo.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_semantic::Modules;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_datetimez::helpers::{self, DatetimeModuleAntipattern};
/// ## What it does
/// Checks for `datetime` instantiations that do not specify a timezone.
///
/// ## Why is this bad?
/// `datetime` objects are "naive" by default, in that they do not include
/// timezone information. "Naive" objects are easy to understand, but ignore
/// some aspects of reality, which can lead to subtle bugs. Timezone-aware
/// `datetime` objects are preferred, as they represent a specific moment in
/// time, unlike "naive" objects.
///
/// By providing a non-`None` value for `tzinfo`, a `datetime` can be made
/// timezone-aware.
///
/// ## Example
/// ```python
/// import datetime
///
/// datetime.datetime(2000, 1, 1, 0, 0, 0)
/// ```
///
/// Use instead:
/// ```python
/// import datetime
///
/// datetime.datetime(2000, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
/// ```
///
/// Or, on Python 3.11 and later:
/// ```python
/// import datetime
///
/// datetime.datetime(2000, 1, 1, 0, 0, 0, tzinfo=datetime.UTC)
/// ```
///
/// ## References
/// - [Python documentation: Aware and Naive Objects](https://docs.python.org/3/library/datetime.html#aware-and-naive-objects)
// The wrapped antipattern records *how* the timezone was omitted, so the
// message can name the exact problem.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.188")]
pub(crate) struct CallDatetimeWithoutTzinfo(DatetimeModuleAntipattern);
impl Violation for CallDatetimeWithoutTzinfo {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Tailor the message to the detected antipattern.
        let CallDatetimeWithoutTzinfo(antipattern) = self;
        match antipattern {
            DatetimeModuleAntipattern::NoTzArgumentPassed => {
                "`datetime.datetime()` called without a `tzinfo` argument".to_string()
            }
            DatetimeModuleAntipattern::NonePassedToTzArgument => {
                "`tzinfo=None` passed to `datetime.datetime()`".to_string()
            }
        }
    }
    fn fix_title(&self) -> Option<String> {
        // Human-readable suggestion accompanying the diagnostic.
        Some("Pass a `datetime.timezone` object to the `tzinfo` parameter".to_string())
    }
}
/// DTZ001
pub(crate) fn call_datetime_without_tzinfo(checker: &Checker, call: &ast::ExprCall) {
    let semantic = checker.semantic();
    if !semantic.seen_module(Modules::DATETIME) {
        return;
    }
    // Only calls that resolve to the `datetime.datetime` constructor matter.
    let is_datetime_constructor = semantic
        .resolve_qualified_name(&call.func)
        .is_some_and(|qualified_name| matches!(qualified_name.segments(), ["datetime", "datetime"]));
    if !is_datetime_constructor {
        return;
    }
    // A chained `.astimezone(...)` converts the result to an aware object.
    if helpers::followed_by_astimezone(checker) {
        return;
    }
    // `tzinfo` is the eighth positional parameter (index 7) of the constructor.
    let antipattern = match call.arguments.find_argument_value("tzinfo", 7) {
        None => DatetimeModuleAntipattern::NoTzArgumentPassed,
        Some(ast::Expr::NoneLiteral(_)) => DatetimeModuleAntipattern::NonePassedToTzArgument,
        Some(_) => return,
    };
    checker.report_diagnostic(CallDatetimeWithoutTzinfo(antipattern), call.range);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_datetimez/rules/call_datetime_utcfromtimestamp.rs | crates/ruff_linter/src/rules/flake8_datetimez/rules/call_datetime_utcfromtimestamp.rs | use ruff_python_ast::Expr;
use ruff_text_size::TextRange;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::Modules;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_datetimez::helpers;
/// ## What it does
/// Checks for usage of `datetime.datetime.utcfromtimestamp()`.
///
/// ## Why is this bad?
/// Python datetime objects can be naive or timezone-aware. While an aware
/// object represents a specific moment in time, a naive object does not
/// contain enough information to unambiguously locate itself relative to other
/// datetime objects. Since this can lead to errors, it is recommended to
/// always use timezone-aware objects.
///
/// `datetime.datetime.utcfromtimestamp()` returns a naive datetime
/// object; instead, use `datetime.datetime.fromtimestamp(ts, tz=...)`
/// to create a timezone-aware object.
///
/// ## Example
/// ```python
/// import datetime
///
/// datetime.datetime.utcfromtimestamp(946684800)
/// ```
///
/// Use instead:
/// ```python
/// import datetime
///
/// datetime.datetime.fromtimestamp(946684800, tz=datetime.timezone.utc)
/// ```
///
/// Or, on Python 3.11 and later:
/// ```python
/// import datetime
///
/// datetime.datetime.fromtimestamp(946684800, tz=datetime.UTC)
/// ```
///
/// ## References
/// - [Python documentation: Aware and Naive Objects](https://docs.python.org/3/library/datetime.html#aware-and-naive-objects)
// Marker type: the violation carries no state, so the message is constant.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.188")]
pub(crate) struct CallDatetimeUtcfromtimestamp;
impl Violation for CallDatetimeUtcfromtimestamp {
    #[derive_message_formats]
    fn message(&self) -> String {
        "`datetime.datetime.utcfromtimestamp()` used".to_string()
    }
    fn fix_title(&self) -> Option<String> {
        // Human-readable suggestion accompanying the diagnostic.
        Some("Use `datetime.datetime.fromtimestamp(ts, tz=...)` instead".to_string())
    }
}
/// DTZ004
pub(crate) fn call_datetime_utcfromtimestamp(checker: &Checker, func: &Expr, location: TextRange) {
    let semantic = checker.semantic();
    if !semantic.seen_module(Modules::DATETIME) {
        return;
    }
    // Bail out unless the callee is exactly `datetime.datetime.utcfromtimestamp`.
    let Some(qualified_name) = semantic.resolve_qualified_name(func) else {
        return;
    };
    if !matches!(
        qualified_name.segments(),
        ["datetime", "datetime", "utcfromtimestamp"]
    ) {
        return;
    }
    // A chained `.astimezone(...)` converts the result to an aware object.
    if helpers::followed_by_astimezone(checker) {
        return;
    }
    checker.report_diagnostic(CallDatetimeUtcfromtimestamp, location);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_datetimez/rules/mod.rs | crates/ruff_linter/src/rules/flake8_datetimez/rules/mod.rs | pub(crate) use call_date_fromtimestamp::*;
pub(crate) use call_date_today::*;
pub(crate) use call_datetime_fromtimestamp::*;
pub(crate) use call_datetime_now_without_tzinfo::*;
pub(crate) use call_datetime_strptime_without_zone::*;
pub(crate) use call_datetime_today::*;
pub(crate) use call_datetime_utcfromtimestamp::*;
pub(crate) use call_datetime_utcnow::*;
pub(crate) use call_datetime_without_tzinfo::*;
pub(crate) use datetime_min_max::*;
mod call_date_fromtimestamp;
mod call_date_today;
mod call_datetime_fromtimestamp;
mod call_datetime_now_without_tzinfo;
mod call_datetime_strptime_without_zone;
mod call_datetime_today;
mod call_datetime_utcfromtimestamp;
mod call_datetime_utcnow;
mod call_datetime_without_tzinfo;
mod datetime_min_max;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_datetimez/rules/call_datetime_utcnow.rs | crates/ruff_linter/src/rules/flake8_datetimez/rules/call_datetime_utcnow.rs | use ruff_python_ast::Expr;
use ruff_text_size::TextRange;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::Modules;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_datetimez::helpers;
/// ## What it does
/// Checks for usage of `datetime.datetime.utcnow()`.
///
/// ## Why is this bad?
/// Python datetime objects can be naive or timezone-aware. While an aware
/// object represents a specific moment in time, a naive object does not
/// contain enough information to unambiguously locate itself relative to other
/// datetime objects. Since this can lead to errors, it is recommended to
/// always use timezone-aware objects.
///
/// `datetime.datetime.utcnow()` returns a naive datetime object; instead, use
/// `datetime.datetime.now(tz=...)` to create a timezone-aware object.
///
/// ## Example
/// ```python
/// import datetime
///
/// datetime.datetime.utcnow()
/// ```
///
/// Use instead:
/// ```python
/// import datetime
///
/// datetime.datetime.now(tz=datetime.timezone.utc)
/// ```
///
/// Or, for Python 3.11 and later:
/// ```python
/// import datetime
///
/// datetime.datetime.now(tz=datetime.UTC)
/// ```
///
/// ## References
/// - [Python documentation: Aware and Naive Objects](https://docs.python.org/3/library/datetime.html#aware-and-naive-objects)
// Marker type: the violation carries no state, so the message is constant.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.188")]
pub(crate) struct CallDatetimeUtcnow;
impl Violation for CallDatetimeUtcnow {
    #[derive_message_formats]
    fn message(&self) -> String {
        "`datetime.datetime.utcnow()` used".to_string()
    }
    fn fix_title(&self) -> Option<String> {
        // Human-readable suggestion accompanying the diagnostic.
        Some("Use `datetime.datetime.now(tz=...)` instead".to_string())
    }
}
/// DTZ003
pub(crate) fn call_datetime_utcnow(checker: &Checker, func: &Expr, location: TextRange) {
    let semantic = checker.semantic();
    if !semantic.seen_module(Modules::DATETIME) {
        return;
    }
    // Bail out unless the callee is exactly `datetime.datetime.utcnow`.
    let Some(qualified_name) = semantic.resolve_qualified_name(func) else {
        return;
    };
    if !matches!(
        qualified_name.segments(),
        ["datetime", "datetime", "utcnow"]
    ) {
        return;
    }
    // A chained `.astimezone(...)` converts the result to an aware object.
    if helpers::followed_by_astimezone(checker) {
        return;
    }
    checker.report_diagnostic(CallDatetimeUtcnow, location);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_datetimez/rules/call_datetime_now_without_tzinfo.rs | crates/ruff_linter/src/rules/flake8_datetimez/rules/call_datetime_now_without_tzinfo.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_semantic::Modules;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_datetimez::helpers::{self, DatetimeModuleAntipattern};
/// ## What it does
/// Checks for usages of `datetime.datetime.now()` that do not specify a timezone.
///
/// ## Why is this bad?
/// Python datetime objects can be naive or timezone-aware. While an aware
/// object represents a specific moment in time, a naive object does not
/// contain enough information to unambiguously locate itself relative to other
/// datetime objects. Since this can lead to errors, it is recommended to
/// always use timezone-aware objects.
///
/// `datetime.datetime.now()` or `datetime.datetime.now(tz=None)` returns a naive
/// datetime object. Instead, use `datetime.datetime.now(tz=<timezone>)` to create
/// a timezone-aware object.
///
/// ## Example
/// ```python
/// import datetime
///
/// datetime.datetime.now()
/// ```
///
/// Use instead:
/// ```python
/// import datetime
///
/// datetime.datetime.now(tz=datetime.timezone.utc)
/// ```
///
/// Or, for Python 3.11 and later:
/// ```python
/// import datetime
///
/// datetime.datetime.now(tz=datetime.UTC)
/// ```
///
/// ## References
/// - [Python documentation: Aware and Naive Objects](https://docs.python.org/3/library/datetime.html#aware-and-naive-objects)
// The wrapped antipattern records *how* the timezone was omitted, so the
// message can name the exact problem.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.188")]
pub(crate) struct CallDatetimeNowWithoutTzinfo(DatetimeModuleAntipattern);
impl Violation for CallDatetimeNowWithoutTzinfo {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Tailor the message to the detected antipattern.
        let CallDatetimeNowWithoutTzinfo(antipattern) = self;
        match antipattern {
            DatetimeModuleAntipattern::NoTzArgumentPassed => {
                "`datetime.datetime.now()` called without a `tz` argument".to_string()
            }
            DatetimeModuleAntipattern::NonePassedToTzArgument => {
                "`tz=None` passed to `datetime.datetime.now()`".to_string()
            }
        }
    }
    fn fix_title(&self) -> Option<String> {
        // Human-readable suggestion accompanying the diagnostic.
        Some("Pass a `datetime.timezone` object to the `tz` parameter".to_string())
    }
}
/// DTZ005
pub(crate) fn call_datetime_now_without_tzinfo(checker: &Checker, call: &ast::ExprCall) {
    let semantic = checker.semantic();
    if !semantic.seen_module(Modules::DATETIME) {
        return;
    }
    // Only calls that resolve to `datetime.datetime.now` matter.
    let is_datetime_now = semantic
        .resolve_qualified_name(&call.func)
        .is_some_and(|qualified_name| {
            matches!(qualified_name.segments(), ["datetime", "datetime", "now"])
        });
    if !is_datetime_now {
        return;
    }
    // A chained `.astimezone(...)` converts the result to an aware object.
    if helpers::followed_by_astimezone(checker) {
        return;
    }
    // `tz` is the first positional parameter (index 0) of `now`.
    let antipattern = match call.arguments.find_argument_value("tz", 0) {
        None => DatetimeModuleAntipattern::NoTzArgumentPassed,
        Some(ast::Expr::NoneLiteral(_)) => DatetimeModuleAntipattern::NonePassedToTzArgument,
        Some(_) => return,
    };
    checker.report_diagnostic(CallDatetimeNowWithoutTzinfo(antipattern), call.range);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_datetimez/rules/call_datetime_today.rs | crates/ruff_linter/src/rules/flake8_datetimez/rules/call_datetime_today.rs | use ruff_python_ast::Expr;
use ruff_text_size::TextRange;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::Modules;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_datetimez::helpers;
/// ## What it does
/// Checks for usage of `datetime.datetime.today()`.
///
/// ## Why is this bad?
/// `datetime` objects are "naive" by default, in that they do not include
/// timezone information. "Naive" objects are easy to understand, but ignore
/// some aspects of reality, which can lead to subtle bugs. Timezone-aware
/// `datetime` objects are preferred, as they represent a specific moment in
/// time, unlike "naive" objects.
///
/// `datetime.datetime.today()` creates a "naive" object; instead, use
/// `datetime.datetime.now(tz=...)` to create a timezone-aware object.
///
/// ## Example
/// ```python
/// import datetime
///
/// datetime.datetime.today()
/// ```
///
/// Use instead:
/// ```python
/// import datetime
///
/// datetime.datetime.now(tz=datetime.timezone.utc)
/// ```
///
/// Or, for Python 3.11 and later:
/// ```python
/// import datetime
///
/// datetime.datetime.now(tz=datetime.UTC)
/// ```
///
/// ## References
/// - [Python documentation: Aware and Naive Objects](https://docs.python.org/3/library/datetime.html#aware-and-naive-objects)
// Marker type: the violation carries no state, so the message is constant.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.188")]
pub(crate) struct CallDatetimeToday;
impl Violation for CallDatetimeToday {
    #[derive_message_formats]
    fn message(&self) -> String {
        "`datetime.datetime.today()` used".to_string()
    }
    fn fix_title(&self) -> Option<String> {
        // Human-readable suggestion accompanying the diagnostic.
        Some("Use `datetime.datetime.now(tz=...)` instead".to_string())
    }
}
/// DTZ002
pub(crate) fn call_datetime_today(checker: &Checker, func: &Expr, location: TextRange) {
    let semantic = checker.semantic();
    if !semantic.seen_module(Modules::DATETIME) {
        return;
    }
    // Bail out unless the callee is exactly `datetime.datetime.today`.
    let Some(qualified_name) = semantic.resolve_qualified_name(func) else {
        return;
    };
    if !matches!(qualified_name.segments(), ["datetime", "datetime", "today"]) {
        return;
    }
    // A chained `.astimezone(...)` converts the result to an aware object.
    if helpers::followed_by_astimezone(checker) {
        return;
    }
    checker.report_diagnostic(CallDatetimeToday, location);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_datetimez/rules/call_date_today.rs | crates/ruff_linter/src/rules/flake8_datetimez/rules/call_date_today.rs | use ruff_python_ast::Expr;
use ruff_text_size::TextRange;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::Modules;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for usage of `datetime.date.today()`.
///
/// ## Why is this bad?
/// Python date objects are naive, that is, not timezone-aware. While an aware
/// object represents a specific moment in time, a naive object does not
/// contain enough information to unambiguously locate itself relative to other
/// datetime objects. Since this can lead to errors, it is recommended to
/// always use timezone-aware objects.
///
/// `datetime.date.today` returns a naive date object without taking timezones
/// into account. Instead, use `datetime.datetime.now(tz=...).date()` to
/// create a timezone-aware object and retrieve its date component.
///
/// ## Example
/// ```python
/// import datetime
///
/// datetime.date.today()
/// ```
///
/// Use instead:
/// ```python
/// import datetime
///
/// datetime.datetime.now(tz=datetime.timezone.utc).date()
/// ```
///
/// Or, for Python 3.11 and later:
/// ```python
/// import datetime
///
/// datetime.datetime.now(tz=datetime.UTC).date()
/// ```
///
/// ## References
/// - [Python documentation: Aware and Naive Objects](https://docs.python.org/3/library/datetime.html#aware-and-naive-objects)
// Marker type: the violation carries no state, so the message is constant.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.188")]
pub(crate) struct CallDateToday;
impl Violation for CallDateToday {
    #[derive_message_formats]
    fn message(&self) -> String {
        "`datetime.date.today()` used".to_string()
    }
    fn fix_title(&self) -> Option<String> {
        // Human-readable suggestion accompanying the diagnostic.
        Some("Use `datetime.datetime.now(tz=...).date()` instead".to_string())
    }
}
/// DTZ011
pub(crate) fn call_date_today(checker: &Checker, func: &Expr, location: TextRange) {
    let semantic = checker.semantic();
    if !semantic.seen_module(Modules::DATETIME) {
        return;
    }
    // Only flag calls that resolve to exactly `datetime.date.today`.
    let Some(qualified_name) = semantic.resolve_qualified_name(func) else {
        return;
    };
    if matches!(qualified_name.segments(), ["datetime", "date", "today"]) {
        checker.report_diagnostic(CallDateToday, location);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_datetimez/rules/call_datetime_fromtimestamp.rs | crates/ruff_linter/src/rules/flake8_datetimez/rules/call_datetime_fromtimestamp.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast};
use ruff_python_semantic::Modules;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::flake8_datetimez::helpers::{self, DatetimeModuleAntipattern};
/// ## What it does
/// Checks for usage of `datetime.datetime.fromtimestamp()` that do not specify
/// a timezone.
///
/// ## Why is this bad?
/// Python datetime objects can be naive or timezone-aware. While an aware
/// object represents a specific moment in time, a naive object does not
/// contain enough information to unambiguously locate itself relative to other
/// datetime objects. Since this can lead to errors, it is recommended to
/// always use timezone-aware objects.
///
/// `datetime.datetime.fromtimestamp(ts)` or
/// `datetime.datetime.fromtimestamp(ts, tz=None)` returns a naive datetime
/// object. Instead, use `datetime.datetime.fromtimestamp(ts, tz=<timezone>)`
/// to create a timezone-aware object.
///
/// ## Example
/// ```python
/// import datetime
///
/// datetime.datetime.fromtimestamp(946684800)
/// ```
///
/// Use instead:
/// ```python
/// import datetime
///
/// datetime.datetime.fromtimestamp(946684800, tz=datetime.timezone.utc)
/// ```
///
/// Or, on Python 3.11 and later:
/// ```python
/// import datetime
///
/// datetime.datetime.fromtimestamp(946684800, tz=datetime.UTC)
/// ```
///
/// ## References
/// - [Python documentation: Aware and Naive Objects](https://docs.python.org/3/library/datetime.html#aware-and-naive-objects)
// The wrapped antipattern records *how* the timezone was omitted, so the
// message can name the exact problem.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.188")]
pub(crate) struct CallDatetimeFromtimestamp(DatetimeModuleAntipattern);
impl Violation for CallDatetimeFromtimestamp {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Tailor the message to the detected antipattern.
        let CallDatetimeFromtimestamp(antipattern) = self;
        match antipattern {
            DatetimeModuleAntipattern::NoTzArgumentPassed => {
                "`datetime.datetime.fromtimestamp()` called without a `tz` argument".to_string()
            }
            DatetimeModuleAntipattern::NonePassedToTzArgument => {
                "`tz=None` passed to `datetime.datetime.fromtimestamp()`".to_string()
            }
        }
    }
    fn fix_title(&self) -> Option<String> {
        // Human-readable suggestion accompanying the diagnostic.
        Some("Pass a `datetime.timezone` object to the `tz` parameter".to_string())
    }
}
/// DTZ006
pub(crate) fn call_datetime_fromtimestamp(checker: &Checker, call: &ast::ExprCall) {
    let semantic = checker.semantic();
    if !semantic.seen_module(Modules::DATETIME) {
        return;
    }
    // Only calls that resolve to `datetime.datetime.fromtimestamp` matter.
    let is_fromtimestamp = semantic
        .resolve_qualified_name(&call.func)
        .is_some_and(|qualified_name| {
            matches!(
                qualified_name.segments(),
                ["datetime", "datetime", "fromtimestamp"]
            )
        });
    if !is_fromtimestamp {
        return;
    }
    // A chained `.astimezone(...)` converts the result to an aware object.
    if helpers::followed_by_astimezone(checker) {
        return;
    }
    // `tz` is the second positional parameter (index 1) of `fromtimestamp`.
    let antipattern = match call.arguments.find_argument_value("tz", 1) {
        None => DatetimeModuleAntipattern::NoTzArgumentPassed,
        Some(ast::Expr::NoneLiteral(_)) => DatetimeModuleAntipattern::NonePassedToTzArgument,
        Some(_) => return,
    };
    checker.report_diagnostic(CallDatetimeFromtimestamp(antipattern), call.range);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_logging_format/violations.rs | crates/ruff_linter/src/rules/flake8_logging_format/violations.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::{AlwaysFixableViolation, Violation};
/// ## What it does
/// Checks for uses of `str.format` to format logging messages.
///
/// ## Why is this bad?
/// The `logging` module provides a mechanism for passing additional values to
/// be logged using the `extra` keyword argument. This is more consistent, more
/// efficient, and less error-prone than formatting the string directly.
///
/// Using `str.format` to format a logging message requires that Python eagerly
/// format the string, even if the logging statement is never executed (e.g.,
/// if the log level is above the level of the logging statement), whereas
/// using the `extra` keyword argument defers formatting until required.
///
/// Additionally, the use of `extra` will ensure that the values are made
/// available to all handlers, which can then be configured to log the values
/// in a consistent manner.
///
/// As an alternative to `extra`, passing values as arguments to the logging
/// method can also be used to defer string formatting until required.
///
/// ## Known problems
///
/// This rule detects uses of the `logging` module via a heuristic.
/// Specifically, it matches against:
///
/// - Uses of the `logging` module itself (e.g., `import logging; logging.info(...)`).
/// - Uses of `flask.current_app.logger` (e.g., `from flask import current_app; current_app.logger.info(...)`).
/// - Objects whose name starts with `log` or ends with `logger` or `logging`,
/// when used in the same file in which they are defined (e.g., `logger = logging.getLogger(); logger.info(...)`).
/// - Imported objects marked as loggers via the [`lint.logger-objects`] setting, which can be
/// used to enforce these rules against shared logger objects (e.g., `from module import logger; logger.info(...)`,
/// when [`lint.logger-objects`] is set to `["module.logger"]`).
///
/// ## Example
/// ```python
/// import logging
///
/// logging.basicConfig(format="%(message)s", level=logging.INFO)
///
/// user = "Maria"
///
/// logging.info("{} - Something happened".format(user))
/// ```
///
/// Use instead:
/// ```python
/// import logging
///
/// logging.basicConfig(format="%(user_id)s - %(message)s", level=logging.INFO)
///
/// user = "Maria"
///
/// logging.info("Something happened", extra={"user_id": user})
/// ```
///
/// Or:
/// ```python
/// import logging
///
/// logging.basicConfig(format="%(message)s", level=logging.INFO)
///
/// user = "Maria"
///
/// logging.info("%s - Something happened", user)
/// ```
///
/// ## Options
/// - `lint.logger-objects`
///
/// ## References
/// - [Python documentation: `logging`](https://docs.python.org/3/library/logging.html)
/// - [Python documentation: Optimization](https://docs.python.org/3/howto/logging.html#optimization)
// Marker type: the violation carries no state, so the message is constant.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.236")]
pub(crate) struct LoggingStringFormat;
impl Violation for LoggingStringFormat {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Reported when a logging call formats its message via `str.format`.
        "Logging statement uses `str.format`".to_string()
    }
}
/// ## What it does
/// Checks for uses of `printf`-style format strings to format logging
/// messages.
///
/// ## Why is this bad?
/// The `logging` module provides a mechanism for passing additional values to
/// be logged using the `extra` keyword argument. This is more consistent, more
/// efficient, and less error-prone than formatting the string directly.
///
/// Using `printf`-style format strings to format a logging message requires
/// that Python eagerly format the string, even if the logging statement is
/// never executed (e.g., if the log level is above the level of the logging
/// statement), whereas using the `extra` keyword argument defers formatting
/// until required.
///
/// Additionally, the use of `extra` will ensure that the values are made
/// available to all handlers, which can then be configured to log the values
/// in a consistent manner.
///
/// As an alternative to `extra`, passing values as arguments to the logging
/// method can also be used to defer string formatting until required.
///
/// ## Known problems
///
/// This rule detects uses of the `logging` module via a heuristic.
/// Specifically, it matches against:
///
/// - Uses of the `logging` module itself (e.g., `import logging; logging.info(...)`).
/// - Uses of `flask.current_app.logger` (e.g., `from flask import current_app; current_app.logger.info(...)`).
/// - Objects whose name starts with `log` or ends with `logger` or `logging`,
/// when used in the same file in which they are defined (e.g., `logger = logging.getLogger(); logger.info(...)`).
/// - Imported objects marked as loggers via the [`lint.logger-objects`] setting, which can be
/// used to enforce these rules against shared logger objects (e.g., `from module import logger; logger.info(...)`,
/// when [`lint.logger-objects`] is set to `["module.logger"]`).
///
/// ## Example
/// ```python
/// import logging
///
/// logging.basicConfig(format="%(message)s", level=logging.INFO)
///
/// user = "Maria"
///
/// logging.info("%s - Something happened" % user)
/// ```
///
/// Use instead:
/// ```python
/// import logging
///
/// logging.basicConfig(format="%(user_id)s - %(message)s", level=logging.INFO)
///
/// user = "Maria"
///
/// logging.info("Something happened", extra=dict(user_id=user))
/// ```
///
/// Or:
/// ```python
/// import logging
///
/// logging.basicConfig(format="%(message)s", level=logging.INFO)
///
/// user = "Maria"
///
/// logging.info("%s - Something happened", user)
/// ```
///
/// ## Options
/// - `lint.logger-objects`
///
/// ## References
/// - [Python documentation: `logging`](https://docs.python.org/3/library/logging.html)
/// - [Python documentation: Optimization](https://docs.python.org/3/howto/logging.html#optimization)
// Marker type: the violation carries no state, so the message is constant.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.236")]
pub(crate) struct LoggingPercentFormat;
impl Violation for LoggingPercentFormat {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Reported when a logging call formats its message with the `%` operator.
        "Logging statement uses `%`".to_string()
    }
}
/// ## What it does
/// Checks for uses of string concatenation via the `+` operator to format
/// logging messages.
///
/// ## Why is this bad?
/// The `logging` module provides a mechanism for passing additional values to
/// be logged using the `extra` keyword argument. This is more consistent, more
/// efficient, and less error-prone than formatting the string directly.
///
/// Using concatenation to format a logging message requires that Python
/// eagerly format the string, even if the logging statement is never executed
/// (e.g., if the log level is above the level of the logging statement),
/// whereas using the `extra` keyword argument defers formatting until required.
///
/// Additionally, the use of `extra` will ensure that the values are made
/// available to all handlers, which can then be configured to log the values
/// in a consistent manner.
///
/// As an alternative to `extra`, passing values as arguments to the logging
/// method can also be used to defer string formatting until required.
///
/// ## Known problems
///
/// This rule detects uses of the `logging` module via a heuristic.
/// Specifically, it matches against:
///
/// - Uses of the `logging` module itself (e.g., `import logging; logging.info(...)`).
/// - Uses of `flask.current_app.logger` (e.g., `from flask import current_app; current_app.logger.info(...)`).
/// - Objects whose name starts with `log` or ends with `logger` or `logging`,
/// when used in the same file in which they are defined (e.g., `logger = logging.getLogger(); logger.info(...)`).
/// - Imported objects marked as loggers via the [`lint.logger-objects`] setting, which can be
/// used to enforce these rules against shared logger objects (e.g., `from module import logger; logger.info(...)`,
/// when [`lint.logger-objects`] is set to `["module.logger"]`).
///
/// ## Example
/// ```python
/// import logging
///
/// logging.basicConfig(format="%(message)s", level=logging.INFO)
///
/// user = "Maria"
///
/// logging.info(user + " - Something happened")
/// ```
///
/// Use instead:
/// ```python
/// import logging
///
/// logging.basicConfig(format="%(user_id)s - %(message)s", level=logging.INFO)
///
/// user = "Maria"
///
/// logging.info("Something happened", extra=dict(user_id=user))
/// ```
///
/// Or:
/// ```python
/// import logging
///
/// logging.basicConfig(format="%(message)s", level=logging.INFO)
///
/// user = "Maria"
///
/// logging.info("%s - Something happened", user)
/// ```
///
/// ## Options
/// - `lint.logger-objects`
///
/// ## References
/// - [Python documentation: `logging`](https://docs.python.org/3/library/logging.html)
/// - [Python documentation: Optimization](https://docs.python.org/3/howto/logging.html#optimization)
// G003: logging message built with eager `+` string concatenation instead of
// passing the values as lazy logging arguments.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.236")]
pub(crate) struct LoggingStringConcat;
impl Violation for LoggingStringConcat {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Logging statement uses `+`".to_string()
    }
}
/// ## What it does
/// Checks for uses of f-strings to format logging messages.
///
/// ## Why is this bad?
/// The `logging` module provides a mechanism for passing additional values to
/// be logged using the `extra` keyword argument. This is more consistent, more
/// efficient, and less error-prone than formatting the string directly.
///
/// Using f-strings to format a logging message requires that Python eagerly
/// format the string, even if the logging statement is never executed (e.g.,
/// if the log level is above the level of the logging statement), whereas
/// using the `extra` keyword argument defers formatting until required.
///
/// Additionally, the use of `extra` will ensure that the values are made
/// available to all handlers, which can then be configured to log the values
/// in a consistent manner.
///
/// As an alternative to `extra`, passing values as arguments to the logging
/// method can also be used to defer string formatting until required.
///
/// ## Known problems
///
/// This rule detects uses of the `logging` module via a heuristic.
/// Specifically, it matches against:
///
/// - Uses of the `logging` module itself (e.g., `import logging; logging.info(...)`).
/// - Uses of `flask.current_app.logger` (e.g., `from flask import current_app; current_app.logger.info(...)`).
/// - Objects whose name starts with `log` or ends with `logger` or `logging`,
/// when used in the same file in which they are defined (e.g., `logger = logging.getLogger(); logger.info(...)`).
/// - Imported objects marked as loggers via the [`lint.logger-objects`] setting, which can be
/// used to enforce these rules against shared logger objects (e.g., `from module import logger; logger.info(...)`,
/// when [`lint.logger-objects`] is set to `["module.logger"]`).
///
/// ## Example
/// ```python
/// import logging
///
/// logging.basicConfig(format="%(message)s", level=logging.INFO)
///
/// user = "Maria"
///
/// logging.info(f"{user} - Something happened")
/// ```
///
/// Use instead:
/// ```python
/// import logging
///
/// logging.basicConfig(format="%(user_id)s - %(message)s", level=logging.INFO)
///
/// user = "Maria"
///
/// logging.info("Something happened", extra=dict(user_id=user))
/// ```
///
/// Or:
/// ```python
/// import logging
///
/// logging.basicConfig(format="%(message)s", level=logging.INFO)
///
/// user = "Maria"
///
/// logging.info("%s - Something happened", user)
/// ```
///
/// ## Options
/// - `lint.logger-objects`
///
/// ## References
/// - [Python documentation: `logging`](https://docs.python.org/3/library/logging.html)
/// - [Python documentation: Optimization](https://docs.python.org/3/howto/logging.html#optimization)
// G004: logging message written as an f-string. A fix (conversion to lazy `%`
// formatting) is only attached in some cases — see `logging_f_string`.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.236")]
pub(crate) struct LoggingFString;
impl Violation for LoggingFString {
    // The fix only applies to simple f-strings (bare-name interpolations, no
    // format specs or conversions), hence `Sometimes`.
    const FIX_AVAILABILITY: crate::FixAvailability = crate::FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        "Logging statement uses f-string".to_string()
    }
    fn fix_title(&self) -> Option<String> {
        Some("Convert to lazy `%` formatting".to_string())
    }
}
/// ## What it does
/// Checks for uses of `logging.warn` and `logging.Logger.warn`.
///
/// ## Why is this bad?
/// `logging.warn` and `logging.Logger.warn` are deprecated in favor of
/// `logging.warning` and `logging.Logger.warning`, which are functionally
/// equivalent.
///
/// ## Known problems
///
/// This rule detects uses of the `logging` module via a heuristic.
/// Specifically, it matches against:
///
/// - Uses of the `logging` module itself (e.g., `import logging; logging.info(...)`).
/// - Uses of `flask.current_app.logger` (e.g., `from flask import current_app; current_app.logger.info(...)`).
/// - Objects whose name starts with `log` or ends with `logger` or `logging`,
/// when used in the same file in which they are defined (e.g., `logger = logging.getLogger(); logger.info(...)`).
/// - Imported objects marked as loggers via the [`lint.logger-objects`] setting, which can be
/// used to enforce these rules against shared logger objects (e.g., `from module import logger; logger.info(...)`,
/// when [`lint.logger-objects`] is set to `["module.logger"]`).
///
/// ## Example
/// ```python
/// import logging
///
/// logging.warn("Something happened")
/// ```
///
/// Use instead:
/// ```python
/// import logging
///
/// logging.warning("Something happened")
/// ```
///
/// ## Options
/// - `lint.logger-objects`
///
/// ## References
/// - [Python documentation: `logging.warning`](https://docs.python.org/3/library/logging.html#logging.warning)
/// - [Python documentation: `logging.Logger.warning`](https://docs.python.org/3/library/logging.html#logging.Logger.warning)
// G010: use of the deprecated `warn` method; always fixable by renaming the
// attribute to `warning`.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.236")]
pub(crate) struct LoggingWarn;
impl AlwaysFixableViolation for LoggingWarn {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Logging statement uses `warn` instead of `warning`".to_string()
    }
    fn fix_title(&self) -> String {
        "Convert to `warning`".to_string()
    }
}
/// ## What it does
/// Checks for `extra` keywords in logging statements that clash with
/// `LogRecord` attributes.
///
/// ## Why is this bad?
/// The `logging` module provides a mechanism for passing additional values to
/// be logged using the `extra` keyword argument. These values are then passed
/// to the `LogRecord` constructor.
///
/// Providing a value via `extra` that clashes with one of the attributes of
/// the `LogRecord` constructor will raise a `KeyError` when the `LogRecord` is
/// constructed.
///
/// ## Known problems
///
/// This rule detects uses of the `logging` module via a heuristic.
/// Specifically, it matches against:
///
/// - Uses of the `logging` module itself (e.g., `import logging; logging.info(...)`).
/// - Uses of `flask.current_app.logger` (e.g., `from flask import current_app; current_app.logger.info(...)`).
/// - Objects whose name starts with `log` or ends with `logger` or `logging`,
/// when used in the same file in which they are defined (e.g., `logger = logging.getLogger(); logger.info(...)`).
/// - Imported objects marked as loggers via the [`lint.logger-objects`] setting, which can be
/// used to enforce these rules against shared logger objects (e.g., `from module import logger; logger.info(...)`,
/// when [`lint.logger-objects`] is set to `["module.logger"]`).
///
/// ## Example
/// ```python
/// import logging
///
/// logging.basicConfig(format="%(name)s - %(message)s", level=logging.INFO)
///
/// username = "Maria"
///
/// logging.info("Something happened", extra=dict(name=username))
/// ```
///
/// Use instead:
/// ```python
/// import logging
///
/// logging.basicConfig(format="%(user_id)s - %(message)s", level=logging.INFO)
///
/// username = "Maria"
///
/// logging.info("Something happened", extra=dict(user_id=username))
/// ```
///
/// ## Options
/// - `lint.logger-objects`
///
/// ## References
/// - [Python documentation: LogRecord attributes](https://docs.python.org/3/library/logging.html#logrecord-attributes)
// G101: a key in the `extra` mapping clashes with a built-in `LogRecord`
// attribute. The payload is the offending key name, interpolated into the
// message.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.236")]
pub(crate) struct LoggingExtraAttrClash(pub String);
impl Violation for LoggingExtraAttrClash {
    #[derive_message_formats]
    fn message(&self) -> String {
        let LoggingExtraAttrClash(key) = self;
        format!(
            "Logging statement uses an `extra` field that clashes with a `LogRecord` field: `{key}`"
        )
    }
}
/// ## What it does
/// Checks for uses of `logging.error` that pass `exc_info=True`.
///
/// ## Why is this bad?
/// Calling `logging.error` with `exc_info=True` is equivalent to calling
/// `logging.exception`. Using `logging.exception` is more concise, more
/// readable, and conveys the intent of the logging statement more clearly.
///
/// ## Known problems
///
/// This rule detects uses of the `logging` module via a heuristic.
/// Specifically, it matches against:
///
/// - Uses of the `logging` module itself (e.g., `import logging; logging.info(...)`).
/// - Uses of `flask.current_app.logger` (e.g., `from flask import current_app; current_app.logger.info(...)`).
/// - Objects whose name starts with `log` or ends with `logger` or `logging`,
/// when used in the same file in which they are defined (e.g., `logger = logging.getLogger(); logger.info(...)`).
/// - Imported objects marked as loggers via the [`lint.logger-objects`] setting, which can be
/// used to enforce these rules against shared logger objects (e.g., `from module import logger; logger.info(...)`,
/// when [`lint.logger-objects`] is set to `["module.logger"]`).
///
/// ## Example
/// ```python
/// import logging
///
/// try:
/// ...
/// except ValueError:
/// logging.error("Exception occurred", exc_info=True)
/// ```
///
/// Use instead:
/// ```python
/// import logging
///
/// try:
/// ...
/// except ValueError:
/// logging.exception("Exception occurred")
/// ```
///
/// ## Options
/// - `lint.logger-objects`
///
/// ## References
/// - [Python documentation: `logging.exception`](https://docs.python.org/3/library/logging.html#logging.exception)
/// - [Python documentation: `exception`](https://docs.python.org/3/library/logging.html#logging.Logger.exception)
/// - [Python documentation: `logging.error`](https://docs.python.org/3/library/logging.html#logging.error)
/// - [Python documentation: `error`](https://docs.python.org/3/library/logging.html#logging.Logger.error)
// G201: `.error(..., exc_info=True)` inside an exception handler should be
// `.exception(...)`.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.236")]
pub(crate) struct LoggingExcInfo;
impl Violation for LoggingExcInfo {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Logging `.exception(...)` should be used instead of `.error(..., exc_info=True)`"
            .to_string()
    }
}
/// ## What it does
/// Checks for redundant `exc_info` keyword arguments in logging statements.
///
/// ## Why is this bad?
/// `exc_info` is `True` by default for `logging.exception`, and `False` by
/// default for `logging.error`.
///
/// Passing `exc_info=True` to `logging.exception` calls is redundant, as is
/// passing `exc_info=False` to `logging.error` calls.
///
/// ## Known problems
///
/// This rule detects uses of the `logging` module via a heuristic.
/// Specifically, it matches against:
///
/// - Uses of the `logging` module itself (e.g., `import logging; logging.info(...)`).
/// - Uses of `flask.current_app.logger` (e.g., `from flask import current_app; current_app.logger.info(...)`).
/// - Objects whose name starts with `log` or ends with `logger` or `logging`,
/// when used in the same file in which they are defined (e.g., `logger = logging.getLogger(); logger.info(...)`).
/// - Imported objects marked as loggers via the [`lint.logger-objects`] setting, which can be
/// used to enforce these rules against shared logger objects (e.g., `from module import logger; logger.info(...)`,
/// when [`lint.logger-objects`] is set to `["module.logger"]`).
///
/// ## Example
/// ```python
/// import logging
///
/// try:
/// ...
/// except ValueError:
/// logging.exception("Exception occurred", exc_info=True)
/// ```
///
/// Use instead:
/// ```python
/// import logging
///
/// try:
/// ...
/// except ValueError:
/// logging.exception("Exception occurred")
/// ```
///
/// ## Options
/// - `lint.logger-objects`
///
/// ## References
/// - [Python documentation: `logging.exception`](https://docs.python.org/3/library/logging.html#logging.exception)
/// - [Python documentation: `exception`](https://docs.python.org/3/library/logging.html#logging.Logger.exception)
/// - [Python documentation: `logging.error`](https://docs.python.org/3/library/logging.html#logging.error)
/// - [Python documentation: `error`](https://docs.python.org/3/library/logging.html#logging.Logger.error)
// G202: `exc_info` argument that merely restates the method's default
// (`exc_info=True` on `exception`, `exc_info=False` on `error`).
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.236")]
pub(crate) struct LoggingRedundantExcInfo;
impl Violation for LoggingRedundantExcInfo {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Logging statement has redundant `exc_info`".to_string()
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_logging_format/mod.rs | crates/ruff_linter/src/rules/flake8_logging_format/mod.rs | //! Rules from [flake8-logging-format](https://pypi.org/project/flake8-logging-format/).
pub(crate) mod rules;
pub(crate) mod violations;
#[cfg(test)]
mod tests {
    use std::path::Path;

    use anyhow::Result;
    use test_case::test_case;

    use crate::registry::Rule;
    use crate::test::test_path;
    use crate::{assert_diagnostics, settings};

    // Snapshot tests: each fixture file is linted with the full set of G rules
    // enabled and the diagnostics are compared against a snapshot named after
    // the fixture path.
    #[test_case(Path::new("G_argparse_parser_error_ok.py"))]
    #[test_case(Path::new("G_extra_ok.py"))]
    #[test_case(Path::new("G_extra_str_format_ok.py"))]
    #[test_case(Path::new("G_simple_ok.py"))]
    #[test_case(Path::new("G_warnings_ok.py"))]
    #[test_case(Path::new("G001.py"))]
    #[test_case(Path::new("G002.py"))]
    #[test_case(Path::new("G003.py"))]
    #[test_case(Path::new("G004.py"))]
    #[test_case(Path::new("G004_arg_order.py"))]
    #[test_case(Path::new("G004_implicit_concat.py"))]
    #[test_case(Path::new("G010.py"))]
    #[test_case(Path::new("G101_1.py"))]
    #[test_case(Path::new("G101_2.py"))]
    #[test_case(Path::new("G201.py"))]
    #[test_case(Path::new("G202.py"))]
    fn rules(path: &Path) -> Result<()> {
        let snapshot = path.to_string_lossy().into_owned();
        let diagnostics = test_path(
            Path::new("flake8_logging_format").join(path).as_path(),
            &settings::LinterSettings {
                // Marks `logging_setup.logger` as a logger object so fixtures
                // can exercise the shared-logger (`lint.logger-objects`) path.
                logger_objects: vec!["logging_setup.logger".to_string()],
                ..settings::LinterSettings::for_rules(vec![
                    Rule::LoggingStringFormat,
                    Rule::LoggingPercentFormat,
                    Rule::LoggingStringConcat,
                    Rule::LoggingFString,
                    Rule::LoggingWarn,
                    Rule::LoggingExtraAttrClash,
                    Rule::LoggingExcInfo,
                    Rule::LoggingRedundantExcInfo,
                ])
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }

    // Re-runs the G004 fixtures with preview mode enabled (snapshot names are
    // `preview__`-prefixed) to cover the preview-only f-string fix.
    #[test_case(Rule::LoggingFString, Path::new("G004.py"))]
    #[test_case(Rule::LoggingFString, Path::new("G004_arg_order.py"))]
    #[test_case(Rule::LoggingFString, Path::new("G004_implicit_concat.py"))]
    fn preview_rules(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!(
            "preview__{}_{}",
            rule_code.noqa_code(),
            path.to_string_lossy()
        );
        let diagnostics = test_path(
            Path::new("flake8_logging_format").join(path).as_path(),
            &settings::LinterSettings {
                logger_objects: vec!["logging_setup.logger".to_string()],
                preview: settings::types::PreviewMode::Enabled,
                ..settings::LinterSettings::for_rule(rule_code)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_logging_format/rules/logging_call.rs | crates/ruff_linter/src/rules/flake8_logging_format/rules/logging_call.rs | use ruff_python_ast::InterpolatedStringElement;
use ruff_python_ast::{self as ast, Arguments, Expr, Keyword, Operator, StringFlags};
use ruff_python_semantic::analyze::logging;
use ruff_python_stdlib::logging::LoggingLevel;
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::preview::is_fix_f_string_logging_enabled;
use crate::registry::Rule;
use crate::rules::flake8_logging_format::violations::{
LoggingExcInfo, LoggingExtraAttrClash, LoggingFString, LoggingPercentFormat,
LoggingRedundantExcInfo, LoggingStringConcat, LoggingStringFormat, LoggingWarn,
};
use crate::{Edit, Fix};
/// G004: report a logging message written as an f-string and, when the
/// preview fix is enabled, attach a fix rewriting it to lazy `%` formatting.
///
/// The fix is only produced for the simple case: every interpolation is a
/// bare name with no conversion flag and no format spec, and no literal text
/// contains `%`. Anything else bails out, leaving the diagnostic unfixed.
fn logging_f_string(
    checker: &Checker,
    msg: &Expr,
    f_string: &ast::ExprFString,
    arguments: &Arguments,
    msg_pos: usize,
) {
    // Report the diagnostic up-front so we can attach a fix later only when preview is enabled.
    let mut diagnostic = checker.report_diagnostic(LoggingFString, msg.range());
    // Preview gate for the automatic fix.
    if !is_fix_f_string_logging_enabled(checker.settings()) {
        return;
    }
    // If there are existing positional arguments after the message, bail out.
    // This could indicate a mistake or complex usage we shouldn't try to fix.
    if arguments.args.len() > msg_pos + 1 {
        return;
    }
    // The rebuilt `%`-format string, and the names that become its lazy args.
    let mut format_string = String::new();
    let mut args: Vec<&str> = Vec::new();
    // Try to reuse the first part's quote style when building the replacement.
    // Default to double quotes if we can't determine it.
    let quote_str = f_string
        .value
        .iter()
        .map(|part| match part {
            ast::FStringPart::Literal(literal) => literal.flags.quote_str(),
            ast::FStringPart::FString(f) => f.flags.quote_str(),
        })
        .next()
        .unwrap_or("\"");
    for part in &f_string.value {
        match part {
            // Plain-string segment of an implicitly-concatenated message.
            ast::FStringPart::Literal(literal) => {
                let literal_text = literal.as_str();
                // A literal `%` would be reinterpreted as a placeholder once
                // the message becomes a `%`-format string, so don't offer a fix.
                if literal_text.contains('%') {
                    return;
                }
                format_string.push_str(literal_text);
            }
            ast::FStringPart::FString(f) => {
                for element in &f.elements {
                    match element {
                        InterpolatedStringElement::Literal(lit) => {
                            // If the literal text contains a '%' placeholder, bail out: mixing
                            // f-string interpolation with '%' placeholders is ambiguous for our
                            // automatic conversion, so don't offer a fix for this case.
                            if lit.value.as_ref().contains('%') {
                                return;
                            }
                            format_string.push_str(lit.value.as_ref());
                        }
                        InterpolatedStringElement::Interpolation(interpolated) => {
                            // Only plain `{name}` interpolations are rewritable:
                            // a format spec or `!r`/`!s`/`!a` conversion has no
                            // direct `%s` equivalent.
                            if interpolated.format_spec.is_some()
                                || !matches!(
                                    interpolated.conversion,
                                    ruff_python_ast::ConversionFlag::None
                                )
                            {
                                return;
                            }
                            match interpolated.expression.as_ref() {
                                Expr::Name(name) => {
                                    format_string.push_str("%s");
                                    args.push(name.id.as_str());
                                }
                                _ => return,
                            }
                        }
                    }
                }
            }
        }
    }
    // No interpolations means there's nothing to defer; leave it unfixed.
    if args.is_empty() {
        return;
    }
    // NOTE(review): the accumulated literal text is inserted verbatim between
    // `quote_str` quotes; it looks like a literal containing the quote
    // character itself (or a trailing backslash) could produce an invalid
    // replacement — confirm such cases are rejected upstream.
    let replacement = format!(
        "{q}{format_string}{q}, {args}",
        q = quote_str,
        format_string = format_string,
        args = args.join(", ")
    );
    let fix = Fix::safe_edit(Edit::range_replacement(replacement, msg.range()));
    diagnostic.set_fix(fix);
}
/// Returns `true` if the attribute is a reserved attribute on the `logging`
/// module's `LogRecord` class.
///
/// Supplying any of these names via the `extra` keyword argument raises a
/// `KeyError` when the `LogRecord` is constructed, which is what G101 flags.
fn is_reserved_attr(attr: &str) -> bool {
    // Keep in sync with the `LogRecord` attributes documented in the standard
    // library's `logging` module reference.
    const RESERVED: &[&str] = &[
        "args",
        "asctime",
        "created",
        "exc_info",
        "exc_text",
        "filename",
        "funcName",
        "levelname",
        "levelno",
        "lineno",
        "module",
        "msecs",
        "message",
        "msg",
        "name",
        "pathname",
        "process",
        "processName",
        "relativeCreated",
        "stack_info",
        "thread",
        "threadName",
    ];
    RESERVED.contains(&attr)
}
/// Inspect the `msg` argument of a logging call and report the message-format
/// rules: G001 (`str.format`), G002 (`%`), G003 (`+`), and G004 (f-string).
fn check_msg(checker: &Checker, msg: &Expr, arguments: &Arguments, msg_pos: usize) {
    match msg {
        // `+` concatenation (G003).
        Expr::BinOp(ast::ExprBinOp {
            op: Operator::Add, ..
        }) => {
            checker.report_diagnostic_if_enabled(LoggingStringConcat, msg.range());
        }
        // `%` interpolation (G002).
        Expr::BinOp(ast::ExprBinOp {
            op: Operator::Mod, ..
        }) => {
            checker.report_diagnostic_if_enabled(LoggingPercentFormat, msg.range());
        }
        // f-string messages (G004); fix construction lives in `logging_f_string`.
        Expr::FString(f_string) if checker.is_rule_enabled(Rule::LoggingFString) => {
            logging_f_string(checker, msg, f_string, arguments, msg_pos);
        }
        // `.format(...)` called directly on a literal (G001).
        Expr::Call(ast::ExprCall { func, .. })
            if checker.is_rule_enabled(Rule::LoggingStringFormat) =>
        {
            if let Expr::Attribute(ast::ExprAttribute { value, attr, .. }) = func.as_ref() {
                if attr == "format" && value.is_literal_expr() {
                    checker.report_diagnostic(LoggingStringFormat, msg.range());
                }
            }
        }
        _ => {}
    }
}
/// G101: scan the `extra` keyword argument of a logging call for keys that
/// clash with built-in `LogRecord` attributes.
///
/// Two shapes are understood: a dict literal (`extra={...}`) and a call to the
/// `dict` builtin with keyword arguments (`extra=dict(...)`); anything else is
/// ignored.
fn check_log_record_attr_clash(checker: &Checker, extra: &Keyword) {
    match &extra.value {
        // `extra={...}`: inspect every string-literal key.
        Expr::Dict(dict) => {
            for key in dict.iter_keys().flatten() {
                let Some(string_key) = key.as_string_literal_expr() else {
                    continue;
                };
                if is_reserved_attr(string_key.value.to_str()) {
                    checker.report_diagnostic(
                        LoggingExtraAttrClash(string_key.value.to_string()),
                        string_key.range(),
                    );
                }
            }
        }
        // `extra=dict(...)`: inspect every keyword argument name.
        Expr::Call(ast::ExprCall {
            func,
            arguments: Arguments { keywords, .. },
            ..
        }) => {
            // Only the `dict` builtin counts; a user-defined `dict` shadow doesn't.
            if checker.semantic().match_builtin_expr(func, "dict") {
                for keyword in keywords {
                    let Some(attr) = &keyword.arg else {
                        continue;
                    };
                    if is_reserved_attr(attr) {
                        checker.report_diagnostic(
                            LoggingExtraAttrClash(attr.to_string()),
                            keyword.range(),
                        );
                    }
                }
            }
        }
        _ => {}
    }
}
/// Classification of a recognized logging call site.
#[derive(Debug, Copy, Clone)]
pub(crate) enum LoggingCallType {
    /// Logging call with a level method, e.g., `logging.info`.
    LevelCall(LoggingLevel),
    /// Logging call with an integer level as an argument, e.g., `logger.log(level, ...)`.
    LogCall,
}
impl LoggingCallType {
    /// Resolve a method/attribute name (e.g. `info`, `error`, `log`) into a
    /// call type; returns `None` for attributes that aren't logging methods.
    fn from_attribute(attr: &str) -> Option<Self> {
        match attr {
            "log" => Some(Self::LogCall),
            _ => LoggingLevel::from_attribute(attr).map(Self::LevelCall),
        }
    }
}
/// Determine whether `call` is a logging call and, if so, return its call type
/// (e.g. `info` vs. `log`) plus the range of the method/function name.
pub(crate) fn find_logging_call(
    checker: &Checker,
    call: &ast::ExprCall,
) -> Option<(LoggingCallType, TextRange)> {
    match call.func.as_ref() {
        // Method call on a logger-like object, e.g. `logger.info(...)`.
        Expr::Attribute(ast::ExprAttribute { value: _, attr, .. }) => {
            let call_type = LoggingCallType::from_attribute(attr.as_str())?;
            // The receiver must look like a logger (heuristic plus the
            // `lint.logger-objects` setting).
            logging::is_logger_candidate(
                &call.func,
                checker.semantic(),
                &checker.settings().logger_objects,
            )
            .then_some((call_type, attr.range()))
        }
        // Bare-name call resolving to the `logging` module itself, e.g.
        // `from logging import info; info(...)`.
        Expr::Name(_) => {
            let qualified_name = checker
                .semantic()
                .resolve_qualified_name(call.func.as_ref())?;
            match qualified_name.segments() {
                ["logging", attribute] => {
                    LoggingCallType::from_attribute(attribute).map(|ct| (ct, call.func.range()))
                }
                _ => None,
            }
        }
        _ => None,
    }
}
/// Check logging calls for violations.
///
/// Entry point for the flake8-logging-format (G) rules: classifies the call
/// via `find_logging_call`, then dispatches to the message checks (G001–G004),
/// the `warn` check (G010), the `extra` clash check (G101), and the `exc_info`
/// checks (G201/G202).
pub(crate) fn logging_call(checker: &Checker, call: &ast::ExprCall) {
    let Some((logging_call_type, range)) = find_logging_call(checker, call) else {
        return;
    };
    // G001, G002, G003, G004
    // For `logger.log(level, msg, ...)` the message is the second positional
    // argument (msg_pos == 1); for level methods it is the first (msg_pos == 0).
    let msg_pos = usize::from(matches!(logging_call_type, LoggingCallType::LogCall));
    if let Some(format_arg) = call.arguments.find_argument_value("msg", msg_pos) {
        check_msg(checker, format_arg, &call.arguments, msg_pos);
    }
    // G010
    if checker.is_rule_enabled(Rule::LoggingWarn) {
        if matches!(
            logging_call_type,
            LoggingCallType::LevelCall(LoggingLevel::Warn)
        ) {
            // Always fixable: rename the deprecated `warn` attribute to `warning`.
            let mut diagnostic = checker.report_diagnostic(LoggingWarn, range);
            diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
                "warning".to_string(),
                range,
            )));
        }
    }
    // G101
    if checker.is_rule_enabled(Rule::LoggingExtraAttrClash) {
        if let Some(extra) = call.arguments.find_keyword("extra") {
            check_log_record_attr_clash(checker, extra);
        }
    }
    // G201, G202
    if checker.any_rule_enabled(&[Rule::LoggingExcInfo, Rule::LoggingRedundantExcInfo]) {
        // Both rules only apply inside an `except` block.
        if !checker.semantic().in_exception_handler() {
            return;
        }
        // Bail out unless the call passes a truthy-resolvable `exc_info`.
        let Some(exc_info) = logging::exc_info(&call.arguments, checker.semantic()) else {
            return;
        };
        if let LoggingCallType::LevelCall(logging_level) = logging_call_type {
            match logging_level {
                // `.error(..., exc_info=...)` should be `.exception(...)`.
                LoggingLevel::Error => {
                    checker.report_diagnostic_if_enabled(LoggingExcInfo, range);
                }
                // `.exception(..., exc_info=...)` restates the default.
                LoggingLevel::Exception => {
                    checker.report_diagnostic_if_enabled(LoggingRedundantExcInfo, exc_info.range());
                }
                _ => {}
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_logging_format/rules/mod.rs | crates/ruff_linter/src/rules/flake8_logging_format/rules/mod.rs | pub(crate) use logging_call::*;
mod logging_call;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_future_annotations/mod.rs | crates/ruff_linter/src/rules/flake8_future_annotations/mod.rs | //! Rules from [flake8-future-annotations](https://pypi.org/project/flake8-future-annotations/).
pub(crate) mod rules;
#[cfg(test)]
mod tests {
    use std::path::Path;

    use anyhow::Result;
    use test_case::test_case;

    use crate::registry::Rule;
    use crate::settings::types::PreviewMode;
    use crate::test::test_path;
    use crate::{assert_diagnostics, settings};
    use ruff_python_ast::PythonVersion;

    // FA100 snapshot tests, pinned to Python 3.7 so the rewritable-annotation
    // diagnostics fire; snapshots are named after the fixture path.
    #[test_case(Path::new("edge_case.py"))]
    #[test_case(Path::new("from_typing_import.py"))]
    #[test_case(Path::new("from_typing_import_many.py"))]
    #[test_case(Path::new("import_typing.py"))]
    #[test_case(Path::new("import_typing_as.py"))]
    #[test_case(Path::new("no_future_import_uses_lowercase.py"))]
    #[test_case(Path::new("no_future_import_uses_union.py"))]
    #[test_case(Path::new("no_future_import_uses_union_inner.py"))]
    #[test_case(Path::new("ok_no_types.py"))]
    #[test_case(Path::new("ok_non_simplifiable_types.py"))]
    #[test_case(Path::new("ok_uses_future.py"))]
    #[test_case(Path::new("ok_variable_name.py"))]
    fn fa100(path: &Path) -> Result<()> {
        let snapshot = path.to_string_lossy().into_owned();
        let diagnostics = test_path(
            Path::new("flake8_future_annotations").join(path).as_path(),
            &settings::LinterSettings {
                unresolved_target_version: PythonVersion::PY37.into(),
                ..settings::LinterSettings::for_rule(Rule::FutureRewritableTypeAnnotation)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }

    // FA102 snapshot tests (required `__future__` import), also on Python 3.7;
    // snapshots are `fa102_`-prefixed.
    #[test_case(Path::new("no_future_import_uses_lowercase.py"))]
    #[test_case(Path::new("no_future_import_uses_preview_generics.py"))]
    #[test_case(Path::new("no_future_import_uses_union.py"))]
    #[test_case(Path::new("no_future_import_uses_union_inner.py"))]
    #[test_case(Path::new("ok_no_types.py"))]
    #[test_case(Path::new("ok_uses_future.py"))]
    #[test_case(Path::new("ok_quoted_type.py"))]
    fn fa102(path: &Path) -> Result<()> {
        let snapshot = format!("fa102_{}", path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("flake8_future_annotations").join(path).as_path(),
            &settings::LinterSettings {
                unresolved_target_version: PythonVersion::PY37.into(),
                ..settings::LinterSettings::for_rule(Rule::FutureRequiredTypeAnnotation)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }

    // FA102 with preview mode enabled (`fa102_preview_` snapshots).
    #[test_case(Path::new("no_future_import_uses_preview_generics.py"))]
    fn fa102_preview(path: &Path) -> Result<()> {
        let snapshot = format!("fa102_preview_{}", path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("flake8_future_annotations").join(path).as_path(),
            &settings::LinterSettings {
                unresolved_target_version: PythonVersion::PY37.into(),
                preview: PreviewMode::Enabled,
                ..settings::LinterSettings::for_rule(Rule::FutureRequiredTypeAnnotation)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_future_annotations/rules/future_rewritable_type_annotation.rs | crates/ruff_linter/src/rules/flake8_future_annotations/rules/future_rewritable_type_annotation.rs | use ruff_python_ast::Expr;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Fix};
/// ## What it does
/// Checks for missing `from __future__ import annotations` imports upon
/// detecting type annotations that can be written more succinctly under
/// PEP 563.
///
/// ## Why is this bad?
/// PEP 585 enabled the use of a number of convenient type annotations, such as
/// `list[str]` instead of `List[str]`. However, these annotations are only
/// available on Python 3.9 and higher, _unless_ the `from __future__ import annotations`
/// import is present.
///
/// Similarly, PEP 604 enabled the use of the `|` operator for unions, such as
/// `str | None` instead of `Optional[str]`. However, these annotations are only
/// available on Python 3.10 and higher, _unless_ the `from __future__ import annotations`
/// import is present.
///
/// By adding the `__future__` import, the pyupgrade rules can automatically
/// migrate existing code to use the new syntax, even for older Python versions.
/// This rule thus pairs well with pyupgrade and with Ruff's pyupgrade rules.
///
/// This rule respects the [`target-version`] setting. For example, if your
/// project targets Python 3.10 and above, adding `from __future__ import annotations`
/// does not impact your ability to leverage PEP 604-style unions (e.g., to
/// convert `Optional[str]` to `str | None`). As such, this rule will only
/// flag such usages if your project targets Python 3.9 or below.
///
/// ## Example
///
/// ```python
/// from typing import List, Dict, Optional
///
///
/// def func(obj: Dict[str, Optional[int]]) -> None: ...
/// ```
///
/// Use instead:
///
/// ```python
/// from __future__ import annotations
///
/// from typing import List, Dict, Optional
///
///
/// def func(obj: Dict[str, Optional[int]]) -> None: ...
/// ```
///
/// After running the additional pyupgrade rules:
///
/// ```python
/// from __future__ import annotations
///
///
/// def func(obj: dict[str, int | None]) -> None: ...
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe, as adding `from __future__ import annotations`
/// may change the semantics of the program.
///
/// ## Options
/// - `target-version`
// FA100: an annotation that could be simplified (PEP 585/604 style) if the
// file imported `from __future__ import annotations`.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.269")]
pub(crate) struct FutureRewritableTypeAnnotation {
    // Fully-qualified name of the simplifiable annotation, e.g. `typing.List`.
    name: String,
}
impl AlwaysFixableViolation for FutureRewritableTypeAnnotation {
    #[derive_message_formats]
    fn message(&self) -> String {
        let FutureRewritableTypeAnnotation { name } = self;
        format!("Add `from __future__ import annotations` to simplify `{name}`")
    }
    fn fix_title(&self) -> String {
        "Add `from __future__ import annotations`".to_string()
    }
}
/// FA100: report an annotation that could be written more succinctly with a
/// `from __future__ import annotations` import, and offer that import as an
/// (unsafe) fix.
pub(crate) fn future_rewritable_type_annotation(checker: &Checker, expr: &Expr) {
    // Only annotations that resolve to a known qualified name are reported.
    let Some(name) = checker
        .semantic()
        .resolve_qualified_name(expr)
        .map(|binding| binding.to_string())
    else {
        return;
    };
    let mut diagnostic =
        checker.report_diagnostic(FutureRewritableTypeAnnotation { name }, expr.range());
    // Unsafe: adding the `__future__` import changes annotation evaluation semantics.
    diagnostic.set_fix(Fix::unsafe_edit(checker.importer().add_future_import()));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_future_annotations/rules/mod.rs | crates/ruff_linter/src/rules/flake8_future_annotations/rules/mod.rs | pub(crate) use future_required_type_annotation::*;
pub(crate) use future_rewritable_type_annotation::*;
mod future_required_type_annotation;
mod future_rewritable_type_annotation;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_future_annotations/rules/future_required_type_annotation.rs | crates/ruff_linter/src/rules/flake8_future_annotations/rules/future_required_type_annotation.rs | use std::fmt;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::Expr;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Fix};
/// ## What it does
/// Checks for uses of PEP 585- and PEP 604-style type annotations in Python
/// modules that lack the required `from __future__ import annotations` import
/// for compatibility with older Python versions.
///
/// ## Why is this bad?
/// Using PEP 585 and PEP 604 style annotations without a `from __future__ import
/// annotations` import will cause runtime errors on Python versions prior to
/// 3.9 and 3.10, respectively.
///
/// By adding the `__future__` import, the interpreter will no longer interpret
/// annotations at evaluation time, making the code compatible with both past
/// and future Python versions.
///
/// This rule respects the [`target-version`] setting. For example, if your
/// project targets Python 3.10 and above, adding `from __future__ import annotations`
/// does not impact your ability to leverage PEP 604-style unions (e.g., to
/// convert `Optional[str]` to `str | None`). As such, this rule will only
/// flag such usages if your project targets Python 3.9 or below.
///
/// ## Example
///
/// ```python
/// def func(obj: dict[str, int | None]) -> None: ...
/// ```
///
/// Use instead:
///
/// ```python
/// from __future__ import annotations
///
///
/// def func(obj: dict[str, int | None]) -> None: ...
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe, as adding `from __future__ import annotations`
/// may change the semantics of the program.
///
/// ## Options
/// - `target-version`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.271")]
pub(crate) struct FutureRequiredTypeAnnotation {
    /// Which modern annotation style (PEP 585 or PEP 604) triggered the diagnostic.
    reason: Reason,
}
#[derive(Debug, PartialEq, Eq)]
pub(crate) enum Reason {
    /// The type annotation is written in PEP 585 style (e.g., `list[int]`).
    PEP585,
    /// The type annotation is written in PEP 604 style (e.g., `int | None`).
    PEP604,
}
impl fmt::Display for Reason {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Human-readable label interpolated into the diagnostic message.
        let label = match self {
            Reason::PEP585 => "PEP 585 collection",
            Reason::PEP604 => "PEP 604 union",
        };
        fmt.write_str(label)
    }
}
impl AlwaysFixableViolation for FutureRequiredTypeAnnotation {
    // NOTE(review): `derive_message_formats` appears to derive message-format
    // metadata from the `format!` literal below; keep the literal inline.
    #[derive_message_formats]
    fn message(&self) -> String {
        let FutureRequiredTypeAnnotation { reason } = self;
        format!("Missing `from __future__ import annotations`, but uses {reason}")
    }
    fn fix_title(&self) -> String {
        "Add `from __future__ import annotations`".to_string()
    }
}
/// FA102
pub(crate) fn future_required_type_annotation(checker: &Checker, expr: &Expr, reason: Reason) {
    // Adding the `__future__` import may change runtime semantics, so the
    // edit is marked unsafe.
    let fix = Fix::unsafe_edit(checker.importer().add_future_import());
    checker
        .report_diagnostic(FutureRequiredTypeAnnotation { reason }, expr.range())
        .set_fix(fix);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pie/mod.rs | crates/ruff_linter/src/rules/flake8_pie/mod.rs | //! Rules from [flake8-pie](https://pypi.org/project/flake8-pie/).
pub(crate) mod rules;
#[cfg(test)]
mod tests {
    use std::path::Path;
    use anyhow::Result;
    use test_case::test_case;
    use crate::registry::Rule;
    use crate::test::test_path;
    use crate::{assert_diagnostics, settings};
    // Each case lints the named fixture with only the given rule enabled and
    // snapshots the resulting diagnostics.
    #[test_case(Rule::DuplicateClassFieldDefinition, Path::new("PIE794.py"))]
    #[test_case(Rule::UnnecessaryDictKwargs, Path::new("PIE804.py"))]
    #[test_case(Rule::MultipleStartsEndsWith, Path::new("PIE810.py"))]
    #[test_case(Rule::UnnecessaryRangeStart, Path::new("PIE808.py"))]
    #[test_case(Rule::UnnecessaryPlaceholder, Path::new("PIE790.py"))]
    #[test_case(Rule::UnnecessarySpread, Path::new("PIE800.py"))]
    #[test_case(Rule::ReimplementedContainerBuiltin, Path::new("PIE807.py"))]
    #[test_case(Rule::NonUniqueEnums, Path::new("PIE796.py"))]
    #[test_case(Rule::NonUniqueEnums, Path::new("PIE796.pyi"))]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        // Snapshot name, e.g. `PIE794_PIE794.py`.
        let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("flake8_pie").join(path).as_path(),
            &settings::LinterSettings::for_rule(rule_code),
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pie/rules/multiple_starts_ends_with.rs | crates/ruff_linter/src/rules/flake8_pie/rules/multiple_starts_ends_with.rs | use std::collections::BTreeMap;
use std::iter;
use itertools::Either::{Left, Right};
use ruff_python_semantic::{SemanticModel, analyze};
use ruff_text_size::{Ranged, TextRange};
use ruff_python_ast::{self as ast, Arguments, BoolOp, Expr, ExprContext, Identifier};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::AlwaysFixableViolation;
use crate::checkers::ast::Checker;
use crate::{Edit, Fix};
/// ## What it does
/// Checks for `startswith` or `endswith` calls on the same value with
/// different prefixes or suffixes.
///
/// ## Why is this bad?
/// The `startswith` and `endswith` methods accept tuples of prefixes or
/// suffixes respectively. Passing a tuple of prefixes or suffixes is more
/// efficient and readable than calling the method multiple times.
///
/// ## Example
/// ```python
/// msg = "Hello, world!"
/// if msg.startswith("Hello") or msg.startswith("Hi"):
/// print("Greetings!")
/// ```
///
/// Use instead:
/// ```python
/// msg = "Hello, world!"
/// if msg.startswith(("Hello", "Hi")):
/// print("Greetings!")
/// ```
///
/// ## Fix safety
/// This rule's fix is unsafe, as in some cases, it will be unable to determine
/// whether the argument to an existing `.startswith` or `.endswith` call is a
/// tuple. For example, given `msg.startswith(x) or msg.startswith(y)`, if `x`
/// or `y` is a tuple, and the semantic model is unable to detect it as such,
/// the rule will suggest `msg.startswith((x, y))`, which will error at
/// runtime.
///
/// ## References
/// - [Python documentation: `str.startswith`](https://docs.python.org/3/library/stdtypes.html#str.startswith)
/// - [Python documentation: `str.endswith`](https://docs.python.org/3/library/stdtypes.html#str.endswith)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.243")]
pub(crate) struct MultipleStartsEndsWith {
    /// The method name involved: `"startswith"` or `"endswith"`.
    attr: String,
}
impl AlwaysFixableViolation for MultipleStartsEndsWith {
    #[derive_message_formats]
    fn message(&self) -> String {
        let MultipleStartsEndsWith { attr } = self;
        format!("Call `{attr}` once with a `tuple`")
    }
    fn fix_title(&self) -> String {
        let MultipleStartsEndsWith { attr } = self;
        format!("Merge into a single `{attr}` call")
    }
}
/// PIE810
pub(crate) fn multiple_starts_ends_with(checker: &Checker, expr: &Expr) {
    // Only `a or b or ...` chains can be collapsed into a single call.
    let Expr::BoolOp(ast::ExprBoolOp {
        op: BoolOp::Or,
        values,
        range: _,
        node_index: _,
    }) = expr
    else {
        return;
    };
    // Map from `(method name, receiver name)` to the indices of the operands
    // in the `or` chain that are calls of that method on that receiver.
    let mut duplicates = BTreeMap::new();
    for (index, call) in values.iter().enumerate() {
        let Expr::Call(ast::ExprCall {
            func,
            arguments:
                Arguments {
                    args,
                    keywords,
                    range: _,
                    node_index: _,
                },
            range: _,
            node_index: _,
        }) = &call
        else {
            continue;
        };
        // The pattern only applies to calls with no keyword arguments and
        // exactly one positional argument (the prefix/suffix).
        if !keywords.is_empty() {
            continue;
        }
        let [arg] = &**args else {
            continue;
        };
        let Expr::Attribute(ast::ExprAttribute { value, attr, .. }) = func.as_ref() else {
            continue;
        };
        if attr != "startswith" && attr != "endswith" {
            continue;
        }
        // The receiver must be a plain name (e.g. `msg.startswith(...)`).
        let Expr::Name(ast::ExprName { id: arg_name, .. }) = value.as_ref() else {
            continue;
        };
        // If the argument is bound to a tuple, skip it, since we don't want to suggest
        // `startswith((x, y))` where `x` or `y` are tuples. (Tuple literals are okay, since we
        // inline them below.)
        if is_bound_to_tuple(arg, checker.semantic()) {
            continue;
        }
        duplicates
            .entry((attr.as_str(), arg_name.as_str()))
            .or_insert_with(Vec::new)
            .push(index);
    }
    // Generate a `Diagnostic` for each duplicate.
    for ((attr_name, arg_name), indices) in duplicates {
        if indices.len() > 1 {
            let mut diagnostic = checker.report_diagnostic(
                MultipleStartsEndsWith {
                    attr: attr_name.to_string(),
                },
                expr.range(),
            );
            // Collect the prefix/suffix argument of each matching call.
            let words: Vec<&Expr> = indices
                .iter()
                .map(|index| &values[*index])
                .map(|expr| {
                    let Expr::Call(ast::ExprCall {
                        func: _,
                        arguments:
                            Arguments {
                                args,
                                keywords: _,
                                range: _,
                                node_index: _,
                            },
                        range: _,
                        node_index: _,
                    }) = expr
                    else {
                        unreachable!(
                            "{}",
                            format!("Indices should only contain `{attr_name}` calls")
                        )
                    };
                    args.first()
                        .unwrap_or_else(|| panic!("`{attr_name}` should have one argument"))
                })
                .collect();
            // Build the merged tuple argument, inlining (flattening) any
            // tuple literals among the collected arguments.
            let node = Expr::Tuple(ast::ExprTuple {
                elts: words
                    .iter()
                    .flat_map(|value| {
                        if let Expr::Tuple(tuple) = value {
                            Left(tuple.iter())
                        } else {
                            Right(iter::once(*value))
                        }
                    })
                    .cloned()
                    .collect(),
                ctx: ExprContext::Load,
                range: TextRange::default(),
                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                parenthesized: true,
            });
            // Synthesize `<arg_name>.<attr_name>((..., ...))`.
            let node1 = Expr::Name(ast::ExprName {
                id: arg_name.into(),
                ctx: ExprContext::Load,
                range: TextRange::default(),
                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            });
            let node2 = Expr::Attribute(ast::ExprAttribute {
                value: Box::new(node1),
                attr: Identifier::new(attr_name.to_string(), TextRange::default()),
                ctx: ExprContext::Load,
                range: TextRange::default(),
                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            });
            let node3 = Expr::Call(ast::ExprCall {
                func: Box::new(node2),
                arguments: Arguments {
                    args: Box::from([node]),
                    keywords: Box::from([]),
                    range: TextRange::default(),
                    node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                },
                range: TextRange::default(),
                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            });
            let call = node3;
            // Generate the combined `BoolOp`.
            // The merged call replaces the first matching operand; subsequent
            // matching operands are dropped (`call` can only be `take`n once).
            let mut call = Some(call);
            let node = Expr::BoolOp(ast::ExprBoolOp {
                op: BoolOp::Or,
                values: values
                    .iter()
                    .enumerate()
                    .filter_map(|(index, elt)| {
                        if indices.contains(&index) {
                            std::mem::take(&mut call)
                        } else {
                            Some(elt.clone())
                        }
                    })
                    .collect(),
                range: TextRange::default(),
                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            });
            let bool_op = node;
            diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
                checker.generator().expr(&bool_op),
                expr.range(),
            )));
        }
    }
}
/// Returns `true` if the expression definitively resolves to a tuple (e.g., `x` in `x = (1, 2)`).
fn is_bound_to_tuple(arg: &Expr, semantic: &SemanticModel) -> bool {
    match arg {
        // Only plain names can be resolved to a binding.
        Expr::Name(ast::ExprName { id, .. }) => semantic
            .lookup_symbol(id.as_str())
            .is_some_and(|binding_id| {
                analyze::typing::is_tuple(semantic.binding(binding_id), semantic)
            }),
        _ => false,
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pie/rules/reimplemented_container_builtin.rs | crates/ruff_linter/src/rules/flake8_pie/rules/reimplemented_container_builtin.rs | use ruff_python_ast::{Expr, ExprLambda};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{Edit, Fix};
use crate::{FixAvailability, Violation};
/// ## What it does
/// Checks for lambdas that can be replaced with the `list` or `dict` builtins.
///
/// ## Why is this bad?
/// Using container builtins are more succinct and idiomatic than wrapping
/// the literal in a lambda.
///
/// ## Example
/// ```python
/// from dataclasses import dataclass, field
///
///
/// @dataclass
/// class Foo:
/// bar: list[int] = field(default_factory=lambda: [])
/// ```
///
/// Use instead:
/// ```python
/// from dataclasses import dataclass, field
///
///
/// @dataclass
/// class Foo:
/// bar: list[int] = field(default_factory=list)
/// baz: dict[str, int] = field(default_factory=dict)
/// ```
///
/// ## References
/// - [Python documentation: `list`](https://docs.python.org/3/library/functions.html#func-list)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.208")]
pub(crate) struct ReimplementedContainerBuiltin {
    /// Which builtin the lambda reimplements: `list` or `dict`.
    container: Container,
}
impl Violation for ReimplementedContainerBuiltin {
    // The fix depends on being able to import/resolve the builtin symbol, so
    // it is only sometimes available.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        let Self { container } = self;
        format!("Prefer `{container}` over useless lambda")
    }
    fn fix_title(&self) -> Option<String> {
        let Self { container } = self;
        // Fixed garbled wording: previously "Replace with `lambda` with `…`".
        Some(format!("Replace `lambda` with `{container}`"))
    }
}
/// PIE807
pub(crate) fn reimplemented_container_builtin(checker: &Checker, expr: &ExprLambda) {
    // Only a parameterless lambda can be replaced by a bare builtin reference.
    if expr.parameters.is_some() {
        return;
    }
    // The body must be an empty `[]` or `{}` literal.
    let container = match expr.body.as_ref() {
        Expr::List(list) if list.is_empty() => Container::List,
        Expr::Dict(dict) if dict.is_empty() => Container::Dict,
        _ => return,
    };
    let mut diagnostic =
        checker.report_diagnostic(ReimplementedContainerBuiltin { container }, expr.range());
    diagnostic.try_set_fix(|| {
        // Resolve a usable binding for the builtin, importing it if shadowed.
        let (import_edit, binding) = checker.importer().get_or_import_builtin_symbol(
            container.as_str(),
            expr.start(),
            checker.semantic(),
        )?;
        Ok(Fix::safe_edits(
            Edit::range_replacement(binding, expr.range()),
            import_edit,
        ))
    });
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum Container {
    List,
    Dict,
}
impl Container {
    /// Source-code name of the builtin this container corresponds to.
    const fn as_str(self) -> &'static str {
        match self {
            Self::List => "list",
            Self::Dict => "dict",
        }
    }
}
impl std::fmt::Display for Container {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(fmt, "{}", self.as_str())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pie/rules/unnecessary_dict_kwargs.rs | crates/ruff_linter/src/rules/flake8_pie/rules/unnecessary_dict_kwargs.rs | use itertools::Itertools;
use rustc_hash::{FxBuildHasher, FxHashSet};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::parenthesized_range;
use ruff_python_ast::{self as ast, Expr};
use ruff_python_stdlib::identifiers::is_identifier;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix::edits::{Parentheses, remove_argument};
use crate::{Applicability, Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for unnecessary `dict` kwargs.
///
/// ## Why is this bad?
/// If the `dict` keys are valid identifiers, they can be passed as keyword
/// arguments directly, without constructing unnecessary dictionary.
/// This also makes code more type-safe as type checkers often cannot
/// precisely verify dynamic keyword arguments.
///
/// ## Example
///
/// ```python
/// def foo(bar):
/// return bar + 1
///
///
/// print(foo(**{"bar": 2})) # prints 3
///
/// # No typing errors, but results in an exception at runtime.
/// print(foo(**{"bar": 2, "baz": 3}))
/// ```
///
/// Use instead:
///
/// ```python
/// def foo(bar):
/// return bar + 1
///
///
/// print(foo(bar=2)) # prints 3
///
/// # Typing error detected: No parameter named "baz".
/// print(foo(bar=2, baz=3))
/// ```
///
/// ## Fix safety
///
/// This rule's fix is marked as unsafe for dictionaries with comments interleaved between
/// the items, as comments may be removed.
///
/// For example, the fix would be marked as unsafe in the following case:
///
/// ```python
/// foo(
/// **{
/// # comment
/// "x": 1.0,
/// # comment
/// "y": 2.0,
/// }
/// )
/// ```
///
/// as this is converted to `foo(x=1.0, y=2.0)` without any of the comments.
///
/// ## References
/// - [Python documentation: Dictionary displays](https://docs.python.org/3/reference/expressions.html#dictionary-displays)
/// - [Python documentation: Calls](https://docs.python.org/3/reference/expressions.html#calls)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.231")]
pub(crate) struct UnnecessaryDictKwargs;
impl Violation for UnnecessaryDictKwargs {
    // A fix is only offered when every key is a valid identifier and the
    // rewrite would not introduce a duplicate keyword argument.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
    #[derive_message_formats]
    fn message(&self) -> String {
        "Unnecessary `dict` kwargs".to_string()
    }
    fn fix_title(&self) -> Option<String> {
        Some("Remove unnecessary kwargs".to_string())
    }
}
/// PIE804
pub(crate) fn unnecessary_dict_kwargs(checker: &Checker, call: &ast::ExprCall) {
    // Set of keyword names that occur more than once, computed lazily on
    // first use (only needed when a fix might introduce a duplicate).
    let mut duplicate_keywords = None;
    for keyword in &*call.arguments.keywords {
        // keyword is a spread operator (indicated by None).
        if keyword.arg.is_some() {
            continue;
        }
        let Expr::Dict(dict) = &keyword.value else {
            continue;
        };
        // Ex) `foo(**{**bar})`
        if let [ast::DictItem { key: None, value }] = dict.items.as_slice() {
            let edit = Edit::range_replacement(
                format!("**{}", checker.locator().slice(value)),
                keyword.range(),
            );
            checker
                .report_diagnostic(UnnecessaryDictKwargs, keyword.range())
                .set_fix(Fix::safe_edit(edit));
            continue;
        }
        // Ensure that every keyword is a valid keyword argument (e.g., avoid errors for cases like
        // `foo(**{"bar-bar": 1})`).
        let kwargs: Vec<&str> = dict
            .iter_keys()
            .filter_map(|key| key.and_then(as_kwarg))
            .collect();
        if kwargs.len() != dict.len() {
            continue;
        }
        let mut diagnostic = checker.report_diagnostic(UnnecessaryDictKwargs, keyword.range());
        if dict.is_empty() {
            // `foo(**{})` — remove the argument entirely.
            diagnostic.try_set_fix(|| {
                remove_argument(
                    keyword,
                    &call.arguments,
                    Parentheses::Preserve,
                    checker.source(),
                    checker.tokens(),
                )
                .map(Fix::safe_edit)
            });
        } else {
            // Compute the set of duplicate keywords (lazily).
            if duplicate_keywords.is_none() {
                duplicate_keywords = Some(duplicates(call));
            }
            // Avoid fixing if doing so could introduce a duplicate keyword argument.
            if let Some(duplicate_keywords) = duplicate_keywords.as_ref() {
                if kwargs
                    .iter()
                    .all(|kwarg| !duplicate_keywords.contains(kwarg))
                {
                    // Rewrite each `"k": v` pair as `k=v`, preserving any
                    // parentheses around the value.
                    let edit = Edit::range_replacement(
                        kwargs
                            .iter()
                            .zip(dict.iter_values())
                            .map(|(kwarg, value)| {
                                format!(
                                    "{}={}",
                                    kwarg,
                                    checker.locator().slice(
                                        parenthesized_range(
                                            value.into(),
                                            dict.into(),
                                            checker.tokens()
                                        )
                                        .unwrap_or(value.range())
                                    )
                                )
                            })
                            .join(", "),
                        keyword.range(),
                    );
                    diagnostic.set_fix(Fix::applicable_edit(
                        edit,
                        // Comments inside the dict would be dropped by the
                        // rewrite, so the fix is unsafe in that case.
                        if checker.comment_ranges().intersects(dict.range()) {
                            Applicability::Unsafe
                        } else {
                            Applicability::Safe
                        },
                    ));
                }
            }
        }
    }
}
/// Determine the set of keywords that appear in multiple positions (either directly, as in
/// `func(x=1)`, or indirectly, as in `func(**{"x": 1})`).
fn duplicates(call: &ast::ExprCall) -> FxHashSet<&str> {
    let capacity = call.arguments.keywords.len();
    let mut seen = FxHashSet::with_capacity_and_hasher(capacity, FxBuildHasher);
    let mut duplicates = FxHashSet::with_capacity_and_hasher(capacity, FxBuildHasher);
    for keyword in &*call.arguments.keywords {
        match &keyword.arg {
            // Direct keyword: `func(x=1)`.
            Some(name) => {
                if !seen.insert(name.as_str()) {
                    duplicates.insert(name.as_str());
                }
            }
            // Spread: `func(**{...})` — count each identifier-valued key.
            None => {
                if let Expr::Dict(dict) = &keyword.value {
                    for name in dict.iter_keys().filter_map(|key| key.and_then(as_kwarg)) {
                        if !seen.insert(name) {
                            duplicates.insert(name);
                        }
                    }
                }
            }
        }
    }
    duplicates
}
/// Return `Some` if a key is a valid keyword argument name, or `None` otherwise.
fn as_kwarg(key: &Expr) -> Option<&str> {
    match key {
        Expr::StringLiteral(ast::ExprStringLiteral { value, .. }) => {
            let name = value.to_str();
            // Only keys that are valid Python identifiers can become kwargs.
            is_identifier(name).then_some(name)
        }
        _ => None,
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pie/rules/unnecessary_placeholder.rs | crates/ruff_linter/src/rules/flake8_pie/rules/unnecessary_placeholder.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::whitespace::trailing_comment_start_offset;
use ruff_python_ast::{Expr, ExprStringLiteral, Stmt, StmtExpr};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix;
use crate::{AlwaysFixableViolation, Applicability};
use crate::{Edit, Fix};
/// ## What it does
/// Checks for unnecessary `pass` statements and ellipsis (`...`) literals in
/// functions, classes, and other blocks.
///
/// ## Why is this bad?
/// In Python, the `pass` statement and ellipsis (`...`) literal serve as
/// placeholders, allowing for syntactically correct empty code blocks. The
/// primary purpose of these nodes is to avoid syntax errors in situations
/// where a statement or expression is syntactically required, but no code
/// needs to be executed.
///
/// If a `pass` or ellipsis is present in a code block that includes at least
/// one other statement (even, e.g., a docstring), it is unnecessary and should
/// be removed.
///
/// ## Example
/// ```python
/// def func():
/// """Placeholder docstring."""
/// pass
/// ```
///
/// Use instead:
/// ```python
/// def func():
/// """Placeholder docstring."""
/// ```
///
/// Or, given:
/// ```python
/// def func():
/// """Placeholder docstring."""
/// ...
/// ```
///
/// Use instead:
/// ```python
/// def func():
/// """Placeholder docstring."""
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe in the rare case that the `pass` or ellipsis
/// is followed by a string literal, since removal of the placeholder would convert the
/// subsequent string literal into a docstring.
///
/// ## References
/// - [Python documentation: The `pass` statement](https://docs.python.org/3/reference/simple_stmts.html#the-pass-statement)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.208")]
pub(crate) struct UnnecessaryPlaceholder {
    /// Which placeholder was found: a `pass` statement or a `...` literal.
    kind: Placeholder,
}
impl AlwaysFixableViolation for UnnecessaryPlaceholder {
    #[derive_message_formats]
    fn message(&self) -> String {
        match &self.kind {
            Placeholder::Pass => "Unnecessary `pass` statement".to_string(),
            Placeholder::Ellipsis => "Unnecessary `...` literal".to_string(),
        }
    }
    fn fix_title(&self) -> String {
        let title = match &self.kind {
            Placeholder::Pass => "Remove unnecessary `pass`",
            Placeholder::Ellipsis => "Remove unnecessary `...`",
        };
        title.to_string()
    }
}
/// PIE790
pub(crate) fn unnecessary_placeholder(checker: &Checker, body: &[Stmt]) {
    // A lone `pass`/`...` is the body's only statement and therefore required.
    if body.len() < 2 {
        return;
    }
    for (index, stmt) in body.iter().enumerate() {
        let kind = match stmt {
            Stmt::Pass(_) => Placeholder::Pass,
            Stmt::Expr(expr) if expr.value.is_ellipsis_literal_expr() => {
                // In a type-checking block, a trailing ellipsis might be meaningful.
                // A user might be using the type-checking context to declare a stub.
                if checker.semantic().in_type_checking_block() {
                    return;
                }
                // Ellipses are significant in protocol methods and abstract methods.
                // Specifically, Pyright uses the presence of an ellipsis to indicate that
                // a method is a stub, rather than a default implementation.
                if checker.semantic().in_protocol_or_abstract_method() {
                    return;
                }
                Placeholder::Ellipsis
            }
            _ => continue,
        };
        // NOTE(review): the two guards above `return` (bailing on the whole
        // body) rather than `continue` — presumably intentional, so no
        // placeholder in a stub-like context is flagged; confirm if changing.
        let next_stmt = body.get(index + 1);
        add_diagnostic(checker, stmt, next_stmt, kind);
    }
}
/// Add a diagnostic for the given statement.
fn add_diagnostic(
    checker: &Checker,
    stmt: &Stmt,
    next_stmt: Option<&Stmt>,
    placeholder_kind: Placeholder,
) {
    // If the statement has a trailing same-line comment, delete only up to the
    // comment's start; otherwise delete the whole statement.
    let edit = match trailing_comment_start_offset(stmt, checker.source()) {
        Some(index) => Edit::range_deletion(stmt.range().add_end(index)),
        None => fix::edits::delete_stmt(stmt, None, checker.locator(), checker.indexer()),
    };
    // Mark the fix as unsafe if the following statement is a string literal,
    // as it will become the module/class/function's docstring after the fix.
    let follows_string_literal = matches!(
        next_stmt,
        Some(Stmt::Expr(StmtExpr { value, .. }))
            if matches!(value.as_ref(), Expr::StringLiteral(ExprStringLiteral { .. }))
    );
    let applicability = if follows_string_literal {
        Applicability::Unsafe
    } else {
        Applicability::Safe
    };
    let isolation_level = Checker::isolation(checker.semantic().current_statement_id());
    let fix = Fix::applicable_edit(edit, applicability).isolate(isolation_level);
    checker
        .report_diagnostic(
            UnnecessaryPlaceholder {
                kind: placeholder_kind,
            },
            stmt.range(),
        )
        .set_fix(fix);
}
#[derive(Debug, PartialEq, Eq)]
enum Placeholder {
    Pass,
    Ellipsis,
}
impl std::fmt::Display for Placeholder {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Render the placeholder exactly as it appears in source.
        let text = match self {
            Self::Pass => "pass",
            Self::Ellipsis => "...",
        };
        fmt.write_str(text)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pie/rules/mod.rs | crates/ruff_linter/src/rules/flake8_pie/rules/mod.rs | pub(crate) use duplicate_class_field_definition::*;
pub(crate) use multiple_starts_ends_with::*;
pub(crate) use non_unique_enums::*;
pub(crate) use reimplemented_container_builtin::*;
pub(crate) use unnecessary_dict_kwargs::*;
pub(crate) use unnecessary_placeholder::*;
pub(crate) use unnecessary_range_start::*;
pub(crate) use unnecessary_spread::*;
mod duplicate_class_field_definition;
mod multiple_starts_ends_with;
mod non_unique_enums;
mod reimplemented_container_builtin;
mod unnecessary_dict_kwargs;
mod unnecessary_placeholder;
mod unnecessary_range_start;
mod unnecessary_spread;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pie/rules/non_unique_enums.rs | crates/ruff_linter/src/rules/flake8_pie/rules/non_unique_enums.rs | use ruff_python_semantic::SemanticModel;
use rustc_hash::FxHashSet;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::comparable::ComparableExpr;
use ruff_python_ast::{self as ast, Expr, ExprCall, Stmt};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for enums that contain duplicate values.
///
/// ## Why is this bad?
/// Enum values should be unique. Non-unique values are redundant and likely a
/// mistake.
///
/// ## Example
/// ```python
/// from enum import Enum
///
///
/// class Foo(Enum):
/// A = 1
/// B = 2
/// C = 1
/// ```
///
/// Use instead:
/// ```python
/// from enum import Enum
///
///
/// class Foo(Enum):
/// A = 1
/// B = 2
/// C = 3
/// ```
///
/// ## References
/// - [Python documentation: `enum.Enum`](https://docs.python.org/3/library/enum.html#enum.Enum)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.224")]
pub(crate) struct NonUniqueEnums {
    /// Source text of the duplicated enum member value.
    value: String,
}
impl Violation for NonUniqueEnums {
    #[derive_message_formats]
    fn message(&self) -> String {
        let NonUniqueEnums { value } = self;
        format!("Enum contains duplicate value: `{value}`")
    }
}
/// PIE796
pub(crate) fn non_unique_enums(checker: &Checker, parent: &Stmt, body: &[Stmt]) {
    let semantic = checker.semantic();
    let Stmt::ClassDef(parent) = parent else {
        return;
    };
    // Only applies to classes with `enum.Enum` among their direct bases.
    if !parent.bases().iter().any(|expr| {
        semantic
            .resolve_qualified_name(expr)
            .is_some_and(|qualified_name| matches!(qualified_name.segments(), ["enum", "Enum"]))
    }) {
        return;
    }
    // Structural comparison of previously-seen member values.
    let mut seen_targets: FxHashSet<ComparableExpr> = FxHashSet::default();
    for stmt in body {
        let Stmt::Assign(ast::StmtAssign { value, .. }) = stmt else {
            continue;
        };
        // `enum.auto()` values (bare or inside a tuple) are unique by construction.
        if is_call_to_enum_auto(semantic, value) {
            continue;
        } else if let Expr::Tuple(ast::ExprTuple { elts, .. }) = value.as_ref() {
            if elts.iter().any(|elt| is_call_to_enum_auto(semantic, elt)) {
                continue;
            }
        }
        // In stubs, `A = ...` / `A = cast(T, ...)` denote unknown values.
        if checker.source_type.is_stub() && member_has_unknown_value(semantic, value) {
            continue;
        }
        let comparable = ComparableExpr::from(value);
        if !seen_targets.insert(comparable) {
            checker.report_diagnostic(
                NonUniqueEnums {
                    value: checker.generator().expr(value),
                },
                stmt.range(),
            );
        }
    }
}
/// Whether the expression is a call that resolves to `enum.auto`.
fn is_call_to_enum_auto(semantic: &SemanticModel, expr: &Expr) -> bool {
    let Some(call) = expr.as_call_expr() else {
        return false;
    };
    semantic
        .resolve_qualified_name(&call.func)
        .is_some_and(|qualified_name| matches!(qualified_name.segments(), ["enum", "auto"]))
}
/// Whether the value is a bare ellipsis literal (`A = ...`)
/// or a casted one (`A = cast(SomeType, ...)`).
fn member_has_unknown_value(semantic: &SemanticModel, expr: &Expr) -> bool {
    match expr {
        Expr::EllipsisLiteral(_) => true,
        Expr::Call(ExprCall {
            func, arguments, ..
        }) => {
            // Must be `typing.cast` with no keywords and exactly two
            // positional arguments, the second being `...`.
            semantic.match_typing_expr(func, "cast")
                && arguments.keywords.is_empty()
                && matches!(arguments.args.as_ref(), [_, Expr::EllipsisLiteral(_)])
        }
        _ => false,
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pie/rules/unnecessary_range_start.rs | crates/ruff_linter/src/rules/flake8_pie/rules/unnecessary_range_start.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, Expr};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix::edits::{Parentheses, remove_argument};
use crate::{AlwaysFixableViolation, Fix};
/// ## What it does
/// Checks for `range` calls with an unnecessary `start` argument.
///
/// ## Why is this bad?
/// `range(0, x)` is equivalent to `range(x)`, as `0` is the default value for
/// the `start` argument. Omitting the `start` argument makes the code more
/// concise and idiomatic.
///
/// ## Example
/// ```python
/// range(0, 3)
/// ```
///
/// Use instead:
/// ```python
/// range(3)
/// ```
///
/// ## References
/// - [Python documentation: `range`](https://docs.python.org/3/library/stdtypes.html#range)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.286")]
// Unit struct: the message carries no dynamic data.
pub(crate) struct UnnecessaryRangeStart;
impl AlwaysFixableViolation for UnnecessaryRangeStart {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Unnecessary `start` argument in `range`".to_string()
    }
    fn fix_title(&self) -> String {
        "Remove `start` argument".to_string()
    }
}
/// PIE808
pub(crate) fn unnecessary_range_start(checker: &Checker, call: &ast::ExprCall) {
    // `range` doesn't accept keyword arguments.
    if !call.arguments.keywords.is_empty() {
        return;
    }
    // Exactly two positional arguments: `range(start, stop)` with no `step`.
    let [start, _stop] = &*call.arguments.args else {
        return;
    };
    // The `start` argument must be the integer literal `0`.
    if !matches!(
        start,
        Expr::NumberLiteral(ast::ExprNumberLiteral {
            value: ast::Number::Int(value),
            ..
        }) if *value == 0
    ) {
        return;
    }
    // Verify that the call is to the `range` builtin.
    if !checker.semantic().match_builtin_expr(&call.func, "range") {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(UnnecessaryRangeStart, start.range());
    diagnostic.try_set_fix(|| {
        remove_argument(
            start,
            &call.arguments,
            Parentheses::Preserve,
            checker.source(),
            checker.tokens(),
        )
        .map(Fix::safe_edit)
    });
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pie/rules/unnecessary_spread.rs | crates/ruff_linter/src/rules/flake8_pie/rules/unnecessary_spread.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::token::{TokenKind, Tokens};
use ruff_python_ast::{self as ast, Expr};
use ruff_text_size::{Ranged, TextLen, TextSize};
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for unnecessary dictionary unpacking operators (`**`).
///
/// ## Why is this bad?
/// Unpacking a dictionary into another dictionary is redundant. The unpacking
/// operator can be removed, making the code more readable.
///
/// ## Example
/// ```python
/// foo = {"A": 1, "B": 2}
/// bar = {**foo, **{"C": 3}}
/// ```
///
/// Use instead:
/// ```python
/// foo = {"A": 1, "B": 2}
/// bar = {**foo, "C": 3}
/// ```
///
/// ## References
/// - [Python documentation: Dictionary displays](https://docs.python.org/3/reference/expressions.html#dictionary-displays)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.231")]
pub(crate) struct UnnecessarySpread;
impl Violation for UnnecessarySpread {
    // A fix is only attached when `unnecessary_spread_fix` succeeds in
    // locating all tokens to delete, hence `Sometimes`.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    fn fix_title(&self) -> Option<String> {
        Some(String::from("Remove unnecessary dict"))
    }

    #[derive_message_formats]
    fn message(&self) -> String {
        "Unnecessary spread `**`".to_string()
    }
}
/// PIE800
pub(crate) fn unnecessary_spread(checker: &Checker, dict: &ast::ExprDict) {
    // Track the end of the previous item; initially the position just past
    // the dictionary's opening brace.
    let mut prev_end = dict.start() + TextSize::from(1);
    for ast::DictItem { key, value } in dict {
        // A missing key marks a `**`-spread entry; flag it only when the
        // spread operand is itself a dict literal.
        if let (None, Expr::Dict(inner)) = (key, value) {
            let mut diagnostic = checker.report_diagnostic(UnnecessarySpread, value.range());
            if let Some(fix) = unnecessary_spread_fix(inner, prev_end, checker.tokens()) {
                diagnostic.set_fix(fix);
            }
        }
        prev_end = value.end();
    }
}
/// Generate a [`Fix`] to remove an unnecessary dictionary spread.
///
/// The fix deletes the `**` token, the inner dict's braces, any parentheses
/// wrapped around the inner dict, and the separating comma (either the one
/// inside the outer dict after the inner dict's last value, or — when the
/// inner dict is empty — the comma that trails the spread). Returns `None`
/// when an unexpected token is encountered, in which case no fix is offered.
fn unnecessary_spread_fix(
    dict: &ast::ExprDict,
    prev_end: TextSize,
    tokens: &Tokens,
) -> Option<Fix> {
    // Find the `**` token preceding the spread.
    let doublestar = tokens
        .after(prev_end)
        .iter()
        .find(|tok| matches!(tok.kind(), TokenKind::DoubleStar))?;
    // Determine whether the inner dict is empty, and where to resume the
    // token scan: after the last value, or (for an empty dict) just past the
    // opening `{`.
    let (empty, last_value_end) = if let Some(last) = dict.iter_values().last() {
        (false, last.end())
    } else {
        (true, dict.start() + "{".text_len())
    };
    let mut edits = vec![];
    // Number of `(` deleted between `**` and `{` that still need a matching
    // `)` deleted after the closing `}`.
    let mut open_parens: u32 = 0;
    // Forward scan from `**`: delete any `(` and the inner dict's `{`.
    for tok in tokens.after(doublestar.end()) {
        match tok.kind() {
            kind if kind.is_trivia() => {}
            TokenKind::Lpar => {
                edits.push(Edit::range_deletion(tok.range()));
                open_parens += 1;
            }
            TokenKind::Lbrace => {
                edits.push(Edit::range_deletion(tok.range()));
                break;
            }
            _ => {
                // Unexpected token, bail
                return None;
            }
        }
    }
    let mut found_r_curly = false;
    let mut found_dict_comma = false;
    // Scan after the inner dict's last value: delete its `}`, any matching
    // `)`, and the appropriate comma.
    for tok in tokens.after(last_value_end) {
        // Stop once the `}` and all matched `)` are handled; for an empty
        // inner dict, also wait for the trailing comma to be deleted.
        if found_r_curly && open_parens == 0 && (!empty || found_dict_comma) {
            break;
        }
        match tok.kind() {
            kind if kind.is_trivia() => {}
            TokenKind::Comma => {
                edits.push(Edit::range_deletion(tok.range()));
                if found_r_curly {
                    found_dict_comma = true;
                }
            }
            TokenKind::Rpar => {
                if found_r_curly {
                    edits.push(Edit::range_deletion(tok.range()));
                    open_parens -= 1;
                }
            }
            TokenKind::Rbrace => {
                if found_r_curly {
                    // This is the outer dict's `}`; leave it intact.
                    break;
                }
                edits.push(Edit::range_deletion(tok.range()));
                found_r_curly = true;
            }
            _ => {
                // Unexpected token, bail
                return None;
            }
        }
    }
    Some(Fix::safe_edits(
        Edit::range_deletion(doublestar.range()),
        edits,
    ))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pie/rules/duplicate_class_field_definition.rs | crates/ruff_linter/src/rules/flake8_pie/rules/duplicate_class_field_definition.rs | use rustc_hash::FxHashSet;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::any_over_expr;
use ruff_python_ast::{self as ast, Expr, Stmt};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix;
use crate::{AlwaysFixableViolation, Fix};
/// ## What it does
/// Checks for duplicate field definitions in classes.
///
/// ## Why is this bad?
/// Defining a field multiple times in a class body is redundant and likely a
/// mistake.
///
/// ## Example
/// ```python
/// class Person:
///     name = Tom
///     ...
///     name = Ben
/// ```
///
/// Use instead:
/// ```python
/// class Person:
///     name = Tom
///     ...
/// ```
///
/// ## Fix safety
/// This fix is always marked as unsafe since we cannot know
/// for certain which assignment was intended.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.208")]
pub(crate) struct DuplicateClassFieldDefinition {
    /// The name of the field that is defined more than once.
    name: String,
}
impl AlwaysFixableViolation for DuplicateClassFieldDefinition {
    #[derive_message_formats]
    fn message(&self) -> String {
        let Self { name } = self;
        format!("Class field `{name}` is defined multiple times")
    }

    fn fix_title(&self) -> String {
        let Self { name } = self;
        format!("Remove duplicate field definition for `{name}`")
    }
}
/// PIE794
pub(crate) fn duplicate_class_field_definition(checker: &Checker, body: &[Stmt]) {
    // Names assigned so far in this class body; the first occurrence wins.
    let mut seen_targets: FxHashSet<&str> = FxHashSet::default();
    for stmt in body {
        // Extract the property name from the assignment statement.
        // Only single-target, simple-name assignments are considered
        // (e.g. `x = 1` or `x: int = 1`, but not `x = y = 1` or `a.b = 1`).
        let target = match stmt {
            Stmt::Assign(ast::StmtAssign { targets, .. }) => {
                if let [Expr::Name(id)] = targets.as_slice() {
                    id
                } else {
                    continue;
                }
            }
            Stmt::AnnAssign(ast::StmtAnnAssign { target, .. }) => {
                if let Expr::Name(id) = target.as_ref() {
                    id
                } else {
                    continue;
                }
            }
            _ => continue,
        };
        // If this is an unrolled augmented assignment (e.g., `x = x + 1`), skip it.
        match stmt {
            Stmt::Assign(ast::StmtAssign { value, .. }) => {
                if any_over_expr(value.as_ref(), &|expr| {
                    expr.as_name_expr().is_some_and(|name| name.id == target.id)
                }) {
                    continue;
                }
            }
            Stmt::AnnAssign(ast::StmtAnnAssign {
                value: Some(value), ..
            }) => {
                if any_over_expr(value.as_ref(), &|expr| {
                    expr.as_name_expr().is_some_and(|name| name.id == target.id)
                }) {
                    continue;
                }
            }
            // Bare annotations (`x: int` with no value) fall through here and
            // are skipped entirely: they are neither flagged nor recorded in
            // `seen_targets`.
            _ => continue,
        }
        // `insert` returns `false` for a repeat definition: report it and
        // delete the whole statement.
        if !seen_targets.insert(target.id.as_str()) {
            let mut diagnostic = checker.report_diagnostic(
                DuplicateClassFieldDefinition {
                    name: target.id.to_string(),
                },
                stmt.range(),
            );
            let edit =
                fix::edits::delete_stmt(stmt, Some(stmt), checker.locator(), checker.indexer());
            // Unsafe fix (see rule docs); isolated per enclosing statement,
            // presumably to avoid conflicting deletions in one pass — see
            // `Checker::isolation`.
            diagnostic.set_fix(Fix::unsafe_edit(edit).isolate(Checker::isolation(
                checker.semantic().current_statement_id(),
            )));
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/settings.rs | crates/ruff_linter/src/rules/pylint/settings.rs | //! Settings for the `pylint` plugin.
use rustc_hash::FxHashSet;
use serde::{Deserialize, Serialize};
use std::fmt;
use crate::display_settings;
use ruff_macros::CacheKey;
use ruff_python_ast::{ExprNumberLiteral, LiteralExpressionRef, Number};
/// The type of a constant literal, used to configure which "magic values"
/// are exempted from `magic-value-comparison` (`allow-magic-value-types`).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, CacheKey)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum ConstantType {
    Bytes,
    Complex,
    Float,
    Int,
    Str,
}
impl ConstantType {
    /// Map a literal expression to its corresponding [`ConstantType`].
    ///
    /// Returns `None` for literals (`True`/`False`, `None`, `...`) that have
    /// no corresponding variant.
    pub fn try_from_literal_expr(literal_expr: LiteralExpressionRef<'_>) -> Option<Self> {
        let constant_type = match literal_expr {
            LiteralExpressionRef::StringLiteral(_) => Self::Str,
            LiteralExpressionRef::BytesLiteral(_) => Self::Bytes,
            LiteralExpressionRef::NumberLiteral(ExprNumberLiteral {
                value: Number::Int(_),
                ..
            }) => Self::Int,
            LiteralExpressionRef::NumberLiteral(ExprNumberLiteral {
                value: Number::Float(_),
                ..
            }) => Self::Float,
            LiteralExpressionRef::NumberLiteral(ExprNumberLiteral {
                value: Number::Complex { .. },
                ..
            }) => Self::Complex,
            LiteralExpressionRef::BooleanLiteral(_)
            | LiteralExpressionRef::NoneLiteral(_)
            | LiteralExpressionRef::EllipsisLiteral(_) => return None,
        };
        Some(constant_type)
    }
}
impl fmt::Display for ConstantType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render the lowercase Python type name for each variant.
        let name = match self {
            Self::Bytes => "bytes",
            Self::Complex => "complex",
            Self::Float => "float",
            Self::Int => "int",
            Self::Str => "str",
        };
        f.write_str(name)
    }
}
/// Settings for the `pylint` rule family.
#[derive(Debug, Clone, CacheKey)]
pub struct Settings {
    /// Constant types exempted from `magic-value-comparison`.
    pub allow_magic_value_types: Vec<ConstantType>,
    /// Extra method names accepted by `bad-dunder-method-name`.
    pub allow_dunder_method_names: FxHashSet<String>,
    /// Threshold for `too-many-arguments`.
    pub max_args: usize,
    /// Threshold for `too-many-positional-arguments`.
    pub max_positional_args: usize,
    /// Threshold for `too-many-return-statements`.
    pub max_returns: usize,
    /// Threshold for `too-many-boolean-expressions`.
    pub max_bool_expr: usize,
    /// Threshold for `too-many-branches`.
    pub max_branches: usize,
    /// Threshold for `too-many-statements`.
    pub max_statements: usize,
    /// Threshold for `too-many-public-methods`.
    pub max_public_methods: usize,
    /// Threshold for `too-many-locals`.
    pub max_locals: usize,
    /// Threshold for `too-many-nested-blocks`.
    pub max_nested_blocks: usize,
}
impl Default for Settings {
    fn default() -> Self {
        // NOTE(review): these thresholds appear to mirror Pylint's own
        // defaults for the corresponding options — confirm against upstream
        // before changing any value.
        Self {
            // Strings and bytes are not treated as magic values by default.
            allow_magic_value_types: vec![ConstantType::Str, ConstantType::Bytes],
            allow_dunder_method_names: FxHashSet::default(),
            max_args: 5,
            max_positional_args: 5,
            max_returns: 6,
            max_bool_expr: 5,
            max_branches: 12,
            max_statements: 50,
            max_public_methods: 20,
            max_locals: 15,
            max_nested_blocks: 5,
        }
    }
}
impl fmt::Display for Settings {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Rendered via the shared `display_settings!` macro so the output
        // matches the format of the other linter settings sections.
        display_settings! {
            formatter = f,
            namespace = "linter.pylint",
            fields = [
                self.allow_magic_value_types | array,
                self.allow_dunder_method_names | set,
                self.max_args,
                self.max_positional_args,
                self.max_returns,
                self.max_bool_expr,
                self.max_branches,
                self.max_statements,
                self.max_public_methods,
                self.max_locals,
                self.max_nested_blocks
            ]
        }
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/helpers.rs | crates/ruff_linter/src/rules/pylint/helpers.rs | use ruff_python_ast as ast;
use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::{Arguments, Expr, Stmt, visitor};
use ruff_python_semantic::analyze::function_type;
use ruff_python_semantic::{ScopeKind, SemanticModel};
use ruff_text_size::TextRange;
use crate::settings::LinterSettings;
/// Returns the value of the `name` parameter to, e.g., a `TypeVar` constructor.
pub(super) fn type_param_name(arguments: &Arguments) -> Option<&str> {
    // Accept the name both positionally (`TypeVar("T")`) and as a keyword
    // (`TypeVar(name="T")`); only string literals are recognized.
    match arguments.find_argument_value("name", 0)? {
        Expr::StringLiteral(ast::ExprStringLiteral { value, .. }) => Some(value.to_str()),
        _ => None,
    }
}
/// Returns `true` if the current scope is a method named `dunder_name`.
pub(super) fn in_dunder_method(
    dunder_name: &str,
    semantic: &SemanticModel,
    settings: &LinterSettings,
) -> bool {
    // The current scope must be a function with the requested name.
    let scope = semantic.current_scope();
    let ScopeKind::Function(ast::StmtFunctionDef {
        name,
        decorator_list,
        ..
    }) = scope.kind
    else {
        return false;
    };
    if name != dunder_name {
        return false;
    }
    // Classify the function against its nearest non-type-parameter parent
    // scope; only plain methods qualify.
    let Some(parent) = semantic.first_non_type_parent_scope(scope) else {
        return false;
    };
    matches!(
        function_type::classify(
            name,
            decorator_list,
            parent,
            semantic,
            &settings.pep8_naming.classmethod_decorators,
            &settings.pep8_naming.staticmethod_decorators,
        ),
        function_type::FunctionType::Method
    )
}
/// Visitor to track reads from an iterable in a loop.
///
/// Collects the ranges of `letters[index]` subscript loads so they can be
/// replaced by the loop's unpacked value; collection stops permanently once
/// any of the tracked names is reassigned or deleted (see the `Visitor`
/// implementation).
#[derive(Debug)]
pub(crate) struct SequenceIndexVisitor<'a> {
    /// `letters`, given `for index, letter in enumerate(letters)`.
    sequence_name: &'a str,
    /// `index`, given `for index, letter in enumerate(letters)`.
    index_name: &'a str,
    /// `letter`, given `for index, letter in enumerate(letters)`.
    value_name: &'a str,
    /// The ranges of any `letters[index]` accesses.
    accesses: Vec<TextRange>,
    /// Whether any of the variables have been modified.
    modified: bool,
}
impl<'a> SequenceIndexVisitor<'a> {
    /// Build a visitor that records `sequence_name[index_name]` accesses for
    /// a `for index_name, value_name in enumerate(sequence_name)` loop.
    pub(crate) fn new(sequence_name: &'a str, index_name: &'a str, value_name: &'a str) -> Self {
        Self {
            accesses: Vec::new(),
            modified: false,
            sequence_name,
            index_name,
            value_name,
        }
    }

    /// Consume the visitor, yielding the ranges of all recorded accesses.
    pub(crate) fn into_accesses(self) -> Vec<TextRange> {
        self.accesses
    }
}
impl SequenceIndexVisitor<'_> {
    /// Returns `true` if `expr` is one of the tracked names (the sequence,
    /// the index, or the value) or the tracked `sequence[index]` subscript.
    /// Assigning to or deleting any of these stops further diagnostics.
    fn is_assignment(&self, expr: &Expr) -> bool {
        match expr {
            Expr::Name(ast::ExprName { id, .. }) => {
                id == self.sequence_name || id == self.index_name || id == self.value_name
            }
            Expr::Subscript(ast::ExprSubscript { value, slice, .. }) => {
                // Both the subscripted value and the slice must be plain
                // names matching the tracked sequence and index.
                let (
                    Expr::Name(ast::ExprName { id: value_id, .. }),
                    Expr::Name(ast::ExprName { id: index_id, .. }),
                ) = (value.as_ref(), slice.as_ref())
                else {
                    return false;
                };
                value_id == self.sequence_name && index_id == self.index_name
            }
            _ => false,
        }
    }
}
impl Visitor<'_> for SequenceIndexVisitor<'_> {
    fn visit_stmt(&mut self, stmt: &Stmt) {
        // Once any tracked name has been modified, stop collecting entirely.
        if self.modified {
            return;
        }
        match stmt {
            Stmt::Assign(ast::StmtAssign { targets, value, .. }) => {
                // Note: `modified` is set before visiting the right-hand
                // side, so reads inside the RHS of a modifying assignment
                // are deliberately not recorded.
                self.modified = targets.iter().any(|target| self.is_assignment(target));
                self.visit_expr(value);
            }
            Stmt::AnnAssign(ast::StmtAnnAssign { target, value, .. }) => {
                // A bare annotation (no value) cannot modify anything.
                if let Some(value) = value {
                    self.modified = self.is_assignment(target);
                    self.visit_expr(value);
                }
            }
            Stmt::AugAssign(ast::StmtAugAssign { target, value, .. }) => {
                self.modified = self.is_assignment(target);
                self.visit_expr(value);
            }
            Stmt::Delete(ast::StmtDelete { targets, .. }) => {
                self.modified = targets.iter().any(|target| self.is_assignment(target));
            }
            _ => visitor::walk_stmt(self, stmt),
        }
    }
    fn visit_expr(&mut self, expr: &Expr) {
        if self.modified {
            return;
        }
        // Record any `sequence[index]` subscript read, then keep walking
        // nested expressions.
        if let Expr::Subscript(ast::ExprSubscript {
            value,
            slice,
            range,
            ..
        }) = expr
        {
            if let Expr::Name(ast::ExprName { id, .. }) = &**value {
                if id == self.sequence_name {
                    if let Expr::Name(ast::ExprName { id, .. }) = &**slice {
                        if id == self.index_name {
                            self.accesses.push(*range);
                        }
                    }
                }
            }
        }
        visitor::walk_expr(self, expr);
    }
}
/// Returns `true` if `method` is one of the dunder methods implementing
/// Python's operator protocol: rich comparisons, arithmetic and bitwise
/// operators, and their reflected (`__r*__`) and in-place (`__i*__`)
/// variants.
pub(crate) fn is_dunder_operator_method(method: &str) -> bool {
    // Note: `__divmod__` has reflected but no in-place form, so there is no
    // `__idivmod__` entry.
    const OPERATOR_DUNDERS: &[&str] = &[
        // Rich comparisons.
        "__lt__",
        "__le__",
        "__eq__",
        "__ne__",
        "__gt__",
        "__ge__",
        // Binary operators.
        "__add__",
        "__sub__",
        "__mul__",
        "__matmul__",
        "__truediv__",
        "__floordiv__",
        "__mod__",
        "__divmod__",
        "__pow__",
        "__lshift__",
        "__rshift__",
        "__and__",
        "__xor__",
        "__or__",
        // Reflected variants.
        "__radd__",
        "__rsub__",
        "__rmul__",
        "__rmatmul__",
        "__rtruediv__",
        "__rfloordiv__",
        "__rmod__",
        "__rdivmod__",
        "__rpow__",
        "__rlshift__",
        "__rrshift__",
        "__rand__",
        "__rxor__",
        "__ror__",
        // In-place variants.
        "__iadd__",
        "__isub__",
        "__imul__",
        "__imatmul__",
        "__itruediv__",
        "__ifloordiv__",
        "__imod__",
        "__ipow__",
        "__ilshift__",
        "__irshift__",
        "__iand__",
        "__ixor__",
        "__ior__",
    ];
    OPERATOR_DUNDERS.contains(&method)
}
/// Returns `true` if a method is a known dunder method.
///
/// Combines the operator dunders (see [`is_dunder_operator_method`]) with
/// the remaining recognized special methods — including attrs-style
/// `__attrs_*__` hooks and copy-protocol methods — plus the overridable
/// sunder names from `Enum`.
pub(super) fn is_known_dunder_method(method: &str) -> bool {
    is_dunder_operator_method(method)
        || matches!(
            method,
            "__abs__"
                | "__aenter__"
                | "__aexit__"
                | "__aiter__"
                | "__anext__"
                | "__attrs_init__"
                | "__attrs_post_init__"
                | "__attrs_pre_init__"
                | "__await__"
                | "__bool__"
                | "__buffer__"
                | "__bytes__"
                | "__call__"
                | "__ceil__"
                | "__class__"
                | "__class_getitem__"
                | "__complex__"
                | "__contains__"
                | "__copy__"
                | "__deepcopy__"
                | "__del__"
                | "__delattr__"
                | "__delete__"
                | "__delitem__"
                | "__dict__"
                | "__dir__"
                | "__doc__"
                | "__enter__"
                | "__exit__"
                | "__float__"
                | "__floor__"
                | "__format__"
                | "__fspath__"
                | "__get__"
                | "__getattr__"
                | "__getattribute__"
                | "__getitem__"
                | "__getnewargs__"
                | "__getnewargs_ex__"
                | "__getstate__"
                | "__hash__"
                | "__html__"
                | "__index__"
                | "__init__"
                | "__init_subclass__"
                | "__instancecheck__"
                | "__int__"
                | "__invert__"
                | "__iter__"
                | "__len__"
                | "__length_hint__"
                | "__missing__"
                | "__module__"
                | "__mro_entries__"
                | "__neg__"
                | "__new__"
                | "__next__"
                | "__pos__"
                | "__post_init__"
                | "__prepare__"
                | "__reduce__"
                | "__reduce_ex__"
                | "__release_buffer__"
                | "__replace__"
                | "__repr__"
                | "__reversed__"
                | "__round__"
                | "__set__"
                | "__set_name__"
                | "__setattr__"
                | "__setitem__"
                | "__setstate__"
                | "__sizeof__"
                | "__str__"
                | "__subclasscheck__"
                | "__subclasses__"
                | "__subclasshook__"
                | "__trunc__"
                | "__weakref__"
            // Overridable sunder names from the `Enum` class.
            // See: https://docs.python.org/3/library/enum.html#supported-sunder-names
            | "_add_alias_"
            | "_add_value_alias_"
            | "_name_"
            | "_value_"
            | "_missing_"
            | "_ignore_"
            | "_order_"
            | "_generate_next_value_"
        )
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/mod.rs | crates/ruff_linter/src/rules/pylint/mod.rs | //! Rules from [Pylint](https://pypi.org/project/pylint/).
pub(crate) mod helpers;
pub(crate) mod rules;
pub mod settings;
#[cfg(test)]
mod tests {
    use std::path::Path;
    use anyhow::Result;
    use regex::Regex;
    use ruff_python_ast::PythonVersion;
    use rustc_hash::FxHashSet;
    use test_case::test_case;
    use crate::registry::Rule;
    use crate::rules::{flake8_tidy_imports, pylint};
    use crate::settings::LinterSettings;
    use crate::settings::types::PreviewMode;
    use crate::test::test_path;
    use crate::{assert_diagnostics, assert_diagnostics_diff};
    // One case per (rule, fixture) pair; fixture paths are resolved relative
    // to the `pylint` fixture directory by `test_path`, and diagnostics are
    // compared against snapshots named `<noqa_code>_<fixture>`.
    #[test_case(Rule::SingledispatchMethod, Path::new("singledispatch_method.py"))]
    #[test_case(
        Rule::SingledispatchmethodFunction,
        Path::new("singledispatchmethod_function.py")
    )]
    #[test_case(Rule::AssertOnStringLiteral, Path::new("assert_on_string_literal.py"))]
    #[test_case(Rule::AwaitOutsideAsync, Path::new("await_outside_async.py"))]
    #[test_case(Rule::AwaitOutsideAsync, Path::new("await_outside_async.ipynb"))]
    #[test_case(Rule::BadOpenMode, Path::new("bad_open_mode.py"))]
    #[test_case(
        Rule::BadStringFormatCharacter,
        Path::new("bad_string_format_character.py")
    )]
    #[test_case(Rule::BadStrStripCall, Path::new("bad_str_strip_call.py"))]
    #[test_case(Rule::BadStringFormatType, Path::new("bad_string_format_type.py"))]
    #[test_case(Rule::BidirectionalUnicode, Path::new("bidirectional_unicode.py"))]
    #[test_case(Rule::BinaryOpException, Path::new("binary_op_exception.py"))]
    #[test_case(
        Rule::BooleanChainedComparison,
        Path::new("boolean_chained_comparison.py")
    )]
    #[test_case(Rule::CollapsibleElseIf, Path::new("collapsible_else_if.py"))]
    #[test_case(Rule::CompareToEmptyString, Path::new("compare_to_empty_string.py"))]
    #[test_case(Rule::ComparisonOfConstant, Path::new("comparison_of_constant.py"))]
    #[test_case(Rule::ComparisonWithItself, Path::new("comparison_with_itself.py"))]
    #[test_case(Rule::EqWithoutHash, Path::new("eq_without_hash.py"))]
    #[test_case(Rule::EmptyComment, Path::new("empty_comment.py"))]
    #[test_case(Rule::EmptyComment, Path::new("empty_comment_line_continuation.py"))]
    #[test_case(Rule::ManualFromImport, Path::new("import_aliasing.py"))]
    #[test_case(Rule::IfStmtMinMax, Path::new("if_stmt_min_max.py"))]
    #[test_case(Rule::SingleStringSlots, Path::new("single_string_slots.py"))]
    #[test_case(Rule::StopIterationReturn, Path::new("stop_iteration_return.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_0.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_1.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_2.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_3.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_4.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_5.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_6.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_7.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_8.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_9.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_10.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_11.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_12.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_13.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_14.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_15.py"))]
    #[test_case(Rule::SysExitAlias, Path::new("sys_exit_alias_16.py"))]
    #[test_case(Rule::ContinueInFinally, Path::new("continue_in_finally.py"))]
    #[test_case(Rule::GlobalStatement, Path::new("global_statement.py"))]
    #[test_case(
        Rule::GlobalVariableNotAssigned,
        Path::new("global_variable_not_assigned.py")
    )]
    #[test_case(Rule::ImportOutsideTopLevel, Path::new("import_outside_top_level.py"))]
    #[test_case(
        Rule::ImportPrivateName,
        Path::new("import_private_name/submodule/__main__.py")
    )]
    #[test_case(Rule::ImportSelf, Path::new("import_self/module.py"))]
    #[test_case(Rule::InvalidAllFormat, Path::new("invalid_all_format.py"))]
    #[test_case(Rule::InvalidAllObject, Path::new("invalid_all_object.py"))]
    #[test_case(Rule::InvalidBoolReturnType, Path::new("invalid_return_type_bool.py"))]
    #[test_case(
        Rule::InvalidBytesReturnType,
        Path::new("invalid_return_type_bytes.py")
    )]
    #[test_case(
        Rule::InvalidIndexReturnType,
        Path::new("invalid_return_type_index.py")
    )]
    #[test_case(Rule::InvalidHashReturnType, Path::new("invalid_return_type_hash.py"))]
    #[test_case(
        Rule::InvalidLengthReturnType,
        Path::new("invalid_return_type_length.py")
    )]
    #[test_case(Rule::InvalidStrReturnType, Path::new("invalid_return_type_str.py"))]
    #[test_case(Rule::DuplicateBases, Path::new("duplicate_bases.py"))]
    #[test_case(Rule::InvalidCharacterBackspace, Path::new("invalid_characters.py"))]
    #[test_case(Rule::InvalidCharacterEsc, Path::new("invalid_characters.py"))]
    #[test_case(Rule::InvalidCharacterNul, Path::new("invalid_characters.py"))]
    #[test_case(Rule::InvalidCharacterSub, Path::new("invalid_characters.py"))]
    #[test_case(
        Rule::InvalidCharacterZeroWidthSpace,
        Path::new("invalid_characters.py")
    )]
    #[test_case(
        Rule::InvalidCharacterBackspace,
        Path::new("invalid_characters_syntax_error.py")
    )]
    #[test_case(Rule::ShallowCopyEnviron, Path::new("shallow_copy_environ.py"))]
    #[test_case(Rule::InvalidEnvvarDefault, Path::new("invalid_envvar_default.py"))]
    #[test_case(Rule::InvalidEnvvarValue, Path::new("invalid_envvar_value.py"))]
    #[test_case(Rule::IterationOverSet, Path::new("iteration_over_set.py"))]
    #[test_case(Rule::LoggingTooFewArgs, Path::new("logging_too_few_args.py"))]
    #[test_case(Rule::LoggingTooManyArgs, Path::new("logging_too_many_args.py"))]
    #[test_case(Rule::MagicValueComparison, Path::new("magic_value_comparison.py"))]
    #[test_case(Rule::ModifiedIteratingSet, Path::new("modified_iterating_set.py"))]
    #[test_case(
        Rule::NamedExprWithoutContext,
        Path::new("named_expr_without_context.py")
    )]
    #[test_case(Rule::NonlocalAndGlobal, Path::new("nonlocal_and_global.py"))]
    #[test_case(
        Rule::RedefinedSlotsInSubclass,
        Path::new("redefined_slots_in_subclass.py")
    )]
    #[test_case(Rule::NonlocalWithoutBinding, Path::new("nonlocal_without_binding.py"))]
    #[test_case(Rule::NonSlotAssignment, Path::new("non_slot_assignment.py"))]
    #[test_case(Rule::PropertyWithParameters, Path::new("property_with_parameters.py"))]
    #[test_case(Rule::RedeclaredAssignedName, Path::new("redeclared_assigned_name.py"))]
    #[test_case(
        Rule::RedefinedArgumentFromLocal,
        Path::new("redefined_argument_from_local.py")
    )]
    #[test_case(Rule::RedefinedLoopName, Path::new("redefined_loop_name.py"))]
    #[test_case(Rule::ReturnInInit, Path::new("return_in_init.py"))]
    #[test_case(Rule::TooManyArguments, Path::new("too_many_arguments.py"))]
    #[test_case(
        Rule::TooManyPositionalArguments,
        Path::new("too_many_positional_arguments.py")
    )]
    #[test_case(Rule::TooManyBranches, Path::new("too_many_branches.py"))]
    #[test_case(
        Rule::TooManyReturnStatements,
        Path::new("too_many_return_statements.py")
    )]
    #[test_case(Rule::TooManyStatements, Path::new("too_many_statements.py"))]
    #[test_case(Rule::TypeBivariance, Path::new("type_bivariance.py"))]
    #[test_case(
        Rule::TypeNameIncorrectVariance,
        Path::new("type_name_incorrect_variance.py")
    )]
    #[test_case(Rule::TypeParamNameMismatch, Path::new("type_param_name_mismatch.py"))]
    #[test_case(
        Rule::UnexpectedSpecialMethodSignature,
        Path::new("unexpected_special_method_signature.py")
    )]
    #[test_case(
        Rule::UnnecessaryDirectLambdaCall,
        Path::new("unnecessary_direct_lambda_call.py")
    )]
    #[test_case(
        Rule::LoadBeforeGlobalDeclaration,
        Path::new("load_before_global_declaration.py")
    )]
    #[test_case(Rule::UselessElseOnLoop, Path::new("useless_else_on_loop.py"))]
    #[test_case(Rule::UselessImportAlias, Path::new("import_aliasing.py"))]
    #[test_case(Rule::UselessImportAlias, Path::new("import_aliasing_2/__init__.py"))]
    #[test_case(Rule::UselessReturn, Path::new("useless_return.py"))]
    #[test_case(Rule::UselessWithLock, Path::new("useless_with_lock.py"))]
    #[test_case(Rule::UnreachableCode, Path::new("unreachable.py"))]
    #[test_case(
        Rule::YieldFromInAsyncFunction,
        Path::new("yield_from_in_async_function.py")
    )]
    #[test_case(Rule::YieldInInit, Path::new("yield_in_init.py"))]
    #[test_case(Rule::NestedMinMax, Path::new("nested_min_max.py"))]
    #[test_case(
        Rule::RepeatedEqualityComparison,
        Path::new("repeated_equality_comparison.py")
    )]
    #[test_case(Rule::SelfAssigningVariable, Path::new("self_assigning_variable.py"))]
    #[test_case(
        Rule::SubprocessPopenPreexecFn,
        Path::new("subprocess_popen_preexec_fn.py")
    )]
    #[test_case(
        Rule::SubprocessRunWithoutCheck,
        Path::new("subprocess_run_without_check.py")
    )]
    #[test_case(Rule::UnspecifiedEncoding, Path::new("unspecified_encoding.py"))]
    #[test_case(Rule::BadDunderMethodName, Path::new("bad_dunder_method_name.py"))]
    #[test_case(Rule::NoSelfUse, Path::new("no_self_use.py"))]
    #[test_case(Rule::MisplacedBareRaise, Path::new("misplaced_bare_raise.py"))]
    #[test_case(Rule::LiteralMembership, Path::new("literal_membership.py"))]
    #[test_case(Rule::GlobalAtModuleLevel, Path::new("global_at_module_level.py"))]
    #[test_case(Rule::UnnecessaryLambda, Path::new("unnecessary_lambda.py"))]
    #[test_case(Rule::NonAsciiImportName, Path::new("non_ascii_module_import.py"))]
    #[test_case(Rule::NonAsciiName, Path::new("non_ascii_name.py"))]
    #[test_case(
        Rule::RepeatedKeywordArgument,
        Path::new("repeated_keyword_argument.py")
    )]
    #[test_case(
        Rule::UnnecessaryListIndexLookup,
        Path::new("unnecessary_list_index_lookup.py")
    )]
    #[test_case(Rule::NoClassmethodDecorator, Path::new("no_method_decorator.py"))]
    #[test_case(Rule::UnnecessaryDunderCall, Path::new("unnecessary_dunder_call.py"))]
    #[test_case(Rule::NoStaticmethodDecorator, Path::new("no_method_decorator.py"))]
    #[test_case(Rule::PotentialIndexError, Path::new("potential_index_error.py"))]
    #[test_case(Rule::SuperWithoutBrackets, Path::new("super_without_brackets.py"))]
    #[test_case(Rule::SelfOrClsAssignment, Path::new("self_or_cls_assignment.py"))]
    #[test_case(Rule::TooManyNestedBlocks, Path::new("too_many_nested_blocks.py"))]
    #[test_case(Rule::DictIndexMissingItems, Path::new("dict_index_missing_items.py"))]
    #[test_case(Rule::DictIterMissingItems, Path::new("dict_iter_missing_items.py"))]
    #[test_case(
        Rule::UnnecessaryDictIndexLookup,
        Path::new("unnecessary_dict_index_lookup.py")
    )]
    #[test_case(Rule::NonAugmentedAssignment, Path::new("non_augmented_assignment.py"))]
    #[test_case(
        Rule::UselessExceptionStatement,
        Path::new("useless_exception_statement.py")
    )]
    #[test_case(Rule::NanComparison, Path::new("nan_comparison.py"))]
    #[test_case(
        Rule::BadStaticmethodArgument,
        Path::new("bad_staticmethod_argument.py")
    )]
    #[test_case(Rule::LenTest, Path::new("len_as_condition.py"))]
    #[test_case(Rule::MissingMaxsplitArg, Path::new("missing_maxsplit_arg.py"))]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("pylint").join(path).as_path(),
            &LinterSettings {
                pylint: pylint::settings::Settings {
                    // Exercised by the `bad_dunder_method_name.py` fixture.
                    allow_dunder_method_names: FxHashSet::from_iter([
                        "__special_custom_magic__".to_string()
                    ]),
                    ..pylint::settings::Settings::default()
                },
                ..LinterSettings::for_rule(rule_code)
            },
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
    // Compares diagnostics with preview mode disabled vs. enabled, and
    // snapshots the diff.
    #[test_case(
        Rule::UselessExceptionStatement,
        Path::new("useless_exception_statement.py")
    )]
    fn preview_rules(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!(
            "preview__{}_{}",
            rule_code.noqa_code(),
            path.to_string_lossy()
        );
        assert_diagnostics_diff!(
            snapshot,
            Path::new("pylint").join(path).as_path(),
            &LinterSettings {
                preview: PreviewMode::Disabled,
                ..LinterSettings::for_rule(rule_code)
            },
            &LinterSettings {
                preview: PreviewMode::Enabled,
                ..LinterSettings::for_rule(rule_code)
            }
        );
        Ok(())
    }
    // Runs with an explicit (non-default) target version.
    #[test]
    fn continue_in_finally() -> Result<()> {
        let diagnostics = test_path(
            Path::new("pylint/continue_in_finally.py"),
            &LinterSettings::for_rule(Rule::ContinueInFinally)
                .with_target_version(PythonVersion::PY37),
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }
    // The remaining tests exercise non-default `pylint` settings values.
    #[test]
    fn allow_magic_value_types() -> Result<()> {
        let diagnostics = test_path(
            Path::new("pylint/magic_value_comparison.py"),
            &LinterSettings {
                pylint: pylint::settings::Settings {
                    allow_magic_value_types: vec![pylint::settings::ConstantType::Int],
                    ..pylint::settings::Settings::default()
                },
                ..LinterSettings::for_rule(Rule::MagicValueComparison)
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }
    #[test]
    fn max_args() -> Result<()> {
        let diagnostics = test_path(
            Path::new("pylint/too_many_arguments_params.py"),
            &LinterSettings {
                pylint: pylint::settings::Settings {
                    max_args: 4,
                    ..pylint::settings::Settings::default()
                },
                ..LinterSettings::for_rule(Rule::TooManyArguments)
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }
    #[test]
    fn max_args_with_dummy_variables() -> Result<()> {
        let diagnostics = test_path(
            Path::new("pylint/too_many_arguments_params.py"),
            &LinterSettings {
                dummy_variable_rgx: Regex::new(r"skip_.*").unwrap(),
                ..LinterSettings::for_rule(Rule::TooManyArguments)
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }
    #[test]
    fn max_positional_args() -> Result<()> {
        let diagnostics = test_path(
            Path::new("pylint/too_many_positional_params.py"),
            &LinterSettings {
                pylint: pylint::settings::Settings {
                    max_positional_args: 4,
                    ..pylint::settings::Settings::default()
                },
                ..LinterSettings::for_rule(Rule::TooManyPositionalArguments)
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }
    #[test]
    fn max_branches() -> Result<()> {
        let diagnostics = test_path(
            Path::new("pylint/too_many_branches_params.py"),
            &LinterSettings {
                pylint: pylint::settings::Settings {
                    max_branches: 1,
                    ..pylint::settings::Settings::default()
                },
                ..LinterSettings::for_rule(Rule::TooManyBranches)
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }
    #[test]
    fn max_boolean_expressions() -> Result<()> {
        let diagnostics = test_path(
            Path::new("pylint/too_many_boolean_expressions.py"),
            &LinterSettings {
                pylint: pylint::settings::Settings {
                    max_bool_expr: 5,
                    ..pylint::settings::Settings::default()
                },
                ..LinterSettings::for_rule(Rule::TooManyBooleanExpressions)
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }
    #[test]
    fn max_statements() -> Result<()> {
        let diagnostics = test_path(
            Path::new("pylint/too_many_statements_params.py"),
            &LinterSettings {
                pylint: pylint::settings::Settings {
                    max_statements: 1,
                    ..pylint::settings::Settings::default()
                },
                ..LinterSettings::for_rule(Rule::TooManyStatements)
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }
    #[test]
    fn max_return_statements() -> Result<()> {
        let diagnostics = test_path(
            Path::new("pylint/too_many_return_statements_params.py"),
            &LinterSettings {
                pylint: pylint::settings::Settings {
                    max_returns: 1,
                    ..pylint::settings::Settings::default()
                },
                ..LinterSettings::for_rule(Rule::TooManyReturnStatements)
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }
    #[test]
    fn too_many_public_methods() -> Result<()> {
        let diagnostics = test_path(
            Path::new("pylint/too_many_public_methods.py"),
            &LinterSettings {
                pylint: pylint::settings::Settings {
                    max_public_methods: 7,
                    ..pylint::settings::Settings::default()
                },
                ..LinterSettings::for_rules(vec![Rule::TooManyPublicMethods])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }
    #[test]
    fn too_many_locals() -> Result<()> {
        let diagnostics = test_path(
            Path::new("pylint/too_many_locals.py"),
            &LinterSettings {
                pylint: pylint::settings::Settings {
                    max_locals: 15,
                    ..pylint::settings::Settings::default()
                },
                ..LinterSettings::for_rules(vec![Rule::TooManyLocals])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }
    // Verifies the interaction with flake8-tidy-imports' banned module list.
    #[test]
    fn import_outside_top_level_with_banned() -> Result<()> {
        let diagnostics = test_path(
            Path::new("pylint/import_outside_top_level_with_banned.py"),
            &LinterSettings {
                preview: PreviewMode::Enabled,
                flake8_tidy_imports: flake8_tidy_imports::settings::Settings {
                    banned_module_level_imports: vec![
                        "foo_banned".to_string(),
                        "pkg_banned".to_string(),
                        "pkg.bar_banned".to_string(),
                    ],
                    ..Default::default()
                },
                ..LinterSettings::for_rules(vec![
                    Rule::BannedModuleLevelImports,
                    Rule::ImportOutsideTopLevel,
                ])
            },
        )?;
        assert_diagnostics!(diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/non_augmented_assignment.rs | crates/ruff_linter/src/rules/pylint/rules/non_augmented_assignment.rs | use ast::Expr;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_ast::comparable::ComparableExpr;
use ruff_python_ast::token::parenthesized_range;
use ruff_python_ast::{ExprBinOp, ExprRef, Operator};
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for assignments that can be replaced with augmented assignment
/// statements.
///
/// ## Why is this bad?
/// If the right-hand side of an assignment statement consists of a binary
/// operation in which one operand is the same as the assignment target,
/// it can be rewritten as an augmented assignment. For example, `x = x + 1`
/// can be rewritten as `x += 1`.
///
/// When performing such an operation, an augmented assignment is more concise
/// and idiomatic.
///
/// ## Known problems
/// In some cases, this rule will not detect assignments in which the target
/// is on the right-hand side of a binary operation (e.g., `x = y + x`, as
/// opposed to `x = x + y`), as such operations are not commutative for
/// certain data types, like strings.
///
/// For example, `x = "prefix-" + x` is not equivalent to `x += "prefix-"`,
/// while `x = 1 + x` is equivalent to `x += 1`.
///
/// If the type of the left-hand side cannot be trivially inferred, the rule
/// will ignore the assignment.
///
/// ## Example
/// ```python
/// x = x + 1
/// ```
///
/// Use instead:
/// ```python
/// x += 1
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe, as augmented assignments have
/// different semantics when the target is a mutable data type, like a list or
/// dictionary.
///
/// For example, consider the following:
///
/// ```python
/// foo = [1]
/// bar = foo
/// foo = foo + [2]
/// assert (foo, bar) == ([1, 2], [1])
/// ```
///
/// If the assignment is replaced with an augmented assignment, the update
/// operation will apply to both `foo` and `bar`, as they refer to the same
/// object:
///
/// ```python
/// foo = [1]
/// bar = foo
/// foo += [2]
/// assert (foo, bar) == ([1, 2], [1, 2])
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.3.7")]
pub(crate) struct NonAugmentedAssignment {
    /// The augmented operator (e.g., `+=`) with which the assignment can be
    /// rewritten.
    operator: AugmentedOperator,
}

impl AlwaysFixableViolation for NonAugmentedAssignment {
    #[derive_message_formats]
    fn message(&self) -> String {
        let NonAugmentedAssignment { operator } = self;
        format!("Use `{operator}` to perform an augmented assignment directly")
    }

    fn fix_title(&self) -> String {
        "Replace with augmented assignment".to_string()
    }
}
/// PLR6104
pub(crate) fn non_augmented_assignment(checker: &Checker, assign: &ast::StmtAssign) {
    // Ignore multiple assignment targets.
    let [target] = assign.targets.as_slice() else {
        return;
    };

    // The right-hand side must be a binary operation, e.g., `x = x + 1`.
    let Expr::BinOp(value) = &*assign.value else {
        return;
    };

    let operator = AugmentedOperator::from(value.op);

    // Determine which operand becomes the right-hand side of the augmented
    // assignment, if any.
    let operand = if ComparableExpr::from(target) == ComparableExpr::from(&value.left) {
        // Match, e.g., `x = x + 1`.
        Some(&value.right)
    } else if operator.is_commutative()
        && (value.left.is_number_literal_expr() || value.left.is_boolean_literal_expr())
        && ComparableExpr::from(target) == ComparableExpr::from(&value.right)
    {
        // If the operator is commutative, match, e.g., `x = 1 + x`, but limit
        // such matches to primitive types.
        Some(&value.left)
    } else {
        None
    };

    let Some(operand) = operand else {
        return;
    };

    let mut diagnostic =
        checker.report_diagnostic(NonAugmentedAssignment { operator }, assign.range());
    diagnostic.set_fix(Fix::unsafe_edit(augmented_assignment(
        checker,
        target,
        operator,
        operand,
        value,
        assign.range,
    )));
}
/// Generate a fix to convert an assignment statement to an augmented assignment.
///
/// For example, given `x = x + 1`, the fix would be `x += 1`.
fn augmented_assignment(
    checker: &Checker,
    target: &Expr,
    operator: AugmentedOperator,
    right_operand: &Expr,
    original_expr: &ExprBinOp,
    range: TextRange,
) -> Edit {
    // Preserve any parentheses that wrap the right-hand operand.
    let operand_range = parenthesized_range(
        ExprRef::from(right_operand),
        original_expr.into(),
        checker.tokens(),
    )
    .unwrap_or_else(|| right_operand.range());

    let locator = checker.locator();
    let new_content = format!(
        "{} {operator} {}",
        locator.slice(target),
        locator.slice(operand_range)
    );
    Edit::range_replacement(new_content, range)
}
/// An augmented-assignment operator (e.g., `+=`), corresponding to a binary
/// [`Operator`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum AugmentedOperator {
    Add,
    BitAnd,
    BitOr,
    BitXor,
    Div,
    FloorDiv,
    LShift,
    MatMult,
    Mod,
    Mult,
    Pow,
    RShift,
    Sub,
}

impl AugmentedOperator {
    /// Returns `true` if the operator is commutative.
    ///
    /// Note that `Add` and `Mult` are only commutative for some operand types
    /// (e.g., numbers but not strings); the caller restricts the commuted
    /// match to number and boolean literals.
    fn is_commutative(self) -> bool {
        matches!(
            self,
            Self::Add | Self::BitAnd | Self::BitOr | Self::BitXor | Self::Mult
        )
    }
}
/// Maps each binary [`Operator`] to its augmented-assignment counterpart.
impl From<Operator> for AugmentedOperator {
    fn from(value: Operator) -> Self {
        match value {
            Operator::Add => Self::Add,
            Operator::BitAnd => Self::BitAnd,
            Operator::BitOr => Self::BitOr,
            Operator::BitXor => Self::BitXor,
            Operator::Div => Self::Div,
            Operator::FloorDiv => Self::FloorDiv,
            Operator::LShift => Self::LShift,
            Operator::MatMult => Self::MatMult,
            Operator::Mod => Self::Mod,
            Operator::Mult => Self::Mult,
            Operator::Pow => Self::Pow,
            Operator::RShift => Self::RShift,
            Operator::Sub => Self::Sub,
        }
    }
}
impl std::fmt::Display for AugmentedOperator {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Render the operator as its Python source token.
        let token = match self {
            Self::Add => "+=",
            Self::BitAnd => "&=",
            Self::BitOr => "|=",
            Self::BitXor => "^=",
            Self::Div => "/=",
            Self::FloorDiv => "//=",
            Self::LShift => "<<=",
            Self::MatMult => "@=",
            Self::Mod => "%=",
            Self::Mult => "*=",
            Self::Pow => "**=",
            Self::RShift => ">>=",
            Self::Sub => "-=",
        };
        f.write_str(token)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/sys_exit_alias.rs | crates/ruff_linter/src/rules/pylint/rules/sys_exit_alias.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::ExprCall;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::importer::ImportRequest;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for uses of the `exit()` and `quit()`.
///
/// ## Why is this bad?
/// `exit` and `quit` come from the `site` module, which is typically imported
/// automatically during startup. However, it is not _guaranteed_ to be
/// imported, and so using these functions may result in a `NameError` at
/// runtime. Generally, these constants are intended to be used in an interactive
/// interpreter, and not in programs.
///
/// Prefer `sys.exit()`, as the `sys` module is guaranteed to exist in all
/// contexts.
///
/// ## Fix safety
/// This fix is always unsafe. When replacing `exit` or `quit` with `sys.exit`,
/// the behavior can change in the following ways:
///
/// 1. If the code runs in an environment where the `site` module is not imported
/// (e.g., with `python -S`), the original code would raise a `NameError`, while
/// the fixed code would execute normally.
///
/// 2. `site.exit` and `sys.exit` handle tuple arguments differently. `site.exit`
/// treats tuples as regular objects and always returns exit code 1, while `sys.exit`
/// interprets tuple contents to determine the exit code: an empty tuple () results in
/// exit code 0, and a single-element tuple like (2,) uses that element's value (2) as
/// the exit code.
///
/// ## Example
/// ```python
/// if __name__ == "__main__":
/// exit()
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// if __name__ == "__main__":
/// sys.exit()
/// ```
///
/// ## References
/// - [Python documentation: Constants added by the `site` module](https://docs.python.org/3/library/constants.html#constants-added-by-the-site-module)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.156")]
pub(crate) struct SysExitAlias {
    /// The offending builtin: `"exit"` or `"quit"`.
    name: String,
}

impl Violation for SysExitAlias {
    // The fix is not always offered (e.g., for calls with multiple arguments
    // or `**kwargs`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let SysExitAlias { name } = self;
        format!("Use `sys.exit()` instead of `{name}`")
    }

    fn fix_title(&self) -> Option<String> {
        let SysExitAlias { name } = self;
        Some(format!("Replace `{name}` with `sys.exit()`"))
    }
}
/// PLR1722
pub(crate) fn sys_exit_alias(checker: &Checker, call: &ExprCall) {
    // Only flag calls to the `site` builtins `exit` and `quit`.
    let Some(name) = checker.semantic().resolve_builtin_symbol(&call.func) else {
        return;
    };
    if name != "exit" && name != "quit" {
        return;
    }

    let mut diagnostic = checker.report_diagnostic(
        SysExitAlias {
            name: name.to_string(),
        },
        call.func.range(),
    );

    // `sys.exit` accepts at most one argument, and `**kwargs` cannot be
    // translated, so only offer a fix for simple calls.
    if call.arguments.len() > 1
        || call
            .arguments
            .keywords
            .iter()
            .any(|keyword| keyword.arg.is_none())
    {
        return;
    }

    diagnostic.try_set_fix(|| {
        let (import_edit, binding) = checker.importer().get_or_import_symbol(
            &ImportRequest::import("sys", "exit"),
            call.func.start(),
            checker.semantic(),
        )?;
        let mut edits = vec![Edit::range_replacement(binding, call.func.range())];
        // `sys.exit` takes its argument positionally, so drop a `code=` keyword
        // and keep only its value.
        if let Some(keyword) = call.arguments.find_keyword("code") {
            edits.push(Edit::range_replacement(
                checker.source()[keyword.value.range()].to_string(),
                keyword.range,
            ));
        }
        Ok(Fix::unsafe_edits(import_edit, edits))
    });
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/invalid_all_object.rs | crates/ruff_linter/src/rules/pylint/rules/invalid_all_object.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::Binding;
use ruff_text_size::Ranged;
use crate::{Violation, checkers::ast::Checker};
/// ## What it does
/// Checks for the inclusion of invalid objects in `__all__`.
///
/// ## Why is this bad?
/// In Python, `__all__` should contain a sequence of strings that represent
/// the names of all "public" symbols exported by a module.
///
/// Assigning anything other than a `tuple` or `list` of strings to `__all__`
/// is invalid.
///
/// ## Example
/// ```python
/// __all__ = [Foo, 1, None]
/// ```
///
/// Use instead:
/// ```python
/// __all__ = ["Foo", "Bar", "Baz"]
/// ```
///
/// ## References
/// - [Python documentation: The `import` statement](https://docs.python.org/3/reference/simple_stmts.html#the-import-statement)
// PLE0604: reported on the binding's range; no fix is offered.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.237")]
pub(crate) struct InvalidAllObject;

impl Violation for InvalidAllObject {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Invalid object in `__all__`, must contain only strings".to_string()
    }
}
/// PLE0604
pub(crate) fn invalid_all_object(checker: &Checker, binding: &Binding) {
    // The semantic model records whether a binding was an invalid member of
    // `__all__`; simply report it.
    if !binding.is_invalid_all_object() {
        return;
    }
    checker.report_diagnostic(InvalidAllObject, binding.range());
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/comparison_with_itself.rs | crates/ruff_linter/src/rules/pylint/rules/comparison_with_itself.rs | use itertools::Itertools;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{CmpOp, Expr};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::fix::snippet::SourceCodeSnippet;
/// ## What it does
/// Checks for operations that compare a name to itself.
///
/// ## Why is this bad?
/// Comparing a name to itself always results in the same value, and is likely
/// a mistake.
///
/// ## Example
/// ```python
/// foo == foo
/// ```
///
/// In some cases, self-comparisons are used to determine whether a float is
/// NaN. Instead, prefer `math.isnan`:
/// ```python
/// import math
///
/// math.isnan(foo)
/// ```
///
/// ## References
/// - [Python documentation: Comparisons](https://docs.python.org/3/reference/expressions.html#comparisons)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.273")]
pub(crate) struct ComparisonWithItself {
    /// The source text of the self-comparison (e.g., `foo == foo`).
    actual: SourceCodeSnippet,
}

impl Violation for ComparisonWithItself {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Fall back to a generic message when the full snippet is unavailable
        // for display.
        if let Some(actual) = self.actual.full_display() {
            format!("Name compared with itself, consider replacing `{actual}`")
        } else {
            "Name compared with itself".to_string()
        }
    }
}
/// PLR0124
pub(crate) fn comparison_with_itself(
    checker: &Checker,
    left: &Expr,
    ops: &[CmpOp],
    comparators: &[Expr],
) {
    // Pair each adjacent couple of operands with its comparison operator.
    for ((lhs, rhs), op) in std::iter::once(left)
        .chain(comparators)
        .tuple_windows()
        .zip(ops)
    {
        match (lhs, rhs) {
            // Ex) `foo == foo`
            (Expr::Name(lhs_name), Expr::Name(rhs_name)) if lhs_name.id == rhs_name.id => {
                let actual = format!(
                    "{} {} {}",
                    checker.locator().slice(lhs),
                    op,
                    checker.locator().slice(rhs)
                );
                checker.report_diagnostic(
                    ComparisonWithItself {
                        actual: SourceCodeSnippet::new(actual),
                    },
                    lhs_name.range(),
                );
            }
            // Ex) `id(foo) == id(foo)`
            (Expr::Call(lhs_call), Expr::Call(rhs_call)) => {
                // Neither call may use keyword arguments.
                if !lhs_call.arguments.keywords.is_empty()
                    || !rhs_call.arguments.keywords.is_empty()
                {
                    continue;
                }

                // Each call must take a single name as its argument...
                let [Expr::Name(lhs_arg)] = &*lhs_call.arguments.args else {
                    continue;
                };
                let [Expr::Name(rhs_arg)] = &*rhs_call.arguments.args else {
                    continue;
                };
                // ...and the names must match.
                if lhs_arg.id != rhs_arg.id {
                    continue;
                }

                // Both calls must resolve to the same builtin function.
                let semantic = checker.semantic();
                let Some(lhs_func) = semantic.resolve_builtin_symbol(&lhs_call.func) else {
                    continue;
                };
                let Some(rhs_func) = semantic.resolve_builtin_symbol(&rhs_call.func) else {
                    continue;
                };
                if lhs_func != rhs_func {
                    continue;
                }

                // The call must be to a pure function, like `id`.
                if matches!(
                    lhs_func,
                    "id" | "len" | "type" | "int" | "bool" | "str" | "repr" | "bytes"
                ) {
                    let actual = format!(
                        "{} {} {}",
                        checker.locator().slice(lhs),
                        op,
                        checker.locator().slice(rhs)
                    );
                    checker.report_diagnostic(
                        ComparisonWithItself {
                            actual: SourceCodeSnippet::new(actual),
                        },
                        lhs_call.range(),
                    );
                }
            }
            _ => {}
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/nested_min_max.rs | crates/ruff_linter/src/rules/pylint/rules/nested_min_max.rs | use ruff_python_ast::{self as ast, Arguments, Expr, Keyword};
use ruff_text_size::{Ranged, TextRange};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::SemanticModel;
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
/// Whether a call is to the `min` or `max` builtin.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum MinMax {
    Min,
    Max,
}
/// ## What it does
/// Checks for nested `min` and `max` calls.
///
/// ## Why is this bad?
/// Nested `min` and `max` calls can be flattened into a single call to improve
/// readability.
///
/// ## Example
///
/// ```python
/// minimum = min(1, 2, min(3, 4, 5))
/// maximum = max(1, 2, max(3, 4, 5))
/// diff = maximum - minimum
/// ```
///
/// Use instead:
///
/// ```python
/// minimum = min(1, 2, 3, 4, 5)
/// maximum = max(1, 2, 3, 4, 5)
/// diff = maximum - minimum
/// ```
///
/// ## Known issues
///
/// The resulting code may be slower and use more memory, especially for nested iterables. For
/// example, this code:
///
/// ```python
/// iterable = range(3)
/// min(1, min(iterable))
/// ```
///
/// will be fixed to:
///
/// ```python
/// iterable = range(3)
/// min(1, *iterable)
/// ```
///
/// At least on current versions of CPython, this allocates a collection for the whole iterable
/// before calling `min` and could cause performance regressions, at least for large iterables.
///
/// ## Fix safety
///
/// This fix is always unsafe and may change the program's behavior for types without full
/// equivalence relations, such as float comparisons involving `NaN`.
///
/// ```python
/// print(min(2.0, min(float("nan"), 1.0))) # before fix: 2.0
/// print(min(2.0, float("nan"), 1.0)) # after fix: 1.0
///
/// print(max(1.0, max(float("nan"), 2.0))) # before fix: 1.0
/// print(max(1.0, float("nan"), 2.0)) # after fix: 2.0
/// ```
///
/// The fix will also remove any comments within the outer call.
///
/// ## References
/// - [Python documentation: `min`](https://docs.python.org/3/library/functions.html#min)
/// - [Python documentation: `max`](https://docs.python.org/3/library/functions.html#max)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.266")]
pub(crate) struct NestedMinMax {
    /// Whether the nested calls are to `min` or `max`.
    func: MinMax,
}

impl Violation for NestedMinMax {
    // The fix is only offered in some cases (see `nested_min_max`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let NestedMinMax { func } = self;
        format!("Nested `{func}` calls can be flattened")
    }

    fn fix_title(&self) -> Option<String> {
        let NestedMinMax { func } = self;
        Some(format!("Flatten nested `{func}` calls"))
    }
}
impl MinMax {
    /// Converts a function call [`Expr`] into a [`MinMax`] if it is a call to `min` or `max`.
    fn try_from_call(
        func: &Expr,
        keywords: &[Keyword],
        semantic: &SemanticModel,
    ) -> Option<MinMax> {
        // Calls with keyword arguments (e.g., `key=...`) are never matched.
        if keywords.is_empty() {
            match semantic.resolve_builtin_symbol(func) {
                Some("min") => Some(Self::Min),
                Some("max") => Some(Self::Max),
                _ => None,
            }
        } else {
            None
        }
    }
}
impl std::fmt::Display for MinMax {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Render as the builtin's name.
        f.write_str(match self {
            MinMax::Min => "min",
            MinMax::Max => "max",
        })
    }
}
/// Collect a new set of arguments by either accepting existing args as-is or
/// collecting child arguments, if it's a call to the same function.
fn collect_nested_args(min_max: MinMax, args: &[Expr], semantic: &SemanticModel) -> Vec<Expr> {
    // Recursive worker: appends flattened arguments to `new_args`.
    fn inner(min_max: MinMax, args: &[Expr], semantic: &SemanticModel, new_args: &mut Vec<Expr>) {
        for arg in args {
            if let Expr::Call(ast::ExprCall {
                func,
                arguments:
                    Arguments {
                        args,
                        keywords,
                        range: _,
                        node_index: _,
                    },
                range: _,
                node_index: _,
            }) = arg
            {
                if MinMax::try_from_call(func, keywords, semantic) == Some(min_max) {
                    // A nested call with exactly one (non-starred) argument treats
                    // that argument as an iterable (e.g., `min(1, min(iterable))`),
                    // so splat it into the outer call (`min(1, *iterable)`).
                    if let [arg] = &**args {
                        if arg.as_starred_expr().is_none() {
                            let new_arg = Expr::Starred(ast::ExprStarred {
                                value: Box::new(arg.clone()),
                                ctx: ast::ExprContext::Load,
                                range: TextRange::default(),
                                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                            });
                            new_args.push(new_arg);
                            continue;
                        }
                    }
                    // Otherwise, recursively flatten the nested call's arguments.
                    inner(min_max, args, semantic, new_args);
                    continue;
                }
            }
            // Not a nested `min`/`max` call: keep the argument as-is.
            new_args.push(arg.clone());
        }
    }

    let mut new_args = Vec::with_capacity(args.len());
    inner(min_max, args, semantic, &mut new_args);
    new_args
}
/// PLW3301
pub(crate) fn nested_min_max(
    checker: &Checker,
    expr: &Expr,
    func: &Expr,
    args: &[Expr],
    keywords: &[Keyword],
) {
    let Some(min_max) = MinMax::try_from_call(func, keywords, checker.semantic()) else {
        return;
    };
    // It's only safe to flatten nested calls if the outer call has more than one argument.
    // When the outer call has a single argument, flattening would change the semantics by
    // changing the shape of the call from treating the inner result as an iterable (or a scalar)
    // to passing multiple arguments directly, which can lead to behavioral changes.
    if args.len() < 2 {
        return;
    }
    // Flag the outer call if any of its arguments is itself a call to the same builtin.
    if args.iter().any(|arg| {
        let Expr::Call(ast::ExprCall {
            func,
            arguments: Arguments { keywords, .. },
            ..
        }) = arg
        else {
            return false;
        };
        MinMax::try_from_call(func.as_ref(), keywords.as_ref(), checker.semantic()) == Some(min_max)
    }) {
        let mut diagnostic =
            checker.report_diagnostic(NestedMinMax { func: min_max }, expr.range());
        // Build a replacement call with the nested arguments flattened into the outer call.
        let flattened_expr = Expr::Call(ast::ExprCall {
            func: Box::new(func.clone()),
            arguments: Arguments {
                args: collect_nested_args(min_max, args, checker.semantic()).into_boxed_slice(),
                keywords: Box::from(keywords),
                range: TextRange::default(),
                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            },
            range: TextRange::default(),
            node_index: ruff_python_ast::AtomicNodeIndex::NONE,
        });
        // The fix is unsafe: flattening can change results for types without a full
        // equivalence relation (e.g., NaN) and drops comments inside the outer call
        // (see the rule documentation above).
        diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
            checker.generator().expr(&flattened_expr),
            expr.range(),
        )));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/empty_comment.rs | crates/ruff_linter/src/rules/pylint/rules/empty_comment.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_index::Indexer;
use ruff_python_trivia::{CommentRanges, is_python_whitespace};
use ruff_source_file::LineRanges;
use ruff_text_size::{TextRange, TextSize};
use crate::Locator;
use crate::checkers::ast::LintContext;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for a # symbol appearing on a line not followed by an actual comment.
///
/// ## Why is this bad?
/// Empty comments don't provide any clarity to the code, and just add clutter.
/// Either add a comment or delete the empty comment.
///
/// ## Example
/// ```python
/// class Foo: #
/// pass
/// ```
///
/// Use instead:
/// ```python
/// class Foo:
/// pass
/// ```
///
/// ## References
/// - [Pylint documentation](https://pylint.pycqa.org/en/latest/user_guide/messages/refactor/empty-comment.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.5.0")]
pub(crate) struct EmptyComment;

impl Violation for EmptyComment {
    // Deleting an empty comment is always possible.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Always;

    #[derive_message_formats]
    fn message(&self) -> String {
        "Line with empty comment".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        Some("Delete the empty comment".to_string())
    }
}
/// PLR2044
pub(crate) fn empty_comments(
    context: &LintContext,
    comment_ranges: &CommentRanges,
    locator: &Locator,
    indexer: &Indexer,
) {
    let block_comments = comment_ranges.block_comments(locator.contents());

    for range in comment_ranges {
        // Ignore comments that are part of multi-line "comment blocks".
        if block_comments.binary_search(&range.start()).is_err() {
            // If the line contains an empty comment, add a diagnostic.
            empty_comment(context, range, locator, indexer);
        }
    }
}
/// Report a diagnostic (with a deletion fix) if the comment at the given
/// [`TextRange`] is empty.
fn empty_comment(context: &LintContext, range: TextRange, locator: &Locator, indexer: &Indexer) {
    // Check: is the comment empty (i.e., nothing but whitespace after the `#`)?
    if !locator
        .slice(range)
        .chars()
        .skip(1)
        .all(is_python_whitespace)
    {
        return;
    }
    // Find the location of the `#`.
    let first_hash_col = range.start();
    // Find the start of the line.
    let line = locator.line_range(first_hash_col);
    // Find the last character in the line that precedes the comment, if any.
    let deletion_start_col = locator
        .slice(TextRange::new(line.start(), first_hash_col))
        .char_indices()
        .rev()
        .find_map(|(index, char)| {
            if is_python_whitespace(char) || char == '#' {
                None
            } else {
                // SAFETY: <= first_hash_col
                Some(TextSize::try_from(index + char.len_utf8()).unwrap())
            }
        });
    // If there is no character preceding the comment, this comment must be on its own physical line.
    // If there is a line preceding the empty comment's line, check if it ends in a line continuation character. (`\`)
    let is_on_same_logical_line = indexer
        .preceded_by_continuations(first_hash_col, locator.contents())
        .is_some();
    if let Some(mut diagnostic) = context
        .report_diagnostic_if_enabled(EmptyComment, TextRange::new(first_hash_col, line.end()))
    {
        diagnostic.set_fix(Fix::safe_edit(
            if let Some(deletion_start_col) = deletion_start_col {
                // Trailing comment: delete from just after the preceding code
                // through the end of the line.
                Edit::deletion(line.start() + deletion_start_col, line.end())
            } else if is_on_same_logical_line {
                // Own-line comment that continues a logical line: delete only
                // the comment, keeping the physical line.
                Edit::deletion(first_hash_col, line.end())
            } else {
                // Own-line comment: delete the entire physical line.
                Edit::range_deletion(locator.full_line_range(first_hash_col))
            },
        ));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/singledispatch_method.rs | crates/ruff_linter/src/rules/pylint/rules/singledispatch_method.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_semantic::Scope;
use ruff_python_semantic::analyze::function_type;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::importer::ImportRequest;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for methods decorated with `@singledispatch`.
///
/// ## Why is this bad?
/// The `@singledispatch` decorator is intended for use with functions, not methods.
///
/// Instead, use the `@singledispatchmethod` decorator, or migrate the method to a
/// standalone function.
///
/// ## Example
///
/// ```python
/// from functools import singledispatch
///
///
/// class Class:
/// @singledispatch
/// def method(self, arg): ...
/// ```
///
/// Use instead:
///
/// ```python
/// from functools import singledispatchmethod
///
///
/// class Class:
/// @singledispatchmethod
/// def method(self, arg): ...
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe, as migrating from `@singledispatch` to
/// `@singledispatchmethod` may change the behavior of the code.
///
/// ## Options
///
/// This rule applies to regular, static, and class methods. You can customize how Ruff categorizes
/// methods with the following options:
///
/// - `lint.pep8-naming.classmethod-decorators`
/// - `lint.pep8-naming.staticmethod-decorators`
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.6.0")]
pub(crate) struct SingledispatchMethod;

impl Violation for SingledispatchMethod {
    // The fix depends on successfully importing `singledispatchmethod`.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "`@singledispatch` decorator should not be used on methods".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        Some("Replace with `@singledispatchmethod`".to_string())
    }
}
/// PLE1519
pub(crate) fn singledispatch_method(checker: &Checker, scope: &Scope) {
    let Some(function_def) = scope.kind.as_function() else {
        return;
    };
    let ast::StmtFunctionDef {
        name,
        decorator_list,
        ..
    } = function_def;

    let Some(parent) = checker.semantic().first_non_type_parent_scope(scope) else {
        return;
    };

    // Only regular, class, and static methods are affected; plain functions
    // are the intended use case for `@singledispatch`.
    let kind = function_type::classify(
        name,
        decorator_list,
        parent,
        checker.semantic(),
        &checker.settings().pep8_naming.classmethod_decorators,
        &checker.settings().pep8_naming.staticmethod_decorators,
    );
    match kind {
        function_type::FunctionType::Method
        | function_type::FunctionType::ClassMethod
        | function_type::FunctionType::StaticMethod => {}
        _ => return,
    }

    for decorator in decorator_list {
        let is_singledispatch = checker
            .semantic()
            .resolve_qualified_name(&decorator.expression)
            .is_some_and(|qualified_name| {
                matches!(qualified_name.segments(), ["functools", "singledispatch"])
            });
        if !is_singledispatch {
            continue;
        }

        let mut diagnostic = checker.report_diagnostic(SingledispatchMethod, decorator.range());
        diagnostic.try_set_fix(|| {
            // Swap the decorator for `functools.singledispatchmethod`, importing
            // it if necessary.
            let (import_edit, binding) = checker.importer().get_or_import_symbol(
                &ImportRequest::import("functools", "singledispatchmethod"),
                decorator.start(),
                checker.semantic(),
            )?;
            Ok(Fix::unsafe_edits(
                Edit::range_replacement(binding, decorator.expression.range()),
                [import_edit],
            ))
        });
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/self_assigning_variable.rs | crates/ruff_linter/src/rules/pylint/rules/self_assigning_variable.rs | use itertools::Itertools;
use ruff_python_ast::{self as ast, Expr};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for self-assignment of variables.
///
/// ## Why is this bad?
/// Self-assignment of variables is redundant and likely a mistake.
///
/// ## Example
/// ```python
/// country = "Poland"
/// country = country
/// ```
///
/// Use instead:
/// ```python
/// country = "Poland"
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.281")]
pub(crate) struct SelfAssigningVariable {
    /// The name of the self-assigned variable.
    name: String,
}

impl Violation for SelfAssigningVariable {
    #[derive_message_formats]
    fn message(&self) -> String {
        let SelfAssigningVariable { name } = self;
        format!("Self-assignment of variable `{name}`")
    }
}
/// PLW0127
pub(crate) fn self_assignment(checker: &Checker, assign: &ast::StmtAssign) {
    // Assignments in class bodies are attributes (e.g., `x = x` assigns `x` to `self.x`, and thus
    // is not a self-assignment).
    if checker.semantic().current_scope().kind.is_class() {
        return;
    }

    // Compare every pair among the targets and the assigned value (e.g., for
    // `a = b = a`, compare `a`/`b`, `a`/`a`, and `b`/`a`).
    let exprs = assign
        .targets
        .iter()
        .chain(std::iter::once(assign.value.as_ref()));
    for (lhs, rhs) in exprs.tuple_combinations() {
        visit_assignments(checker, lhs, rhs);
    }
}
/// PLW0127
pub(crate) fn self_annotated_assignment(checker: &Checker, assign: &ast::StmtAnnAssign) {
    // Assignments in class bodies are attributes (e.g., `x = x` assigns `x` to `self.x`, and thus
    // is not a self-assignment).
    if checker.semantic().current_scope().kind.is_class() {
        return;
    }

    // Annotated assignments may omit the value (e.g., `x: int`).
    if let Some(value) = assign.value.as_deref() {
        visit_assignments(checker, &assign.target, value);
    }
}
/// Recursively compare an assignment target against its value, reporting any
/// name assigned to itself.
fn visit_assignments(checker: &Checker, left: &Expr, right: &Expr) {
    match (left, right) {
        // Recurse into same-length tuple unpacking, e.g., `a, b = a, c`.
        (Expr::Tuple(lhs), Expr::Tuple(rhs)) if lhs.len() == rhs.len() => {
            for (lhs_elem, rhs_elem) in lhs.iter().zip(rhs) {
                visit_assignments(checker, lhs_elem, rhs_elem);
            }
        }
        // Report matching names, e.g., `x = x`.
        (Expr::Name(lhs), Expr::Name(rhs)) if lhs.id == rhs.id => {
            checker.report_diagnostic(
                SelfAssigningVariable {
                    name: lhs.id.to_string(),
                },
                left.range(),
            );
        }
        _ => {}
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/type_bivariance.rs | crates/ruff_linter/src/rules/pylint/rules/type_bivariance.rs | use std::fmt;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::is_const_true;
use ruff_python_ast::{self as ast, Expr};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::rules::pylint::helpers::type_param_name;
/// ## What it does
/// Checks for `TypeVar` and `ParamSpec` definitions in which the type is
/// both covariant and contravariant.
///
/// ## Why is this bad?
/// By default, Python's generic types are invariant, but can be marked as
/// either covariant or contravariant via the `covariant` and `contravariant`
/// keyword arguments. While the API does allow you to mark a type as both
/// covariant and contravariant, this is not supported by the type system,
/// and should be avoided.
///
/// Instead, change the variance of the type to be either covariant,
/// contravariant, or invariant. If you want to describe both covariance and
/// contravariance, consider using two separate type parameters.
///
/// For context: an "invariant" generic type only accepts values that exactly
/// match the type parameter; for example, `list[Dog]` accepts only `list[Dog]`,
/// not `list[Animal]` (superclass) or `list[Bulldog]` (subclass). This is
/// the default behavior for Python's generic types.
///
/// A "covariant" generic type accepts subclasses of the type parameter; for
/// example, `Sequence[Animal]` accepts `Sequence[Dog]`. A "contravariant"
/// generic type accepts superclasses of the type parameter; for example,
/// `Callable[Dog]` accepts `Callable[Animal]`.
///
/// ## Example
/// ```python
/// from typing import TypeVar
///
/// T = TypeVar("T", covariant=True, contravariant=True)
/// ```
///
/// Use instead:
/// ```python
/// from typing import TypeVar
///
/// T_co = TypeVar("T_co", covariant=True)
/// T_contra = TypeVar("T_contra", contravariant=True)
/// ```
///
/// ## References
/// - [Python documentation: `typing` — Support for type hints](https://docs.python.org/3/library/typing.html)
/// - [PEP 483 – The Theory of Type Hints: Covariance and Contravariance](https://peps.python.org/pep-0483/#covariance-and-contravariance)
/// - [PEP 484 – Type Hints: Covariance and contravariance](https://peps.python.org/pep-0484/#covariance-and-contravariance)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.278")]
pub(crate) struct TypeBivariance {
kind: VarKind,
param_name: Option<String>,
}
impl Violation for TypeBivariance {
#[derive_message_formats]
fn message(&self) -> String {
let TypeBivariance { kind, param_name } = self;
match param_name {
None => format!("`{kind}` cannot be both covariant and contravariant"),
Some(param_name) => {
format!("`{kind}` \"{param_name}\" cannot be both covariant and contravariant",)
}
}
}
}
/// PLC0131
pub(crate) fn type_bivariance(checker: &Checker, value: &Expr) {
// If the typing modules were never imported, we'll never match below.
if !checker.semantic().seen_typing() {
return;
}
let Expr::Call(ast::ExprCall {
func, arguments, ..
}) = value
else {
return;
};
let Some(covariant) = arguments
.find_keyword("covariant")
.map(|keyword| &keyword.value)
else {
return;
};
let Some(contravariant) = arguments
.find_keyword("contravariant")
.map(|keyword| &keyword.value)
else {
return;
};
if is_const_true(covariant) && is_const_true(contravariant) {
let Some(kind) =
checker
.semantic()
.resolve_qualified_name(func)
.and_then(|qualified_name| {
if checker
.semantic()
.match_typing_qualified_name(&qualified_name, "ParamSpec")
{
Some(VarKind::ParamSpec)
} else if checker
.semantic()
.match_typing_qualified_name(&qualified_name, "TypeVar")
{
Some(VarKind::TypeVar)
} else {
None
}
})
else {
return;
};
checker.report_diagnostic(
TypeBivariance {
kind,
param_name: type_param_name(arguments).map(ToString::to_string),
},
func.range(),
);
}
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
enum VarKind {
TypeVar,
ParamSpec,
}
impl fmt::Display for VarKind {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self {
VarKind::TypeVar => fmt.write_str("TypeVar"),
VarKind::ParamSpec => fmt.write_str("ParamSpec"),
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/binary_op_exception.rs | crates/ruff_linter/src/rules/pylint/rules/binary_op_exception.rs | use ruff_python_ast::{self as ast, ExceptHandler, Expr};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
enum BoolOp {
And,
Or,
}
impl From<&ast::BoolOp> for BoolOp {
fn from(op: &ast::BoolOp) -> Self {
match op {
ast::BoolOp::And => BoolOp::And,
ast::BoolOp::Or => BoolOp::Or,
}
}
}
/// ## What it does
/// Checks for `except` clauses that attempt to catch multiple
/// exceptions with a binary operation (`and` or `or`).
///
/// ## Why is this bad?
/// A binary operation will not catch multiple exceptions. Instead, the binary
/// operation will be evaluated first, and the result of _that_ operation will
/// be caught (for an `or` operation, this is typically the first exception in
/// the list). This is almost never the desired behavior.
///
/// ## Example
/// ```python
/// try:
/// pass
/// except A or B:
/// pass
/// ```
///
/// Use instead:
/// ```python
/// try:
/// pass
/// except (A, B):
/// pass
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.258")]
pub(crate) struct BinaryOpException {
op: BoolOp,
}
impl Violation for BinaryOpException {
#[derive_message_formats]
fn message(&self) -> String {
match self.op {
BoolOp::And => {
"Exception to catch is the result of a binary `and` operation".to_string()
}
BoolOp::Or => "Exception to catch is the result of a binary `or` operation".to_string(),
}
}
}
/// PLW0711
pub(crate) fn binary_op_exception(checker: &Checker, except_handler: &ExceptHandler) {
let ExceptHandler::ExceptHandler(ast::ExceptHandlerExceptHandler { type_, .. }) =
except_handler;
let Some(type_) = type_ else {
return;
};
let Expr::BoolOp(ast::ExprBoolOp { op, .. }) = type_.as_ref() else {
return;
};
checker.report_diagnostic(BinaryOpException { op: op.into() }, type_.range());
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/if_stmt_min_max.rs | crates/ruff_linter/src/rules/pylint/rules/if_stmt_min_max.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::comparable::ComparableExpr;
use ruff_python_ast::token::parenthesized_range;
use ruff_python_ast::{self as ast, CmpOp, Stmt};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix::snippet::SourceCodeSnippet;
use crate::{Applicability, Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for `if` statements that can be replaced with `min()` or `max()`
/// calls.
///
/// ## Why is this bad?
/// An `if` statement that selects the lesser or greater of two sub-expressions
/// can be replaced with a `min()` or `max()` call respectively. Where possible,
/// prefer `min()` and `max()`, as they're more concise and readable than the
/// equivalent `if` statements.
///
/// ## Example
/// ```python
/// if score > highest_score:
/// highest_score = score
/// ```
///
/// Use instead:
/// ```python
/// highest_score = max(highest_score, score)
/// ```
///
/// ## Fix safety
/// This fix is marked unsafe if it would delete any comments within the replacement range.
///
/// An example to illustrate where comments are preserved and where they are not:
///
/// ```py
/// a, b = 0, 10
///
/// if a >= b: # deleted comment
/// # deleted comment
/// a = b # preserved comment
/// ```
///
/// ## References
/// - [Python documentation: `max`](https://docs.python.org/3/library/functions.html#max)
/// - [Python documentation: `min`](https://docs.python.org/3/library/functions.html#min)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.6.0")]
pub(crate) struct IfStmtMinMax {
min_max: MinMax,
replacement: SourceCodeSnippet,
}
impl Violation for IfStmtMinMax {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
let Self {
min_max,
replacement,
} = self;
if let Some(replacement) = replacement.full_display() {
format!("Replace `if` statement with `{replacement}`")
} else {
format!("Replace `if` statement with `{min_max}` call")
}
}
fn fix_title(&self) -> Option<String> {
let Self {
min_max,
replacement,
} = self;
if let Some(replacement) = replacement.full_display() {
Some(format!("Replace with `{replacement}`"))
} else {
Some(format!("Replace with `{min_max}` call"))
}
}
}
/// PLR1730, PLR1731
pub(crate) fn if_stmt_min_max(checker: &Checker, stmt_if: &ast::StmtIf) {
let ast::StmtIf {
test,
body,
elif_else_clauses,
range: _,
node_index: _,
} = stmt_if;
if !elif_else_clauses.is_empty() {
return;
}
let [
body @ Stmt::Assign(ast::StmtAssign {
targets: body_targets,
value: body_value,
..
}),
] = body.as_slice()
else {
return;
};
let [body_target] = body_targets.as_slice() else {
return;
};
let Some(ast::ExprCompare {
ops,
left,
comparators,
..
}) = test.as_compare_expr()
else {
return;
};
// Ignore, e.g., `foo < bar < baz`.
let [op] = &**ops else {
return;
};
let [right] = &**comparators else {
return;
};
// extract helpful info from expression of the form
// `if cmp_left op cmp_right: target = assignment_value`
let cmp_left = ComparableExpr::from(left);
let cmp_right = ComparableExpr::from(right);
let target = ComparableExpr::from(body_target);
let assignment_value = ComparableExpr::from(body_value);
// Ex): if a < b: a = b
let (min_max, flip_args) = if cmp_left == target && cmp_right == assignment_value {
match op {
CmpOp::Lt => (MinMax::Max, false),
CmpOp::LtE => (MinMax::Max, true),
CmpOp::Gt => (MinMax::Min, false),
CmpOp::GtE => (MinMax::Min, true),
_ => return,
}
}
// Ex): `if a < b: b = a`
else if cmp_left == assignment_value && cmp_right == target {
match op {
CmpOp::Lt => (MinMax::Min, true),
CmpOp::LtE => (MinMax::Min, false),
CmpOp::Gt => (MinMax::Max, true),
CmpOp::GtE => (MinMax::Max, false),
_ => return,
}
} else {
return;
};
let (arg1, arg2) = if flip_args {
(right, &**left)
} else {
(&**left, right)
};
let replacement = format!(
"{} = {min_max}({}, {})",
checker.locator().slice(
parenthesized_range(body_target.into(), body.into(), checker.tokens())
.unwrap_or(body_target.range())
),
checker.locator().slice(arg1),
checker.locator().slice(arg2),
);
let mut diagnostic = checker.report_diagnostic(
IfStmtMinMax {
min_max,
replacement: SourceCodeSnippet::from_str(replacement.as_str()),
},
stmt_if.range(),
);
let range_replacement = stmt_if.range();
let applicability = if checker.comment_ranges().intersects(range_replacement) {
Applicability::Unsafe
} else {
Applicability::Safe
};
if checker.semantic().has_builtin_binding(min_max.as_str()) {
diagnostic.set_fix(Fix::applicable_edit(
Edit::range_replacement(replacement, range_replacement),
applicability,
));
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum MinMax {
Min,
Max,
}
impl MinMax {
const fn as_str(self) -> &'static str {
match self {
Self::Min => "min",
Self::Max => "max",
}
}
}
impl std::fmt::Display for MinMax {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "{}", self.as_str())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/invalid_bytes_return.rs | crates/ruff_linter/src/rules/pylint/rules/invalid_bytes_return.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::ReturnStatementVisitor;
use ruff_python_ast::identifier::Identifier;
use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::{self as ast};
use ruff_python_semantic::analyze::function_type::is_stub;
use ruff_python_semantic::analyze::terminal::Terminal;
use ruff_python_semantic::analyze::type_inference::{PythonType, ResolvedPythonType};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for `__bytes__` implementations that return types other than `bytes`.
///
/// ## Why is this bad?
/// The `__bytes__` method should return a `bytes` object. Returning a different
/// type may cause unexpected behavior.
///
/// ## Example
/// ```python
/// class Foo:
/// def __bytes__(self):
/// return 2
/// ```
///
/// Use instead:
/// ```python
/// class Foo:
/// def __bytes__(self):
/// return b"2"
/// ```
///
/// ## References
/// - [Python documentation: The `__bytes__` method](https://docs.python.org/3/reference/datamodel.html#object.__bytes__)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.6.0")]
pub(crate) struct InvalidBytesReturnType;
impl Violation for InvalidBytesReturnType {
#[derive_message_formats]
fn message(&self) -> String {
"`__bytes__` does not return `bytes`".to_string()
}
}
/// PLE0308
pub(crate) fn invalid_bytes_return(checker: &Checker, function_def: &ast::StmtFunctionDef) {
if function_def.name.as_str() != "__bytes__" {
return;
}
if !checker.semantic().current_scope().kind.is_class() {
return;
}
if is_stub(function_def, checker.semantic()) {
return;
}
// Determine the terminal behavior (i.e., implicit return, no return, etc.).
let terminal = Terminal::from_function(function_def);
// If every control flow path raises an exception, ignore the function.
if terminal == Terminal::Raise {
return;
}
// If there are no return statements, add a diagnostic.
if terminal == Terminal::Implicit {
checker.report_diagnostic(InvalidBytesReturnType, function_def.identifier());
return;
}
let returns = {
let mut visitor = ReturnStatementVisitor::default();
visitor.visit_body(&function_def.body);
visitor.returns
};
for stmt in returns {
if let Some(value) = stmt.value.as_deref() {
if !matches!(
ResolvedPythonType::from(value),
ResolvedPythonType::Unknown | ResolvedPythonType::Atom(PythonType::Bytes)
) {
checker.report_diagnostic(InvalidBytesReturnType, value.range());
}
} else {
// Disallow implicit `None`.
checker.report_diagnostic(InvalidBytesReturnType, stmt.range());
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/missing_maxsplit_arg.rs | crates/ruff_linter/src/rules/pylint/rules/missing_maxsplit_arg.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{
DictItem, Expr, ExprAttribute, ExprCall, ExprDict, ExprNumberLiteral, ExprStringLiteral,
ExprSubscript, ExprUnaryOp, Keyword, Number, UnaryOp,
};
use ruff_python_semantic::{SemanticModel, analyze::typing};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix;
use crate::{AlwaysFixableViolation, Applicability, Edit, Fix};
/// ## What it does
/// Checks for access to the first or last element of `str.split()` or `str.rsplit()` without
/// `maxsplit=1`
///
/// ## Why is this bad?
/// Calling `str.split()` or `str.rsplit()` without passing `maxsplit=1` splits on every delimiter in the
/// string. When accessing only the first or last element of the result, it
/// would be more efficient to only split once.
///
/// ## Example
/// ```python
/// url = "www.example.com"
/// prefix = url.split(".")[0]
/// ```
///
/// Use instead:
/// ```python
/// url = "www.example.com"
/// prefix = url.split(".", maxsplit=1)[0]
/// ```
///
/// To access the last element, use `str.rsplit()` instead of `str.split()`:
/// ```python
/// url = "www.example.com"
/// suffix = url.rsplit(".", maxsplit=1)[-1]
/// ```
///
/// ## Fix Safety
/// This rule's fix is marked as unsafe for `split()`/`rsplit()` calls that contain `*args` or `**kwargs` arguments, as
/// adding a `maxsplit` argument to such a call may lead to duplicated arguments.
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.11.12")]
pub(crate) struct MissingMaxsplitArg {
actual_split_type: String,
suggested_split_type: String,
}
/// Represents the index of the slice used for this rule (which can only be 0 or -1)
enum SliceBoundary {
First,
Last,
}
impl AlwaysFixableViolation for MissingMaxsplitArg {
#[derive_message_formats]
fn message(&self) -> String {
let MissingMaxsplitArg {
actual_split_type: _,
suggested_split_type,
} = self;
format!("Replace with `{suggested_split_type}(..., maxsplit=1)`.")
}
fn fix_title(&self) -> String {
let MissingMaxsplitArg {
actual_split_type,
suggested_split_type,
} = self;
if actual_split_type == suggested_split_type {
format!("Pass `maxsplit=1` into `str.{actual_split_type}()`")
} else {
format!("Use `str.{suggested_split_type}()` and pass `maxsplit=1`")
}
}
}
fn is_string(expr: &Expr, semantic: &SemanticModel) -> bool {
if let Expr::Name(name) = expr {
semantic
.only_binding(name)
.is_some_and(|binding_id| typing::is_string(semantic.binding(binding_id), semantic))
} else if let Some(binding_id) = semantic.lookup_attribute(expr) {
typing::is_string(semantic.binding(binding_id), semantic)
} else {
expr.is_string_literal_expr()
}
}
/// PLC0207
pub(crate) fn missing_maxsplit_arg(checker: &Checker, value: &Expr, slice: &Expr, expr: &Expr) {
// Check the sliced expression is a function
let Expr::Call(ExprCall {
func, arguments, ..
}) = value
else {
return;
};
// Check the slice index is either 0 or -1 (first or last value)
let index = match slice {
Expr::NumberLiteral(ExprNumberLiteral {
value: Number::Int(number_value),
..
}) => number_value.as_i64(),
Expr::UnaryOp(ExprUnaryOp {
op: UnaryOp::USub,
operand,
..
}) => match operand.as_ref() {
Expr::NumberLiteral(ExprNumberLiteral {
value: Number::Int(number_value),
..
}) => number_value.as_i64().map(|number| -number),
_ => return,
},
_ => return,
};
let slice_boundary = match index {
Some(0) => SliceBoundary::First,
Some(-1) => SliceBoundary::Last,
_ => return,
};
let Expr::Attribute(ExprAttribute { attr, value, .. }) = func.as_ref() else {
return;
};
// Check the function is "split" or "rsplit"
let actual_split_type = attr.as_str();
if !matches!(actual_split_type, "split" | "rsplit") {
return;
}
let mut target_instance = value;
// a subscripted value could technically be subscripted further ad infinitum, so we
// recurse into the subscript expressions until we find the value being subscripted
while let Expr::Subscript(ExprSubscript { value, .. }) = target_instance.as_ref() {
target_instance = value;
}
// Check the function is called on a string
if !is_string(target_instance, checker.semantic()) {
return;
}
// Check the function does not have maxsplit set
if arguments.find_argument_value("maxsplit", 1).is_some() {
return;
}
// Check maxsplit kwarg not set via unpacked dict literal
for keyword in &*arguments.keywords {
let Keyword { value, .. } = keyword;
if let Expr::Dict(ExprDict { items, .. }) = value {
for item in items {
let DictItem { key, .. } = item;
if let Some(Expr::StringLiteral(ExprStringLiteral { value, .. })) = key {
if value.to_str() == "maxsplit" {
return;
}
}
}
}
}
let suggested_split_type = match slice_boundary {
SliceBoundary::First => "split",
SliceBoundary::Last => "rsplit",
};
let maxsplit_argument_edit =
fix::edits::add_argument("maxsplit=1", arguments, checker.tokens());
// Only change `actual_split_type` if it doesn't match `suggested_split_type`
let split_type_edit: Option<Edit> = if actual_split_type == suggested_split_type {
None
} else {
Some(Edit::range_replacement(
suggested_split_type.to_string(),
attr.range(),
))
};
let mut diagnostic = checker.report_diagnostic(
MissingMaxsplitArg {
actual_split_type: actual_split_type.to_string(),
suggested_split_type: suggested_split_type.to_string(),
},
expr.range(),
);
diagnostic.set_fix(Fix::applicable_edits(
maxsplit_argument_edit,
split_type_edit,
// Mark the fix as unsafe, if there are `*args` or `**kwargs`
if arguments.args.iter().any(Expr::is_starred_expr)
|| arguments
.keywords
.iter()
.any(|keyword| keyword.arg.is_none())
{
Applicability::Unsafe
} else {
Applicability::Safe
},
));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/unnecessary_list_index_lookup.rs | crates/ruff_linter/src/rules/pylint/rules/unnecessary_list_index_lookup.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::{self as ast, Expr, Int, Number, StmtFor};
use ruff_python_semantic::SemanticModel;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::rules::pylint::helpers::SequenceIndexVisitor;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for index-based list accesses during `enumerate` iterations.
///
/// ## Why is this bad?
/// When iterating over a list with `enumerate`, the current item is already
/// available alongside its index. Using the index to look up the item is
/// unnecessary.
///
/// ## Example
/// ```python
/// letters = ["a", "b", "c"]
///
/// for index, letter in enumerate(letters):
/// print(letters[index])
/// ```
///
/// Use instead:
/// ```python
/// letters = ["a", "b", "c"]
///
/// for index, letter in enumerate(letters):
/// print(letter)
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.5.0")]
pub(crate) struct UnnecessaryListIndexLookup;
impl AlwaysFixableViolation for UnnecessaryListIndexLookup {
#[derive_message_formats]
fn message(&self) -> String {
"List index lookup in `enumerate()` loop".to_string()
}
fn fix_title(&self) -> String {
"Use the loop variable directly".to_string()
}
}
/// PLR1736
pub(crate) fn unnecessary_list_index_lookup(checker: &Checker, stmt_for: &StmtFor) {
let Some((sequence, index_name, value_name)) =
enumerate_items(&stmt_for.iter, &stmt_for.target, checker.semantic())
else {
return;
};
let ranges = {
let mut visitor = SequenceIndexVisitor::new(&sequence.id, &index_name.id, &value_name.id);
visitor.visit_body(&stmt_for.body);
visitor.visit_body(&stmt_for.orelse);
visitor.into_accesses()
};
for range in ranges {
let mut diagnostic = checker.report_diagnostic(UnnecessaryListIndexLookup, range);
diagnostic.set_fix(Fix::safe_edits(
Edit::range_replacement(value_name.id.to_string(), range),
[noop(index_name), noop(value_name)],
));
}
}
/// PLR1736
pub(crate) fn unnecessary_list_index_lookup_comprehension(checker: &Checker, expr: &Expr) {
let (Expr::Generator(ast::ExprGenerator {
elt, generators, ..
})
| Expr::DictComp(ast::ExprDictComp {
value: elt,
generators,
..
})
| Expr::SetComp(ast::ExprSetComp {
elt, generators, ..
})
| Expr::ListComp(ast::ExprListComp {
elt, generators, ..
})) = expr
else {
return;
};
for comp in generators {
let Some((sequence, index_name, value_name)) =
enumerate_items(&comp.iter, &comp.target, checker.semantic())
else {
return;
};
let ranges = {
let mut visitor =
SequenceIndexVisitor::new(&sequence.id, &index_name.id, &value_name.id);
visitor.visit_expr(elt.as_ref());
visitor.into_accesses()
};
for range in ranges {
let mut diagnostic = checker.report_diagnostic(UnnecessaryListIndexLookup, range);
diagnostic.set_fix(Fix::safe_edits(
Edit::range_replacement(value_name.id.to_string(), range),
[noop(index_name), noop(value_name)],
));
}
}
}
fn enumerate_items<'a>(
call_expr: &'a Expr,
tuple_expr: &'a Expr,
semantic: &SemanticModel,
) -> Option<(&'a ast::ExprName, &'a ast::ExprName, &'a ast::ExprName)> {
let ast::ExprCall {
func, arguments, ..
} = call_expr.as_call_expr()?;
let Expr::Tuple(ast::ExprTuple { elts, .. }) = tuple_expr else {
return None;
};
let [index, value] = elts.as_slice() else {
return None;
};
// Grab the variable names.
let Expr::Name(index_name) = index else {
return None;
};
let Expr::Name(value_name) = value else {
return None;
};
// If either of the variable names are intentionally ignored by naming them `_`, then don't
// emit.
if index_name.id == "_" || value_name.id == "_" {
return None;
}
// Get the first argument of the enumerate call.
let Some(Expr::Name(sequence)) = arguments.args.first() else {
return None;
};
// If the `enumerate` call has a non-zero `start`, don't omit.
if !arguments
.find_argument_value("start", 1)
.is_none_or(|expr| {
matches!(
expr,
Expr::NumberLiteral(ast::ExprNumberLiteral {
value: Number::Int(Int::ZERO),
..
})
)
})
{
return None;
}
// Check that the function is the `enumerate` builtin.
if !semantic.match_builtin_expr(func, "enumerate") {
return None;
}
Some((sequence, index_name, value_name))
}
/// Return a no-op edit for the given name.
fn noop(name: &ast::ExprName) -> Edit {
Edit::range_replacement(name.id.to_string(), name.range())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/literal_membership.rs | crates/ruff_linter/src/rules/pylint/rules/literal_membership.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, CmpOp, Expr};
use ruff_python_semantic::analyze::typing;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for membership tests on `list` and `tuple` literals.
///
/// ## Why is this bad?
/// When testing for membership in a static sequence, prefer a `set` literal
/// over a `list` or `tuple`, as Python optimizes `set` membership tests.
///
/// ## Example
/// ```python
/// 1 in [1, 2, 3]
/// ```
///
/// Use instead:
/// ```python
/// 1 in {1, 2, 3}
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe, as the use of a `set` literal will
/// error at runtime if either the element being tested for membership (the
/// left-hand side) or any element of the sequence (the right-hand side)
/// is unhashable (like lists or dictionaries). While Ruff will attempt to
/// infer the hashability of both sides and skip the fix when it can determine
/// that either side is unhashable, it may not always be able to do so.
///
/// ## References
/// - [What’s New In Python 3.2](https://docs.python.org/3/whatsnew/3.2.html#optimizations)
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "v0.1.1")]
pub(crate) struct LiteralMembership;
impl AlwaysFixableViolation for LiteralMembership {
#[derive_message_formats]
fn message(&self) -> String {
"Use a set literal when testing for membership".to_string()
}
fn fix_title(&self) -> String {
"Convert to `set`".to_string()
}
}
/// PLR6201
pub(crate) fn literal_membership(checker: &Checker, compare: &ast::ExprCompare) {
let [op] = &*compare.ops else {
return;
};
if !matches!(op, CmpOp::In | CmpOp::NotIn) {
return;
}
let [right] = &*compare.comparators else {
return;
};
let elts = match right {
Expr::List(ast::ExprList { elts, .. }) => elts,
Expr::Tuple(ast::ExprTuple { elts, .. }) => elts,
_ => return,
};
// Skip empty collections (#15729).
if elts.is_empty() {
return;
}
// If `left`, or any of the elements in `right`, are known to _not_ be hashable, return.
if std::iter::once(compare.left.as_ref())
.chain(elts)
.any(|expr| match expr {
// Expressions that are known _not_ to be hashable.
Expr::List(_)
| Expr::Set(_)
| Expr::Dict(_)
| Expr::ListComp(_)
| Expr::SetComp(_)
| Expr::DictComp(_)
| Expr::Generator(_)
| Expr::Await(_)
| Expr::Yield(_)
| Expr::YieldFrom(_) => true,
// Expressions that can be _inferred_ not to be hashable.
Expr::Name(name) => {
let Some(id) = checker.semantic().resolve_name(name) else {
return false;
};
let binding = checker.semantic().binding(id);
typing::is_list(binding, checker.semantic())
|| typing::is_dict(binding, checker.semantic())
|| typing::is_set(binding, checker.semantic())
}
_ => false,
})
{
return;
}
let mut diagnostic = checker.report_diagnostic(LiteralMembership, right.range());
let literal = checker.locator().slice(right);
let set = format!("{{{}}}", &literal[1..literal.len() - 1]);
diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
set,
right.range(),
)));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/pylint/rules/dict_index_missing_items.rs | crates/ruff_linter/src/rules/pylint/rules/dict_index_missing_items.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::comparable::ComparableExpr;
use ruff_python_ast::{
self as ast, Expr, ExprContext, StmtFor,
token::parenthesized_range,
visitor::{self, Visitor},
};
use ruff_python_semantic::SemanticModel;
use ruff_python_semantic::analyze::type_inference::{PythonType, ResolvedPythonType};
use ruff_python_semantic::analyze::typing::is_dict;
use ruff_text_size::{Ranged, TextRange};
use crate::Violation;
use crate::checkers::ast::{Checker, DiagnosticGuard};
/// ## What it does
/// Checks for dictionary iterations that extract the dictionary value
/// via explicit indexing, instead of using `.items()`.
///
/// ## Why is this bad?
/// Iterating over a dictionary with `.items()` is semantically clearer
/// and more efficient than extracting the value with the key.
///
/// ## Example
/// ```python
/// ORCHESTRA = {
/// "violin": "strings",
/// "oboe": "woodwind",
/// "tuba": "brass",
/// "gong": "percussion",
/// }
///
/// for instrument in ORCHESTRA:
/// print(f"{instrument}: {ORCHESTRA[instrument]}")
/// ```
///
/// Use instead:
/// ```python
/// ORCHESTRA = {
/// "violin": "strings",
/// "oboe": "woodwind",
/// "tuba": "brass",
/// "gong": "percussion",
/// }
///
/// for instrument, section in ORCHESTRA.items():
/// print(f"{instrument}: {section}")
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.8.0")]
pub(crate) struct DictIndexMissingItems<'a> {
key: &'a str,
dict: &'a str,
}
impl Violation for DictIndexMissingItems<'_> {
#[derive_message_formats]
fn message(&self) -> String {
"Extracting value from dictionary without calling `.items()`".to_string()
}
fn fix_title(&self) -> Option<String> {
let Self { key, dict } = self;
Some(format!("Use `for {key}, value in {dict}.items()` instead"))
}
}
/// PLC0206
pub(crate) fn dict_index_missing_items(checker: &Checker, stmt_for: &StmtFor) {
let StmtFor { iter, body, .. } = stmt_for;
// Extract the name of the iteration object (e.g., `obj` in `for key in obj:`).
let Some(dict_name) = extract_dict_name(iter) else {
return;
};
// Determine if the right-hand side is a dictionary literal (i.e. `for key in (dict := {"a": 1}):`).
let is_dict_literal = matches!(
ResolvedPythonType::from(&**iter),
ResolvedPythonType::Atom(PythonType::Dict),
);
if !is_dict_literal && !is_inferred_dict(dict_name, checker.semantic()) {
return;
}
SubscriptVisitor::new(stmt_for, dict_name, checker).visit_body(body);
}
/// A visitor to detect subscript operations on a target dictionary.
struct SubscriptVisitor<'a, 'b> {
/// The target of the for loop (e.g., `key` in `for key in obj:`).
target: &'a Expr,
/// The name of the iterated object (e.g., `obj` in `for key in obj:`).
dict_name: &'a ast::ExprName,
/// The range to use for the primary diagnostic.
range: TextRange,
/// The [`Checker`] used to emit diagnostics.
checker: &'a Checker<'b>,
/// The [`DiagnosticGuard`] used to attach additional annotations for each subscript.
///
/// The guard is initially `None` and then set to `Some` when the first subscript is found.
guard: Option<DiagnosticGuard<'a, 'b>>,
}
impl<'a, 'b> SubscriptVisitor<'a, 'b> {
fn new(stmt_for: &'a StmtFor, dict_name: &'a ast::ExprName, checker: &'a Checker<'b>) -> Self {
let StmtFor { target, iter, .. } = stmt_for;
let range = {
let target_start =
parenthesized_range(target.into(), stmt_for.into(), checker.tokens())
.map_or(target.start(), TextRange::start);
TextRange::new(target_start, iter.end())
};
Self {
target,
dict_name,
range,
checker,
guard: None,
}
}
}
impl<'a> Visitor<'a> for SubscriptVisitor<'a, '_> {
fn visit_expr(&mut self, expr: &'a Expr) {
// Given `obj[key]`, `value` must be `obj` and `slice` must be `key`.
if let Expr::Subscript(ast::ExprSubscript {
value,
slice,
ctx: ExprContext::Load,
..
}) = expr
{
let Expr::Name(name) = value.as_ref() else {
return;
};
// Check that the sliced dictionary name is the same as the iterated object name.
if name.id != self.dict_name.id {
return;
}
// Check that the sliced value is the same as the target of the `for` loop.
if ComparableExpr::from(slice) != ComparableExpr::from(self.target) {
return;
}
let guard = self.guard.get_or_insert_with(|| {
self.checker.report_diagnostic(
DictIndexMissingItems {
key: self.checker.locator().slice(self.target),
dict: self.checker.locator().slice(self.dict_name),
},
self.range,
)
});
guard.secondary_annotation("", expr);
} else {
visitor::walk_expr(self, expr);
}
}
}
/// Extracts the name of the dictionary from the expression.
fn extract_dict_name(expr: &Expr) -> Option<&ast::ExprName> {
// Ex) `for key in obj:`
if let Some(name_expr) = expr.as_name_expr() {
return Some(name_expr);
}
// Ex) `for key in obj.keys():`
if let Expr::Call(ast::ExprCall { func, .. }) = expr {
if let Expr::Attribute(ast::ExprAttribute { attr, value, .. }) = func.as_ref() {
if attr == "keys" {
if let Expr::Name(var_name) = value.as_ref() {
return Some(var_name);
}
}
}
}
// Ex) `for key in (my_dict := {"foo": "bar"}):`
if let Expr::Named(ast::ExprNamed { target, value, .. }) = expr {
if let Expr::Dict(ast::ExprDict { .. }) = value.as_ref() {
if let Expr::Name(var_name) = target.as_ref() {
return Some(var_name);
}
}
}
None
}
/// Returns `true` if the binding is a dictionary, inferred from the type.
fn is_inferred_dict(name: &ast::ExprName, semantic: &SemanticModel) -> bool {
semantic
.only_binding(name)
.map(|id| semantic.binding(id))
.is_some_and(|binding| is_dict(binding, semantic))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.