repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/hover.rs | crates/ty_ide/src/hover.rs | use crate::docstring::Docstring;
use crate::goto::{GotoTarget, find_goto_target};
use crate::{Db, MarkupKind, RangedValue};
use ruff_db::files::{File, FileRange};
use ruff_db::parsed::parsed_module;
use ruff_text_size::{Ranged, TextSize};
use std::fmt;
use std::fmt::Formatter;
use ty_python_semantic::types::{KnownInstanceType, Type, TypeVarVariance};
use ty_python_semantic::{DisplaySettings, SemanticModel};
/// Resolves hover information for the symbol or expression at `offset` in `file`.
///
/// The hover is assembled from up to two sections: a call signature (or,
/// failing that, the inferred type of the covering node) followed by the
/// docstring of the resolved definition. Returns `None` when nothing useful
/// is under the cursor; hovering a literal expression is deliberately
/// suppressed.
pub fn hover(db: &dyn Db, file: File, offset: TextSize) -> Option<RangedValue<Hover<'_>>> {
    let module = parsed_module(db, file).load(db);
    let model = SemanticModel::new(db, file);
    let goto_target = find_goto_target(&model, &module, offset)?;

    // Echoing a literal's type back at the user is not helpful; bail early.
    if let GotoTarget::Expression(expr) = goto_target {
        if expr.is_literal_expr() {
            return None;
        }
    }

    let mut contents = Vec::new();

    // Prefer a simplified call signature when the target is being called;
    // otherwise fall back to the inferred type of the covering node.
    if let Some(signature) = goto_target.call_type_simplified_by_overloads(&model) {
        contents.push(HoverContent::Signature(signature));
    } else if let Some(ty) = goto_target.inferred_type(&model) {
        tracing::debug!("Inferred type of covering node is {}", ty.display(db));

        let content = match ty {
            // For a PEP 695 type variable, show the bound form together with
            // its variance when binding succeeds; otherwise fall back to the
            // plain type without variance information.
            Type::KnownInstance(KnownInstanceType::TypeVar(typevar)) => {
                match typevar.bind_pep695(db) {
                    Some(bound) => {
                        HoverContent::Type(Type::TypeVar(bound), Some(bound.variance(db)))
                    }
                    None => HoverContent::Type(ty, None),
                }
            }
            Type::TypeVar(typevar) => HoverContent::Type(ty, Some(typevar.variance(db))),
            _ => HoverContent::Type(ty, None),
        };
        contents.push(content);
    }

    // The docstring of the resolved definition (aliases resolved) is always
    // the last section of the hover, when one exists.
    let docstring = goto_target
        .get_definition_targets(
            &model,
            ty_python_semantic::ImportAliasResolution::ResolveAliases,
        )
        .and_then(|definitions| definitions.docstring(db));
    if let Some(docstring) = docstring {
        contents.push(HoverContent::Docstring(docstring));
    }

    if contents.is_empty() {
        return None;
    }

    Some(RangedValue {
        range: FileRange::new(file, goto_target.range()),
        value: Hover { contents },
    })
}
/// The content of a single hover: an ordered list of sections (signature or
/// inferred type first, then docstring) that render separated by a
/// markup-specific horizontal rule.
pub struct Hover<'db> {
    contents: Vec<HoverContent<'db>>,
}

impl<'db> Hover<'db> {
    /// Renders the hover to a string using the specified markup kind.
    pub const fn display<'a>(&'a self, db: &'db dyn Db, kind: MarkupKind) -> DisplayHover<'db, 'a> {
        DisplayHover {
            db,
            hover: self,
            kind,
        }
    }

    /// Iterates over the individual content sections of this hover.
    fn iter(&self) -> std::slice::Iter<'_, HoverContent<'db>> {
        self.contents.iter()
    }
}
/// Consuming iteration over the hover's content sections.
impl<'db> IntoIterator for Hover<'db> {
    type Item = HoverContent<'db>;
    type IntoIter = std::vec::IntoIter<Self::Item>;

    fn into_iter(self) -> Self::IntoIter {
        self.contents.into_iter()
    }
}

/// Borrowing iteration over the hover's content sections.
impl<'a, 'db> IntoIterator for &'a Hover<'db> {
    type Item = &'a HoverContent<'db>;
    type IntoIter = std::slice::Iter<'a, HoverContent<'db>>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
/// Displayable wrapper that renders a [`Hover`] with a given markup kind,
/// inserting a horizontal rule between consecutive sections.
pub struct DisplayHover<'db, 'a> {
    db: &'db dyn Db,
    hover: &'a Hover<'db>,
    kind: MarkupKind,
}

impl fmt::Display for DisplayHover<'_, '_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        for (index, content) in self.hover.contents.iter().enumerate() {
            // Every section after the first is preceded by a separator.
            if index > 0 {
                self.kind.horizontal_line().fmt(f)?;
            }
            content.display(self.db, self.kind).fmt(f)?;
        }
        Ok(())
    }
}
/// A single section of hover output.
#[derive(Debug, Clone)]
pub enum HoverContent<'db> {
    /// A call signature, pre-rendered as Python source text.
    Signature(String),
    /// An inferred type, plus its variance when the type is a type variable.
    Type(Type<'db>, Option<TypeVarVariance>),
    /// Documentation extracted from the target's docstring.
    Docstring(Docstring),
}

impl<'db> HoverContent<'db> {
    /// Wraps this section in a displayable adapter for the given markup kind.
    fn display(&self, db: &'db dyn Db, kind: MarkupKind) -> DisplayHoverContent<'_, 'db> {
        DisplayHoverContent {
            db,
            content: self,
            kind,
        }
    }
}
/// Displayable adapter that renders a single [`HoverContent`] section with a
/// given markup kind.
pub(crate) struct DisplayHoverContent<'a, 'db> {
    db: &'db dyn Db,
    content: &'a HoverContent<'db>,
    kind: MarkupKind,
}

impl fmt::Display for DisplayHoverContent<'_, '_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.content {
            // Signatures are already rendered Python source; emit them in a
            // python-highlighted code fence.
            HoverContent::Signature(signature) => {
                self.kind.fenced_code_block(&signature, "python").fmt(f)
            }
            HoverContent::Type(ty, variance) => {
                // Variance, when known, is appended as a plain-text suffix
                // inside the code fence.
                let variance = match variance {
                    Some(TypeVarVariance::Covariant) => " (covariant)",
                    Some(TypeVarVariance::Contravariant) => " (contravariant)",
                    Some(TypeVarVariance::Invariant) => " (invariant)",
                    Some(TypeVarVariance::Bivariant) => " (bivariant)",
                    None => "",
                };
                // Special types like `<special-form of whatever 'blahblah' with 'florps'>`
                // render poorly with python syntax-highlighting but well as xml
                let ty_string = ty
                    .display_with(self.db, DisplaySettings::default().multiline())
                    .to_string();
                // Heuristic: a leading `<` marks the special-form rendering.
                let syntax = if ty_string.starts_with('<') {
                    "xml"
                } else {
                    "python"
                };
                self.kind
                    .fenced_code_block(format!("{ty_string}{variance}"), syntax)
                    .fmt(f)
            }
            // Docstrings carry their own renderer per markup kind.
            HoverContent::Docstring(docstring) => docstring.render(self.kind).fmt(f),
        }
    }
}
#[cfg(test)]
mod tests {
use crate::tests::{CursorTest, cursor_test};
use crate::{MarkupKind, hover};
use insta::assert_snapshot;
use ruff_db::diagnostic::{
Annotation, Diagnostic, DiagnosticFormat, DiagnosticId, DisplayDiagnosticConfig, LintName,
Severity, Span,
};
use ruff_text_size::{Ranged, TextRange};
#[test]
fn hover_basic() {
let test = cursor_test(
r#"
a = 10
"""This is the docs for this value
Wow these are good docs!
"""
a<CURSOR>
"#,
);
assert_snapshot!(test.hover(), @r#"
Literal[10]
---------------------------------------------
This is the docs for this value
Wow these are good docs!
---------------------------------------------
```python
Literal[10]
```
---
This is the docs for this value
Wow these are good docs!
---------------------------------------------
info[hover]: Hovered content is
--> main.py:8:1
|
6 | """
7 |
8 | a
| ^- Cursor offset
| |
| source
|
"#);
}
#[test]
fn hover_function() {
let test = cursor_test(
r#"
def my_func(a, b):
'''This is such a great func!!
Args:
a: first for a reason
b: coming for `a`'s title
'''
return 0
my_fu<CURSOR>nc(1, 2)
"#,
);
assert_snapshot!(test.hover(), @r"
def my_func(
a,
b
) -> Unknown
---------------------------------------------
This is such a great func!!
Args:
a: first for a reason
b: coming for `a`'s title
---------------------------------------------
```python
def my_func(
a,
b
) -> Unknown
```
---
This is such a great func!!
Args:
a: first for a reason
b: coming for `a`'s title
---------------------------------------------
info[hover]: Hovered content is
--> main.py:11:1
|
9 | return 0
10 |
11 | my_func(1, 2)
| ^^^^^-^
| | |
| | Cursor offset
| source
|
");
}
#[test]
fn hover_function_def() {
let test = cursor_test(
r#"
def my_fu<CURSOR>nc(a, b):
'''This is such a great func!!
Args:
a: first for a reason
b: coming for `a`'s title
'''
return 0
"#,
);
assert_snapshot!(test.hover(), @r"
def my_func(
a,
b
) -> Unknown
---------------------------------------------
This is such a great func!!
Args:
a: first for a reason
b: coming for `a`'s title
---------------------------------------------
```python
def my_func(
a,
b
) -> Unknown
```
---
This is such a great func!!
Args:
a: first for a reason
b: coming for `a`'s title
---------------------------------------------
info[hover]: Hovered content is
--> main.py:2:5
|
2 | def my_func(a, b):
| ^^^^^-^
| | |
| | Cursor offset
| source
3 | '''This is such a great func!!
|
");
}
#[test]
fn hover_class() {
let test = cursor_test(
r#"
class MyClass:
'''
This is such a great class!!
Don't you know?
Everyone loves my class!!
'''
def __init__(self, val):
"""initializes MyClass (perfectly)"""
self.val = val
def my_method(self, a, b):
'''This is such a great func!!
Args:
a: first for a reason
b: coming for `a`'s title
'''
return 0
MyCla<CURSOR>ss
"#,
);
assert_snapshot!(test.hover(), @r"
<class 'MyClass'>
---------------------------------------------
This is such a great class!!
Don't you know?
Everyone loves my class!!
---------------------------------------------
```xml
<class 'MyClass'>
```
---
This is such a great class!!
Don't you know?
Everyone loves my class!!
---------------------------------------------
info[hover]: Hovered content is
--> main.py:24:1
|
22 | return 0
23 |
24 | MyClass
| ^^^^^-^
| | |
| | Cursor offset
| source
|
");
}
#[test]
fn hover_class_def() {
let test = cursor_test(
r#"
class MyCla<CURSOR>ss:
'''
This is such a great class!!
Don't you know?
Everyone loves my class!!
'''
def __init__(self, val):
"""initializes MyClass (perfectly)"""
self.val = val
def my_method(self, a, b):
'''This is such a great func!!
Args:
a: first for a reason
b: coming for `a`'s title
'''
return 0
"#,
);
assert_snapshot!(test.hover(), @r"
<class 'MyClass'>
---------------------------------------------
This is such a great class!!
Don't you know?
Everyone loves my class!!
---------------------------------------------
```xml
<class 'MyClass'>
```
---
This is such a great class!!
Don't you know?
Everyone loves my class!!
---------------------------------------------
info[hover]: Hovered content is
--> main.py:2:7
|
2 | class MyClass:
| ^^^^^-^
| | |
| | Cursor offset
| source
3 | '''
4 | This is such a great class!!
|
");
}
#[test]
fn hover_class_init() {
let test = cursor_test(
r#"
class MyClass:
'''
This is such a great class!!
Don't you know?
Everyone loves my class!!
'''
def __init__(self, val):
"""initializes MyClass (perfectly)"""
self.val = val
def my_method(self, a, b):
'''This is such a great func!!
Args:
a: first for a reason
b: coming for `a`'s title
'''
return 0
x = MyCla<CURSOR>ss(0)
"#,
);
assert_snapshot!(test.hover(), @r"
<class 'MyClass'>
---------------------------------------------
initializes MyClass (perfectly)
---------------------------------------------
```xml
<class 'MyClass'>
```
---
initializes MyClass (perfectly)
---------------------------------------------
info[hover]: Hovered content is
--> main.py:24:5
|
22 | return 0
23 |
24 | x = MyClass(0)
| ^^^^^-^
| | |
| | Cursor offset
| source
|
");
}
#[test]
fn hover_class_init_attr() {
let test = CursorTest::builder()
.source(
"mymod.py",
r#"
class MyClass:
'''
This is such a great class!!
Don't you know?
Everyone loves my class!!
'''
def __init__(self, val):
"""initializes MyClass (perfectly)"""
self.val = val
"#,
)
.source(
"main.py",
r#"
import mymod
x = mymod.MyCla<CURSOR>ss(0)
"#,
)
.build();
assert_snapshot!(test.hover(), @r"
<class 'MyClass'>
---------------------------------------------
initializes MyClass (perfectly)
---------------------------------------------
```xml
<class 'MyClass'>
```
---
initializes MyClass (perfectly)
---------------------------------------------
info[hover]: Hovered content is
--> main.py:4:11
|
2 | import mymod
3 |
4 | x = mymod.MyClass(0)
| ^^^^^-^
| | |
| | Cursor offset
| source
|
");
}
#[test]
fn hover_class_init_no_init_docs() {
let test = cursor_test(
r#"
class MyClass:
'''
This is such a great class!!
Don't you know?
Everyone loves my class!!
'''
def __init__(self, val):
self.val = val
def my_method(self, a, b):
'''This is such a great func!!
Args:
a: first for a reason
b: coming for `a`'s title
'''
return 0
x = MyCla<CURSOR>ss(0)
"#,
);
assert_snapshot!(test.hover(), @r"
<class 'MyClass'>
---------------------------------------------
This is such a great class!!
Don't you know?
Everyone loves my class!!
---------------------------------------------
```xml
<class 'MyClass'>
```
---
This is such a great class!!
Don't you know?
Everyone loves my class!!
---------------------------------------------
info[hover]: Hovered content is
--> main.py:23:5
|
21 | return 0
22 |
23 | x = MyClass(0)
| ^^^^^-^
| | |
| | Cursor offset
| source
|
");
}
#[test]
fn hover_class_method() {
let test = cursor_test(
r#"
class MyClass:
'''
This is such a great class!!
Don't you know?
Everyone loves my class!!
'''
def __init__(self, val):
"""initializes MyClass (perfectly)"""
self.val = val
def my_method(self, a, b):
'''This is such a great func!!
Args:
a: first for a reason
b: coming for `a`'s title
'''
return 0
x = MyClass(0)
x.my_me<CURSOR>thod(2, 3)
"#,
);
assert_snapshot!(test.hover(), @r"
bound method MyClass.my_method(
a,
b
) -> Unknown
---------------------------------------------
This is such a great func!!
Args:
a: first for a reason
b: coming for `a`'s title
---------------------------------------------
```python
bound method MyClass.my_method(
a,
b
) -> Unknown
```
---
This is such a great func!!
Args:
a: first for a reason
b: coming for `a`'s title
---------------------------------------------
info[hover]: Hovered content is
--> main.py:25:3
|
24 | x = MyClass(0)
25 | x.my_method(2, 3)
| ^^^^^-^^^
| | |
| | Cursor offset
| source
|
");
}
#[test]
fn hover_member() {
let test = cursor_test(
r#"
class Foo:
a: int = 10
def __init__(a: int, b: str):
self.a = a
"""This is the docs for this value
Wow these are good docs!
"""
self.b: str = b
foo = Foo()
foo.<CURSOR>a
"#,
);
assert_snapshot!(test.hover(), @r"
int
---------------------------------------------
```python
int
```
---------------------------------------------
info[hover]: Hovered content is
--> main.py:14:5
|
13 | foo = Foo()
14 | foo.a
| -
| |
| source
| Cursor offset
|
");
}
#[test]
fn hover_function_typed_variable() {
let test = cursor_test(
r#"
def foo(a, b): ...
foo<CURSOR>
"#,
);
assert_snapshot!(test.hover(), @r"
def foo(
a,
b
) -> Unknown
---------------------------------------------
```python
def foo(
a,
b
) -> Unknown
```
---------------------------------------------
info[hover]: Hovered content is
--> main.py:4:1
|
2 | def foo(a, b): ...
3 |
4 | foo
| ^^^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_binary_expression() {
let test = cursor_test(
r#"
def foo(a: int, b: int, c: int):
a + b ==<CURSOR> c
"#,
);
assert_snapshot!(test.hover(), @r"
bool
---------------------------------------------
```python
bool
```
---------------------------------------------
info[hover]: Hovered content is
--> main.py:3:5
|
2 | def foo(a: int, b: int, c: int):
3 | a + b == c
| ^^^^^^^^-^
| | |
| | Cursor offset
| source
|
");
}
#[test]
fn hover_keyword_parameter() {
let test = cursor_test(
r#"
def test(ab: int):
"""my cool test
Args:
ab: a nice little integer
"""
return 0
test(a<CURSOR>b= 123)
"#,
);
// TODO: This should reveal `int` because the user hovers over the parameter and not the value.
assert_snapshot!(test.hover(), @r"
Literal[123]
---------------------------------------------
```python
Literal[123]
```
---------------------------------------------
info[hover]: Hovered content is
--> main.py:10:6
|
8 | return 0
9 |
10 | test(ab= 123)
| ^-
| ||
| |Cursor offset
| source
|
");
}
#[test]
fn hover_keyword_parameter_def() {
let test = cursor_test(
r#"
def test(a<CURSOR>b: int):
"""my cool test
Args:
ab: a nice little integer
"""
return 0
"#,
);
assert_snapshot!(test.hover(), @r#"
int
---------------------------------------------
```python
int
```
---------------------------------------------
info[hover]: Hovered content is
--> main.py:2:10
|
2 | def test(ab: int):
| ^-
| ||
| |Cursor offset
| source
3 | """my cool test
|
"#);
}
#[test]
fn hover_union() {
let test = cursor_test(
r#"
def foo(a, b):
"""The foo function"""
return 0
def bar(a, b):
"""The bar function"""
return 1
if random.choice([True, False]):
a = foo
else:
a = bar
a<CURSOR>
"#,
);
assert_snapshot!(test.hover(), @r"
(def foo(a, b) -> Unknown) | (def bar(a, b) -> Unknown)
---------------------------------------------
```python
(def foo(a, b) -> Unknown) | (def bar(a, b) -> Unknown)
```
---------------------------------------------
info[hover]: Hovered content is
--> main.py:16:1
|
14 | a = bar
15 |
16 | a
| ^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_string_annotation1() {
let test = cursor_test(
r#"
a: "MyCla<CURSOR>ss" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.hover(), @r#"
MyClass
---------------------------------------------
some docs
---------------------------------------------
```python
MyClass
```
---
some docs
---------------------------------------------
info[hover]: Hovered content is
--> main.py:2:5
|
2 | a: "MyClass" = 1
| ^^^^^-^
| | |
| | Cursor offset
| source
3 |
4 | class MyClass:
|
"#);
}
#[test]
fn hover_string_annotation2() {
let test = cursor_test(
r#"
a: "None | MyCl<CURSOR>ass" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.hover(), @r#"
some docs
---------------------------------------------
some docs
---------------------------------------------
info[hover]: Hovered content is
--> main.py:2:12
|
2 | a: "None | MyClass" = 1
| ^^^^-^^
| | |
| | Cursor offset
| source
3 |
4 | class MyClass:
|
"#);
}
#[test]
fn hover_string_annotation3() {
let test = cursor_test(
r#"
a: "None |<CURSOR> MyClass" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.hover(), @"Hover provided no content");
}
#[test]
fn hover_string_annotation4() {
let test = cursor_test(
r#"
a: "None | MyClass<CURSOR>" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.hover(), @r#"
some docs
---------------------------------------------
some docs
---------------------------------------------
info[hover]: Hovered content is
--> main.py:2:12
|
2 | a: "None | MyClass" = 1
| ^^^^^^^- Cursor offset
| |
| source
3 |
4 | class MyClass:
|
"#);
}
#[test]
fn hover_string_annotation5() {
let test = cursor_test(
r#"
a: "None | MyClass"<CURSOR> = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.hover(), @"Hover provided no content");
}
#[test]
fn hover_string_annotation_dangling1() {
let test = cursor_test(
r#"
a: "MyCl<CURSOR>ass |" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.hover(), @"Hover provided no content");
}
#[test]
fn hover_string_annotation_dangling2() {
let test = cursor_test(
r#"
a: "MyCl<CURSOR>ass | No" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.hover(), @r#"
some docs
---------------------------------------------
some docs
---------------------------------------------
info[hover]: Hovered content is
--> main.py:2:5
|
2 | a: "MyClass | No" = 1
| ^^^^-^^
| | |
| | Cursor offset
| source
3 |
4 | class MyClass:
|
"#);
}
#[test]
fn hover_string_annotation_dangling3() {
let test = cursor_test(
r#"
a: "MyClass | N<CURSOR>o" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.hover(), @"Hover provided no content");
}
#[test]
fn hover_string_annotation_recursive() {
let test = cursor_test(
r#"
ab: "a<CURSOR>b"
"#,
);
assert_snapshot!(test.hover(), @r#"
Unknown
---------------------------------------------
```python
Unknown
```
---------------------------------------------
info[hover]: Hovered content is
--> main.py:2:6
|
2 | ab: "ab"
| ^-
| ||
| |Cursor offset
| source
|
"#);
}
#[test]
fn hover_string_annotation_unknown() {
let test = cursor_test(
r#"
x: "foo<CURSOR>bar"
"#,
);
assert_snapshot!(test.hover(), @r#"
Unknown
---------------------------------------------
```python
Unknown
```
---------------------------------------------
info[hover]: Hovered content is
--> main.py:2:5
|
2 | x: "foobar"
| ^^^-^^
| | |
| | Cursor offset
| source
|
"#);
}
#[test]
fn hover_overload_type_disambiguated1() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import ab
a<CURSOR>b(1)
",
)
.source(
"mymodule.py",
r#"
def ab(a):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int):
"""the int overload"""
@overload
def ab(a: str): ...
"""the str overload"""
"#,
)
.build();
assert_snapshot!(test.hover(), @r"
def ab(a: int) -> Unknown
---------------------------------------------
the int overload
---------------------------------------------
```python
def ab(a: int) -> Unknown
```
---
the int overload
---------------------------------------------
info[hover]: Hovered content is
--> main.py:4:1
|
2 | from mymodule import ab
3 |
4 | ab(1)
| ^-
| ||
| |Cursor offset
| source
|
");
}
#[test]
fn hover_overload_type_disambiguated2() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
from mymodule import ab
a<CURSOR>b("hello")
"#,
)
.source(
"mymodule.py",
r#"
def ab(a):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int):
"""the int overload"""
@overload
def ab(a: str):
"""the str overload"""
"#,
)
.build();
assert_snapshot!(test.hover(), @r#"
def ab(a: str) -> Unknown
---------------------------------------------
the int overload
---------------------------------------------
```python
def ab(a: str) -> Unknown
```
---
the int overload
---------------------------------------------
info[hover]: Hovered content is
--> main.py:4:1
|
2 | from mymodule import ab
3 |
4 | ab("hello")
| ^-
| ||
| |Cursor offset
| source
|
"#);
}
#[test]
fn hover_overload_arity_disambiguated1() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import ab
a<CURSOR>b(1, 2)
",
)
.source(
"mymodule.py",
r#"
def ab(a, b = None):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int, b: int):
"""the two arg overload"""
@overload
def ab(a: int):
"""the one arg overload"""
"#,
)
.build();
assert_snapshot!(test.hover(), @r"
def ab(
a: int,
b: int
) -> Unknown
---------------------------------------------
the two arg overload
---------------------------------------------
```python
def ab(
a: int,
b: int
) -> Unknown
```
---
the two arg overload
---------------------------------------------
info[hover]: Hovered content is
--> main.py:4:1
|
2 | from mymodule import ab
3 |
4 | ab(1, 2)
| ^-
| ||
| |Cursor offset
| source
|
");
}
#[test]
fn hover_overload_arity_disambiguated2() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import ab
a<CURSOR>b(1)
",
)
.source(
"mymodule.py",
r#"
def ab(a, b = None):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/document_symbols.rs | crates/ty_ide/src/document_symbols.rs | use crate::symbols::{FlatSymbols, symbols_for_file};
use ruff_db::files::File;
use ty_project::Db;
/// Returns the flat list of document symbols indexed for `file`.
///
/// Thin wrapper around `symbols_for_file`; callers that need a tree
/// structure can convert the result with `FlatSymbols::to_hierarchical`.
pub fn document_symbols(db: &dyn Db, file: File) -> &FlatSymbols {
    symbols_for_file(db, file)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::symbols::{HierarchicalSymbols, SymbolId, SymbolInfo};
use crate::tests::{CursorTest, IntoDiagnostic, cursor_test};
use insta::assert_snapshot;
use ruff_db::diagnostic::{
Annotation, Diagnostic, DiagnosticId, LintName, Severity, Span, SubDiagnostic,
SubDiagnosticSeverity,
};
// Functions, classes, and methods are all reported as document symbols
// with the expected kinds and name ranges.
#[test]
fn test_document_symbols_simple() {
let test = cursor_test(
"
def hello():
pass
class World:
def method(self):
pass
<CURSOR>",
);
assert_snapshot!(test.document_symbols(), @r"
info[document-symbols]: SymbolInfo
--> main.py:2:5
|
2 | def hello():
| ^^^^^
3 | pass
|
info: Function hello
info[document-symbols]: SymbolInfo
--> main.py:5:7
|
3 | pass
4 |
5 | class World:
| ^^^^^
6 | def method(self):
7 | pass
|
info: Class World
info[document-symbols]: SymbolInfo
--> main.py:6:9
|
5 | class World:
6 | def method(self):
| ^^^^^^
7 | pass
|
info: Method method
");
}
// Exercises the full range of symbol kinds: constants, variables (typed
// and annotated-only), classes, fields, constructor, methods, functions.
#[test]
fn test_document_symbols_complex() {
let test = cursor_test(
"
import os
from typing import List
CONSTANT = 42
variable = 'hello'
typed_global: str = 'typed'
annotated_only: int
class MyClass:
class_var = 100
typed_class_var: str = 'class_typed'
annotated_class_var: float
def __init__(self):
self.instance_var = 0
def public_method(self):
return self.instance_var
def _private_method(self):
pass
def standalone_function():
local_var = 10
return local_var
<CURSOR>",
);
assert_snapshot!(test.document_symbols(), @r"
info[document-symbols]: SymbolInfo
--> main.py:5:1
|
3 | from typing import List
4 |
5 | CONSTANT = 42
| ^^^^^^^^
6 | variable = 'hello'
7 | typed_global: str = 'typed'
|
info: Constant CONSTANT
info[document-symbols]: SymbolInfo
--> main.py:6:1
|
5 | CONSTANT = 42
6 | variable = 'hello'
| ^^^^^^^^
7 | typed_global: str = 'typed'
8 | annotated_only: int
|
info: Variable variable
info[document-symbols]: SymbolInfo
--> main.py:7:1
|
5 | CONSTANT = 42
6 | variable = 'hello'
7 | typed_global: str = 'typed'
| ^^^^^^^^^^^^
8 | annotated_only: int
|
info: Variable typed_global
info[document-symbols]: SymbolInfo
--> main.py:8:1
|
6 | variable = 'hello'
7 | typed_global: str = 'typed'
8 | annotated_only: int
| ^^^^^^^^^^^^^^
9 |
10 | class MyClass:
|
info: Variable annotated_only
info[document-symbols]: SymbolInfo
--> main.py:10:7
|
8 | annotated_only: int
9 |
10 | class MyClass:
| ^^^^^^^
11 | class_var = 100
12 | typed_class_var: str = 'class_typed'
|
info: Class MyClass
info[document-symbols]: SymbolInfo
--> main.py:11:5
|
10 | class MyClass:
11 | class_var = 100
| ^^^^^^^^^
12 | typed_class_var: str = 'class_typed'
13 | annotated_class_var: float
|
info: Field class_var
info[document-symbols]: SymbolInfo
--> main.py:12:5
|
10 | class MyClass:
11 | class_var = 100
12 | typed_class_var: str = 'class_typed'
| ^^^^^^^^^^^^^^^
13 | annotated_class_var: float
|
info: Field typed_class_var
info[document-symbols]: SymbolInfo
--> main.py:13:5
|
11 | class_var = 100
12 | typed_class_var: str = 'class_typed'
13 | annotated_class_var: float
| ^^^^^^^^^^^^^^^^^^^
14 |
15 | def __init__(self):
|
info: Field annotated_class_var
info[document-symbols]: SymbolInfo
--> main.py:15:9
|
13 | annotated_class_var: float
14 |
15 | def __init__(self):
| ^^^^^^^^
16 | self.instance_var = 0
|
info: Constructor __init__
info[document-symbols]: SymbolInfo
--> main.py:18:9
|
16 | self.instance_var = 0
17 |
18 | def public_method(self):
| ^^^^^^^^^^^^^
19 | return self.instance_var
|
info: Method public_method
info[document-symbols]: SymbolInfo
--> main.py:21:9
|
19 | return self.instance_var
20 |
21 | def _private_method(self):
| ^^^^^^^^^^^^^^^
22 | pass
|
info: Method _private_method
info[document-symbols]: SymbolInfo
--> main.py:24:5
|
22 | pass
23 |
24 | def standalone_function():
| ^^^^^^^^^^^^^^^^^^^
25 | local_var = 10
26 | return local_var
|
info: Function standalone_function
");
}
// Symbols defined inside a nested class are still reported, with name
// ranges pointing at each definition site.
#[test]
fn test_document_symbols_nested() {
let test = cursor_test(
"
class OuterClass:
OUTER_CONSTANT = 100
def outer_method(self):
return self.OUTER_CONSTANT
class InnerClass:
def inner_method(self):
pass
<CURSOR>",
);
assert_snapshot!(test.document_symbols(), @r"
info[document-symbols]: SymbolInfo
--> main.py:2:7
|
2 | class OuterClass:
| ^^^^^^^^^^
3 | OUTER_CONSTANT = 100
|
info: Class OuterClass
info[document-symbols]: SymbolInfo
--> main.py:3:5
|
2 | class OuterClass:
3 | OUTER_CONSTANT = 100
| ^^^^^^^^^^^^^^
4 |
5 | def outer_method(self):
|
info: Constant OUTER_CONSTANT
info[document-symbols]: SymbolInfo
--> main.py:5:9
|
3 | OUTER_CONSTANT = 100
4 |
5 | def outer_method(self):
| ^^^^^^^^^^^^
6 | return self.OUTER_CONSTANT
|
info: Method outer_method
info[document-symbols]: SymbolInfo
--> main.py:8:11
|
6 | return self.OUTER_CONSTANT
7 |
8 | class InnerClass:
| ^^^^^^^^^^
9 | def inner_method(self):
10 | pass
|
info: Class InnerClass
info[document-symbols]: SymbolInfo
--> main.py:9:13
|
8 | class InnerClass:
9 | def inner_method(self):
| ^^^^^^^^^^^^
10 | pass
|
info: Method inner_method
");
}
impl CursorTest {
    /// Renders every document symbol of the cursor's file as a diagnostic
    /// string for snapshot comparison.
    fn document_symbols(&self) -> String {
        let symbols = document_symbols(&self.db, self.cursor.file).to_hierarchical();
        if symbols.is_empty() {
            return "No symbols found".to_string();
        }
        // NOTE(review): assumes `HierarchicalSymbols::iter` yields root
        // symbols only — otherwise the recursion in `symbol_to_diagnostics`
        // would emit nested symbols twice. The snapshots show each symbol
        // exactly once, which supports that assumption; confirm in symbols.rs.
        self.render_diagnostics(symbols.iter().flat_map(|(id, symbol)| {
            symbol_to_diagnostics(&symbols, id, symbol, self.cursor.file)
        }))
    }
}
/// Emits the diagnostic for `symbol` followed by those of all its
/// descendants, in pre-order.
fn symbol_to_diagnostics<'db>(
    symbols: &'db HierarchicalSymbols,
    id: SymbolId,
    symbol: SymbolInfo<'db>,
    file: File,
) -> Vec<DocumentSymbolDiagnostic<'db>> {
    let head = std::iter::once(DocumentSymbolDiagnostic::new(symbol, file));
    let descendants = symbols
        .children(id)
        .into_iter()
        .flat_map(|(child_id, child)| symbol_to_diagnostics(symbols, child_id, child, file));
    head.chain(descendants).collect()
}
// Test-only wrapper pairing a symbol with its file so it can be rendered
// through the shared diagnostic rendering infrastructure.
struct DocumentSymbolDiagnostic<'db> {
    symbol: SymbolInfo<'db>,
    file: File,
}

impl<'db> DocumentSymbolDiagnostic<'db> {
    fn new(symbol: SymbolInfo<'db>, file: File) -> Self {
        Self { symbol, file }
    }
}

impl IntoDiagnostic for DocumentSymbolDiagnostic<'_> {
    fn into_diagnostic(self) -> Diagnostic {
        // Sub-diagnostic text is `<kind> <name>`, e.g. `Function hello`.
        let symbol_kind_str = self.symbol.kind.to_string();
        let info_text = format!("{} {}", symbol_kind_str, self.symbol.name);
        let sub = SubDiagnostic::new(SubDiagnosticSeverity::Info, info_text);
        let mut main = Diagnostic::new(
            DiagnosticId::Lint(LintName::of("document-symbols")),
            Severity::Info,
            "SymbolInfo".to_string(),
        );
        // The primary annotation underlines the symbol's name range.
        main.annotate(Annotation::primary(
            Span::from(self.file).with_range(self.symbol.name_range),
        ));
        main.sub(sub);
        main
    }
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/symbols.rs | crates/ty_ide/src/symbols.rs | //! Implements logic used by the document symbol provider, workspace symbol
//! provider, and auto-import feature of the completion provider.
use std::borrow::Cow;
use std::ops::Range;
use regex::Regex;
use ruff_db::files::File;
use ruff_db::parsed::parsed_module;
use ruff_index::{IndexVec, newtype_index};
use ruff_python_ast as ast;
use ruff_python_ast::name::{Name, UnqualifiedName};
use ruff_python_ast::visitor::source_order::{self, SourceOrderVisitor};
use ruff_text_size::{Ranged, TextRange};
use rustc_hash::{FxHashMap, FxHashSet};
use ty_module_resolver::{ModuleName, resolve_module};
use ty_project::Db;
use crate::completion::CompletionKind;
/// A compiled query pattern used for searching symbols.
///
/// This can be used with the `FlatSymbols::search` API.
#[derive(Clone, Debug)]
pub struct QueryPattern {
    // Compiled case-insensitive fuzzy regex derived from `original`.
    // `None` for exact and matches-all patterns, or if compilation failed.
    re: Option<Regex>,
    // The literal query string this pattern was built from.
    original: String,
    // When true, only names exactly equal to `original` match.
    original_is_exact: bool,
}
impl QueryPattern {
/// Create a new query pattern from a literal search string given.
pub fn fuzzy(literal_query_string: &str) -> QueryPattern {
let mut pattern = "(?i)".to_string();
for ch in literal_query_string.chars() {
pattern.push_str(®ex::escape(ch.encode_utf8(&mut [0; 4])));
pattern.push_str(".*");
}
// In theory regex compilation could fail if the pattern string
// was long enough to exceed the default regex compilation size
// limit. But this length would be approaching ~10MB or so. If
// is does somehow fail, we'll just fall back to simple substring
// search using `original`.
QueryPattern {
re: Regex::new(&pattern).ok(),
original: literal_query_string.to_string(),
original_is_exact: false,
}
}
/// Create a new query
pub fn exactly(symbol: &str) -> QueryPattern {
QueryPattern {
re: None,
original: symbol.to_string(),
original_is_exact: true,
}
}
/// Create a new query pattern that matches all symbols.
pub fn matches_all_symbols() -> QueryPattern {
QueryPattern {
re: None,
original: String::new(),
original_is_exact: false,
}
}
fn is_match_symbol(&self, symbol: &SymbolInfo<'_>) -> bool {
self.is_match_symbol_name(&symbol.name)
}
pub fn is_match_symbol_name(&self, symbol_name: &str) -> bool {
if let Some(ref re) = self.re {
re.is_match(symbol_name)
} else if self.original_is_exact {
symbol_name == self.original
} else {
// This is a degenerate case. The only way
// we should get here is if the query string
// was thousands (or more) characters long.
// ... or, if "typed" text could not be found.
symbol_name.contains(&self.original)
}
}
/// Returns true when it is known that this pattern will return `true` for
/// all inputs given to `QueryPattern::is_match_symbol_name`.
///
/// This will never return `true` incorrectly, but it may return `false`
/// incorrectly. That is, it's possible that this query will match all
/// inputs but this still returns `false`.
pub fn will_match_everything(&self) -> bool {
self.re.is_none() && self.original.is_empty()
}
}
// A plain string converts to a fuzzy (subsequence) pattern by default.
impl From<&str> for QueryPattern {
    fn from(literal_query_string: &str) -> QueryPattern {
        QueryPattern::fuzzy(literal_query_string)
    }
}
// `Regex` implements neither `PartialEq` nor `Eq`, so equality is defined
// manually over the fields that determine matching behavior.
impl Eq for QueryPattern {}

impl PartialEq for QueryPattern {
    fn eq(&self, rhs: &QueryPattern) -> bool {
        // `re` is derived from `original` (for fuzzy patterns), so comparing
        // it would be redundant. `original_is_exact` must participate,
        // however: `QueryPattern::exactly("x")` and `QueryPattern::fuzzy("x")`
        // share the same `original` but match different sets of symbol
        // names, so they must not compare equal. (Previously only `original`
        // was compared, conflating exact and fuzzy patterns.)
        self.original == rhs.original && self.original_is_exact == rhs.original_is_exact
    }
}
/// A flat list of indexed symbols for a single file.
#[derive(Clone, Debug, Default, PartialEq, Eq, get_size2::GetSize)]
pub struct FlatSymbols {
    /// The symbols exported by a module.
    ///
    /// Invariant: a symbol's parent (if any) appears before the symbol
    /// itself, so parent IDs are strictly less than child IDs.
    symbols: IndexVec<SymbolId, SymbolTree>,
    /// The names found in an `__all__` for a module.
    ///
    /// This is `None` if the module has no `__all__` at module
    /// scope.
    all_names: Option<FxHashSet<Name>>,
}
impl FlatSymbols {
    /// Get the symbol info for the symbol identified by the given ID.
    ///
    /// Returns `None` when the given ID does not reference a symbol in this
    /// collection.
    pub fn get(&self, id: SymbolId) -> Option<SymbolInfo<'_>> {
        self.symbols.get(id).map(Into::into)
    }
    /// Returns true if and only if this collection is empty.
    pub fn is_empty(&self) -> bool {
        self.symbols.is_empty()
    }
    /// Returns the total number of symbols in this collection.
    pub fn len(&self) -> usize {
        self.symbols.len()
    }
    /// Returns an iterator over every symbol along with its ID.
    pub fn iter(&self) -> impl Iterator<Item = (SymbolId, SymbolInfo<'_>)> {
        self.symbols
            .iter_enumerated()
            .map(|(id, symbol)| (id, symbol.into()))
    }
    /// Returns a sequence of symbols that matches the given query.
    pub fn search(&self, query: &QueryPattern) -> impl Iterator<Item = (SymbolId, SymbolInfo<'_>)> {
        self.iter()
            .filter(|(_, symbol)| query.is_match_symbol(symbol))
    }
    /// Turns this flat sequence of symbols into a hierarchy of symbols.
    pub fn to_hierarchical(&self) -> HierarchicalSymbols {
        // First pass: for every symbol, collect the IDs of its direct
        // children (one `Vec` per symbol, indexed by `SymbolId`).
        let mut children_ids: IndexVec<SymbolId, Vec<SymbolId>> = IndexVec::new();
        for (id, symbol) in self.symbols.iter_enumerated() {
            children_ids.push(vec![]);
            let Some(parent_id) = symbol.parent else {
                continue;
            };
            // OK because the symbol visitor guarantees that
            // all parents are ordered before their children.
            assert!(parent_id.index() < id.index());
            children_ids[parent_id].push(id);
        }
        // Now flatten our map of symbol ID to its children
        // IDs into a single vec that doesn't nest allocations.
        // Each symbol records a `start..end` range into the shared
        // `children` vec instead of owning its own allocation.
        let mut symbols = IndexVec::new();
        let mut children: Vec<SymbolId> = vec![];
        let mut last_end: usize = 0;
        for (tree, child_symbol_ids) in self.symbols.iter().zip(children_ids) {
            let start = last_end;
            let end = start + child_symbol_ids.len();
            symbols.push(SymbolTreeWithChildren {
                tree: tree.clone(),
                children: start..end,
            });
            children.extend_from_slice(&child_symbol_ids);
            last_end = end;
        }
        HierarchicalSymbols { symbols, children }
    }
}
/// A collection of hierarchical indexed symbols for a single file.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct HierarchicalSymbols {
    /// All symbols; each carries a range into `children` below locating
    /// its direct children.
    symbols: IndexVec<SymbolId, SymbolTreeWithChildren>,
    /// The flattened child-ID lists for every symbol, indexed by the
    /// `children` range stored on each `SymbolTreeWithChildren`.
    children: Vec<SymbolId>,
}
impl HierarchicalSymbols {
    /// Get the symbol info for the symbol identified by the given ID.
    ///
    /// Returns `None` when the given ID does not reference a symbol in this
    /// collection.
    pub fn get(&self, id: SymbolId) -> Option<SymbolInfo<'_>> {
        Some(SymbolInfo::from(self.symbols.get(id)?))
    }
    /// Returns true if and only if this collection is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Returns the total number of symbols in this collection.
    pub fn len(&self) -> usize {
        self.symbols.len()
    }
    /// Returns an iterator over every top-level symbol along with its ID.
    pub fn iter(&self) -> impl Iterator<Item = (SymbolId, SymbolInfo<'_>)> {
        self.symbols.iter_enumerated().filter_map(|(id, symbol)| {
            symbol
                .tree
                .parent
                .is_none()
                .then(|| (id, SymbolInfo::from(symbol)))
        })
    }
    /// Returns an iterator over the child symbols for the symbol
    /// identified by the given ID.
    ///
    /// The iterator is empty when there aren't any children or when the
    /// given ID does not reference a symbol in this collection.
    pub fn children(&self, id: SymbolId) -> impl Iterator<Item = (SymbolId, SymbolInfo<'_>)> {
        self.symbols
            .get(id)
            .map(|symbol| symbol.children.clone())
            .into_iter()
            .flat_map(move |range| self.children[range].iter().copied())
            .map(move |child_id| (child_id, SymbolInfo::from(&self.symbols[child_id])))
    }
}
/// A symbol plus the location of its direct children within a
/// `HierarchicalSymbols` collection.
#[derive(Clone, Debug, PartialEq, Eq)]
struct SymbolTreeWithChildren {
    tree: SymbolTree,
    /// The index range into `HierarchicalSymbols::children`
    /// corresponding to the children symbol IDs for this
    /// symbol.
    children: Range<usize>,
}
/// Uniquely identifies a symbol.
///
/// IDs are positions into the backing `IndexVec`; a parent's ID is
/// always smaller than the IDs of its children.
#[newtype_index]
#[derive(get_size2::GetSize)]
pub struct SymbolId;
/// Symbol information for IDE features like document outline.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct SymbolInfo<'a> {
    /// The name of the symbol
    ///
    /// Borrowed from the underlying symbol index when possible; see
    /// `SymbolInfo::to_owned` for a detached `'static` copy.
    pub name: Cow<'a, str>,
    /// The kind of symbol (function, class, variable, etc.)
    pub kind: SymbolKind,
    /// The range of the symbol name
    pub name_range: TextRange,
    /// The full range of the symbol (including body)
    pub full_range: TextRange,
}
impl SymbolInfo<'_> {
    /// Returns a version of this symbol info that owns its name and is
    /// therefore independent of the underlying symbol index.
    pub fn to_owned(&self) -> SymbolInfo<'static> {
        let SymbolInfo {
            ref name,
            kind,
            name_range,
            full_range,
        } = *self;
        SymbolInfo {
            name: Cow::Owned(name.as_ref().to_owned()),
            kind,
            name_range,
            full_range,
        }
    }
}
impl<'a> From<&'a SymbolTree> for SymbolInfo<'a> {
fn from(symbol: &'a SymbolTree) -> SymbolInfo<'a> {
SymbolInfo {
name: Cow::Borrowed(&symbol.name),
kind: symbol.kind,
name_range: symbol.name_range,
full_range: symbol.full_range,
}
}
}
impl<'a> From<&'a SymbolTreeWithChildren> for SymbolInfo<'a> {
fn from(symbol: &'a SymbolTreeWithChildren) -> SymbolInfo<'a> {
SymbolInfo::from(&symbol.tree)
}
}
/// The kind of symbol.
///
/// Note that this is computed on a best effort basis. The nature of
/// auto-import is that it tries to do a very low effort scan of a lot of code
/// very quickly. This means that it doesn't use things like type information
/// or completely resolve the definition of every symbol. So for example, we
/// might label a module as a variable, depending on how it was introduced.
///
/// The human-readable form of each variant is given by
/// `SymbolKind::to_string`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, get_size2::GetSize)]
pub enum SymbolKind {
    Module,
    Class,
    Method,
    Function,
    Variable,
    Constant,
    Property,
    Field,
    Constructor,
    Parameter,
    TypeParameter,
    Import,
}
impl SymbolKind {
    /// Returns the string representation of the symbol kind.
    ///
    /// NOTE(review): an inherent method named `to_string` shadows
    /// `ToString::to_string` at call sites (clippy's `inherent_to_string`
    /// lint). Renaming it (e.g. to `as_str`) would be cleaner but would
    /// change the public API, so it is deliberately left as-is here.
    pub fn to_string(self) -> &'static str {
        match self {
            SymbolKind::Module => "Module",
            SymbolKind::Class => "Class",
            SymbolKind::Method => "Method",
            SymbolKind::Function => "Function",
            SymbolKind::Variable => "Variable",
            SymbolKind::Constant => "Constant",
            SymbolKind::Property => "Property",
            SymbolKind::Field => "Field",
            SymbolKind::Constructor => "Constructor",
            SymbolKind::Parameter => "Parameter",
            SymbolKind::TypeParameter => "TypeParameter",
            SymbolKind::Import => "Import",
        }
    }
    /// Maps this to a "completion" kind if a sensible mapping exists.
    ///
    /// Returns `None` only for `SymbolKind::Import`.
    pub fn to_completion_kind(self) -> Option<CompletionKind> {
        Some(match self {
            SymbolKind::Module => CompletionKind::Module,
            SymbolKind::Class => CompletionKind::Class,
            SymbolKind::Method => CompletionKind::Method,
            SymbolKind::Function => CompletionKind::Function,
            SymbolKind::Variable => CompletionKind::Variable,
            SymbolKind::Constant => CompletionKind::Constant,
            SymbolKind::Property => CompletionKind::Property,
            SymbolKind::Field => CompletionKind::Field,
            SymbolKind::Constructor => CompletionKind::Constructor,
            // There is no dedicated completion kind for parameters.
            SymbolKind::Parameter => CompletionKind::Variable,
            SymbolKind::TypeParameter => CompletionKind::TypeParameter,
            // Not quite sure what to do with this one. I guess
            // in theory the import should be "resolved" to its
            // underlying kind, but that seems expensive.
            SymbolKind::Import => return None,
        })
    }
}
/// Returns a flat list of symbols in the file given.
///
/// The flattened list includes parent/child information and can be
/// converted into a hierarchical collection of symbols.
#[salsa::tracked(returns(ref), heap_size=ruff_memory_usage::heap_size)]
pub(crate) fn symbols_for_file(db: &dyn Db, file: File) -> FlatSymbols {
    let parsed = parsed_module(db, file);
    let module = parsed.load(db);
    // Walk the whole module body; the visitor records every symbol it
    // encounters, including nested ones.
    let mut visitor = SymbolVisitor::tree(db, file);
    visitor.visit_body(&module.syntax().body);
    visitor.into_flat_symbols()
}
/// Returns a flat list of *only global* symbols in the file given.
///
/// While callers can convert this into a hierarchical collection of
/// symbols, it won't result in anything meaningful since the flat list
/// returned doesn't include children.
#[salsa::tracked(
    returns(ref),
    cycle_initial=symbols_for_file_global_only_cycle_initial,
    heap_size=ruff_memory_usage::heap_size,
)]
pub(crate) fn symbols_for_file_global_only(db: &dyn Db, file: File) -> FlatSymbols {
    let parsed = parsed_module(db, file);
    let module = parsed.load(db);
    let mut visitor = SymbolVisitor::globals(db, file);
    visitor.visit_body(&module.syntax().body);
    if file
        .path(db)
        .as_system_path()
        .is_none_or(|path| !db.project().is_file_included(db, path))
    {
        // Eagerly clear ASTs of third party files.
        // (Presumably to bound memory use, since files outside the
        // project won't be re-visited often — TODO confirm.)
        parsed.clear();
    }
    visitor.into_flat_symbols()
}
/// Initial value used by Salsa when `symbols_for_file_global_only`
/// participates in a query cycle (e.g., modules whose `__all__`
/// definitions reference each other): start from an empty symbol list.
fn symbols_for_file_global_only_cycle_initial(
    _db: &dyn Db,
    _id: salsa::Id,
    _file: File,
) -> FlatSymbols {
    FlatSymbols::default()
}
/// A single indexed symbol, as recorded by the `SymbolVisitor`.
#[derive(Debug, Clone, PartialEq, Eq, get_size2::GetSize)]
struct SymbolTree {
    /// The innermost enclosing symbol, if any.
    parent: Option<SymbolId>,
    /// The name the symbol is bound to.
    name: String,
    kind: SymbolKind,
    /// The range of just the symbol's name.
    name_range: TextRange,
    /// The range of the whole defining statement.
    full_range: TextRange,
    /// `Some` when this symbol was introduced by an import statement,
    /// `None` otherwise (e.g. assignments).
    import_kind: Option<ImportKind>,
}
/// How an imported symbol was introduced.
#[derive(Debug, Clone, Copy, PartialEq, Eq, get_size2::GetSize)]
enum ImportKind {
    /// A plain import (no alias, or an alias distinct from the name).
    Normal,
    /// An `import x as x` style alias — conventionally a re-export.
    RedundantAlias,
    /// Presumably a name introduced via `from module import *` —
    /// TODO confirm (the variant is not constructed in this part of
    /// the file).
    Wildcard,
}
/// An abstraction for managing module scope imports.
///
/// This is meant to recognize the following idioms for updating
/// `__all__` in module scope:
///
/// ```ignore
/// __all__ += submodule.__all__
/// __all__.extend(submodule.__all__)
/// ```
///
/// # Correctness
///
/// The approach used here is not correct 100% of the time.
/// For example, it is somewhat easy to defeat it:
///
/// ```ignore
/// from numpy import *
/// from importlib import resources
/// import numpy as np
/// np = resources
/// __all__ = []
/// __all__ += np.__all__
/// ```
///
/// In this example, `np` will still be resolved to the `numpy`
/// module instead of the `importlib.resources` module. Namely, this
/// abstraction doesn't track all definitions. This would result in a
/// silently incorrect `__all__`.
///
/// This abstraction does handle the case when submodules are imported.
/// Namely, we do get this case correct:
///
/// ```ignore
/// from importlib.resources import *
/// from importlib import resources
/// __all__ = []
/// __all__ += resources.__all__
/// ```
///
/// We do this by treating all imports in a `from ... import ...`
/// statement as *possible* modules. Then when we lookup `resources`,
/// we attempt to resolve it to an actual module. If that fails, then
/// we consider `__all__` invalid.
///
/// There are likely many other cases that we don't handle as
/// well, which ty does (it has its own `__all__` parsing using types
/// to deal with this case). We can add handling for those as they
/// come up in real world examples.
///
/// # Performance
///
/// This abstraction recognizes that, compared to all possible imports,
/// it is very rare to use one of them to update `__all__`. Therefore,
/// we are careful not to do too much work up-front (like eagerly
/// manifesting `ModuleName` values).
#[derive(Clone, Debug, Default, get_size2::GetSize)]
struct Imports<'db> {
    /// A map from the name that a module is available
    /// under to its actual module name (and our level
    /// of certainty that it ought to be treated as a module).
    module_names: FxHashMap<&'db str, ImportModuleKind<'db>>,
}
impl<'db> Imports<'db> {
    /// Track the imports from the given `import ...` statement.
    fn add_import(&mut self, import: &'db ast::StmtImport) {
        for alias in &import.names {
            // The key is the name the module is reachable under: the
            // alias when present, the (possibly dotted) module name
            // otherwise.
            //
            // NOTE(review): for `import a.b.c` (no alias) the key is the
            // dotted "a.b.c", which matches later `a.b.c.__all__`
            // lookups even though Python binds only `a` — confirm this
            // is intended.
            let asname = alias
                .asname
                .as_ref()
                .map(|ident| &ident.id)
                .unwrap_or(&alias.name.id);
            let module_name = ImportModuleName::Import(&alias.name.id);
            self.module_names
                .insert(asname, ImportModuleKind::Definitive(module_name));
        }
    }
    /// Track the imports from the given `from ... import ...` statement.
    fn add_import_from(&mut self, import_from: &'db ast::StmtImportFrom) {
        for alias in &import_from.names {
            if &alias.name == "*" {
                // FIXME: We'd ideally include the names
                // imported from the module, but we don't
                // want to do this eagerly. So supporting
                // this requires more infrastructure in
                // `Imports`.
                continue;
            }
            let asname = alias
                .asname
                .as_ref()
                .map(|ident| &ident.id)
                .unwrap_or(&alias.name.id);
            // Names from `from ... import ...` are only *possibly*
            // submodules; lookup verifies them via module resolution.
            let module_name = ImportModuleName::ImportFrom {
                parent: import_from,
                child: &alias.name.id,
            };
            self.module_names
                .insert(asname, ImportModuleKind::Possible(module_name));
        }
    }
    /// Return the symbols exported by the module referred to by `name`.
    ///
    /// e.g., This can be used to resolve `__all__ += submodule.__all__`,
    /// where `name` is `submodule`.
    fn get_module_symbols(
        &self,
        db: &'db dyn Db,
        importing_file: File,
        name: &Name,
    ) -> Option<&'db FlatSymbols> {
        let module_name = match self.module_names.get(name.as_str())? {
            ImportModuleKind::Definitive(name) | ImportModuleKind::Possible(name) => {
                name.to_module_name(db, importing_file)?
            }
        };
        // If the name doesn't actually resolve to a module, there are no
        // symbols to return; this is how "possible" modules get rejected.
        let module = resolve_module(db, importing_file, &module_name)?;
        Some(symbols_for_file_global_only(db, module.file(db)?))
    }
}
/// Describes the level of certainty that an import is a module.
///
/// For example, `import foo`, then `foo` is definitively a module.
/// But `from quux import foo`, then `quux.foo` is possibly a module.
#[derive(Debug, Clone, Copy, get_size2::GetSize)]
enum ImportModuleKind<'db> {
    /// Certainly a module (`import ...` can only name modules).
    Definitive(ImportModuleName<'db>),
    /// Might be a module; confirmed only by actually resolving it.
    Possible(ImportModuleName<'db>),
}
/// A representation of something that can be turned into a
/// `ModuleName`.
///
/// We don't do this eagerly, and instead represent the constituent
/// pieces, in order to avoid the work needed to build a `ModuleName`.
/// In particular, it is somewhat rare for the visitor to need
/// to access the imports found in a module. At time of writing
/// (2025-12-10), this only happens when referencing a submodule
/// to augment an `__all__` definition. For example, as found in
/// `matplotlib`:
///
/// ```ignore
/// import numpy as np
/// __all__ = ['rand', 'randn', 'repmat']
/// __all__ += np.__all__
/// ```
///
/// This construct is somewhat rare and it would be sad to allocate a
/// `ModuleName` for every imported item unnecessarily.
#[derive(Debug, Clone, Copy, get_size2::GetSize)]
enum ImportModuleName<'db> {
    /// The `foo` in `import quux, foo as blah, baz`.
    Import(&'db Name),
    /// A possible module in a `from ... import ...` statement.
    ImportFrom {
        /// The `..foo` in `from ..foo import quux`.
        parent: &'db ast::StmtImportFrom,
        /// The `foo` in `from quux import foo`.
        child: &'db Name,
    },
}
impl<'db> ImportModuleName<'db> {
    /// Converts the lazy representation of a module name into an
    /// actual `ModuleName` that can be used for module resolution.
    ///
    /// Returns `None` when the pieces don't form a valid module name.
    fn to_module_name(self, db: &'db dyn Db, importing_file: File) -> Option<ModuleName> {
        match self {
            ImportModuleName::Import(name) => ModuleName::new(name),
            ImportModuleName::ImportFrom { parent, child } => {
                // Resolve the `from ...` part first, then append the
                // imported name as a submodule of it.
                let mut base = ModuleName::from_import_statement(db, importing_file, parent).ok()?;
                base.extend(&ModuleName::new(child)?);
                Some(base)
            }
        }
    }
}
/// A visitor over all symbols in a single file.
///
/// This guarantees that child symbols have a symbol ID greater
/// than all of its parents.
#[allow(clippy::struct_excessive_bools)]
struct SymbolVisitor<'db> {
    db: &'db dyn Db,
    /// The file being visited (used for diagnostics and module lookups).
    file: File,
    /// All symbols collected so far, in visitation (parent-first) order.
    symbols: IndexVec<SymbolId, SymbolTree>,
    /// The chain of enclosing symbols at the current point of the walk.
    symbol_stack: Vec<SymbolId>,
    /// Track if we're currently inside a function at any point.
    ///
    /// This is true even when we're inside a class definition
    /// that is inside a function.
    in_function: bool,
    /// Track if we're currently inside a class at any point.
    ///
    /// This is true even when we're inside a function definition
    /// that is inside a class.
    in_class: bool,
    /// When enabled, the visitor should only try to extract
    /// symbols from a module that we believed form the "exported"
    /// interface for that module. i.e., `__all__` is only respected
    /// when this is enabled. It's otherwise ignored.
    exports_only: bool,
    /// The origin of an `__all__` variable, if found.
    all_origin: Option<DunderAllOrigin>,
    /// A set of names extracted from `__all__`.
    all_names: FxHashSet<Name>,
    /// A flag indicating whether the module uses unrecognized
    /// `__all__` idioms or there are any invalid elements in
    /// `__all__`.
    all_invalid: bool,
    /// A collection of imports found while visiting the AST.
    ///
    /// These are used to help resolve references to modules
    /// in some limited cases.
    imports: Imports<'db>,
}
impl<'db> SymbolVisitor<'db> {
/// Creates a visitor that collects the full symbol tree (including
/// nested symbols) for `file`.
fn tree(db: &'db dyn Db, file: File) -> Self {
    Self {
        db,
        file,
        symbols: IndexVec::new(),
        symbol_stack: Vec::new(),
        in_function: false,
        in_class: false,
        exports_only: false,
        all_origin: None,
        all_names: FxHashSet::default(),
        all_invalid: false,
        imports: Imports::default(),
    }
}
/// Creates a visitor that only collects a module's exported interface
/// (i.e., `exports_only` is enabled so `__all__` is respected).
fn globals(db: &'db dyn Db, file: File) -> Self {
    let mut visitor = Self::tree(db, file);
    visitor.exports_only = true;
    visitor
}
/// Consumes the visitor and produces the final `FlatSymbols`,
/// filtering out symbols that aren't part of the library interface
/// and remapping symbol IDs accordingly.
fn into_flat_symbols(mut self) -> FlatSymbols {
    // If `__all__` was found but wasn't recognized,
    // then we emit a diagnostic message indicating as such.
    if self.all_invalid {
        tracing::debug!("Invalid `__all__` in `{}`", self.file.path(self.db));
    }
    // We want to filter out some of the symbols we collected.
    // Specifically, to respect conventions around library
    // interface.
    //
    // But, we always assigned IDs to each symbol based on
    // their position in a sequence. So when we filter some
    // out, we need to remap the identifiers.
    //
    // We also want to deduplicate when `exports_only` is
    // `true`. In particular, dealing with `__all__` can
    // result in cycles, and we need to make sure our output
    // is stable for that reason.
    //
    // N.B. The remapping could be skipped when `exports_only` is
    // true, since in that case, none of the symbols have a parent
    // ID by construction.
    let mut remap = IndexVec::with_capacity(self.symbols.len());
    let mut seen = self.exports_only.then(FxHashSet::default);
    let mut new = IndexVec::with_capacity(self.symbols.len());
    for mut symbol in std::mem::take(&mut self.symbols) {
        // If we're deduplicating and we've already seen
        // this symbol, then skip it.
        //
        // FIXME: We should do this without copying every
        // symbol name. ---AG
        if let Some(ref mut seen) = seen {
            if !seen.insert(symbol.name.clone()) {
                continue;
            }
        }
        if !self.is_part_of_library_interface(&symbol) {
            // `remap` records `None` for dropped symbols so children
            // can detect a dropped parent below.
            remap.push(None);
            continue;
        }
        if let Some(ref mut parent) = symbol.parent {
            // OK because the visitor guarantees that
            // all parents have IDs less than their
            // children. So its ID has already been
            // remapped.
            if let Some(new_parent) = remap[*parent] {
                *parent = new_parent;
            } else {
                // The parent symbol was dropped, so
                // all of its children should be as
                // well.
                remap.push(None);
                continue;
            }
        }
        let new_id = new.next_index();
        remap.push(Some(new_id));
        new.push(symbol);
    }
    FlatSymbols {
        symbols: new,
        // `all_names` is only meaningful when an `__all__` was found.
        all_names: self.all_origin.map(|_| self.all_names),
    }
}
/// Visits every statement in the given suite, in order.
fn visit_body(&mut self, body: &'db [ast::Stmt]) {
    for stmt in body {
        self.visit_stmt(stmt);
    }
}
/// Add a new symbol and return its ID.
///
/// When the visitor is currently nested inside another symbol, the new
/// symbol's parent is set to the innermost enclosing one.
fn add_symbol(&mut self, mut symbol: SymbolTree) -> SymbolId {
    if let Some(parent_id) = self.symbol_stack.last().copied() {
        symbol.parent = Some(parent_id);
    }
    // Allocating the ID before any children are visited preserves the
    // invariant that parent IDs are always less than children IDs.
    let id = self.symbols.next_index();
    self.symbols.push(symbol);
    id
}
/// Adds a symbol introduced via an assignment.
fn add_assignment(&mut self, stmt: &ast::Stmt, name: &ast::ExprName) -> SymbolId {
    // Classify the binding: constant-looking names (per
    // `is_constant_name`) win, then any assignment nested inside a
    // class is a field, and everything else is a plain variable.
    let kind = if Self::is_constant_name(name.id.as_str()) {
        SymbolKind::Constant
    } else if self
        .iter_symbol_stack()
        .any(|enclosing| matches!(enclosing.kind, SymbolKind::Class))
    {
        SymbolKind::Field
    } else {
        SymbolKind::Variable
    };
    self.add_symbol(SymbolTree {
        parent: None,
        name: name.id.to_string(),
        kind,
        name_range: name.range(),
        full_range: stmt.range(),
        import_kind: None,
    })
}
/// Adds a symbol introduced via an import `stmt`.
fn add_import_alias(&mut self, stmt: &ast::Stmt, alias: &ast::Alias) -> SymbolId {
    // The bound name is the alias when present (`import x as y` binds `y`).
    let name = alias.asname.as_ref().unwrap_or(&alias.name);
    let kind = if stmt.is_import_stmt() {
        SymbolKind::Module
    } else if Self::is_constant_name(name.as_str()) {
        SymbolKind::Constant
    } else {
        SymbolKind::Variable
    };
    // `import x as x` (a "redundant" alias) is the conventional way to
    // mark an import as a deliberate re-export.
    let import_kind = match alias.asname.as_ref() {
        Some(asname) if asname.as_str() == alias.name.as_str() => ImportKind::RedundantAlias,
        _ => ImportKind::Normal,
    };
    self.add_symbol(SymbolTree {
        parent: None,
        name: name.id.to_string(),
        kind,
        name_range: name.range(),
        full_range: stmt.range(),
        import_kind: Some(import_kind),
    })
}
/// Extracts `__all__` names from the given assignment.
///
/// If the assignment isn't for `__all__` (or we're not collecting
/// exports, or we're not at module scope), then this is a no-op.
fn add_all_assignment(&mut self, targets: &[ast::Expr], value: Option<&ast::Expr>) {
    // `__all__` only matters when collecting exported symbols, and only
    // at module scope.
    if !self.exports_only || self.in_function || self.in_class {
        return;
    }
    let Some(target) = targets.first() else {
        return;
    };
    if !is_dunder_all(target) {
        return;
    }
    let Some(value) = value else { return };
    match value {
        // `__all__ = [...]`
        // `__all__ = (...)`
        ast::Expr::List(ast::ExprList { elts, .. })
        | ast::Expr::Tuple(ast::ExprTuple { elts, .. }) => {
            self.update_all_origin(DunderAllOrigin::CurrentModule);
            if !self.add_all_names(elts) {
                self.all_invalid = true;
            }
        }
        // Any other right-hand side is an `__all__` idiom we don't
        // recognize.
        _ => {
            self.all_invalid = true;
        }
    }
}
/// Extends the current set of names with the names from the
/// given expression which currently must be a list/tuple/set of
/// string-literal names. This currently does not support using a
/// submodule's `__all__` variable.
///
/// Returns `true` if the expression is a valid list/tuple/set or
/// module `__all__`, `false` otherwise.
///
/// N.B. Supporting all instances of `__all__ += submodule.__all__`
/// and `__all__.extend(submodule.__all__)` is likely difficult
/// in this context. Namely, `submodule` needs to be resolved
/// to a particular module. ty proper can do this (by virtue
/// of inferring the type of `submodule`). With that said, we
/// could likely support a subset of cases here without too much
/// ceremony. ---AG
fn extend_all(&mut self, expr: &ast::Expr) -> bool {
    match expr {
        // `__all__ += [...]`
        // `__all__ += (...)`
        // `__all__ += {...}`
        ast::Expr::List(ast::ExprList { elts, .. })
        | ast::Expr::Tuple(ast::ExprTuple { elts, .. })
        | ast::Expr::Set(ast::ExprSet { elts, .. }) => self.add_all_names(elts),
        // `__all__ += module.__all__`
        // `__all__.extend(module.__all__)`
        ast::Expr::Attribute(ast::ExprAttribute { .. }) => {
            let Some(unqualified) = UnqualifiedName::from_expr(expr) else {
                return false;
            };
            // Split `a.b.__all__` into the trailing attribute
            // (`__all__`) and the dotted module path (`a.b`).
            let Some((&attr, rest)) = unqualified.segments().split_last() else {
                return false;
            };
            if attr != "__all__" {
                return false;
            }
            let possible_module_name = Name::new(rest.join("."));
            let Some(symbols) =
                self.imports
                    .get_module_symbols(self.db, self.file, &possible_module_name)
            else {
                return false;
            };
            // The referenced module must itself have a recognized
            // `__all__` for this idiom to be considered valid.
            let Some(ref all) = symbols.all_names else {
                return false;
            };
            self.all_names.extend(all.iter().cloned());
            true
        }
        _ => false,
    }
}
/// Processes a call idiom for `__all__` and updates the set of
/// names accordingly.
///
/// Returns `true` if the call idiom is recognized and valid,
/// `false` otherwise.
fn update_all_by_call_idiom(
&mut self,
function_name: &ast::Identifier,
arguments: &ast::Arguments,
) -> bool {
if arguments.len() != 1 {
return false;
}
let Some(argument) = arguments.find_positional(0) else {
return false;
};
match function_name.as_str() {
// `__all__.extend([...])`
// `__all__.extend(module.__all__)`
"extend" => {
if !self.extend_all(argument) {
return false;
}
}
// `__all__.append(...)`
"append" => {
let Some(name) = create_all_name(argument) else {
return false;
};
self.all_names.insert(name);
}
// `__all__.remove(...)`
"remove" => {
let Some(name) = create_all_name(argument) else {
return false;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
use crate::goto::find_goto_target;
use crate::references::{ReferencesMode, references};
use crate::{Db, ReferenceTarget};
use ruff_db::files::File;
use ruff_text_size::TextSize;
use ty_python_semantic::SemanticModel;
/// Find all references to a symbol at the given position.
/// Search for references across all files in the project.
pub fn find_references(
    db: &dyn Db,
    file: File,
    offset: TextSize,
    include_declaration: bool,
) -> Option<Vec<ReferenceTarget>> {
    let module = ruff_db::parsed::parsed_module(db, file).load(db);
    let model = SemanticModel::new(db, file);
    // Resolve whatever sits under the cursor; bail out when there's
    // nothing meaningful at this offset.
    let goto_target = find_goto_target(&model, &module, offset)?;
    let mode = match include_declaration {
        true => ReferencesMode::References,
        false => ReferencesMode::ReferencesSkipDeclaration,
    };
    references(db, file, &goto_target, mode)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::{CursorTest, IntoDiagnostic, cursor_test};
use insta::assert_snapshot;
use ruff_db::diagnostic::{Annotation, Diagnostic, DiagnosticId, LintName, Severity, Span};
impl CursorTest {
    /// Renders the references found at the cursor position as a
    /// diagnostic-style string (or a placeholder when none were found).
    fn references(&self) -> String {
        let mut targets =
            match find_references(&self.db, self.cursor.file, self.cursor.offset, true) {
                Some(targets) if !targets.is_empty() => targets,
                _ => return "No references found".to_string(),
            };
        // Sort by file so the rendered output is stable.
        targets.sort_by_key(ReferenceTarget::file);
        self.render_diagnostics([ReferenceResult {
            references: targets,
        }])
    }
}
/// Wraps a set of reference targets so they can be rendered as a
/// single diagnostic in test snapshots.
struct ReferenceResult {
    references: Vec<ReferenceTarget>,
}
impl IntoDiagnostic for ReferenceResult {
    /// Builds one info-level diagnostic summarizing the reference count,
    /// with a secondary annotation per reference location.
    fn into_diagnostic(self) -> Diagnostic {
        let mut diagnostic = Diagnostic::new(
            DiagnosticId::Lint(LintName::of("references")),
            Severity::Info,
            format!("Found {} references", self.references.len()),
        );
        for target in self.references {
            let span = Span::from(target.file()).with_range(target.range());
            diagnostic.annotate(Annotation::secondary(span));
        }
        diagnostic
    }
}
// A parameter's references include uses in the function body and
// keyword arguments at call sites.
#[test]
fn parameter_references_in_function() {
let test = cursor_test(
"
def calculate_sum(<CURSOR>value: int) -> int:
doubled = value * 2
result = value + doubled
return value
# Call with keyword argument
result = calculate_sum(value=42)
",
);
assert_snapshot!(test.references(), @r"
info[references]: Found 5 references
--> main.py:2:19
|
2 | def calculate_sum(value: int) -> int:
| -----
3 | doubled = value * 2
| -----
4 | result = value + doubled
| -----
5 | return value
| -----
6 |
7 | # Call with keyword argument
8 | result = calculate_sum(value=42)
| -----
|
");
}
// `nonlocal` declarations and uses in nested functions resolve to the
// outer function's binding.
#[test]
fn nonlocal_variable_references() {
let test = cursor_test(
"
def outer_function():
coun<CURSOR>ter = 0
def increment():
nonlocal counter
counter += 1
return counter
def decrement():
nonlocal counter
counter -= 1
return counter
# Use counter in outer scope
initial = counter
increment()
decrement()
final = counter
return increment, decrement
",
);
assert_snapshot!(test.references(), @r"
info[references]: Found 9 references
--> main.py:3:5
|
2 | def outer_function():
3 | counter = 0
| -------
4 |
5 | def increment():
6 | nonlocal counter
| -------
7 | counter += 1
| -------
8 | return counter
| -------
9 |
10 | def decrement():
11 | nonlocal counter
| -------
12 | counter -= 1
| -------
13 | return counter
| -------
14 |
15 | # Use counter in outer scope
16 | initial = counter
| -------
17 | increment()
18 | decrement()
19 | final = counter
| -------
20 |
21 | return increment, decrement
|
");
}
// `global` declarations link module-level and function-local uses of
// the same name.
#[test]
fn global_variable_references() {
let test = cursor_test(
"
glo<CURSOR>bal_counter = 0
def increment_global():
global global_counter
global_counter += 1
return global_counter
def decrement_global():
global global_counter
global_counter -= 1
return global_counter
# Use global_counter at module level
initial_value = global_counter
increment_global()
decrement_global()
final_value = global_counter
",
);
assert_snapshot!(test.references(), @r"
info[references]: Found 9 references
--> main.py:2:1
|
2 | global_counter = 0
| --------------
3 |
4 | def increment_global():
5 | global global_counter
| --------------
6 | global_counter += 1
| --------------
7 | return global_counter
| --------------
8 |
9 | def decrement_global():
10 | global global_counter
| --------------
11 | global_counter -= 1
| --------------
12 | return global_counter
| --------------
13 |
14 | # Use global_counter at module level
15 | initial_value = global_counter
| --------------
16 | increment_global()
17 | decrement_global()
18 | final_value = global_counter
| --------------
|
");
}
// References for an `except ... as err` binding.
#[test]
fn except_handler_variable_references() {
let test = cursor_test(
"
try:
x = 1 / 0
except ZeroDivisionError as e<CURSOR>rr:
print(f'Error: {err}')
return err
try:
y = 2 / 0
except ValueError as err:
print(f'Different error: {err}')
",
);
assert_snapshot!(test.references(), @r"
info[references]: Found 4 references
--> main.py:4:29
|
2 | try:
3 | x = 1 / 0
4 | except ZeroDivisionError as err:
| ---
5 | print(f'Error: {err}')
| ---
6 | return err
| ---
7 |
8 | try:
9 | y = 2 / 0
10 | except ValueError as err:
11 | print(f'Different error: {err}')
| ---
|
");
}
// References for a `case ... as pattern` capture in a match statement.
#[test]
fn pattern_match_as_references() {
let test = cursor_test(
"
match x:
case [a, b] as patter<CURSOR>n:
print(f'Matched: {pattern}')
return pattern
case _:
pass
",
);
assert_snapshot!(test.references(), @r"
info[references]: Found 3 references
--> main.py:3:20
|
2 | match x:
3 | case [a, b] as pattern:
| -------
4 | print(f'Matched: {pattern}')
| -------
5 | return pattern
| -------
6 | case _:
7 | pass
|
");
}
// References for a `**rest` capture in a mapping pattern.
#[test]
fn pattern_match_mapping_rest_references() {
let test = cursor_test(
"
match data:
case {'a': a, 'b': b, **re<CURSOR>st}:
print(f'Rest data: {rest}')
process(rest)
return rest
",
);
assert_snapshot!(test.references(), @r"
info[references]: Found 4 references
--> main.py:3:29
|
2 | match data:
3 | case {'a': a, 'b': b, **rest}:
| ----
4 | print(f'Rest data: {rest}')
| ----
5 | process(rest)
| ----
6 | return rest
| ----
|
");
}
// References for a function include calls and bare name uses.
#[test]
fn function_definition_references() {
let test = cursor_test(
"
def my_func<CURSOR>tion():
return 42
# Call the function multiple times
result1 = my_function()
result2 = my_function()
# Function passed as an argument
callback = my_function
# Function used in different contexts
print(my_function())
value = my_function
",
);
assert_snapshot!(test.references(), @r"
info[references]: Found 6 references
--> main.py:2:5
|
2 | def my_function():
| -----------
3 | return 42
|
::: main.py:6:11
|
5 | # Call the function multiple times
6 | result1 = my_function()
| -----------
7 | result2 = my_function()
| -----------
8 |
9 | # Function passed as an argument
10 | callback = my_function
| -----------
11 |
12 | # Function used in different contexts
13 | print(my_function())
| -----------
14 | value = my_function
| -----------
|
");
}
// References for a class include constructor calls, annotations, and
// bare name uses.
#[test]
fn class_definition_references() {
let test = cursor_test(
"
class My<CURSOR>Class:
def __init__(self):
pass
# Create instances
obj1 = MyClass()
obj2 = MyClass()
# Use in type annotations
def process(instance: MyClass) -> MyClass:
return instance
# Reference the class itself
cls = MyClass
",
);
assert_snapshot!(test.references(), @r"
info[references]: Found 6 references
--> main.py:2:7
|
2 | class MyClass:
| -------
3 | def __init__(self):
4 | pass
|
::: main.py:7:8
|
6 | # Create instances
7 | obj1 = MyClass()
| -------
8 | obj2 = MyClass()
| -------
9 |
10 | # Use in type annotations
11 | def process(instance: MyClass) -> MyClass:
| ------- -------
12 | return instance
|
::: main.py:15:7
|
14 | # Reference the class itself
15 | cls = MyClass
| -------
|
");
}
// A name inside a string annotation links to its definition.
#[test]
fn references_string_annotation1() {
let test = cursor_test(
r#"
a: "MyCla<CURSOR>ss" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.references(), @r#"
info[references]: Found 2 references
--> main.py:2:5
|
2 | a: "MyClass" = 1
| -------
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
"#);
}
// A name inside a compound string annotation (`None | MyClass`).
#[test]
fn references_string_annotation2() {
let test = cursor_test(
r#"
a: "None | MyCl<CURSOR>ass" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.references(), @r#"
info[references]: Found 2 references
--> main.py:2:12
|
2 | a: "None | MyClass" = 1
| -------
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
"#);
}
// A cursor on the `|` operator inside a string annotation finds nothing.
#[test]
fn references_string_annotation3() {
let test = cursor_test(
r#"
a: "None |<CURSOR> MyClass" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.references(), @"No references found");
}
// A cursor just past the end of the name still resolves it.
#[test]
fn references_string_annotation4() {
let test = cursor_test(
r#"
a: "None | MyClass<CURSOR>" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.references(), @r#"
info[references]: Found 2 references
--> main.py:2:12
|
2 | a: "None | MyClass" = 1
| -------
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
"#);
}
// A cursor past the closing quote of the annotation finds nothing.
#[test]
fn references_string_annotation5() {
let test = cursor_test(
r#"
a: "None | MyClass"<CURSOR> = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.references(), @"No references found");
}
// A syntactically broken (dangling `|`) string annotation finds nothing.
#[test]
fn references_string_annotation_dangling1() {
let test = cursor_test(
r#"
a: "MyCl<CURSOR>ass |" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.references(), @"No references found");
}
// A partially typed union still resolves the complete name.
#[test]
fn references_string_annotation_dangling2() {
let test = cursor_test(
r#"
a: "MyCl<CURSOR>ass | No" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.references(), @r#"
info[references]: Found 2 references
--> main.py:2:5
|
2 | a: "MyClass | No" = 1
| -------
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
"#);
}
// The incomplete name (`No`) in a partially typed union finds nothing.
#[test]
fn references_string_annotation_dangling3() {
let test = cursor_test(
r#"
a: "MyClass | N<CURSOR>o" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.references(), @"No references found");
}
#[test]
fn references_string_annotation_recursive() {
let test = cursor_test(
r#"
ab: "a<CURSOR>b"
"#,
);
assert_snapshot!(test.references(), @r#"
info[references]: Found 2 references
--> main.py:2:1
|
2 | ab: "ab"
| -- --
|
"#);
}
#[test]
fn references_string_annotation_unknown() {
let test = cursor_test(
r#"
x: "foo<CURSOR>bar"
"#,
);
assert_snapshot!(test.references(), @"No references found");
}
#[test]
fn references_match_name_stmt() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", a<CURSOR>b]:
x = ab
"#,
);
assert_snapshot!(test.references(), @r#"
info[references]: Found 2 references
--> main.py:4:22
|
2 | def my_func(command: str):
3 | match command.split():
4 | case ["get", ab]:
| --
5 | x = ab
| --
|
"#);
}
#[test]
fn references_match_name_binding() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", ab]:
x = a<CURSOR>b
"#,
);
assert_snapshot!(test.references(), @r#"
info[references]: Found 2 references
--> main.py:4:22
|
2 | def my_func(command: str):
3 | match command.split():
4 | case ["get", ab]:
| --
5 | x = ab
| --
|
"#);
}
#[test]
fn references_match_rest_stmt() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", *a<CURSOR>b]:
x = ab
"#,
);
assert_snapshot!(test.references(), @r#"
info[references]: Found 2 references
--> main.py:4:23
|
2 | def my_func(command: str):
3 | match command.split():
4 | case ["get", *ab]:
| --
5 | x = ab
| --
|
"#);
}
#[test]
fn references_match_rest_binding() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", *ab]:
x = a<CURSOR>b
"#,
);
assert_snapshot!(test.references(), @r#"
info[references]: Found 2 references
--> main.py:4:23
|
2 | def my_func(command: str):
3 | match command.split():
4 | case ["get", *ab]:
| --
5 | x = ab
| --
|
"#);
}
#[test]
fn references_match_as_stmt() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", ("a" | "b") as a<CURSOR>b]:
x = ab
"#,
);
assert_snapshot!(test.references(), @r#"
info[references]: Found 2 references
--> main.py:4:37
|
2 | def my_func(command: str):
3 | match command.split():
4 | case ["get", ("a" | "b") as ab]:
| --
5 | x = ab
| --
|
"#);
}
#[test]
fn references_match_as_binding() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", ("a" | "b") as ab]:
x = a<CURSOR>b
"#,
);
assert_snapshot!(test.references(), @r#"
info[references]: Found 2 references
--> main.py:4:37
|
2 | def my_func(command: str):
3 | match command.split():
4 | case ["get", ("a" | "b") as ab]:
| --
5 | x = ab
| --
|
"#);
}
#[test]
fn references_match_keyword_stmt() {
let test = cursor_test(
r#"
class Click:
__match_args__ = ("position", "button")
def __init__(self, pos, btn):
self.position: int = pos
self.button: str = btn
def my_func(event: Click):
match event:
case Click(x, button=a<CURSOR>b):
x = ab
"#,
);
assert_snapshot!(test.references(), @r"
info[references]: Found 2 references
--> main.py:10:30
|
8 | def my_func(event: Click):
9 | match event:
10 | case Click(x, button=ab):
| --
11 | x = ab
| --
|
");
}
#[test]
fn references_match_keyword_binding() {
let test = cursor_test(
r#"
class Click:
__match_args__ = ("position", "button")
def __init__(self, pos, btn):
self.position: int = pos
self.button: str = btn
def my_func(event: Click):
match event:
case Click(x, button=ab):
x = a<CURSOR>b
"#,
);
assert_snapshot!(test.references(), @r"
info[references]: Found 2 references
--> main.py:10:30
|
8 | def my_func(event: Click):
9 | match event:
10 | case Click(x, button=ab):
| --
11 | x = ab
| --
|
");
}
#[test]
fn references_match_class_name() {
let test = cursor_test(
r#"
class Click:
__match_args__ = ("position", "button")
def __init__(self, pos, btn):
self.position: int = pos
self.button: str = btn
def my_func(event: Click):
match event:
case Cl<CURSOR>ick(x, button=ab):
x = ab
"#,
);
assert_snapshot!(test.references(), @r#"
info[references]: Found 3 references
--> main.py:2:7
|
2 | class Click:
| -----
3 | __match_args__ = ("position", "button")
4 | def __init__(self, pos, btn):
|
::: main.py:8:20
|
6 | self.button: str = btn
7 |
8 | def my_func(event: Click):
| -----
9 | match event:
10 | case Click(x, button=ab):
| -----
11 | x = ab
|
"#);
}
#[test]
fn references_match_class_field_name() {
let test = cursor_test(
r#"
class Click:
__match_args__ = ("position", "button")
def __init__(self, pos, btn):
self.position: int = pos
self.button: str = btn
def my_func(event: Click):
match event:
case Click(x, but<CURSOR>ton=ab):
x = ab
"#,
);
assert_snapshot!(test.references(), @"No references found");
}
#[test]
fn references_typevar_name_stmt() {
let test = cursor_test(
r#"
type Alias1[A<CURSOR>B: int = bool] = tuple[AB, list[AB]]
"#,
);
assert_snapshot!(test.references(), @r"
info[references]: Found 3 references
--> main.py:2:13
|
2 | type Alias1[AB: int = bool] = tuple[AB, list[AB]]
| -- -- --
|
");
}
#[test]
fn references_typevar_name_binding() {
let test = cursor_test(
r#"
type Alias1[AB: int = bool] = tuple[A<CURSOR>B, list[AB]]
"#,
);
assert_snapshot!(test.references(), @r"
info[references]: Found 3 references
--> main.py:2:13
|
2 | type Alias1[AB: int = bool] = tuple[AB, list[AB]]
| -- -- --
|
");
}
#[test]
fn references_typevar_spec_stmt() {
let test = cursor_test(
r#"
from typing import Callable
type Alias2[**A<CURSOR>B = [int, str]] = Callable[AB, tuple[AB]]
"#,
);
assert_snapshot!(test.references(), @r"
info[references]: Found 3 references
--> main.py:3:15
|
2 | from typing import Callable
3 | type Alias2[**AB = [int, str]] = Callable[AB, tuple[AB]]
| -- -- --
|
");
}
#[test]
fn references_typevar_spec_binding() {
let test = cursor_test(
r#"
from typing import Callable
type Alias2[**AB = [int, str]] = Callable[A<CURSOR>B, tuple[AB]]
"#,
);
assert_snapshot!(test.references(), @r"
info[references]: Found 3 references
--> main.py:3:15
|
2 | from typing import Callable
3 | type Alias2[**AB = [int, str]] = Callable[AB, tuple[AB]]
| -- -- --
|
");
}
#[test]
fn references_typevar_tuple_stmt() {
let test = cursor_test(
r#"
type Alias3[*A<CURSOR>B = ()] = tuple[tuple[*AB], tuple[*AB]]
"#,
);
assert_snapshot!(test.references(), @r"
info[references]: Found 3 references
--> main.py:2:14
|
2 | type Alias3[*AB = ()] = tuple[tuple[*AB], tuple[*AB]]
| -- -- --
|
");
}
#[test]
fn references_typevar_tuple_binding() {
let test = cursor_test(
r#"
type Alias3[*AB = ()] = tuple[tuple[*A<CURSOR>B], tuple[*AB]]
"#,
);
assert_snapshot!(test.references(), @r"
info[references]: Found 3 references
--> main.py:2:14
|
2 | type Alias3[*AB = ()] = tuple[tuple[*AB], tuple[*AB]]
| -- -- --
|
");
}
#[test]
fn multi_file_function_references() {
let test = CursorTest::builder()
.source(
"utils.py",
"
def fun<CURSOR>c(x):
return x * 2
",
)
.source(
"module.py",
"
from utils import func
def process_data(data):
return func(data)
",
)
.source(
"app.py",
"
from utils import func
class DataProcessor:
def __init__(self):
self.multiplier = func
def process(self, value):
return func(value)
",
)
.build();
assert_snapshot!(test.references(), @r"
info[references]: Found 6 references
--> app.py:2:19
|
2 | from utils import func
| ----
3 |
4 | class DataProcessor:
5 | def __init__(self):
6 | self.multiplier = func
| ----
7 |
8 | def process(self, value):
9 | return func(value)
| ----
|
::: module.py:2:19
|
2 | from utils import func
| ----
3 |
4 | def process_data(data):
5 | return func(data)
| ----
|
::: utils.py:2:5
|
2 | def func(x):
| ----
3 | return x * 2
|
");
}
#[test]
fn multi_file_class_attribute_references() {
let test = CursorTest::builder()
.source(
"models.py",
"
class MyModel:
a<CURSOR>ttr = 42
def get_attribute(self):
return MyModel.attr
",
)
.source(
"main.py",
"
from models import MyModel
def process_model():
model = MyModel()
value = model.attr
model.attr = 100
return model.attr
",
)
.build();
assert_snapshot!(test.references(), @r"
info[references]: Found 5 references
--> main.py:6:19
|
4 | def process_model():
5 | model = MyModel()
6 | value = model.attr
| ----
7 | model.attr = 100
| ----
8 | return model.attr
| ----
|
::: models.py:3:5
|
2 | class MyModel:
3 | attr = 42
| ----
4 |
5 | def get_attribute(self):
6 | return MyModel.attr
| ----
|
");
}
#[test]
fn import_alias_references_should_not_resolve_to_original() {
let test = CursorTest::builder()
.source(
"original.py",
"
def func():
pass
func()
",
)
.source(
"importer.py",
"
from original import func as func_alias
func<CURSOR>_alias()
",
)
.build();
// When finding references to the alias, we should NOT find references
// to the original function in the original module
assert_snapshot!(test.references(), @r"
info[references]: Found 2 references
--> importer.py:2:30
|
2 | from original import func as func_alias
| ----------
3 |
4 | func_alias()
| ----------
|
");
}
#[test]
fn stub_target() {
let test = CursorTest::builder()
.source(
"path.pyi",
r#"
class Path:
def __init__(self, path: str): ...
"#,
)
.source(
"path.py",
r#"
class Path:
def __init__(self, path: str):
self.path = path
"#,
)
.source(
"importer.py",
r#"
from path import Path<CURSOR>
a: Path = Path("test")
"#,
)
.build();
assert_snapshot!(test.references(), @r#"
info[references]: Found 4 references
--> importer.py:2:18
|
2 | from path import Path
| ----
3 |
4 | a: Path = Path("test")
| ---- ----
|
::: path.pyi:2:7
|
2 | class Path:
| ----
3 | def __init__(self, path: str): ...
|
"#);
}
#[test]
fn import_alias() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
import warnings
import warnings as <CURSOR>abc
x = abc
y = warnings
"#,
)
.build();
assert_snapshot!(test.references(), @r"
info[references]: Found 2 references
--> main.py:3:20
|
2 | import warnings
3 | import warnings as abc
| ---
4 |
5 | x = abc
| ---
6 | y = warnings
|
");
}
#[test]
fn import_alias_use() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
import warnings
import warnings as abc
x = abc<CURSOR>
y = warnings
"#,
)
.build();
assert_snapshot!(test.references(), @r"
info[references]: Found 2 references
--> main.py:3:20
|
2 | import warnings
3 | import warnings as abc
| ---
4 |
5 | x = abc
| ---
6 | y = warnings
|
");
}
#[test]
fn import_from_alias() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
from warnings import deprecated as xyz<CURSOR>
from warnings import deprecated
y = xyz
z = deprecated
"#,
)
.build();
assert_snapshot!(test.references(), @r"
info[references]: Found 2 references
--> main.py:2:36
|
2 | from warnings import deprecated as xyz
| ---
3 | from warnings import deprecated
4 |
5 | y = xyz
| ---
6 | z = deprecated
|
");
}
#[test]
fn import_from_alias_use() {
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/goto_definition.rs | crates/ty_ide/src/goto_definition.rs | use crate::goto::find_goto_target;
use crate::{Db, NavigationTargets, RangedValue};
use ruff_db::files::{File, FileRange};
use ruff_db::parsed::parsed_module;
use ruff_text_size::{Ranged, TextSize};
use ty_python_semantic::{ImportAliasResolution, SemanticModel};
/// Navigate to the definition of a symbol.
///
/// A "definition" is the actual implementation of a symbol, potentially in a source file
/// rather than a stub file. This differs from "declaration" which may navigate to stub files.
/// When possible, this function will map from stub file declarations to their corresponding
/// source file implementations using the `StubMapper`.
pub fn goto_definition(
db: &dyn Db,
file: File,
offset: TextSize,
) -> Option<RangedValue<NavigationTargets>> {
let module = parsed_module(db, file).load(db);
let model = SemanticModel::new(db, file);
let goto_target = find_goto_target(&model, &module, offset)?;
let definition_targets = goto_target
.get_definition_targets(&model, ImportAliasResolution::ResolveAliases)?
.definition_targets(db)?;
Some(RangedValue {
range: FileRange::new(file, goto_target.range()),
value: definition_targets,
})
}
#[cfg(test)]
pub(super) mod test {
use crate::tests::{CursorTest, IntoDiagnostic};
use crate::{NavigationTargets, RangedValue, goto_definition};
use insta::assert_snapshot;
use ruff_db::diagnostic::{
Annotation, Diagnostic, DiagnosticId, LintName, Severity, Span, SubDiagnostic,
SubDiagnosticSeverity,
};
use ruff_text_size::Ranged;
/// goto-definition on a module should go to the .py not the .pyi
///
/// TODO: this currently doesn't work right! This is especially surprising
/// because [`goto_definition_stub_map_module_ref`] works fine.
#[test]
fn goto_definition_stub_map_module_import() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymo<CURSOR>dule import my_function
",
)
.source(
"mymodule.py",
r#"
def my_function():
return "hello"
"#,
)
.source(
"mymodule.pyi",
r#"
def my_function(): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r#"
info[goto-definition]: Go to definition
--> main.py:2:6
|
2 | from mymodule import my_function
| ^^^^^^^^ Clicking here
|
info: Found 1 definition
--> mymodule.py:1:1
|
1 |
| -
2 | def my_function():
3 | return "hello"
|
"#);
}
/// goto-definition on a module ref should go to the .py not the .pyi
#[test]
fn goto_definition_stub_map_module_ref() {
let test = CursorTest::builder()
.source(
"main.py",
"
import mymodule
x = mymo<CURSOR>dule
",
)
.source(
"mymodule.py",
r#"
def my_function():
return "hello"
"#,
)
.source(
"mymodule.pyi",
r#"
def my_function(): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r#"
info[goto-definition]: Go to definition
--> main.py:3:5
|
2 | import mymodule
3 | x = mymodule
| ^^^^^^^^ Clicking here
|
info: Found 1 definition
--> mymodule.py:1:1
|
1 |
| -
2 | def my_function():
3 | return "hello"
|
"#);
}
/// goto-definition on a function call should go to the .py not the .pyi
#[test]
fn goto_definition_stub_map_function() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import my_function
print(my_func<CURSOR>tion())
",
)
.source(
"mymodule.py",
r#"
def my_function():
return "hello"
def other_function():
return "other"
"#,
)
.source(
"mymodule.pyi",
r#"
def my_function(): ...
def other_function(): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r#"
info[goto-definition]: Go to definition
--> main.py:3:7
|
2 | from mymodule import my_function
3 | print(my_function())
| ^^^^^^^^^^^ Clicking here
|
info: Found 1 definition
--> mymodule.py:2:5
|
2 | def my_function():
| -----------
3 | return "hello"
|
"#);
}
/// goto-definition on a function definition in a .pyi should go to the .py
#[test]
fn goto_definition_stub_map_function_def() {
let test = CursorTest::builder()
.source(
"mymodule.py",
r#"
def my_function():
return "hello"
def other_function():
return "other"
"#,
)
.source(
"mymodule.pyi",
r#"
def my_fun<CURSOR>ction(): ...
def other_function(): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r#"
info[goto-definition]: Go to definition
--> mymodule.pyi:2:5
|
2 | def my_function(): ...
| ^^^^^^^^^^^ Clicking here
3 |
4 | def other_function(): ...
|
info: Found 1 definition
--> mymodule.py:2:5
|
2 | def my_function():
| -----------
3 | return "hello"
|
"#);
}
/// goto-definition on a function that's redefined many times in the impl .py
///
/// Currently this yields all instances. There's an argument for only yielding
/// the final one since that's the one "exported" but, this is consistent for
/// how we do file-local goto-definition.
#[test]
fn goto_definition_stub_map_function_redefine() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import my_function
print(my_func<CURSOR>tion())
",
)
.source(
"mymodule.py",
r#"
def my_function():
return "hello"
def my_function():
return "hello again"
def my_function():
return "we can't keep doing this"
def other_function():
return "other"
"#,
)
.source(
"mymodule.pyi",
r#"
def my_function(): ...
def other_function(): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r#"
info[goto-definition]: Go to definition
--> main.py:3:7
|
2 | from mymodule import my_function
3 | print(my_function())
| ^^^^^^^^^^^ Clicking here
|
info: Found 3 definitions
--> mymodule.py:2:5
|
2 | def my_function():
| -----------
3 | return "hello"
4 |
5 | def my_function():
| -----------
6 | return "hello again"
7 |
8 | def my_function():
| -----------
9 | return "we can't keep doing this"
|
"#);
}
/// goto-definition on a class ref go to the .py not the .pyi
#[test]
fn goto_definition_stub_map_class_ref() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import MyClass
x = MyC<CURSOR>lass
",
)
.source(
"mymodule.py",
r#"
class MyClass:
def __init__(self, val):
self.val = val
class MyOtherClass:
def __init__(self, val):
self.val = val + 1
"#,
)
.source(
"mymodule.pyi",
r#"
class MyClass:
def __init__(self, val: bool): ...
class MyOtherClass:
def __init__(self, val: bool): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:3:5
|
2 | from mymodule import MyClass
3 | x = MyClass
| ^^^^^^^ Clicking here
|
info: Found 1 definition
--> mymodule.py:2:7
|
2 | class MyClass:
| -------
3 | def __init__(self, val):
4 | self.val = val
|
");
}
/// goto-definition on a class def in a .pyi should go to the .py
#[test]
fn goto_definition_stub_map_class_def() {
let test = CursorTest::builder()
.source(
"mymodule.py",
r#"
class MyClass:
def __init__(self, val):
self.val = val
class MyOtherClass:
def __init__(self, val):
self.val = val + 1
"#,
)
.source(
"mymodule.pyi",
r#"
class MyCl<CURSOR>ass:
def __init__(self, val: bool): ...
class MyOtherClass:
def __init__(self, val: bool): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> mymodule.pyi:2:7
|
2 | class MyClass:
| ^^^^^^^ Clicking here
3 | def __init__(self, val: bool): ...
|
info: Found 1 definition
--> mymodule.py:2:7
|
2 | class MyClass:
| -------
3 | def __init__(self, val):
4 | self.val = val
|
");
}
/// goto-definition on a class init should go to the .py not the .pyi
#[test]
fn goto_definition_stub_map_class_init() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import MyClass
x = MyCl<CURSOR>ass(0)
",
)
.source(
"mymodule.py",
r#"
class MyClass:
def __init__(self, val):
self.val = val
class MyOtherClass:
def __init__(self, val):
self.val = val + 1
"#,
)
.source(
"mymodule.pyi",
r#"
class MyClass:
def __init__(self, val: bool): ...
class MyOtherClass:
def __init__(self, val: bool): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:3:5
|
2 | from mymodule import MyClass
3 | x = MyClass(0)
| ^^^^^^^ Clicking here
|
info: Found 2 definitions
--> mymodule.py:2:7
|
2 | class MyClass:
| -------
3 | def __init__(self, val):
| --------
4 | self.val = val
|
");
}
/// goto-definition on a class method should go to the .py not the .pyi
#[test]
fn goto_definition_stub_map_class_method() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import MyClass
x = MyClass(0)
x.act<CURSOR>ion()
",
)
.source(
"mymodule.py",
r#"
class MyClass:
def __init__(self, val):
self.val = val
def action(self):
print(self.val)
class MyOtherClass:
def __init__(self, val):
self.val = val + 1
"#,
)
.source(
"mymodule.pyi",
r#"
class MyClass:
def __init__(self, val: bool): ...
def action(self): ...
class MyOtherClass:
def __init__(self, val: bool): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:4:3
|
2 | from mymodule import MyClass
3 | x = MyClass(0)
4 | x.action()
| ^^^^^^ Clicking here
|
info: Found 1 definition
--> mymodule.py:5:9
|
3 | def __init__(self, val):
4 | self.val = val
5 | def action(self):
| ------
6 | print(self.val)
|
");
}
/// goto-definition on a class function should go to the .py not the .pyi
#[test]
fn goto_definition_stub_map_class_function() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import MyClass
x = MyClass.act<CURSOR>ion()
",
)
.source(
"mymodule.py",
r#"
class MyClass:
def __init__(self, val):
self.val = val
def action():
print("hi!")
class MyOtherClass:
def __init__(self, val):
self.val = val + 1
"#,
)
.source(
"mymodule.pyi",
r#"
class MyClass:
def __init__(self, val: bool): ...
def action(): ...
class MyOtherClass:
def __init__(self, val: bool): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r#"
info[goto-definition]: Go to definition
--> main.py:3:13
|
2 | from mymodule import MyClass
3 | x = MyClass.action()
| ^^^^^^ Clicking here
|
info: Found 1 definition
--> mymodule.py:5:9
|
3 | def __init__(self, val):
4 | self.val = val
5 | def action():
| ------
6 | print("hi!")
|
"#);
}
/// goto-definition on a class import should go to the .py not the .pyi
#[test]
fn goto_definition_stub_map_class_import() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import MyC<CURSOR>lass
",
)
.source(
"mymodule.py",
r#"
class MyClass: ...
"#,
)
.source(
"mymodule.pyi",
r#"
class MyClass: ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:2:22
|
2 | from mymodule import MyClass
| ^^^^^^^ Clicking here
|
info: Found 1 definition
--> mymodule.py:2:7
|
2 | class MyClass: ...
| -------
|
");
}
/// goto-definition on a nested call using a keyword arg where both funcs have that arg name
///
/// In this case they ultimately have different signatures.
#[test]
fn goto_definition_nested_keyword_arg1() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
def my_func(ab, y, z = None): ...
def my_other_func(ab, y): ...
my_other_func(my_func(a<CURSOR>b=5, y=2), 0)
my_func(my_other_func(ab=5, y=2), 0)
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:5:23
|
3 | def my_other_func(ab, y): ...
4 |
5 | my_other_func(my_func(ab=5, y=2), 0)
| ^^ Clicking here
6 | my_func(my_other_func(ab=5, y=2), 0)
|
info: Found 1 definition
--> main.py:2:13
|
2 | def my_func(ab, y, z = None): ...
| --
3 | def my_other_func(ab, y): ...
|
");
}
/// goto-definition on a nested call using a keyword arg where both funcs have that arg name
///
/// In this case they ultimately have different signatures.
#[test]
fn goto_definition_nested_keyword_arg2() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
def my_func(ab, y, z = None): ...
def my_other_func(ab, y): ...
my_other_func(my_func(ab=5, y=2), 0)
my_func(my_other_func(a<CURSOR>b=5, y=2), 0)
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:6:23
|
5 | my_other_func(my_func(ab=5, y=2), 0)
6 | my_func(my_other_func(ab=5, y=2), 0)
| ^^ Clicking here
|
info: Found 1 definition
--> main.py:3:19
|
2 | def my_func(ab, y, z = None): ...
3 | def my_other_func(ab, y): ...
| --
4 |
5 | my_other_func(my_func(ab=5, y=2), 0)
|
");
}
/// goto-definition on a nested call using a keyword arg where both funcs have that arg name
///
/// In this case they have identical signatures.
#[test]
fn goto_definition_nested_keyword_arg3() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
def my_func(ab, y): ...
def my_other_func(ab, y): ...
my_other_func(my_func(a<CURSOR>b=5, y=2), 0)
my_func(my_other_func(ab=5, y=2), 0)
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:5:23
|
3 | def my_other_func(ab, y): ...
4 |
5 | my_other_func(my_func(ab=5, y=2), 0)
| ^^ Clicking here
6 | my_func(my_other_func(ab=5, y=2), 0)
|
info: Found 1 definition
--> main.py:2:13
|
2 | def my_func(ab, y): ...
| --
3 | def my_other_func(ab, y): ...
|
");
}
/// goto-definition on a nested call using a keyword arg where both funcs have that arg name
///
/// In this case they have identical signatures.
#[test]
fn goto_definition_nested_keyword_arg4() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
def my_func(ab, y): ...
def my_other_func(ab, y): ...
my_other_func(my_func(ab=5, y=2), 0)
my_func(my_other_func(a<CURSOR>b=5, y=2), 0)
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:6:23
|
5 | my_other_func(my_func(ab=5, y=2), 0)
6 | my_func(my_other_func(ab=5, y=2), 0)
| ^^ Clicking here
|
info: Found 1 definition
--> main.py:3:19
|
2 | def my_func(ab, y): ...
3 | def my_other_func(ab, y): ...
| --
4 |
5 | my_other_func(my_func(ab=5, y=2), 0)
|
");
}
#[test]
fn goto_definition_overload_type_disambiguated1() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import ab
a<CURSOR>b(1)
",
)
.source(
"mymodule.py",
r#"
def ab(a):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int): ...
@overload
def ab(a: str): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r#"
info[goto-definition]: Go to definition
--> main.py:4:1
|
2 | from mymodule import ab
3 |
4 | ab(1)
| ^^ Clicking here
|
info: Found 1 definition
--> mymodule.py:2:5
|
2 | def ab(a):
| --
3 | """the real implementation!"""
|
"#);
}
#[test]
fn goto_definition_overload_type_disambiguated2() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
from mymodule import ab
a<CURSOR>b("hello")
"#,
)
.source(
"mymodule.py",
r#"
def ab(a):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int): ...
@overload
def ab(a: str): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r#"
info[goto-definition]: Go to definition
--> main.py:4:1
|
2 | from mymodule import ab
3 |
4 | ab("hello")
| ^^ Clicking here
|
info: Found 1 definition
--> mymodule.py:2:5
|
2 | def ab(a):
| --
3 | """the real implementation!"""
|
"#);
}
#[test]
fn goto_definition_overload_arity_disambiguated1() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import ab
a<CURSOR>b(1, 2)
",
)
.source(
"mymodule.py",
r#"
def ab(a, b = None):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int, b: int): ...
@overload
def ab(a: int): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r#"
info[goto-definition]: Go to definition
--> main.py:4:1
|
2 | from mymodule import ab
3 |
4 | ab(1, 2)
| ^^ Clicking here
|
info: Found 1 definition
--> mymodule.py:2:5
|
2 | def ab(a, b = None):
| --
3 | """the real implementation!"""
|
"#);
}
#[test]
fn goto_definition_overload_arity_disambiguated2() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import ab
a<CURSOR>b(1)
",
)
.source(
"mymodule.py",
r#"
def ab(a, b = None):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int, b: int): ...
@overload
def ab(a: int): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r#"
info[goto-definition]: Go to definition
--> main.py:4:1
|
2 | from mymodule import ab
3 |
4 | ab(1)
| ^^ Clicking here
|
info: Found 1 definition
--> mymodule.py:2:5
|
2 | def ab(a, b = None):
| --
3 | """the real implementation!"""
|
"#);
}
#[test]
fn goto_definition_overload_keyword_disambiguated1() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import ab
a<CURSOR>b(1, b=2)
",
)
.source(
"mymodule.py",
r#"
def ab(a, *, b = None, c = None):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int): ...
@overload
def ab(a: int, *, b: int): ...
@overload
def ab(a: int, *, c: int): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r#"
info[goto-definition]: Go to definition
--> main.py:4:1
|
2 | from mymodule import ab
3 |
4 | ab(1, b=2)
| ^^ Clicking here
|
info: Found 1 definition
--> mymodule.py:2:5
|
2 | def ab(a, *, b = None, c = None):
| --
3 | """the real implementation!"""
|
"#);
}
#[test]
fn goto_definition_overload_keyword_disambiguated2() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import ab
a<CURSOR>b(1, c=2)
",
)
.source(
"mymodule.py",
r#"
def ab(a, *, b = None, c = None):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int): ...
@overload
def ab(a: int, *, b: int): ...
@overload
def ab(a: int, *, c: int): ...
"#,
)
.build();
assert_snapshot!(test.goto_definition(), @r#"
info[goto-definition]: Go to definition
--> main.py:4:1
|
2 | from mymodule import ab
3 |
4 | ab(1, c=2)
| ^^ Clicking here
|
info: Found 1 definition
--> mymodule.py:2:5
|
2 | def ab(a, *, b = None, c = None):
| --
3 | """the real implementation!"""
|
"#);
}
#[test]
fn goto_definition_binary_operator() {
let test = CursorTest::builder()
.source(
"main.py",
"
class Test:
def __add__(self, other):
return Test()
a = Test()
b = Test()
a <CURSOR>+ b
",
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:10:3
|
8 | b = Test()
9 |
10 | a + b
| ^ Clicking here
|
info: Found 1 definition
--> main.py:3:9
|
2 | class Test:
3 | def __add__(self, other):
| -------
4 | return Test()
|
");
}
#[test]
fn goto_definition_binary_operator_reflected_dunder() {
let test = CursorTest::builder()
.source(
"main.py",
"
class A:
def __radd__(self, other) -> A:
return self
class B: ...
B() <CURSOR>+ A()
",
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:8:5
|
6 | class B: ...
7 |
8 | B() + A()
| ^ Clicking here
|
info: Found 1 definition
--> main.py:3:9
|
2 | class A:
3 | def __radd__(self, other) -> A:
| --------
4 | return self
|
");
}
#[test]
fn goto_definition_binary_operator_no_spaces_before_operator() {
let test = CursorTest::builder()
.source(
"main.py",
"
class Test:
def __add__(self, other):
return Test()
a = Test()
b = Test()
a<CURSOR>+b
",
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:10:2
|
8 | b = Test()
9 |
10 | a+b
| ^ Clicking here
|
info: Found 1 definition
--> main.py:3:9
|
2 | class Test:
3 | def __add__(self, other):
| -------
4 | return Test()
|
");
}
#[test]
fn goto_definition_binary_operator_no_spaces_after_operator() {
let test = CursorTest::builder()
.source(
"main.py",
"
class Test:
def __add__(self, other):
return Test()
a = Test()
b = Test()
a+<CURSOR>b
",
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:10:3
|
8 | b = Test()
9 |
10 | a+b
| ^ Clicking here
|
info: Found 1 definition
--> main.py:8:1
|
7 | a = Test()
8 | b = Test()
| -
9 |
10 | a+b
|
");
}
#[test]
fn goto_definition_binary_operator_comment() {
let test = CursorTest::builder()
.source(
"main.py",
"
class Test:
def __add__(self, other):
return Test()
(
Test() <CURSOR># comment
+ Test()
)
",
)
.build();
assert_snapshot!(test.goto_definition(), @"No goto target found");
}
#[test]
fn goto_definition_unary_operator() {
let test = CursorTest::builder()
.source(
"main.py",
"
class Test:
def __invert__(self) -> 'Test': ...
a = Test()
<CURSOR>~a
",
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:7:1
|
5 | a = Test()
6 |
7 | ~a
| ^ Clicking here
|
info: Found 1 definition
--> main.py:3:9
|
2 | class Test:
3 | def __invert__(self) -> 'Test': ...
| ----------
4 |
5 | a = Test()
|
");
}
/// We jump to the `__invert__` definition here even though its signature is incorrect.
#[test]
fn goto_definition_unary_operator_with_bad_dunder_definition() {
let test = CursorTest::builder()
.source(
"main.py",
"
class Test:
def __invert__(self, extra_arg) -> 'Test': ...
a = Test()
<CURSOR>~a
",
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:7:1
|
5 | a = Test()
6 |
7 | ~a
| ^ Clicking here
|
info: Found 1 definition
--> main.py:3:9
|
2 | class Test:
3 | def __invert__(self, extra_arg) -> 'Test': ...
| ----------
4 |
5 | a = Test()
|
");
}
#[test]
fn goto_definition_unary_after_operator() {
let test = CursorTest::builder()
.source(
"main.py",
"
class Test:
def __invert__(self) -> 'Test': ...
a = Test()
~<CURSOR> a
",
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:7:1
|
5 | a = Test()
6 |
7 | ~ a
| ^ Clicking here
|
info: Found 1 definition
--> main.py:3:9
|
2 | class Test:
3 | def __invert__(self) -> 'Test': ...
| ----------
4 |
5 | a = Test()
|
");
}
#[test]
fn goto_definition_unary_between_operator_and_operand() {
let test = CursorTest::builder()
.source(
"main.py",
"
class Test:
def __invert__(self) -> 'Test': ...
a = Test()
-<CURSOR>a
",
)
.build();
assert_snapshot!(test.goto_definition(), @r"
info[goto-definition]: Go to definition
--> main.py:7:2
|
5 | a = Test()
6 |
7 | -a
| ^ Clicking here
|
info: Found 1 definition
--> main.py:5:1
|
3 | def __invert__(self) -> 'Test': ...
4 |
5 | a = Test()
| -
6 |
7 | -a
|
");
}
#[test]
fn goto_definition_unary_not_with_dunder_bool() {
let test = CursorTest::builder()
.source(
"main.py",
"
class Test:
def __bool__(self) -> bool: ...
a = Test()
<CURSOR>not a
",
)
.build();
assert_snapshot!(test.goto_definition(), @r"
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/goto_type_definition.rs | crates/ty_ide/src/goto_type_definition.rs | use crate::goto::find_goto_target;
use crate::{Db, HasNavigationTargets, NavigationTargets, RangedValue};
use ruff_db::files::{File, FileRange};
use ruff_db::parsed::parsed_module;
use ruff_text_size::{Ranged, TextSize};
use ty_python_semantic::SemanticModel;
pub fn goto_type_definition(
db: &dyn Db,
file: File,
offset: TextSize,
) -> Option<RangedValue<NavigationTargets>> {
let module = parsed_module(db, file).load(db);
let model = SemanticModel::new(db, file);
let goto_target = find_goto_target(&model, &module, offset)?;
let ty = goto_target.inferred_type(&model)?;
tracing::debug!("Inferred type of covering node is {}", ty.display(db));
let navigation_targets = ty.navigation_targets(db);
Some(RangedValue {
range: FileRange::new(file, goto_target.range()),
value: navigation_targets,
})
}
#[cfg(test)]
mod tests {
use crate::goto_type_definition;
use crate::tests::{CursorTest, cursor_test};
use insta::assert_snapshot;
#[test]
fn goto_type_of_expression_with_class_type() {
let test = cursor_test(
r#"
class Test: ...
a<CURSOR>b = Test()
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:4:1
|
2 | class Test: ...
3 |
4 | ab = Test()
| ^^ Clicking here
|
info: Found 1 type definition
--> main.py:2:7
|
2 | class Test: ...
| ----
3 |
4 | ab = Test()
|
");
}
#[test]
fn goto_type_of_typing_dot_literal() {
let test = cursor_test(
r#"
from typing import Literal
a<CURSOR>b = Literal
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type definition]: Go to type definition
--> main.py:4:1
|
2 | from typing import Literal
3 |
4 | ab = Literal
| ^^ Clicking here
|
info: Found 1 type definition
--> stdlib/typing.pyi:487:1
|
485 | """
486 |
487 | Literal: _SpecialForm
| -------
488 | """Special typing form to define literal types (a.k.a. value types).
|
"#);
}
// this is a slightly different case to the one above,
// since `Any` is a class in typeshed rather than a variable
#[test]
fn goto_type_of_typing_dot_any() {
let test = cursor_test(
r#"
from typing import Any
a<CURSOR>b = Any
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type definition]: Go to type definition
--> main.py:4:1
|
2 | from typing import Any
3 |
4 | ab = Any
| ^^ Clicking here
|
info: Found 1 type definition
--> stdlib/typing.pyi:166:7
|
164 | # from _typeshed import AnnotationForm
165 |
166 | class Any:
| ---
167 | """Special type indicating an unconstrained type.
|
"#);
}
// Similarly, `Generic` is a `type[]` type in typeshed
#[test]
fn goto_type_of_typing_dot_generic() {
let test = cursor_test(
r#"
from typing import Generic
a<CURSOR>b = Generic
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type definition]: Go to type definition
--> main.py:4:1
|
2 | from typing import Generic
3 |
4 | ab = Generic
| ^^ Clicking here
|
info: Found 1 type definition
--> stdlib/typing.pyi:1268:1
|
1266 | def __class_getitem__(cls, args: TypeVar | tuple[TypeVar, ...]) -> _Final: ...
1267 |
1268 | Generic: type[_Generic]
| -------
1269 | """Abstract base class for generic types.
|
"#);
}
#[test]
fn goto_type_of_ty_extensions_special_form() {
let test = cursor_test(
r#"
from ty_extensions import AlwaysTruthy
a<CURSOR>b = AlwaysTruthy
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:4:1
|
2 | from ty_extensions import AlwaysTruthy
3 |
4 | ab = AlwaysTruthy
| ^^ Clicking here
|
info: Found 1 type definition
--> stdlib/ty_extensions.pyi:15:1
|
13 | # Types
14 | Unknown = object()
15 | AlwaysTruthy = object()
| ------------
16 | AlwaysFalsy = object()
|
");
}
#[test]
fn goto_type_of_expression_with_function_type() {
let test = cursor_test(
r#"
def foo(a, b): ...
ab = foo
a<CURSOR>b
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:6:1
|
4 | ab = foo
5 |
6 | ab
| ^^ Clicking here
|
info: Found 1 type definition
--> main.py:2:5
|
2 | def foo(a, b): ...
| ---
3 |
4 | ab = foo
|
");
}
#[test]
fn goto_type_of_expression_with_union_type() {
let test = cursor_test(
r#"
def foo(a, b): ...
def bar(a, b): ...
if random.choice():
a = foo
else:
a = bar
a<CURSOR>
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:12:1
|
10 | a = bar
11 |
12 | a
| ^ Clicking here
|
info: Found 2 type definitions
--> main.py:3:5
|
3 | def foo(a, b): ...
| ---
4 |
5 | def bar(a, b): ...
| ---
6 |
7 | if random.choice():
|
");
}
#[test]
fn goto_type_of_import_module() {
let mut test = cursor_test(
r#"
import l<CURSOR>ib
"#,
);
test.write_file("lib.py", "a = 10").unwrap();
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:2:8
|
2 | import lib
| ^^^ Clicking here
|
info: Found 1 type definition
--> lib.py:1:1
|
1 | a = 10
| ------
|
");
}
#[test]
fn goto_type_of_import_module_multi1() {
let mut test = cursor_test(
r#"
import li<CURSOR>b.submod
"#,
);
test.write_file("lib/__init__.py", "b = 7").unwrap();
test.write_file("lib/submod.py", "a = 10").unwrap();
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:2:8
|
2 | import lib.submod
| ^^^ Clicking here
|
info: Found 1 type definition
--> lib/__init__.py:1:1
|
1 | b = 7
| -----
|
");
}
#[test]
fn goto_type_of_import_module_multi2() {
let mut test = cursor_test(
r#"
import lib.subm<CURSOR>od
"#,
);
test.write_file("lib/__init__.py", "b = 7").unwrap();
test.write_file("lib/submod.py", "a = 10").unwrap();
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:2:12
|
2 | import lib.submod
| ^^^^^^ Clicking here
|
info: Found 1 type definition
--> lib/submod.py:1:1
|
1 | a = 10
| ------
|
");
}
#[test]
fn goto_type_of_from_import_module() {
let mut test = cursor_test(
r#"
from l<CURSOR>ib import a
"#,
);
test.write_file("lib.py", "a = 10").unwrap();
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:2:6
|
2 | from lib import a
| ^^^ Clicking here
|
info: Found 1 type definition
--> lib.py:1:1
|
1 | a = 10
| ------
|
");
}
#[test]
fn goto_type_of_from_import_module_multi1() {
let mut test = cursor_test(
r#"
from li<CURSOR>b.submod import a
"#,
);
test.write_file("lib/__init__.py", "b = 7").unwrap();
test.write_file("lib/submod.py", "a = 10").unwrap();
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:2:6
|
2 | from lib.submod import a
| ^^^ Clicking here
|
info: Found 1 type definition
--> lib/__init__.py:1:1
|
1 | b = 7
| -----
|
");
}
#[test]
fn goto_type_of_from_import_module_multi2() {
let mut test = cursor_test(
r#"
from lib.subm<CURSOR>od import a
"#,
);
test.write_file("lib/__init__.py", "b = 7").unwrap();
test.write_file("lib/submod.py", "a = 10").unwrap();
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:2:10
|
2 | from lib.submod import a
| ^^^^^^ Clicking here
|
info: Found 1 type definition
--> lib/submod.py:1:1
|
1 | a = 10
| ------
|
");
}
#[test]
fn goto_type_of_from_import_rel1() {
let mut test = CursorTest::builder()
.source(
"lib/sub/__init__.py",
r#"
from .bot.bot<CURSOR>mod import *
sub = 2
"#,
)
.build();
test.write_file("lib/__init__.py", "lib = 1").unwrap();
// test.write_file("lib/sub/__init__.py", "sub = 2").unwrap();
test.write_file("lib/sub/bot/__init__.py", "bot = 3")
.unwrap();
test.write_file("lib/sub/submod.py", "submod = 21").unwrap();
test.write_file("lib/sub/bot/botmod.py", "botmod = 31")
.unwrap();
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> lib/sub/__init__.py:2:11
|
2 | from .bot.botmod import *
| ^^^^^^ Clicking here
3 | sub = 2
|
info: Found 1 type definition
--> lib/sub/bot/botmod.py:1:1
|
1 | botmod = 31
| -----------
|
");
}
#[test]
fn goto_type_of_from_import_rel2() {
let mut test = CursorTest::builder()
.source(
"lib/sub/__init__.py",
r#"
from .bo<CURSOR>t.botmod import *
sub = 2
"#,
)
.build();
test.write_file("lib/__init__.py", "lib = 1").unwrap();
// test.write_file("lib/sub/__init__.py", "sub = 2").unwrap();
test.write_file("lib/sub/bot/__init__.py", "bot = 3")
.unwrap();
test.write_file("lib/sub/submod.py", "submod = 21").unwrap();
test.write_file("lib/sub/bot/botmod.py", "botmod = 31")
.unwrap();
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> lib/sub/__init__.py:2:7
|
2 | from .bot.botmod import *
| ^^^ Clicking here
3 | sub = 2
|
info: Found 1 type definition
--> lib/sub/bot/__init__.py:1:1
|
1 | bot = 3
| -------
|
");
}
#[test]
fn goto_type_of_from_import_rel3() {
let mut test = CursorTest::builder()
.source(
"lib/sub/__init__.py",
r#"
from .<CURSOR>bot.botmod import *
sub = 2
"#,
)
.build();
test.write_file("lib/__init__.py", "lib = 1").unwrap();
// test.write_file("lib/sub/__init__.py", "sub = 2").unwrap();
test.write_file("lib/sub/bot/__init__.py", "bot = 3")
.unwrap();
test.write_file("lib/sub/submod.py", "submod = 21").unwrap();
test.write_file("lib/sub/bot/botmod.py", "botmod = 31")
.unwrap();
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> lib/sub/__init__.py:2:7
|
2 | from .bot.botmod import *
| ^^^ Clicking here
3 | sub = 2
|
info: Found 1 type definition
--> lib/sub/bot/__init__.py:1:1
|
1 | bot = 3
| -------
|
");
}
#[test]
fn goto_type_of_from_import_rel4() {
let mut test = CursorTest::builder()
.source(
"lib/sub/__init__.py",
r#"
from .<CURSOR> import submod
sub = 2
"#,
)
.build();
test.write_file("lib/__init__.py", "lib = 1").unwrap();
// test.write_file("lib/sub/__init__.py", "sub = 2").unwrap();
test.write_file("lib/sub/bot/__init__.py", "bot = 3")
.unwrap();
test.write_file("lib/sub/submod.py", "submod = 21").unwrap();
test.write_file("lib/sub/bot/botmod.py", "botmod = 31")
.unwrap();
assert_snapshot!(test.goto_type_definition(), @"No goto target found");
}
#[test]
fn goto_type_of_expression_with_module() {
let mut test = cursor_test(
r#"
import lib
lib<CURSOR>
"#,
);
test.write_file("lib.py", "a = 10").unwrap();
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:4:1
|
2 | import lib
3 |
4 | lib
| ^^^ Clicking here
|
info: Found 1 type definition
--> lib.py:1:1
|
1 | a = 10
| ------
|
");
}
#[test]
fn goto_type_of_expression_with_literal_type() {
let test = cursor_test(
r#"
a: str = "test"
a<CURSOR>
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type definition]: Go to type definition
--> main.py:4:1
|
2 | a: str = "test"
3 |
4 | a
| ^ Clicking here
|
info: Found 1 type definition
--> stdlib/builtins.pyi:915:7
|
914 | @disjoint_base
915 | class str(Sequence[str]):
| ---
916 | """str(object='') -> str
917 | str(bytes_or_buffer[, encoding[, errors]]) -> str
|
"#);
}
#[test]
fn goto_type_of_expression_with_literal_node() {
let test = cursor_test(
r#"
a: str = "te<CURSOR>st"
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type definition]: Go to type definition
--> main.py:2:10
|
2 | a: str = "test"
| ^^^^^^ Clicking here
|
info: Found 1 type definition
--> stdlib/builtins.pyi:915:7
|
914 | @disjoint_base
915 | class str(Sequence[str]):
| ---
916 | """str(object='') -> str
917 | str(bytes_or_buffer[, encoding[, errors]]) -> str
|
"#);
}
#[test]
fn goto_type_of_expression_with_type_var_type() {
let test = cursor_test(
r#"
type Alias[T: int = bool] = list[T<CURSOR>]
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:2:34
|
2 | type Alias[T: int = bool] = list[T]
| ^ Clicking here
|
info: Found 1 type definition
--> main.py:2:12
|
2 | type Alias[T: int = bool] = list[T]
| -
|
");
}
#[test]
fn goto_type_of_expression_with_type_param_spec() {
let test = cursor_test(
r#"
type Alias[**P = [int, str]] = Callable[P<CURSOR>, int]
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:2:41
|
2 | type Alias[**P = [int, str]] = Callable[P, int]
| ^ Clicking here
|
info: Found 1 type definition
--> main.py:2:14
|
2 | type Alias[**P = [int, str]] = Callable[P, int]
| -
|
");
}
#[test]
fn goto_type_of_expression_with_type_var_tuple() {
let test = cursor_test(
r#"
type Alias[*Ts = ()] = tuple[*Ts<CURSOR>]
"#,
);
// TODO: Goto type definition currently doesn't work for type var tuples
// because the inference doesn't support them yet.
// This snapshot should show a single target pointing to `T`
assert_snapshot!(test.goto_type_definition(), @"No type definitions found");
}
#[test]
fn goto_type_of_bare_type_alias_type() {
let test = cursor_test(
r#"
from typing_extensions import TypeAliasType
Alias = TypeAliasType("Alias", tuple[int, int])
Alias<CURSOR>
"#,
);
// TODO: This should jump to the definition of `Alias` above.
assert_snapshot!(test.goto_type_definition(), @"No type definitions found");
}
#[test]
fn goto_type_string_annotation1() {
let test = cursor_test(
r#"
a: "MyCla<CURSOR>ss" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type definition]: Go to type definition
--> main.py:2:5
|
2 | a: "MyClass" = 1
| ^^^^^^^ Clicking here
3 |
4 | class MyClass:
|
info: Found 1 type definition
--> main.py:4:7
|
2 | a: "MyClass" = 1
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
"#);
}
#[test]
fn goto_type_string_annotation2() {
let test = cursor_test(
r#"
a: "None | MyCl<CURSOR>ass" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No goto target found");
}
#[test]
fn goto_type_string_annotation3() {
let test = cursor_test(
r#"
a: "None |<CURSOR> MyClass" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type definition]: Go to type definition
--> main.py:2:4
|
2 | a: "None | MyClass" = 1
| ^^^^^^^^^^^^^^^^ Clicking here
3 |
4 | class MyClass:
|
info: Found 2 type definitions
--> main.py:4:7
|
2 | a: "None | MyClass" = 1
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
::: stdlib/types.pyi:974:11
|
972 | if sys.version_info >= (3, 10):
973 | @final
974 | class NoneType:
| --------
975 | """The type of the None singleton."""
|
"#);
}
#[test]
fn goto_type_string_annotation4() {
let test = cursor_test(
r#"
a: "None | MyClass<CURSOR>" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No goto target found");
}
#[test]
fn goto_type_string_annotation5() {
let test = cursor_test(
r#"
a: "None | MyClass"<CURSOR> = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type definition]: Go to type definition
--> main.py:2:4
|
2 | a: "None | MyClass" = 1
| ^^^^^^^^^^^^^^^^ Clicking here
3 |
4 | class MyClass:
|
info: Found 2 type definitions
--> main.py:4:7
|
2 | a: "None | MyClass" = 1
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
::: stdlib/types.pyi:974:11
|
972 | if sys.version_info >= (3, 10):
973 | @final
974 | class NoneType:
| --------
975 | """The type of the None singleton."""
|
"#);
}
#[test]
fn goto_type_string_annotation_dangling1() {
let test = cursor_test(
r#"
a: "MyCl<CURSOR>ass |" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type definition]: Go to type definition
--> main.py:2:4
|
2 | a: "MyClass |" = 1
| ^^^^^^^^^^^ Clicking here
3 |
4 | class MyClass:
|
info: Found 1 type definition
--> stdlib/ty_extensions.pyi:14:1
|
13 | # Types
14 | Unknown = object()
| -------
15 | AlwaysTruthy = object()
16 | AlwaysFalsy = object()
|
"#);
}
#[test]
fn goto_type_string_annotation_dangling2() {
let test = cursor_test(
r#"
a: "MyCl<CURSOR>ass | No" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No goto target found");
}
#[test]
fn goto_type_string_annotation_dangling3() {
let test = cursor_test(
r#"
a: "MyClass | N<CURSOR>o" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No goto target found");
}
#[test]
fn goto_type_string_annotation_recursive() {
let test = cursor_test(
r#"
ab: "a<CURSOR>b"
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type definition]: Go to type definition
--> main.py:2:6
|
2 | ab: "ab"
| ^^ Clicking here
|
info: Found 1 type definition
--> stdlib/ty_extensions.pyi:14:1
|
13 | # Types
14 | Unknown = object()
| -------
15 | AlwaysTruthy = object()
16 | AlwaysFalsy = object()
|
"#);
}
#[test]
fn goto_type_string_annotation_unknown() {
let test = cursor_test(
r#"
x: "foo<CURSOR>bar"
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type definition]: Go to type definition
--> main.py:2:5
|
2 | x: "foobar"
| ^^^^^^ Clicking here
|
info: Found 1 type definition
--> stdlib/ty_extensions.pyi:14:1
|
13 | # Types
14 | Unknown = object()
| -------
15 | AlwaysTruthy = object()
16 | AlwaysFalsy = object()
|
"#);
}
#[test]
fn goto_type_match_name_stmt() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", a<CURSOR>b]:
x = ab
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No goto target found");
}
#[test]
fn goto_type_match_name_binding() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", ab]:
x = a<CURSOR>b
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No type definitions found");
}
#[test]
fn goto_type_match_rest_stmt() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", *a<CURSOR>b]:
x = ab
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No goto target found");
}
#[test]
fn goto_type_match_rest_binding() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", *ab]:
x = a<CURSOR>b
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No type definitions found");
}
#[test]
fn goto_type_match_as_stmt() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", ("a" | "b") as a<CURSOR>b]:
x = ab
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No goto target found");
}
#[test]
fn goto_type_match_as_binding() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", ("a" | "b") as ab]:
x = a<CURSOR>b
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No type definitions found");
}
#[test]
fn goto_type_match_keyword_stmt() {
let test = cursor_test(
r#"
class Click:
__match_args__ = ("position", "button")
def __init__(self, pos, btn):
self.position: int = pos
self.button: str = btn
def my_func(event: Click):
match event:
case Click(x, button=a<CURSOR>b):
x = ab
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No goto target found");
}
#[test]
fn goto_type_match_keyword_binding() {
let test = cursor_test(
r#"
class Click:
__match_args__ = ("position", "button")
def __init__(self, pos, btn):
self.position: int = pos
self.button: str = btn
def my_func(event: Click):
match event:
case Click(x, button=ab):
x = a<CURSOR>b
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No type definitions found");
}
#[test]
fn goto_type_match_class_name() {
let test = cursor_test(
r#"
class Click:
__match_args__ = ("position", "button")
def __init__(self, pos, btn):
self.position: int = pos
self.button: str = btn
def my_func(event: Click):
match event:
case Cl<CURSOR>ick(x, button=ab):
x = ab
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type definition]: Go to type definition
--> main.py:10:14
|
8 | def my_func(event: Click):
9 | match event:
10 | case Click(x, button=ab):
| ^^^^^ Clicking here
11 | x = ab
|
info: Found 1 type definition
--> main.py:2:7
|
2 | class Click:
| -----
3 | __match_args__ = ("position", "button")
4 | def __init__(self, pos, btn):
|
"#);
}
#[test]
fn goto_type_match_class_field_name() {
let test = cursor_test(
r#"
class Click:
__match_args__ = ("position", "button")
def __init__(self, pos, btn):
self.position: int = pos
self.button: str = btn
def my_func(event: Click):
match event:
case Click(x, but<CURSOR>ton=ab):
x = ab
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No goto target found");
}
#[test]
fn goto_type_typevar_name_stmt() {
let test = cursor_test(
r#"
type Alias1[A<CURSOR>B: int = bool] = tuple[AB, list[AB]]
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:2:13
|
2 | type Alias1[AB: int = bool] = tuple[AB, list[AB]]
| ^^ Clicking here
|
info: Found 1 type definition
--> main.py:2:13
|
2 | type Alias1[AB: int = bool] = tuple[AB, list[AB]]
| --
|
");
}
#[test]
fn goto_type_typevar_name_binding() {
let test = cursor_test(
r#"
type Alias1[AB: int = bool] = tuple[A<CURSOR>B, list[AB]]
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info[goto-type definition]: Go to type definition
--> main.py:2:37
|
2 | type Alias1[AB: int = bool] = tuple[AB, list[AB]]
| ^^ Clicking here
|
info: Found 1 type definition
--> main.py:2:13
|
2 | type Alias1[AB: int = bool] = tuple[AB, list[AB]]
| --
|
");
}
#[test]
fn goto_type_typevar_spec_stmt() {
let test = cursor_test(
r#"
from typing import Callable
type Alias2[**A<CURSOR>B = [int, str]] = Callable[AB, tuple[AB]]
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No goto target found");
}
#[test]
fn goto_type_typevar_spec_binding() {
let test = cursor_test(
r#"
from typing import Callable
type Alias2[**AB = [int, str]] = Callable[A<CURSOR>B, tuple[AB]]
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No type definitions found");
}
#[test]
fn goto_type_typevar_tuple_stmt() {
let test = cursor_test(
r#"
type Alias3[*A<CURSOR>B = ()] = tuple[tuple[*AB], tuple[*AB]]
"#,
);
assert_snapshot!(test.goto_type_definition(), @"No goto target found");
}
#[test]
fn goto_type_typevar_tuple_binding() {
let test = cursor_test(
r#"
type Alias3[*AB = ()] = tuple[tuple[*A<CURSOR>B], tuple[*AB]]
"#,
);
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/signature_help.rs | crates/ty_ide/src/signature_help.rs | //! This module handles the "signature help" request in the language server
//! protocol. This request is typically issued by a client when the user types
//! an open parenthesis and starts to enter arguments for a function call.
//! The signature help provides information that the editor displays to the
//! user about the target function signature including parameter names,
//! types, and documentation. It supports multiple signatures for union types
//! and overloads.
use crate::Db;
use crate::docstring::Docstring;
use crate::goto::Definitions;
use ruff_db::files::File;
use ruff_db::parsed::parsed_module;
use ruff_python_ast::find_node::covering_node;
use ruff_python_ast::token::TokenKind;
use ruff_python_ast::{self as ast, AnyNodeRef};
use ruff_text_size::{Ranged, TextRange, TextSize};
use ty_python_semantic::ResolvedDefinition;
use ty_python_semantic::SemanticModel;
use ty_python_semantic::semantic_index::definition::Definition;
use ty_python_semantic::types::ide_support::{
CallSignatureDetails, call_signature_details, find_active_signature_from_details,
};
use ty_python_semantic::types::{ParameterKind, Type};
// TODO: We may want to add special-case handling for calls to constructors
// so the class docstring is used in place of (or inaddition to) any docstring
// associated with the __new__ or __init__ call.
/// Information about a function parameter
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ParameterDetails<'db> {
/// The parameter name (e.g., "param1")
pub name: String,
/// The parameter label in the signature (e.g., "param1: str")
pub label: String,
/// The annotated type of the parameter, if any
pub ty: Option<Type<'db>>,
/// Documentation specific to the parameter, typically extracted from the
/// function's docstring
pub documentation: Option<String>,
/// True if the parameter is positional-only.
pub is_positional_only: bool,
}
/// Information about a function signature
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SignatureDetails<'db> {
/// Text representation of the full signature (including input parameters and return type).
pub label: String,
/// Documentation for the signature, typically from the function's docstring.
pub documentation: Option<Docstring>,
/// Information about each of the parameters in left-to-right order.
pub parameters: Vec<ParameterDetails<'db>>,
/// Index of the parameter that corresponds to the argument where the
/// user's cursor is currently positioned.
pub active_parameter: Option<usize>,
}
/// Signature help information for function calls
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SignatureHelpInfo<'db> {
/// Information about each of the signatures for the function call. We
/// need to handle multiple because of unions, overloads, and composite
/// calls like constructors (which invoke both __new__ and __init__).
pub signatures: Vec<SignatureDetails<'db>>,
/// Index of the "active signature" which is the first signature where
/// all arguments that are currently present in the code map to parameters.
pub active_signature: Option<usize>,
}
/// Signature help information for function calls at the given position
pub fn signature_help(db: &dyn Db, file: File, offset: TextSize) -> Option<SignatureHelpInfo<'_>> {
let parsed = parsed_module(db, file).load(db);
// Get the call expression at the given position.
let (call_expr, current_arg_index) = get_call_expr(&parsed, offset)?;
let model = SemanticModel::new(db, file);
// Get signature details from the semantic analyzer.
let signature_details: Vec<CallSignatureDetails<'_>> =
call_signature_details(&model, call_expr);
if signature_details.is_empty() {
return None;
}
// Find the active signature - the first signature where all arguments map to parameters.
let active_signature_index = find_active_signature_from_details(&signature_details);
// Convert to SignatureDetails objects.
let signatures: Vec<SignatureDetails> = signature_details
.into_iter()
.map(|details| {
create_signature_details_from_call_signature_details(db, &details, current_arg_index)
})
.collect();
Some(SignatureHelpInfo {
signatures,
active_signature: active_signature_index,
})
}
/// Returns the innermost call expression that contains the specified offset
/// and the index of the argument that the offset maps to.
fn get_call_expr(
parsed: &ruff_db::parsed::ParsedModuleRef,
offset: TextSize,
) -> Option<(&ast::ExprCall, usize)> {
let root_node: AnyNodeRef = parsed.syntax().into();
// Find the token under the cursor and use its offset to find the node
let token = parsed
.tokens()
.at_offset(offset)
.max_by_key(|token| match token.kind() {
TokenKind::Name
| TokenKind::String
| TokenKind::Complex
| TokenKind::Float
| TokenKind::Int => 1,
_ => 0,
})?;
// Find the covering node at the given position that is a function call.
// Note that we are okay with the range being anywhere within a call
// expression, even if it's not in the arguments portion of the call
// expression. This is because, e.g., a user can request signature
// information at a call site, and this should ideally work anywhere
// within the call site, even at the function name.
let call = covering_node(root_node, token.range())
.find_first(|node| {
if !node.is_expr_call() {
return false;
}
// Close the signature help if the cursor is at the closing parenthesis
if token.kind() == TokenKind::Rpar && node.end() == token.end() && offset == token.end()
{
return false;
}
if token.range().is_empty() && node.end() == token.end() {
return false;
}
true
})
.ok()?;
// Get the function call expression.
let AnyNodeRef::ExprCall(call_expr) = call.node() else {
return None;
};
// Determine which argument corresponding to the current cursor location.
let current_arg_index = get_argument_index(call_expr, offset);
Some((call_expr, current_arg_index))
}
/// Determine which argument is associated with the specified offset.
/// Returns zero if not within any argument.
fn get_argument_index(call_expr: &ast::ExprCall, offset: TextSize) -> usize {
let mut current_arg = 0;
for (i, arg) in call_expr.arguments.arguments_source_order().enumerate() {
if offset <= arg.end() {
return i;
}
current_arg = i + 1;
}
current_arg
}
/// Create signature details from `CallSignatureDetails`.
///
/// Resolves which parameter should be highlighted for the argument the
/// cursor is currently on, and attaches the callable's documentation.
fn create_signature_details_from_call_signature_details<'db>(
    db: &dyn crate::Db,
    details: &CallSignatureDetails<'db>,
    current_arg_index: usize,
) -> SignatureDetails<'db> {
    let label = details.label.clone();
    let documentation = get_callable_documentation(db, details.definition);

    // Translate the argument index to a parameter index using the mapping.
    let active_parameter = if details.argument_to_parameter_mapping.is_empty()
        && current_arg_index == 0
    {
        // No mapping information at all: default to the first parameter.
        Some(0)
    } else {
        let mapped = details
            .argument_to_parameter_mapping
            .get(current_arg_index)
            .and_then(|mapping| mapping.parameters.first().copied());
        // If the mapping has no entry for this argument, fall back to the raw
        // argument index, provided it refers to an actual parameter.
        mapped.or_else(|| {
            (current_arg_index < details.parameter_label_offsets.len())
                .then_some(current_arg_index)
        })
    };

    let parameters = create_parameters_from_offsets(
        &details.parameter_label_offsets,
        &label,
        documentation.as_ref(),
        &details.parameter_names,
        &details.parameter_kinds,
        &details.parameter_types,
    );

    SignatureDetails {
        label,
        documentation,
        parameters,
        active_parameter,
    }
}
/// Determine appropriate documentation for a callable type based on its original type.
///
/// Returns `None` when the callable has no resolvable definition or the
/// definition carries no docstring.
fn get_callable_documentation(
    db: &dyn crate::Db,
    definition: Option<Definition>,
) -> Option<Docstring> {
    let definition = definition?;
    let resolved = ResolvedDefinition::Definition(definition);
    Definitions(vec![resolved]).docstring(db)
}
/// Create `ParameterDetails` objects from parameter label offsets.
///
/// Each offset range is sliced out of `signature_label` to form the
/// parameter's label; name, kind, and type are looked up positionally and
/// degrade gracefully when the parallel slices are shorter than the offsets.
fn create_parameters_from_offsets<'db>(
    parameter_offsets: &[TextRange],
    signature_label: &str,
    docstring: Option<&Docstring>,
    parameter_names: &[String],
    parameter_kinds: &[ParameterKind],
    parameter_types: &[Option<Type<'db>>],
) -> Vec<ParameterDetails<'db>> {
    // Per-parameter documentation keyed by parameter name, pulled from the
    // function's docstring when one is available.
    let param_docs = match docstring {
        Some(docstring) => docstring.parameter_documentation(),
        None => std::collections::HashMap::new(),
    };

    let mut parameters = Vec::with_capacity(parameter_offsets.len());
    for (i, range) in parameter_offsets.iter().enumerate() {
        // Slice the parameter's label out of the rendered signature string.
        let label = signature_label
            .get(usize::from(range.start())..usize::from(range.end()))
            .unwrap_or("unknown")
            .to_string();
        // Parameter name used for the docstring lookup.
        let name = parameter_names.get(i).map(String::as_str).unwrap_or("");
        let is_positional_only = matches!(
            parameter_kinds.get(i),
            Some(ParameterKind::PositionalOnly { .. })
        );
        parameters.push(ParameterDetails {
            name: name.to_string(),
            label,
            ty: parameter_types.get(i).copied().flatten(),
            documentation: param_docs.get(name).cloned(),
            is_positional_only,
        });
    }
    parameters
}
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use crate::MarkupKind;
use crate::docstring::Docstring;
use crate::signature_help::SignatureHelpInfo;
use crate::tests::{CursorTest, cursor_test};
#[test]
fn signature_help_basic_function_call() {
let test = cursor_test(
r#"
def example_function(param1: str, param2: int) -> str:
"""This is a docstring for the example function.
Args:
param1: The first parameter as a string
param2: The second parameter as an integer
Returns:
A formatted string combining both parameters
"""
return f"{param1}: {param2}"
result = example_function(<CURSOR>
"#,
);
// Test that signature help is provided
let result = test.signature_help().expect("Should have signature help");
assert_eq!(result.signatures.len(), 1);
let signature = &result.signatures[0];
assert!(signature.label.contains("param1") && signature.label.contains("param2"));
// Verify that the docstring is extracted and included in the documentation
let expected_docstring = concat!(
"This is a docstring for the example function.\n",
"\n",
"Args:\n",
" param1: The first parameter as a string\n",
" param2: The second parameter as an integer\n",
"\n",
"Returns:\n",
" A formatted string combining both parameters\n",
);
assert_eq!(
signature
.documentation
.as_ref()
.map(Docstring::render_plaintext),
Some(expected_docstring.to_string())
);
assert_eq!(result.active_signature, Some(0));
assert_eq!(signature.active_parameter, Some(0));
}
#[test]
fn signature_help_method_call() {
let test = cursor_test(
r#"
class MyClass:
def my_method(self, arg1: str, arg2: bool) -> None:
pass
obj = MyClass()
obj.my_method(arg2=True, arg1=<CURSOR>
"#,
);
// Test that signature help is provided for method calls
let result = test.signature_help().expect("Should have signature help");
assert_eq!(result.signatures.len(), 1);
let signature = &result.signatures[0];
assert!(signature.label.contains("arg1") && signature.label.contains("arg2"));
assert_eq!(result.active_signature, Some(0));
// Check the active parameter from the active signature
if let Some(active_sig_index) = result.active_signature {
let active_signature = &result.signatures[active_sig_index];
assert_eq!(active_signature.active_parameter, Some(0));
}
}
#[test]
fn signature_help_nested_function_calls() {
let test = cursor_test(
r#"
def outer(a: int) -> int:
return a * 2
def inner(b: str) -> str:
return b.upper()
result = outer(inner(<CURSOR>
"#,
);
// Test that signature help focuses on the innermost function call
let result = test.signature_help().expect("Should have signature help");
assert_eq!(result.signatures.len(), 1);
let signature = &result.signatures[0];
assert!(signature.label.contains("str") || signature.label.contains("->"));
assert_eq!(result.active_signature, Some(0));
assert_eq!(signature.active_parameter, Some(0));
}
#[test]
fn signature_help_union_callable() {
let test = cursor_test(
r#"
import random
def func_a(x: int) -> int:
return x
def func_b(y: str) -> str:
return y
if random.random() > 0.5:
f = func_a
else:
f = func_b
f(<CURSOR>
"#,
);
let result = test.signature_help().expect("Should have signature help");
assert_eq!(result.signatures.len(), 2);
let signature = &result.signatures[0];
assert_eq!(signature.label, "(x: int) -> int");
assert_eq!(signature.parameters.len(), 1);
// Check parameter information
let param = &signature.parameters[0];
assert_eq!(param.label, "x: int");
assert_eq!(param.name, "x");
// Validate the second signature (from func_b)
let signature_b = &result.signatures[1];
assert_eq!(signature_b.label, "(y: str) -> str");
assert_eq!(signature_b.parameters.len(), 1);
// Check parameter information for the second signature
let param_b = &signature_b.parameters[0];
assert_eq!(param_b.label, "y: str");
assert_eq!(param_b.name, "y");
assert_eq!(result.active_signature, Some(0));
// Check the active parameter from the active signature
if let Some(active_sig_index) = result.active_signature {
let active_signature = &result.signatures[active_sig_index];
assert_eq!(active_signature.active_parameter, Some(0));
}
}
#[test]
fn signature_help_overloaded_function() {
let test = cursor_test(
r#"
from typing import overload
@overload
def process(value: int) -> str: ...
@overload
def process(value: str) -> int: ...
def process(value):
if isinstance(value, int):
return str(value)
else:
return len(value)
result = process(<CURSOR>
"#,
);
// Test that signature help is provided for overloaded functions
let result = test.signature_help().expect("Should have signature help");
// We should have signatures for the overloads
assert_eq!(result.signatures.len(), 2);
assert_eq!(result.active_signature, Some(0));
// Check the active parameter from the active signature
if let Some(active_sig_index) = result.active_signature {
let active_signature = &result.signatures[active_sig_index];
assert_eq!(active_signature.active_parameter, Some(0));
}
// Validate the first overload: process(value: int) -> str
let signature1 = &result.signatures[0];
assert_eq!(signature1.label, "(value: int) -> str");
assert_eq!(signature1.parameters.len(), 1);
let param1 = &signature1.parameters[0];
assert_eq!(param1.label, "value: int");
assert_eq!(param1.name, "value");
// Validate the second overload: process(value: str) -> int
let signature2 = &result.signatures[1];
assert_eq!(signature2.label, "(value: str) -> int");
assert_eq!(signature2.parameters.len(), 1);
let param2 = &signature2.parameters[0];
assert_eq!(param2.label, "value: str");
assert_eq!(param2.name, "value");
}
#[test]
fn signature_help_overload_type_disambiguated1() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import ab
ab(1<CURSOR>)
",
)
.source(
"mymodule.py",
r#"
def ab(a):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int):
"""the int overload"""
@overload
def ab(a: str): ...
"""the str overload"""
"#,
)
.build();
assert_snapshot!(test.signature_help_render(), @r"
============== active signature =============
(a: int) -> Unknown
---------------------------------------------
the int overload
-------------- active parameter -------------
a: int
---------------------------------------------
=============== other signature =============
(a: str) -> Unknown
---------------------------------------------
the real implementation!
-------------- active parameter -------------
a: str
---------------------------------------------
");
}
#[test]
fn signature_help_overload_type_disambiguated2() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
from mymodule import ab
ab("hello"<CURSOR>)
"#,
)
.source(
"mymodule.py",
r#"
def ab(a):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int):
"""the int overload"""
@overload
def ab(a: str):
"""the str overload"""
"#,
)
.build();
assert_snapshot!(test.signature_help_render(), @r"
============== active signature =============
(a: int) -> Unknown
---------------------------------------------
the int overload
-------------- active parameter -------------
a: int
---------------------------------------------
=============== other signature =============
(a: str) -> Unknown
---------------------------------------------
the str overload
-------------- active parameter -------------
a: str
---------------------------------------------
");
}
#[test]
fn signature_help_overload_arity_disambiguated1() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import ab
ab(1, 2<CURSOR>)
",
)
.source(
"mymodule.py",
r#"
def ab(a, b = None):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int, b: int):
"""the two arg overload"""
@overload
def ab(a: int):
"""the one arg overload"""
"#,
)
.build();
assert_snapshot!(test.signature_help_render(), @r"
============== active signature =============
(a: int, b: int) -> Unknown
---------------------------------------------
the two arg overload
-------------- active parameter -------------
b: int
---------------------------------------------
=============== other signature =============
(a: int) -> Unknown
---------------------------------------------
the one arg overload
(no active parameter specified)
");
}
#[test]
fn signature_help_overload_arity_disambiguated2() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import ab
ab(1<CURSOR>)
",
)
.source(
"mymodule.py",
r#"
def ab(a, b = None):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int, b: int):
"""the two arg overload"""
@overload
def ab(a: int):
"""the one arg overload"""
"#,
)
.build();
assert_snapshot!(test.signature_help_render(), @r"
============== active signature =============
(a: int, b: int) -> Unknown
---------------------------------------------
the two arg overload
-------------- active parameter -------------
a: int
---------------------------------------------
=============== other signature =============
(a: int) -> Unknown
---------------------------------------------
the one arg overload
-------------- active parameter -------------
a: int
---------------------------------------------
");
}
#[test]
fn signature_help_overload_keyword_disambiguated1() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import ab
ab(1, b=2<CURSOR>)
",
)
.source(
"mymodule.py",
r#"
def ab(a, *, b = None, c = None):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int):
"""keywordless overload"""
@overload
def ab(a: int, *, b: int):
"""b overload"""
@overload
def ab(a: int, *, c: int):
"""c overload"""
"#,
)
.build();
assert_snapshot!(test.signature_help_render(), @r"
============== active signature =============
(a: int, *, b: int) -> Unknown
---------------------------------------------
b overload
-------------- active parameter -------------
b: int
---------------------------------------------
=============== other signature =============
(a: int) -> Unknown
---------------------------------------------
keywordless overload
(no active parameter specified)
=============== other signature =============
(a: int, *, c: int) -> Unknown
---------------------------------------------
c overload
-------------- active parameter -------------
c: int
---------------------------------------------
");
}
#[test]
fn signature_help_overload_keyword_disambiguated2() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import ab
ab(1, c=2<CURSOR>)
",
)
.source(
"mymodule.py",
r#"
def ab(a, *, b = None, c = None):
"""the real implementation!"""
"#,
)
.source(
"mymodule.pyi",
r#"
from typing import overload
@overload
def ab(a: int):
"""keywordless overload"""
@overload
def ab(a: int, *, b: int):
"""b overload"""
@overload
def ab(a: int, *, c: int):
"""c overload"""
"#,
)
.build();
assert_snapshot!(test.signature_help_render(), @r"
============== active signature =============
(a: int, *, c: int) -> Unknown
---------------------------------------------
c overload
-------------- active parameter -------------
c: int
---------------------------------------------
=============== other signature =============
(a: int) -> Unknown
---------------------------------------------
keywordless overload
(no active parameter specified)
=============== other signature =============
(a: int, *, b: int) -> Unknown
---------------------------------------------
b overload
-------------- active parameter -------------
b: int
---------------------------------------------
");
}
#[test]
fn signature_help_class_constructor() {
let test = cursor_test(
r#"
class Point:
"""A simple point class representing a 2D coordinate."""
def __init__(self, x: int, y: int):
"""Initialize a point with x and y coordinates.
Args:
x: The x-coordinate
y: The y-coordinate
"""
self.x = x
self.y = y
point = Point(<CURSOR>
"#,
);
let result = test.signature_help().expect("Should have signature help");
// Should have exactly one signature for the constructor
assert_eq!(result.signatures.len(), 1);
let signature = &result.signatures[0];
// Validate the constructor signature
assert_eq!(signature.label, "(x: int, y: int) -> Point");
assert_eq!(signature.parameters.len(), 2);
// Validate the first parameter (x: int)
let param_x = &signature.parameters[0];
assert_eq!(param_x.label, "x: int");
assert_eq!(param_x.name, "x");
assert_eq!(param_x.documentation, Some("The x-coordinate".to_string()));
// Validate the second parameter (y: int)
let param_y = &signature.parameters[1];
assert_eq!(param_y.label, "y: int");
assert_eq!(param_y.name, "y");
assert_eq!(param_y.documentation, Some("The y-coordinate".to_string()));
// Should have the __init__ method docstring as documentation (not the class docstring)
let expected_docstring = "Initialize a point with x and y coordinates.\n\nArgs:\n x: The x-coordinate\n y: The y-coordinate\n";
assert_eq!(
signature
.documentation
.as_ref()
.map(Docstring::render_plaintext),
Some(expected_docstring.to_string())
);
}
#[test]
fn signature_help_callable_object() {
let test = cursor_test(
r#"
class Multiplier:
def __call__(self, x: int) -> int:
return x * 2
multiplier = Multiplier()
result = multiplier(<CURSOR>
"#,
);
let result = test.signature_help().expect("Should have signature help");
// Should have a signature for the callable object
assert!(!result.signatures.is_empty());
let signature = &result.signatures[0];
// Should provide signature help for the callable
assert!(signature.label.contains("int") || signature.label.contains("->"));
}
#[test]
fn signature_help_subclass_of_constructor() {
let test = cursor_test(
r#"
from typing import Type
def create_instance(cls: Type[list]) -> list:
return cls(<CURSOR>
"#,
);
let result = test.signature_help().expect("Should have signature help");
// Should have a signature
assert!(!result.signatures.is_empty());
let signature = &result.signatures[0];
// Should have empty documentation for now
assert_eq!(
signature
.documentation
.as_ref()
.map(Docstring::render_plaintext),
None
);
}
#[test]
fn signature_help_parameter_label_offsets() {
let test = cursor_test(
r#"
def test_function(param1: str, param2: int, param3: bool) -> str:
return f"{param1}: {param2}, {param3}"
result = test_function(<CURSOR>
"#,
);
let result = test.signature_help().expect("Should have signature help");
assert_eq!(result.signatures.len(), 1);
let signature = &result.signatures[0];
assert_eq!(signature.parameters.len(), 3);
// Check that we have parameter labels
for (i, param) in signature.parameters.iter().enumerate() {
let expected_param_spec = match i {
0 => "param1: str",
1 => "param2: int",
2 => "param3: bool",
_ => panic!("Unexpected parameter index"),
};
assert_eq!(param.label, expected_param_spec);
}
}
#[test]
fn signature_help_active_signature_selection() {
// This test verifies that the algorithm correctly selects the first signature
// where all arguments present in the call have valid parameter mappings.
let test = cursor_test(
r#"
from typing import overload
@overload
def process(value: int) -> str: ...
@overload
def process(value: str, flag: bool) -> int: ...
def process(value, flag=None):
if isinstance(value, int):
return str(value)
elif flag is not None:
return len(value) if flag else 0
else:
return len(value)
# Call with two arguments - should select the second overload
result = process("hello", True<CURSOR>)
"#,
);
let result = test.signature_help().expect("Should have signature help");
// Should have signatures for the overloads.
assert!(!result.signatures.is_empty());
// Check that we have an active signature and parameter
if let Some(active_sig_index) = result.active_signature {
let active_signature = &result.signatures[active_sig_index];
assert_eq!(active_signature.active_parameter, Some(1));
}
}
#[test]
fn signature_help_parameter_documentation() {
let test = cursor_test(
r#"
def documented_function(param1: str, param2: int) -> str:
"""This is a function with parameter documentation.
Args:
param1: The first parameter description
param2: The second parameter description
"""
return f"{param1}: {param2}"
result = documented_function(<CURSOR>
"#,
);
let result = test.signature_help().expect("Should have signature help");
assert_eq!(result.signatures.len(), 1);
let signature = &result.signatures[0];
assert_eq!(signature.parameters.len(), 2);
// Check that parameter documentation is extracted
let param1 = &signature.parameters[0];
assert_eq!(
param1.documentation,
Some("The first parameter description".to_string())
);
let param2 = &signature.parameters[1];
assert_eq!(
param2.documentation,
Some("The second parameter description".to_string())
);
}
#[test]
fn signature_help_after_closing_paren() {
let test = cursor_test(
r#"
def func1(v: str) -> str:
return v
r = func1("")<CURSOR>
print(r)
"#,
);
let result = test.signature_help();
assert!(
result.is_none(),
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/workspace_symbols.rs | crates/ty_ide/src/workspace_symbols.rs | use crate::symbols::{QueryPattern, SymbolInfo, symbols_for_file};
use ruff_db::files::File;
use ty_project::Db;
/// Get all workspace symbols matching the query string.
/// Returns symbols from all files in the workspace, filtered by the query.
///
/// Files are scanned in parallel on the rayon thread pool, so the order of
/// the returned symbols is non-deterministic.
pub fn workspace_symbols(db: &dyn Db, query: &str) -> Vec<WorkspaceSymbolInfo> {
    // If the query is empty, return immediately to avoid expensive file scanning
    if query.is_empty() {
        return Vec::new();
    }
    let workspace_symbols_span = tracing::debug_span!("workspace_symbols");
    let _span = workspace_symbols_span.enter();
    let project = db.project();
    let query = QueryPattern::fuzzy(query);
    let files = project.files(db);
    // Collected from worker tasks; the mutex serializes pushes across threads.
    let results = std::sync::Mutex::new(Vec::new());
    {
        // Shadow with clones/borrows that can be moved into the rayon scope.
        let db = db.dyn_clone();
        let files = &files;
        let results = &results;
        let query = &query;
        let workspace_symbols_span = &workspace_symbols_span;
        rayon::scope(move |s| {
            // For each file, extract symbols and add them to results
            for file in files.iter() {
                let db = db.dyn_clone();
                s.spawn(move |_| {
                    let symbols_for_file_span = tracing::debug_span!(parent: workspace_symbols_span, "symbols_for_file", ?file);
                    let _entered = symbols_for_file_span.entered();
                    for (_, symbol) in symbols_for_file(&*db, *file).search(query) {
                        // It seems like we could do better here than
                        // locking `results` for every single symbol,
                        // but this works pretty well as it is.
                        results.lock().unwrap().push(WorkspaceSymbolInfo {
                            symbol: symbol.to_owned(),
                            file: *file,
                        });
                    }
                });
            }
        });
    }
    // `rayon::scope` joins all spawned tasks before returning, so the mutex is
    // no longer contended here and `into_inner` cannot observe a poisoned lock
    // unless a worker panicked.
    results.into_inner().unwrap()
}
/// A symbol found in the workspace, including the file it was found in.
///
/// Produced by [`workspace_symbols`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WorkspaceSymbolInfo {
    /// The symbol information
    pub symbol: SymbolInfo<'static>,
    /// The file containing the symbol
    pub file: File,
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::tests::CursorTest;
    use crate::tests::IntoDiagnostic;
    use insta::assert_snapshot;
    use ruff_db::diagnostic::{
        Annotation, Diagnostic, DiagnosticId, LintName, Severity, Span, SubDiagnostic,
        SubDiagnosticSeverity,
    };

    // Symbols defined in several files should all be reachable via fuzzy queries.
    #[test]
    fn workspace_symbols_multi_file() {
        let test = CursorTest::builder()
            .source(
                "utils.py",
                "
def utility_function():
'''A helpful utility function'''
pass
",
            )
            .source(
                "models.py",
                "
class DataModel:
'''A data model class'''
def __init__(self):
pass
",
            )
            .source(
                "constants.py",
                "
API_BASE_URL = 'https://api.example.com'
<CURSOR>",
            )
            .build();
        assert_snapshot!(test.workspace_symbols("ufunc"), @r"
info[workspace-symbols]: WorkspaceSymbolInfo
--> utils.py:2:5
|
2 | def utility_function():
| ^^^^^^^^^^^^^^^^
3 | '''A helpful utility function'''
4 | pass
|
info: Function utility_function
");
        assert_snapshot!(test.workspace_symbols("data"), @r"
info[workspace-symbols]: WorkspaceSymbolInfo
--> models.py:2:7
|
2 | class DataModel:
| ^^^^^^^^^
3 | '''A data model class'''
4 | def __init__(self):
|
info: Class DataModel
");
        assert_snapshot!(test.workspace_symbols("apibase"), @r"
info[workspace-symbols]: WorkspaceSymbolInfo
--> constants.py:2:1
|
2 | API_BASE_URL = 'https://api.example.com'
| ^^^^^^^^^^^^
|
info: Constant API_BASE_URL
");
    }

    // Class members (methods) are indexed as workspace symbols too.
    #[test]
    fn members() {
        let test = CursorTest::builder()
            .source(
                "utils.py",
                "
class Test:
def from_path(): ...
<CURSOR>",
            )
            .build();
        assert_snapshot!(test.workspace_symbols("from"), @r"
info[workspace-symbols]: WorkspaceSymbolInfo
--> utils.py:3:9
|
2 | class Test:
3 | def from_path(): ...
| ^^^^^^^^^
|
info: Method from_path
");
    }

    // An `__all__` assignment itself must not appear as a symbol.
    #[test]
    fn ignore_all() {
        let test = CursorTest::builder()
            .source(
                "utils.py",
                "
__all__ = []
class Test:
def from_path(): ...
<CURSOR>",
            )
            .build();
        assert_snapshot!(test.workspace_symbols("from"), @r"
info[workspace-symbols]: WorkspaceSymbolInfo
--> utils.py:4:9
|
2 | __all__ = []
3 | class Test:
4 | def from_path(): ...
| ^^^^^^^^^
|
info: Method from_path
");
    }

    // Imported names are not symbols of the importing file.
    #[test]
    fn ignore_imports() {
        let test = CursorTest::builder()
            .source(
                "utils.py",
                "
import re
import json as json
from collections import defaultdict
foo = 1
<CURSOR>",
            )
            .build();
        assert_snapshot!(test.workspace_symbols("foo"), @r"
info[workspace-symbols]: WorkspaceSymbolInfo
--> utils.py:5:1
|
3 | import json as json
4 | from collections import defaultdict
5 | foo = 1
| ^^^
|
info: Variable foo
");
        assert_snapshot!(test.workspace_symbols("re"), @"No symbols found");
        assert_snapshot!(test.workspace_symbols("json"), @"No symbols found");
        assert_snapshot!(test.workspace_symbols("default"), @"No symbols found");
    }

    impl CursorTest {
        // Renders all matching workspace symbols as diagnostics for snapshotting.
        fn workspace_symbols(&self, query: &str) -> String {
            let symbols = workspace_symbols(&self.db, query);
            if symbols.is_empty() {
                return "No symbols found".to_string();
            }
            self.render_diagnostics(symbols.into_iter().map(WorkspaceSymbolDiagnostic::new))
        }
    }

    // Adapter that turns a `WorkspaceSymbolInfo` into a renderable diagnostic.
    struct WorkspaceSymbolDiagnostic {
        symbol_info: WorkspaceSymbolInfo,
    }

    impl WorkspaceSymbolDiagnostic {
        fn new(symbol_info: WorkspaceSymbolInfo) -> Self {
            Self { symbol_info }
        }
    }

    impl IntoDiagnostic for WorkspaceSymbolDiagnostic {
        fn into_diagnostic(self) -> Diagnostic {
            let symbol_kind_str = self.symbol_info.symbol.kind.to_string();
            let info_text = format!("{} {}", symbol_kind_str, self.symbol_info.symbol.name);
            let sub = SubDiagnostic::new(SubDiagnosticSeverity::Info, info_text);
            let mut main = Diagnostic::new(
                DiagnosticId::Lint(LintName::of("workspace-symbols")),
                Severity::Info,
                "WorkspaceSymbolInfo".to_string(),
            );
            // Point the primary annotation at the symbol's name range in its file.
            main.annotate(Annotation::primary(
                Span::from(self.symbol_info.file).with_range(self.symbol_info.symbol.name_range),
            ));
            main.sub(sub);
            main
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/code_action.rs | crates/ty_ide/src/code_action.rs | use crate::completion;
use ruff_db::{files::File, parsed::parsed_module};
use ruff_diagnostics::Edit;
use ruff_python_ast::find_node::covering_node;
use ruff_text_size::TextRange;
use ty_project::Db;
use ty_python_semantic::create_suppression_fix;
use ty_python_semantic::lint::LintId;
use ty_python_semantic::types::{UNDEFINED_REVEAL, UNRESOLVED_REFERENCE};
/// A `QuickFix` Code Action
#[derive(Debug, Clone)]
pub struct QuickFix {
    /// Human-readable title for this action.
    pub title: String,
    /// The text edits that implement the fix.
    pub edits: Vec<Edit>,
    /// Whether this action should be marked as the preferred fix
    /// (set for import/qualification fixes, not for lint suppressions).
    pub preferred: bool,
}
pub fn code_actions(
db: &dyn Db,
file: File,
diagnostic_range: TextRange,
diagnostic_id: &str,
) -> Vec<QuickFix> {
let registry = db.lint_registry();
let Ok(lint_id) = registry.get(diagnostic_id) else {
return Vec::new();
};
let mut actions = Vec::new();
// Suggest imports/qualifications for unresolved references (often ideal)
let is_unresolved_reference =
lint_id == LintId::of(&UNRESOLVED_REFERENCE) || lint_id == LintId::of(&UNDEFINED_REVEAL);
if is_unresolved_reference
&& let Some(import_quick_fix) = unresolved_fixes(db, file, diagnostic_range)
{
actions.extend(import_quick_fix);
}
// Suggest just suppressing the lint (always a valid option, but never ideal)
actions.push(QuickFix {
title: format!("Ignore '{}' for this line", lint_id.name()),
edits: create_suppression_fix(db, file, lint_id, diagnostic_range).into_edits(),
preferred: false,
});
actions
}
fn unresolved_fixes(
db: &dyn Db,
file: File,
diagnostic_range: TextRange,
) -> Option<impl Iterator<Item = QuickFix>> {
let parsed = parsed_module(db, file).load(db);
let node = covering_node(parsed.syntax().into(), diagnostic_range).node();
let symbol = &node.expr_name()?.id;
Some(
completion::unresolved_fixes(db, file, &parsed, symbol, node)
.into_iter()
.map(|import| QuickFix {
title: import.label,
edits: vec![import.edit],
preferred: true,
}),
)
}
#[cfg(test)]
mod tests {
use crate::code_actions;
use insta::assert_snapshot;
use ruff_db::{
diagnostic::{
Annotation, Diagnostic, DiagnosticFormat, DiagnosticId, DisplayDiagnosticConfig,
LintName, Span, SubDiagnostic,
},
files::{File, system_path_to_file},
system::{DbWithWritableSystem, SystemPathBuf},
};
use ruff_diagnostics::Fix;
use ruff_python_trivia::textwrap::dedent;
use ruff_text_size::{TextRange, TextSize};
use ty_project::ProjectMetadata;
use ty_python_semantic::{
lint::LintMetadata,
types::{UNDEFINED_REVEAL, UNRESOLVED_REFERENCE},
};
#[test]
fn add_ignore() {
let test = CodeActionTest::with_source(r#"b = <START>a<END> / 10"#);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r"
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:1:5
|
1 | b = a / 10
| ^
|
- b = a / 10
1 + b = a / 10 # ty:ignore[unresolved-reference]
");
}
#[test]
fn add_ignore_existing_comment() {
let test = CodeActionTest::with_source(r#"b = <START>a<END> / 10 # fmt: off"#);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r"
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:1:5
|
1 | b = a / 10 # fmt: off
| ^
|
- b = a / 10 # fmt: off
1 + b = a / 10 # fmt: off # ty:ignore[unresolved-reference]
");
}
#[test]
fn add_ignore_trailing_whitespace() {
let test = CodeActionTest::with_source(r#"b = <START>a<END> / 10 "#);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r"
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:1:5
|
1 | b = a / 10
| ^
|
- b = a / 10
1 + b = a / 10 # ty:ignore[unresolved-reference]
");
}
#[test]
fn add_code_existing_ignore() {
let test = CodeActionTest::with_source(
r#"
b = <START>a<END> / 0 # ty:ignore[division-by-zero]
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r"
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:2:5
|
2 | b = a / 0 # ty:ignore[division-by-zero]
| ^
|
1 |
- b = a / 0 # ty:ignore[division-by-zero]
2 + b = a / 0 # ty:ignore[division-by-zero, unresolved-reference]
");
}
#[test]
fn add_code_existing_ignore_trailing_comma() {
let test = CodeActionTest::with_source(
r#"
b = <START>a<END> / 0 # ty:ignore[division-by-zero,]
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r"
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:2:5
|
2 | b = a / 0 # ty:ignore[division-by-zero,]
| ^
|
1 |
- b = a / 0 # ty:ignore[division-by-zero,]
2 + b = a / 0 # ty:ignore[division-by-zero, unresolved-reference]
");
}
#[test]
fn add_code_existing_ignore_trailing_whitespace() {
let test = CodeActionTest::with_source(
r#"
b = <START>a<END> / 0 # ty:ignore[division-by-zero ]
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r"
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:2:5
|
2 | b = a / 0 # ty:ignore[division-by-zero ]
| ^
|
1 |
- b = a / 0 # ty:ignore[division-by-zero ]
2 + b = a / 0 # ty:ignore[division-by-zero, unresolved-reference ]
");
}
#[test]
fn add_code_existing_ignore_with_reason() {
let test = CodeActionTest::with_source(
r#"
b = <START>a<END> / 0 # ty:ignore[division-by-zero] some explanation
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r"
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:2:5
|
2 | b = a / 0 # ty:ignore[division-by-zero] some explanation
| ^
|
1 |
- b = a / 0 # ty:ignore[division-by-zero] some explanation
2 + b = a / 0 # ty:ignore[division-by-zero] some explanation # ty:ignore[unresolved-reference]
");
}
#[test]
fn add_code_existing_ignore_start_line() {
let test = CodeActionTest::with_source(
r#"
b = (
<START>a # ty:ignore[division-by-zero]
/
0<END>
)
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r"
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:3:9
|
2 | b = (
3 | / a # ty:ignore[division-by-zero]
4 | | /
5 | | 0
| |_________^
6 | )
|
1 |
2 | b = (
- a # ty:ignore[division-by-zero]
3 + a # ty:ignore[division-by-zero, unresolved-reference]
4 | /
5 | 0
6 | )
");
}
#[test]
fn add_code_existing_ignore_end_line() {
let test = CodeActionTest::with_source(
r#"
b = (
<START>a
/
0<END> # ty:ignore[division-by-zero]
)
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r"
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:3:9
|
2 | b = (
3 | / a
4 | | /
5 | | 0 # ty:ignore[division-by-zero]
| |_________^
6 | )
|
2 | b = (
3 | a
4 | /
- 0 # ty:ignore[division-by-zero]
5 + 0 # ty:ignore[division-by-zero, unresolved-reference]
6 | )
");
}
#[test]
fn add_code_existing_ignores() {
let test = CodeActionTest::with_source(
r#"
b = (
<START>a # ty:ignore[division-by-zero]
/
0<END> # ty:ignore[division-by-zero]
)
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r"
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:3:9
|
2 | b = (
3 | / a # ty:ignore[division-by-zero]
4 | | /
5 | | 0 # ty:ignore[division-by-zero]
| |_________^
6 | )
|
1 |
2 | b = (
- a # ty:ignore[division-by-zero]
3 + a # ty:ignore[division-by-zero, unresolved-reference]
4 | /
5 | 0 # ty:ignore[division-by-zero]
6 | )
");
}
#[test]
fn add_code_interpolated_string() {
let test = CodeActionTest::with_source(
r#"
b = f"""
{<START>a<END>}
more text
"""
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r#"
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:3:6
|
2 | b = f"""
3 | {a}
| ^
4 | more text
5 | """
|
2 | b = f"""
3 | {a}
4 | more text
- """
5 + """ # ty:ignore[unresolved-reference]
"#);
}
#[test]
fn add_code_multiline_interpolation() {
let test = CodeActionTest::with_source(
r#"
b = f"""
{
<START>a<END>
}
more text
"""
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r#"
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:4:5
|
2 | b = f"""
3 | {
4 | a
| ^
5 | }
6 | more text
|
1 |
2 | b = f"""
3 | {
- a
4 + a # ty:ignore[unresolved-reference]
5 | }
6 | more text
7 | """
"#);
}
#[test]
fn add_code_followed_by_multiline_string() {
let test = CodeActionTest::with_source(
r#"
b = <START>a<END> + """
more text
"""
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r#"
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:2:5
|
2 | b = a + """
| ^
3 | more text
4 | """
|
1 |
2 | b = a + """
3 | more text
- """
4 + """ # ty:ignore[unresolved-reference]
"#);
}
#[test]
fn add_code_followed_by_continuation() {
let test = CodeActionTest::with_source(
r#"
b = <START>a<END> \
+ "test"
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r#"
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:2:5
|
2 | b = a \
| ^
3 | + "test"
|
1 |
2 | b = a \
- + "test"
3 + + "test" # ty:ignore[unresolved-reference]
"#);
}
#[test]
fn undefined_reveal_type() {
let test = CodeActionTest::with_source(
r#"
<START>reveal_type<END>(1)
"#,
);
assert_snapshot!(test.code_actions(&UNDEFINED_REVEAL), @r"
info[code-action]: import typing.reveal_type
--> main.py:2:1
|
2 | reveal_type(1)
| ^^^^^^^^^^^
|
help: This is a preferred code action
1 + from typing import reveal_type
2 |
3 | reveal_type(1)
info[code-action]: Ignore 'undefined-reveal' for this line
--> main.py:2:1
|
2 | reveal_type(1)
| ^^^^^^^^^^^
|
1 |
- reveal_type(1)
2 + reveal_type(1) # ty:ignore[undefined-reveal]
");
}
#[test]
fn unresolved_deprecated() {
let test = CodeActionTest::with_source(
r#"
@<START>deprecated<END>("do not use")
def my_func(): ...
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r#"
info[code-action]: import warnings.deprecated
--> main.py:2:2
|
2 | @deprecated("do not use")
| ^^^^^^^^^^
3 | def my_func(): ...
|
help: This is a preferred code action
1 + from warnings import deprecated
2 |
3 | @deprecated("do not use")
4 | def my_func(): ...
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:2:2
|
2 | @deprecated("do not use")
| ^^^^^^^^^^
3 | def my_func(): ...
|
1 |
- @deprecated("do not use")
2 + @deprecated("do not use") # ty:ignore[unresolved-reference]
3 | def my_func(): ...
"#);
}
#[test]
fn unresolved_deprecated_warnings_imported() {
let test = CodeActionTest::with_source(
r#"
import warnings
@<START>deprecated<END>("do not use")
def my_func(): ...
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r#"
info[code-action]: import warnings.deprecated
--> main.py:4:2
|
2 | import warnings
3 |
4 | @deprecated("do not use")
| ^^^^^^^^^^
5 | def my_func(): ...
|
help: This is a preferred code action
1 + from warnings import deprecated
2 |
3 | import warnings
4 |
info[code-action]: qualify warnings.deprecated
--> main.py:4:2
|
2 | import warnings
3 |
4 | @deprecated("do not use")
| ^^^^^^^^^^
5 | def my_func(): ...
|
help: This is a preferred code action
1 |
2 | import warnings
3 |
- @deprecated("do not use")
4 + @warnings.deprecated("do not use")
5 | def my_func(): ...
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:4:2
|
2 | import warnings
3 |
4 | @deprecated("do not use")
| ^^^^^^^^^^
5 | def my_func(): ...
|
1 |
2 | import warnings
3 |
- @deprecated("do not use")
4 + @deprecated("do not use") # ty:ignore[unresolved-reference]
5 | def my_func(): ...
"#);
}
// using `importlib.abc.ExecutionLoader` when no imports are in scope
#[test]
fn unresolved_loader() {
let test = CodeActionTest::with_source(
r#"
<START>ExecutionLoader<END>
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r"
info[code-action]: import importlib.abc.ExecutionLoader
--> main.py:2:1
|
2 | ExecutionLoader
| ^^^^^^^^^^^^^^^
|
help: This is a preferred code action
1 + from importlib.abc import ExecutionLoader
2 |
3 | ExecutionLoader
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:2:1
|
2 | ExecutionLoader
| ^^^^^^^^^^^^^^^
|
1 |
- ExecutionLoader
2 + ExecutionLoader # ty:ignore[unresolved-reference]
");
}
// using `importlib.abc.ExecutionLoader` when `import importlib` is in scope
//
// TODO: `importlib.abc` is available whenever `importlib` is, so qualifying
// `importlib.abc.ExecutionLoader` without adding imports is actually legal here!
#[test]
fn unresolved_loader_importlib_imported() {
let test = CodeActionTest::with_source(
r#"
import importlib
<START>ExecutionLoader<END>
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r"
info[code-action]: import importlib.abc.ExecutionLoader
--> main.py:3:1
|
2 | import importlib
3 | ExecutionLoader
| ^^^^^^^^^^^^^^^
|
help: This is a preferred code action
1 + from importlib.abc import ExecutionLoader
2 |
3 | import importlib
4 | ExecutionLoader
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:3:1
|
2 | import importlib
3 | ExecutionLoader
| ^^^^^^^^^^^^^^^
|
1 |
2 | import importlib
- ExecutionLoader
3 + ExecutionLoader # ty:ignore[unresolved-reference]
");
}
// Using `importlib.abc.ExecutionLoader` when `import importlib.abc` is in scope
#[test]
fn unresolved_loader_abc_imported() {
let test = CodeActionTest::with_source(
r#"
import importlib.abc
<START>ExecutionLoader<END>
"#,
);
assert_snapshot!(test.code_actions(&UNRESOLVED_REFERENCE), @r"
info[code-action]: import importlib.abc.ExecutionLoader
--> main.py:3:1
|
2 | import importlib.abc
3 | ExecutionLoader
| ^^^^^^^^^^^^^^^
|
help: This is a preferred code action
1 + from importlib.abc import ExecutionLoader
2 |
3 | import importlib.abc
4 | ExecutionLoader
info[code-action]: qualify importlib.abc.ExecutionLoader
--> main.py:3:1
|
2 | import importlib.abc
3 | ExecutionLoader
| ^^^^^^^^^^^^^^^
|
help: This is a preferred code action
1 |
2 | import importlib.abc
- ExecutionLoader
3 + importlib.abc.ExecutionLoader
info[code-action]: Ignore 'unresolved-reference' for this line
--> main.py:3:1
|
2 | import importlib.abc
3 | ExecutionLoader
| ^^^^^^^^^^^^^^^
|
1 |
2 | import importlib.abc
- ExecutionLoader
3 + ExecutionLoader # ty:ignore[unresolved-reference]
");
}
/// Test harness for exercising `code_actions` against a single in-memory
/// `main.py`, with the diagnostic range delimited by `<START>`/`<END>`
/// markers in the test source.
pub(super) struct CodeActionTest {
    // In-memory project database the test file is written into.
    pub(super) db: ty_project::TestDb,
    // Handle to the `main.py` file created from the test source.
    pub(super) file: File,
    // Range between the `<START>`/`<END>` markers (after marker removal).
    pub(super) diagnostic_range: TextRange,
}
impl CodeActionTest {
    /// Creates a test fixture from `source`, which must contain `<START>` and
    /// `<END>` markers delimiting the diagnostic range.
    ///
    /// The markers are stripped before the source is written to `main.py` in
    /// an in-memory project.
    ///
    /// # Panics
    ///
    /// Panics if either marker is missing, if `<END>` precedes `<START>`, or
    /// if setting up the in-memory project fails.
    pub(super) fn with_source(source: &str) -> Self {
        let mut db = ty_project::TestDb::new(ProjectMetadata::new(
            "test".into(),
            SystemPathBuf::from("/"),
        ));
        db.init_program().unwrap();
        let mut cleansed = dedent(source).to_string();
        let start = cleansed
            .find("<START>")
            .expect("source text should contain a `<START>` marker");
        cleansed.replace_range(start..start + "<START>".len(), "");
        // `<END>` is located *after* `<START>` has been removed, so both
        // offsets are already relative to the cleansed source.
        let end = cleansed
            .find("<END>")
            .expect("source text should contain a `<END>` marker");
        cleansed.replace_range(end..end + "<END>".len(), "");
        assert!(start <= end, "<START> marker should be before <END> marker");
        db.write_file("main.py", cleansed)
            .expect("write to memory file system to be successful");
        let file = system_path_to_file(&db, "main.py").expect("newly written file to exist");
        Self {
            db,
            file,
            diagnostic_range: TextRange::new(
                TextSize::try_from(start).unwrap(),
                TextSize::try_from(end).unwrap(),
            ),
        }
    }

    /// Renders every code action offered for `lint` at the fixture's
    /// diagnostic range as a human-readable diagnostic (including the fix
    /// diff), suitable for snapshot assertions.
    pub(super) fn code_actions(&self, lint: &'static LintMetadata) -> String {
        use std::fmt::Write;
        let mut buf = String::new();
        let config = DisplayDiagnosticConfig::default()
            .color(false)
            .show_fix_diff(true)
            .format(DiagnosticFormat::Full);
        for mut action in code_actions(&self.db, self.file, self.diagnostic_range, &lint.name) {
            let mut diagnostic = Diagnostic::new(
                DiagnosticId::Lint(LintName::of("code-action")),
                ruff_db::diagnostic::Severity::Info,
                action.title,
            );
            diagnostic.annotate(Annotation::primary(
                Span::from(self.file).with_range(self.diagnostic_range),
            ));
            if action.preferred {
                diagnostic.sub(SubDiagnostic::new(
                    ruff_db::diagnostic::SubDiagnosticSeverity::Help,
                    "This is a preferred code action",
                ));
            }
            // Every action is expected to carry at least one edit; the first
            // becomes the primary edit of the rendered fix.
            let first_edit = action.edits.remove(0);
            diagnostic.set_fix(Fix::safe_edits(first_edit, action.edits));
            write!(buf, "{}", diagnostic.display(&self.db, &config)).unwrap();
        }
        buf
    }
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_stdlib/src/path.rs | crates/ruff_python_stdlib/src/path.rs | use std::ffi::OsStr;
use std::path::Path;
/// Return `true` if the [`Path`] is named `pyproject.toml`.
pub fn is_pyproject_toml(path: &Path) -> bool {
    // Only the final path component matters; `file_name()` is `None` for
    // paths like `/` or `..`, which are trivially not `pyproject.toml`.
    matches!(path.file_name(), Some(name) if name == "pyproject.toml")
}
/// Return `true` if a [`Path`] should use the name of its parent directory as its module name.
pub fn is_module_file(path: &Path) -> bool {
    // Package entry files whose module identity comes from their directory.
    const DUNDER_MODULE_FILES: [&str; 4] =
        ["__init__.py", "__init__.pyi", "__main__.py", "__main__.pyi"];
    path.file_name()
        .and_then(OsStr::to_str)
        .is_some_and(|name| DUNDER_MODULE_FILES.contains(&name))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_stdlib/src/lib.rs | crates/ruff_python_stdlib/src/lib.rs | pub mod builtins;
pub mod identifiers;
pub mod keyword;
pub mod logging;
pub mod open_mode;
pub mod path;
pub mod str;
pub mod sys;
pub mod typing;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_stdlib/src/identifiers.rs | crates/ruff_python_stdlib/src/identifiers.rs | use unicode_ident::{is_xid_continue, is_xid_start};
use crate::keyword::is_keyword;
/// Returns `true` if a string is a valid Python identifier (e.g., variable
/// name).
pub fn is_identifier(name: &str) -> bool {
    // A valid identifier needs a non-empty start character, valid
    // continuation characters, and must not collide with a Python keyword.
    let mut chars = name.chars();
    let valid_start = chars.next().is_some_and(is_identifier_start);
    valid_start && chars.all(is_identifier_continuation) && !is_keyword(name)
}
// Checks whether `c` may begin an identifier, as described in
// https://docs.python.org/3/reference/lexical_analysis.html#identifiers
fn is_identifier_start(c: char) -> bool {
    // ASCII fast path; non-ASCII falls back to the Unicode XID_Start property.
    c == '_' || c.is_ascii_alphabetic() || is_xid_start(c)
}
// Checks whether `c` may continue an identifier, as described in
// https://docs.python.org/3/reference/lexical_analysis.html#identifiers
fn is_identifier_continuation(c: char) -> bool {
    // ASCII codepoints are resolved without ever calling the slower
    // `is_xid_continue`.
    if c.is_ascii() {
        c == '_' || c.is_ascii_alphanumeric()
    } else {
        is_xid_continue(c)
    }
}
/// Returns `true` if a string is a private identifier, such that, when the
/// identifier is defined in a class definition, it will be mangled prior to
/// code generation.
///
/// See: <https://docs.python.org/3.5/reference/expressions.html?highlight=mangling#index-5>.
pub fn is_mangled_private(id: &str) -> bool {
    // Dunder names (`__x__`, including bare `__`) are exempt from mangling.
    let has_private_prefix = id.starts_with("__");
    let has_dunder_suffix = id.ends_with("__");
    has_private_prefix && !has_dunder_suffix
}
/// Returns `true` if a string is a PEP 8-compliant module name (i.e., consists of lowercase
/// letters, numbers, underscores, and is not a keyword).
pub fn is_module_name(name: &str) -> bool {
    let mut chars = name.chars();
    // The first character must be a lowercase ASCII letter or an underscore
    // (digits are not allowed in the leading position).
    let Some(first) = chars.next() else {
        return false;
    };
    if !(first.is_ascii_lowercase() || first == '_') {
        return false;
    }
    // Remaining characters may also include ASCII digits, and the name must
    // not shadow a Python keyword.
    chars.all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '_') && !is_keyword(name)
}
/// Returns `true` if a string appears to be a valid migration file name (e.g., `0001_initial.py`).
pub fn is_migration_name(name: &str) -> bool {
    // Unlike module names, migration names may start with a digit; all
    // characters must be lowercase ASCII letters, digits, or underscores.
    let all_chars_valid = name
        .chars()
        .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '_');
    all_chars_valid && !is_keyword(name)
}
#[cfg(test)]
mod tests {
    use crate::identifiers::{is_identifier, is_migration_name, is_module_name};

    // Exercises ASCII and non-ASCII (XID) identifiers, plus rejections for
    // the empty string and characters outside XID_Start/XID_Continue.
    #[test]
    fn valid_identifiers() {
        assert!(is_identifier("_abc"));
        assert!(is_identifier("abc"));
        assert!(is_identifier("_"));
        assert!(is_identifier("a_b_c"));
        assert!(is_identifier("abc123"));
        assert!(is_identifier("abc_123"));
        assert!(is_identifier("漢字"));
        assert!(is_identifier("ひらがな"));
        assert!(is_identifier("العربية"));
        assert!(is_identifier("кириллица"));
        assert!(is_identifier("πr"));
        assert!(!is_identifier(""));
        assert!(!is_identifier("percentile_co³t"));
        assert!(!is_identifier("HelloWorld❤️"));
    }

    // Module names are stricter than identifiers: ASCII lowercase only, no
    // leading digit, no keywords, and no non-ASCII characters.
    #[test]
    fn module_name() {
        assert!(is_module_name("_abc"));
        assert!(is_module_name("a"));
        assert!(is_module_name("a_b_c"));
        assert!(is_module_name("abc"));
        assert!(is_module_name("abc0"));
        assert!(is_module_name("abc_"));
        assert!(!is_module_name("0001_initial"));
        assert!(!is_module_name("0abc"));
        assert!(!is_module_name("a-b-c"));
        assert!(!is_module_name("a_B_c"));
        assert!(!is_module_name("class"));
        assert!(!is_module_name("δ"));
    }

    // Migration names additionally allow a leading digit (e.g. Django-style
    // `0001_initial`).
    #[test]
    fn migration_name() {
        assert!(is_migration_name("0001_initial"));
        assert!(is_migration_name("0abc"));
        assert!(is_migration_name("_abc"));
        assert!(is_migration_name("a"));
        assert!(is_migration_name("a_b_c"));
        assert!(is_migration_name("abc"));
        assert!(is_migration_name("abc0"));
        assert!(is_migration_name("abc_"));
        assert!(!is_migration_name("a-b-c"));
        assert!(!is_migration_name("a_B_c"));
        assert!(!is_migration_name("class"));
        assert!(!is_migration_name("δ"));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_stdlib/src/keyword.rs | crates/ruff_python_stdlib/src/keyword.rs | // See: https://github.com/python/cpython/blob/9d692841691590c25e6cf5b2250a594d3bf54825/Lib/keyword.py#L18
// See: https://github.com/python/cpython/blob/9d692841691590c25e6cf5b2250a594d3bf54825/Lib/keyword.py#L18
pub fn is_keyword(name: &str) -> bool {
    // The same keyword set as CPython's `keyword.kwlist`, kept in ASCII-sorted
    // order so membership can be decided with a binary search.
    const KEYWORDS: &[&str] = &[
        "False", "None", "True", "and", "as", "assert", "async", "await", "break", "class",
        "continue", "def", "del", "elif", "else", "except", "finally", "for", "from", "global",
        "if", "import", "in", "is", "lambda", "nonlocal", "not", "or", "pass", "raise", "return",
        "try", "while", "with", "yield",
    ];
    KEYWORDS.binary_search(&name).is_ok()
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_stdlib/src/builtins.rs | crates/ruff_python_stdlib/src/builtins.rs | /// A list of all builtins that are available in IPython.
///
/// How to create this list:
/// ```python
/// import json
/// from subprocess import check_output
///
/// builtins_python = json.loads(check_output(["python3", "-c" "import json; print(json.dumps(dir(__builtins__)))"]))
/// builtins_ipython = json.loads(check_output(["ipython3", "-c" "import json; print(json.dumps(dir(__builtins__)))"]))
/// print(sorted(set(builtins_ipython) - set(builtins_python)))
/// ```
///
/// Intended to be kept in sync with [`is_ipython_builtin`].
const IPYTHON_BUILTINS: &[&str] = &["__IPYTHON__", "display", "get_ipython"];
/// Globally defined names which are not attributes of the builtins module, or
/// are only present on some platforms.
pub const MAGIC_GLOBALS: &[&str] = &[
"WindowsError",
"__annotations__",
"__builtins__",
"__cached__",
"__warningregistry__",
"__file__",
];
/// Magic globals that are only available starting in specific Python versions.
///
/// `__annotate__` was introduced in Python 3.14.
static PY314_PLUS_MAGIC_GLOBALS: &[&str] = &["__annotate__"];
static ALWAYS_AVAILABLE_BUILTINS: &[&str] = &[
"ArithmeticError",
"AssertionError",
"AttributeError",
"BaseException",
"BlockingIOError",
"BrokenPipeError",
"BufferError",
"BytesWarning",
"ChildProcessError",
"ConnectionAbortedError",
"ConnectionError",
"ConnectionRefusedError",
"ConnectionResetError",
"DeprecationWarning",
"EOFError",
"Ellipsis",
"EnvironmentError",
"Exception",
"False",
"FileExistsError",
"FileNotFoundError",
"FloatingPointError",
"FutureWarning",
"GeneratorExit",
"IOError",
"ImportError",
"ImportWarning",
"IndentationError",
"IndexError",
"InterruptedError",
"IsADirectoryError",
"KeyError",
"KeyboardInterrupt",
"LookupError",
"MemoryError",
"ModuleNotFoundError",
"NameError",
"None",
"NotADirectoryError",
"NotImplemented",
"NotImplementedError",
"OSError",
"OverflowError",
"PendingDeprecationWarning",
"PermissionError",
"ProcessLookupError",
"RecursionError",
"ReferenceError",
"ResourceWarning",
"RuntimeError",
"RuntimeWarning",
"StopAsyncIteration",
"StopIteration",
"SyntaxError",
"SyntaxWarning",
"SystemError",
"SystemExit",
"TabError",
"TimeoutError",
"True",
"TypeError",
"UnboundLocalError",
"UnicodeDecodeError",
"UnicodeEncodeError",
"UnicodeError",
"UnicodeTranslateError",
"UnicodeWarning",
"UserWarning",
"ValueError",
"Warning",
"ZeroDivisionError",
"__build_class__",
"__debug__",
"__doc__",
"__import__",
"__loader__",
"__name__",
"__package__",
"__spec__",
"abs",
"all",
"any",
"ascii",
"bin",
"bool",
"breakpoint",
"bytearray",
"bytes",
"callable",
"chr",
"classmethod",
"compile",
"complex",
"copyright",
"credits",
"delattr",
"dict",
"dir",
"divmod",
"enumerate",
"eval",
"exec",
"exit",
"filter",
"float",
"format",
"frozenset",
"getattr",
"globals",
"hasattr",
"hash",
"help",
"hex",
"id",
"input",
"int",
"isinstance",
"issubclass",
"iter",
"len",
"license",
"list",
"locals",
"map",
"max",
"memoryview",
"min",
"next",
"object",
"oct",
"open",
"ord",
"pow",
"print",
"property",
"quit",
"range",
"repr",
"reversed",
"round",
"set",
"setattr",
"slice",
"sorted",
"staticmethod",
"str",
"sum",
"super",
"tuple",
"type",
"vars",
"zip",
];
static PY310_PLUS_BUILTINS: &[&str] = &["EncodingWarning", "aiter", "anext"];
static PY311_PLUS_BUILTINS: &[&str] = &["BaseExceptionGroup", "ExceptionGroup"];
static PY313_PLUS_BUILTINS: &[&str] = &["PythonFinalizationError"];
/// Return the list of builtins for the given Python minor version.
///
/// Intended to be kept in sync with [`is_python_builtin`].
pub fn python_builtins(minor_version: u8, is_notebook: bool) -> impl Iterator<Item = &'static str> {
    // Gated builtin groups, yielded in the same order as before the
    // always-available set: 3.10, 3.11, 3.13 additions, then IPython extras.
    let gated_groups = [
        (minor_version >= 10).then_some(PY310_PLUS_BUILTINS),
        (minor_version >= 11).then_some(PY311_PLUS_BUILTINS),
        (minor_version >= 13).then_some(PY313_PLUS_BUILTINS),
        is_notebook.then_some(IPYTHON_BUILTINS),
    ];
    gated_groups
        .into_iter()
        .flatten()
        .flatten()
        .chain(ALWAYS_AVAILABLE_BUILTINS)
        .copied()
}
/// Return the list of magic globals for the given Python minor version.
pub fn python_magic_globals(minor_version: u8) -> impl Iterator<Item = &'static str> {
    // `__annotate__` only exists on Python 3.14+ (see `PY314_PLUS_MAGIC_GLOBALS`).
    (minor_version >= 14)
        .then_some(PY314_PLUS_MAGIC_GLOBALS)
        .into_iter()
        .flatten()
        .chain(MAGIC_GLOBALS)
        .copied()
}
/// Returns `true` if the given name is that of a Python builtin.
///
/// Intended to be kept in sync with [`python_builtins`].
pub fn is_python_builtin(name: &str, minor_version: u8, is_notebook: bool) -> bool {
if is_notebook && is_ipython_builtin(name) {
return true;
}
matches!(
(minor_version, name),
(
_,
"ArithmeticError"
| "AssertionError"
| "AttributeError"
| "BaseException"
| "BlockingIOError"
| "BrokenPipeError"
| "BufferError"
| "BytesWarning"
| "ChildProcessError"
| "ConnectionAbortedError"
| "ConnectionError"
| "ConnectionRefusedError"
| "ConnectionResetError"
| "DeprecationWarning"
| "EOFError"
| "Ellipsis"
| "EnvironmentError"
| "Exception"
| "False"
| "FileExistsError"
| "FileNotFoundError"
| "FloatingPointError"
| "FutureWarning"
| "GeneratorExit"
| "IOError"
| "ImportError"
| "ImportWarning"
| "IndentationError"
| "IndexError"
| "InterruptedError"
| "IsADirectoryError"
| "KeyError"
| "KeyboardInterrupt"
| "LookupError"
| "MemoryError"
| "ModuleNotFoundError"
| "NameError"
| "None"
| "NotADirectoryError"
| "NotImplemented"
| "NotImplementedError"
| "OSError"
| "OverflowError"
| "PendingDeprecationWarning"
| "PermissionError"
| "ProcessLookupError"
| "RecursionError"
| "ReferenceError"
| "ResourceWarning"
| "RuntimeError"
| "RuntimeWarning"
| "StopAsyncIteration"
| "StopIteration"
| "SyntaxError"
| "SyntaxWarning"
| "SystemError"
| "SystemExit"
| "TabError"
| "TimeoutError"
| "True"
| "TypeError"
| "UnboundLocalError"
| "UnicodeDecodeError"
| "UnicodeEncodeError"
| "UnicodeError"
| "UnicodeTranslateError"
| "UnicodeWarning"
| "UserWarning"
| "ValueError"
| "Warning"
| "ZeroDivisionError"
| "__build_class__"
| "__debug__"
| "__doc__"
| "__import__"
| "__loader__"
| "__name__"
| "__package__"
| "__spec__"
| "abs"
| "all"
| "any"
| "ascii"
| "bin"
| "bool"
| "breakpoint"
| "bytearray"
| "bytes"
| "callable"
| "chr"
| "classmethod"
| "compile"
| "complex"
| "copyright"
| "credits"
| "delattr"
| "dict"
| "dir"
| "divmod"
| "enumerate"
| "eval"
| "exec"
| "exit"
| "filter"
| "float"
| "format"
| "frozenset"
| "getattr"
| "globals"
| "hasattr"
| "hash"
| "help"
| "hex"
| "id"
| "input"
| "int"
| "isinstance"
| "issubclass"
| "iter"
| "len"
| "license"
| "list"
| "locals"
| "map"
| "max"
| "memoryview"
| "min"
| "next"
| "object"
| "oct"
| "open"
| "ord"
| "pow"
| "print"
| "property"
| "quit"
| "range"
| "repr"
| "reversed"
| "round"
| "set"
| "setattr"
| "slice"
| "sorted"
| "staticmethod"
| "str"
| "sum"
| "super"
| "tuple"
| "type"
| "vars"
| "zip"
) | (10.., "EncodingWarning" | "aiter" | "anext")
| (11.., "BaseExceptionGroup" | "ExceptionGroup")
| (13.., "PythonFinalizationError")
)
}
/// Return `Some(version)`, where `version` corresponds to the Python minor version
/// in which the builtin was added
pub fn version_builtin_was_added(name: &str) -> Option<u8> {
    // Check the version-gated tiers first (same order as before); builtins
    // available in every supported version report minor version 0.
    let tiers: [(u8, &[&str]); 4] = [
        (10, PY310_PLUS_BUILTINS),
        (11, PY311_PLUS_BUILTINS),
        (13, PY313_PLUS_BUILTINS),
        (0, ALWAYS_AVAILABLE_BUILTINS),
    ];
    tiers
        .into_iter()
        .find(|(_, builtins)| builtins.contains(&name))
        .map(|(version, _)| version)
}
/// Returns `true` if the given name is that of a Python builtin iterator.
pub fn is_iterator(name: &str) -> bool {
    ["enumerate", "filter", "map", "reversed", "zip", "iter"]
        .iter()
        .any(|iterator| *iterator == name)
}
/// Returns `true` if the given name is that of an IPython builtin.
///
/// Intended to be kept in sync with [`IPYTHON_BUILTINS`].
fn is_ipython_builtin(name: &str) -> bool {
    // Mirrors the `IPYTHON_BUILTINS` slice.
    ["__IPYTHON__", "display", "get_ipython"].contains(&name)
}
/// Returns `true` if the given name is that of a builtin exception.
///
/// See: <https://docs.python.org/3/library/exceptions.html#exception-hierarchy>
pub fn is_exception(name: &str, minor_version: u8) -> bool {
matches!(
(minor_version, name),
(
_,
"BaseException"
| "GeneratorExit"
| "KeyboardInterrupt"
| "SystemExit"
| "Exception"
| "ArithmeticError"
| "FloatingPointError"
| "OverflowError"
| "ZeroDivisionError"
| "AssertionError"
| "AttributeError"
| "BufferError"
| "EOFError"
| "ImportError"
| "ModuleNotFoundError"
| "LookupError"
| "IndexError"
| "KeyError"
| "MemoryError"
| "NameError"
| "UnboundLocalError"
| "OSError"
| "BlockingIOError"
| "ChildProcessError"
| "ConnectionError"
| "BrokenPipeError"
| "ConnectionAbortedError"
| "ConnectionRefusedError"
| "ConnectionResetError"
| "FileExistsError"
| "FileNotFoundError"
| "InterruptedError"
| "IsADirectoryError"
| "NotADirectoryError"
| "PermissionError"
| "ProcessLookupError"
| "TimeoutError"
| "ReferenceError"
| "RuntimeError"
| "NotImplementedError"
| "RecursionError"
| "StopAsyncIteration"
| "StopIteration"
| "SyntaxError"
| "IndentationError"
| "TabError"
| "SystemError"
| "TypeError"
| "ValueError"
| "UnicodeError"
| "UnicodeDecodeError"
| "UnicodeEncodeError"
| "UnicodeTranslateError"
| "Warning"
| "BytesWarning"
| "DeprecationWarning"
| "FutureWarning"
| "ImportWarning"
| "PendingDeprecationWarning"
| "ResourceWarning"
| "RuntimeWarning"
| "SyntaxWarning"
| "UnicodeWarning"
| "UserWarning"
) | (10.., "EncodingWarning")
| (11.., "BaseExceptionGroup" | "ExceptionGroup")
| (13.., "PythonFinalizationError")
)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_stdlib/src/open_mode.rs | crates/ruff_python_stdlib/src/open_mode.rs | bitflags::bitflags! {
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct OpenMode: u8 {
/// `r`
const READ = 1 << 0;
/// `w`
const WRITE = 1 << 1;
/// `a`
const APPEND = 1 << 2;
/// `x`
const CREATE = 1 << 3;
/// `b`
const BINARY = 1 << 4;
/// `t`
const TEXT = 1 << 5;
/// `+`
const PLUS = 1 << 6;
/// `U`
const UNIVERSAL_NEWLINES = 1 << 7;
}
}
impl OpenMode {
    /// Parse an [`OpenMode`] from a sequence of characters.
    ///
    /// # Errors
    ///
    /// Returns a human-readable error message when a flag is repeated, when
    /// mutually exclusive flags are combined (`t` with `b`, `U` with a write
    /// mode), or when no primary mode (`r`, `w`, `x`, or `a`) is selected.
    pub fn from_chars(chars: impl Iterator<Item = char>) -> Result<Self, String> {
        let mut open_mode = OpenMode::empty();
        for c in chars {
            let flag = OpenMode::try_from(c)?;
            // Each flag may appear at most once.
            if flag.intersects(open_mode) {
                return Err(format!("Open mode contains duplicate flag: `{c}`"));
            }
            open_mode.insert(flag);
        }
        // Both text and binary mode cannot be set at the same time.
        if open_mode.contains(OpenMode::TEXT | OpenMode::BINARY) {
            return Err(
                "Open mode cannot contain both text (`t`) and binary (`b`) flags".to_string(),
            );
        }
        // The `U` mode is only valid with `r`.
        if open_mode.contains(OpenMode::UNIVERSAL_NEWLINES)
            && open_mode.intersects(OpenMode::WRITE | OpenMode::APPEND | OpenMode::CREATE)
        {
            return Err("Open mode cannot contain the universal newlines (`U`) flag with write (`w`), append (`a`), or create (`x`) flags".to_string());
        }
        // Otherwise, reading, writing, creating, and appending are mutually exclusive.
        // (`U` is grouped with `r`, so `rU` counts as a single "read" selection.)
        if [
            OpenMode::READ | OpenMode::UNIVERSAL_NEWLINES,
            OpenMode::WRITE,
            OpenMode::CREATE,
            OpenMode::APPEND,
        ]
        .into_iter()
        .filter(|flag| open_mode.intersects(*flag))
        .count()
            != 1
        {
            return Err("Open mode must contain exactly one of the following flags: read (`r`), write (`w`), create (`x`), or append (`a`)".to_string());
        }
        Ok(open_mode)
    }

    /// Remove any redundant flags from the open mode.
    #[must_use]
    pub fn reduce(self) -> Self {
        let mut open_mode = self;
        // `t` is always redundant.
        open_mode.remove(Self::TEXT);
        // `U` is always redundant.
        open_mode.remove(Self::UNIVERSAL_NEWLINES);
        // `r` is redundant, unless `b` or `+` is also set, in which case, we need one of `w`, `a`, `r`, or `x`.
        if open_mode.intersects(Self::BINARY | Self::PLUS) {
            if !open_mode.intersects(Self::WRITE | Self::CREATE | Self::APPEND) {
                open_mode.insert(Self::READ);
            }
        } else {
            open_mode.remove(Self::READ);
        }
        open_mode
    }
}
/// Write the [`OpenMode`] as a string.
impl std::fmt::Display for OpenMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Emit flags in the canonical order: primary modes (`r`, `w`, `a`,
        // `x`), then `U`, then the text/binary modifiers, then `+`.
        const ORDERED_FLAGS: [(OpenMode, char); 8] = [
            (OpenMode::READ, 'r'),
            (OpenMode::WRITE, 'w'),
            (OpenMode::APPEND, 'a'),
            (OpenMode::CREATE, 'x'),
            (OpenMode::UNIVERSAL_NEWLINES, 'U'),
            (OpenMode::BINARY, 'b'),
            (OpenMode::TEXT, 't'),
            (OpenMode::PLUS, '+'),
        ];
        for (flag, symbol) in ORDERED_FLAGS {
            if self.contains(flag) {
                write!(f, "{symbol}")?;
            }
        }
        Ok(())
    }
}
impl TryFrom<char> for OpenMode {
    type Error = String;

    /// Map a single mode character (as accepted by Python's `open`) to its
    /// flag; any other character is rejected with a descriptive message.
    fn try_from(value: char) -> Result<Self, Self::Error> {
        match value {
            'r' => Ok(Self::READ),
            'w' => Ok(Self::WRITE),
            'a' => Ok(Self::APPEND),
            'x' => Ok(Self::CREATE),
            'b' => Ok(Self::BINARY),
            't' => Ok(Self::TEXT),
            '+' => Ok(Self::PLUS),
            'U' => Ok(Self::UNIVERSAL_NEWLINES),
            _ => Err(format!("Invalid open mode flag: `{value}`")),
        }
    }
}
impl TryFrom<&str> for OpenMode {
    type Error = String;

    /// Parse a whole mode string (e.g. `"rb+"`) by delegating to
    /// [`OpenMode::from_chars`], which also performs flag validation.
    fn try_from(value: &str) -> Result<Self, Self::Error> {
        OpenMode::from_chars(value.chars())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_stdlib/src/typing.rs | crates/ruff_python_stdlib/src/typing.rs | /// Returns `true` if a call path is a generic from the Python standard library (e.g. `list`, which
/// can be used as `list[int]`).
///
/// See: <https://docs.python.org/3/library/typing.html>
pub fn is_standard_library_generic(qualified_name: &[&str]) -> bool {
matches!(
qualified_name,
[
"" | "builtins",
"dict" | "frozenset" | "list" | "set" | "tuple" | "type"
] | [
"collections" | "typing" | "typing_extensions",
"ChainMap" | "Counter"
] | ["collections" | "typing", "OrderedDict"]
| ["collections", "defaultdict" | "deque"]
| [
"collections",
"abc",
"AsyncGenerator"
| "AsyncIterable"
| "AsyncIterator"
| "Awaitable"
| "ByteString"
| "Callable"
| "Collection"
| "Container"
| "Coroutine"
| "Generator"
| "ItemsView"
| "Iterable"
| "Iterator"
| "KeysView"
| "Mapping"
| "MappingView"
| "MutableMapping"
| "MutableSequence"
| "MutableSet"
| "Reversible"
| "Sequence"
| "Set"
| "ValuesView"
]
| [
"contextlib",
"AbstractAsyncContextManager" | "AbstractContextManager"
]
| ["re" | "typing", "Match" | "Pattern"]
| [
"typing",
"AbstractSet"
| "AsyncContextManager"
| "AsyncGenerator"
| "AsyncIterator"
| "Awaitable"
| "BinaryIO"
| "ByteString"
| "Callable"
| "ClassVar"
| "Collection"
| "Concatenate"
| "Container"
| "ContextManager"
| "Coroutine"
| "DefaultDict"
| "Deque"
| "Dict"
| "Final"
| "FrozenSet"
| "Generator"
| "Generic"
| "IO"
| "ItemsView"
| "Iterable"
| "Iterator"
| "KeysView"
| "List"
| "Mapping"
| "MutableMapping"
| "MutableSequence"
| "MutableSet"
| "Optional"
| "Reversible"
| "Sequence"
| "Set"
| "TextIO"
| "Tuple"
| "Type"
| "TypeGuard"
| "Union"
| "Unpack"
| "ValuesView"
]
| ["typing", "io", "BinaryIO" | "IO" | "TextIO"]
| ["typing", "re", "Match" | "Pattern"]
| [
"typing_extensions",
"AsyncContextManager"
| "AsyncGenerator"
| "AsyncIterable"
| "AsyncIterator"
| "Awaitable"
| "ClassVar"
| "Concatenate"
| "ContextManager"
| "Coroutine"
| "DefaultDict"
| "Deque"
| "Type"
]
| [
"weakref",
"WeakKeyDictionary" | "WeakSet" | "WeakValueDictionary"
]
)
}
/// Returns `true` if a call path is a [PEP 593] generic (e.g. `Annotated`).
///
/// See: <https://docs.python.org/3/library/typing.html>
///
/// [PEP 593]: https://peps.python.org/pep-0593/
pub fn is_pep_593_generic_type(qualified_name: &[&str]) -> bool {
    // `Annotated` may be imported from `typing` or `typing_extensions`.
    match qualified_name {
        [module, "Annotated"] => matches!(*module, "typing" | "typing_extensions"),
        _ => false,
    }
}
pub fn is_typed_dict(qualified_name: &[&str]) -> bool {
    // `TypedDict` may come from `typing` or its `typing_extensions` backport.
    matches!(
        qualified_name,
        ["typing", "TypedDict"] | ["typing_extensions", "TypedDict"]
    )
}
/// Returns `true` if a call path is `Literal`.
pub fn is_standard_library_literal(qualified_name: &[&str]) -> bool {
    // `Literal` may come from either `typing` or its `typing_extensions` backport.
    match qualified_name {
        [module, "Literal"] => matches!(*module, "typing" | "typing_extensions"),
        _ => false,
    }
}
/// Returns `true` if a name matches that of a generic from the Python standard library (e.g.
/// `list` or `Set`).
///
/// See: <https://docs.python.org/3/library/typing.html>
pub fn is_standard_library_generic_member(member: &str) -> bool {
    // Constructed by taking every pattern from `is_standard_library_generic`, removing all but
    // the last element in each pattern, and de-duplicating the values.
    // Note: matching is case-sensitive, so both lowercase builtins (`list`) and their
    // capitalized `typing` aliases (`List`) appear in the list.
    matches!(
        member,
        "dict"
            | "AbstractAsyncContextManager"
            | "AbstractContextManager"
            | "AbstractSet"
            | "AsyncContextManager"
            | "AsyncGenerator"
            | "AsyncIterable"
            | "AsyncIterator"
            | "Awaitable"
            | "BinaryIO"
            | "ByteString"
            | "Callable"
            | "ChainMap"
            | "ClassVar"
            | "Collection"
            | "Concatenate"
            | "Container"
            | "ContextManager"
            | "Coroutine"
            | "Counter"
            | "DefaultDict"
            | "Deque"
            | "Dict"
            | "Final"
            | "FrozenSet"
            | "Generator"
            | "Generic"
            | "IO"
            | "ItemsView"
            | "Iterable"
            | "Iterator"
            | "KeysView"
            | "List"
            | "Mapping"
            | "MappingView"
            | "Match"
            | "MutableMapping"
            | "MutableSequence"
            | "MutableSet"
            | "Optional"
            | "OrderedDict"
            | "Pattern"
            | "Reversible"
            | "Sequence"
            | "Set"
            | "TextIO"
            | "Tuple"
            | "Type"
            | "TypeGuard"
            | "Union"
            | "Unpack"
            | "ValuesView"
            | "WeakKeyDictionary"
            | "WeakSet"
            | "WeakValueDictionary"
            | "defaultdict"
            | "deque"
            | "frozenset"
            | "list"
            | "set"
            | "tuple"
            | "type"
    )
}
/// Returns `true` if a name matches that of a generic from [PEP 593] (e.g. `Annotated`).
///
/// See: <https://docs.python.org/3/library/typing.html>
///
/// [PEP 593]: https://peps.python.org/pep-0593/
pub fn is_pep_593_generic_member(member: &str) -> bool {
    // Constructed by taking every pattern from `is_pep_593_generic_type`, removing all but
    // the last element in each pattern, and de-duplicating the values.
    matches!(member, "Annotated")
}
/// Returns `true` if a name matches that of `TypedDict`.
///
/// See: <https://docs.python.org/3/library/typing.html>
pub fn is_typed_dict_member(member: &str) -> bool {
    // Constructed by taking every pattern from `is_typed_dict`, removing all but
    // the last element in each pattern, and de-duplicating the values.
    matches!(member, "TypedDict")
}
/// Returns `true` if a name matches that of the `Literal` generic.
pub fn is_literal_member(member: &str) -> bool {
    member == "Literal"
}
/// Returns `true` if a call path represents that of an immutable, non-generic type from the Python
/// standard library (e.g. `int` or `str`).
pub fn is_immutable_non_generic_type(qualified_name: &[&str]) -> bool {
    matches!(
        qualified_name,
        ["collections", "abc", "Sized"]
            | ["typing", "LiteralString" | "Sized"]
            | [
                // An empty module segment denotes a builtin (cf. the `"" | "builtins"`
                // patterns used elsewhere in this module).
                "",
                "bool"
                    | "bytes"
                    | "complex"
                    | "float"
                    | "frozenset"
                    | "int"
                    | "object"
                    | "range"
                    | "str"
            ]
    )
}
/// Returns `true` if a call path represents that of an immutable, generic type from the Python
/// standard library (e.g. `tuple`).
pub fn is_immutable_generic_type(qualified_name: &[&str]) -> bool {
    matches!(
        qualified_name,
        // The builtin `tuple` (an empty module segment also denotes a builtin).
        ["" | "builtins", "tuple"]
            | [
                // Read-only ABCs from `collections.abc`.
                "collections",
                "abc",
                "ByteString"
                    | "Collection"
                    | "Container"
                    | "Iterable"
                    | "Mapping"
                    | "Reversible"
                    | "Sequence"
                    | "Set"
            ]
            | [
                // Read-only aliases and special forms from `typing`.
                "typing",
                "AbstractSet"
                    | "ByteString"
                    | "Callable"
                    | "Collection"
                    | "Container"
                    | "FrozenSet"
                    | "Iterable"
                    | "Literal"
                    | "Mapping"
                    | "Never"
                    | "NoReturn"
                    | "Reversible"
                    | "Sequence"
                    | "Tuple"
            ]
    )
}
/// Returns `true` if a call path represents a function from the Python standard library that
/// returns a mutable value (e.g., `dict`).
pub fn is_mutable_return_type(qualified_name: &[&str]) -> bool {
    match qualified_name {
        // Mutable builtin containers (an empty module segment denotes a builtin).
        ["" | "builtins", member] => matches!(*member, "dict" | "list" | "set"),
        // Mutable containers from `collections`.
        ["collections", member] => {
            matches!(*member, "Counter" | "OrderedDict" | "defaultdict" | "deque")
        }
        _ => false,
    }
}
/// Returns `true` if a call path represents a function from the Python standard library that
/// returns an immutable value (e.g., `bool`).
pub fn is_immutable_return_type(qualified_name: &[&str]) -> bool {
    matches!(
        qualified_name,
        [
            "datetime",
            "date" | "datetime" | "time" | "timedelta" | "timezone" | "tzinfo"
        ] | ["decimal", "Decimal"]
            | ["fractions", "Fraction"]
            | ["operator", "attrgetter" | "itemgetter" | "methodcaller"]
            | ["pathlib", "Path"]
            | ["types", "MappingProxyType"]
            | ["re", "compile"]
            | [
                // Immutable builtins (an empty module segment denotes a builtin).
                "",
                "bool"
                    | "bytes"
                    | "complex"
                    | "float"
                    | "frozenset"
                    | "int"
                    | "str"
                    | "tuple"
                    | "slice"
            ]
    )
}
type ModuleMember = (&'static str, &'static str);

/// Given a typing member, returns the module and member name for a generic from the Python standard
/// library (e.g., `list` for `typing.List`), if such a generic was introduced by [PEP 585].
///
/// [PEP 585]: https://peps.python.org/pep-0585/
pub fn as_pep_585_generic(module: &str, member: &str) -> Option<ModuleMember> {
    // `Type`, `Deque`, and `DefaultDict` are also recognized under `typing_extensions`;
    // the remaining aliases are only mapped from `typing` itself.
    match (module, member) {
        ("typing", "Dict") => Some(("", "dict")),
        ("typing", "FrozenSet") => Some(("", "frozenset")),
        ("typing", "List") => Some(("", "list")),
        ("typing", "Set") => Some(("", "set")),
        ("typing", "Tuple") => Some(("", "tuple")),
        ("typing" | "typing_extensions", "Type") => Some(("", "type")),
        ("typing" | "typing_extensions", "Deque") => Some(("collections", "deque")),
        ("typing" | "typing_extensions", "DefaultDict") => Some(("collections", "defaultdict")),
        _ => None,
    }
}
/// Returns the expected return type for a magic method.
///
/// See: <https://github.com/JelleZijlstra/autotyping/blob/0adba5ba0eee33c1de4ad9d0c79acfd737321dd9/autotyping/autotyping.py#L69-L91>
pub fn simple_magic_return_type(method: &str) -> Option<&'static str> {
    let return_type = match method {
        "__str__" | "__repr__" | "__format__" => "str",
        "__bytes__" => "bytes",
        "__len__" | "__length_hint__" | "__int__" | "__index__" => "int",
        "__float__" => "float",
        "__complex__" => "complex",
        "__bool__" | "__contains__" | "__instancecheck__" | "__subclasscheck__" => "bool",
        "__init__" | "__del__" | "__setattr__" | "__delattr__" | "__setitem__" | "__delitem__"
        | "__set__" => "None",
        _ => return None,
    };
    Some(return_type)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_stdlib/src/logging.rs | crates/ruff_python_stdlib/src/logging.rs | #[derive(Debug, Copy, Clone)]
pub enum LoggingLevel {
    /// Maps from the `debug` logger attribute (see `from_attribute`).
    Debug,
    /// Maps from the `critical` logger attribute.
    Critical,
    /// Maps from the `error` logger attribute.
    Error,
    /// Maps from the `exception` logger attribute.
    Exception,
    /// Maps from the `info` logger attribute.
    Info,
    /// Maps from the `warn` logger attribute.
    Warn,
    /// Maps from the `warning` logger attribute.
    Warning,
}
impl LoggingLevel {
pub fn from_attribute(level: &str) -> Option<Self> {
match level {
"debug" => Some(LoggingLevel::Debug),
"critical" => Some(LoggingLevel::Critical),
"error" => Some(LoggingLevel::Error),
"exception" => Some(LoggingLevel::Exception),
"info" => Some(LoggingLevel::Info),
"warn" => Some(LoggingLevel::Warn),
"warning" => Some(LoggingLevel::Warning),
_ => None,
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_stdlib/src/str.rs | crates/ruff_python_stdlib/src/str.rs | /// Return `true` if a string is lowercase.
///
/// A string is lowercase if all alphabetic characters in the string are lowercase.
///
/// ## Examples
///
/// ```rust
/// use ruff_python_stdlib::str::is_lowercase;
///
/// assert!(is_lowercase("abc"));
/// assert!(is_lowercase("a_b_c"));
/// assert!(is_lowercase("a2c"));
/// assert!(!is_lowercase("aBc"));
/// assert!(!is_lowercase("ABC"));
/// assert!(is_lowercase(""));
/// assert!(is_lowercase("_"));
/// assert!(is_lowercase("αbc"));
/// assert!(!is_lowercase("αBC"));
/// assert!(!is_lowercase("Ωbc"));
/// ```
pub fn is_lowercase(s: &str) -> bool {
    for (index, &byte) in s.as_bytes().iter().enumerate() {
        if byte.is_ascii_uppercase() {
            return false;
        }
        if !byte.is_ascii() {
            // Fall back to full Unicode handling from the first non-ASCII byte onwards.
            // (All preceding bytes are ASCII, so `index` is a valid char boundary.)
            return s[index..]
                .chars()
                .all(|ch| !ch.is_alphabetic() || ch.is_lowercase());
        }
    }
    true
}
/// Return `true` if a string is uppercase.
///
/// A string is uppercase if all alphabetic characters in the string are uppercase.
///
/// ## Examples
///
/// ```rust
/// use ruff_python_stdlib::str::is_uppercase;
///
/// assert!(is_uppercase("ABC"));
/// assert!(is_uppercase("A_B_C"));
/// assert!(is_uppercase("A2C"));
/// assert!(!is_uppercase("aBc"));
/// assert!(!is_uppercase("abc"));
/// assert!(is_uppercase(""));
/// assert!(is_uppercase("_"));
/// assert!(is_uppercase("ΩBC"));
/// assert!(!is_uppercase("Ωbc"));
/// assert!(!is_uppercase("αBC"));
/// ```
pub fn is_uppercase(s: &str) -> bool {
    for (index, &byte) in s.as_bytes().iter().enumerate() {
        if byte.is_ascii_lowercase() {
            return false;
        }
        if !byte.is_ascii() {
            // Fall back to full Unicode handling from the first non-ASCII byte onwards.
            // (All preceding bytes are ASCII, so `index` is a valid char boundary.)
            return s[index..]
                .chars()
                .all(|ch| !ch.is_alphabetic() || ch.is_uppercase());
        }
    }
    true
}
/// Return `true` if a string is _cased_ as lowercase.
///
/// A string is cased as lowercase if it contains at least one lowercase character and no uppercase
/// characters.
///
/// This differs from `str::is_lowercase` in that it returns `false` for empty strings and strings
/// that contain only underscores or other non-alphabetic characters.
///
/// ## Examples
///
/// ```rust
/// use ruff_python_stdlib::str::is_cased_lowercase;
///
/// assert!(is_cased_lowercase("abc"));
/// assert!(is_cased_lowercase("a_b_c"));
/// assert!(is_cased_lowercase("a2c"));
/// assert!(!is_cased_lowercase("aBc"));
/// assert!(!is_cased_lowercase("ABC"));
/// assert!(!is_cased_lowercase(""));
/// assert!(!is_cased_lowercase("_"));
/// ```
pub fn is_cased_lowercase(s: &str) -> bool {
    // Track whether we saw at least one lowercase character; bail out on any uppercase.
    let mut saw_lowercase = false;
    for ch in s.chars() {
        if ch.is_uppercase() {
            return false;
        }
        saw_lowercase = saw_lowercase || ch.is_lowercase();
    }
    saw_lowercase
}
/// Return `true` if a string is _cased_ as uppercase.
///
/// A string is cased as uppercase if it contains at least one uppercase character and no lowercase
/// characters.
///
/// This differs from `str::is_uppercase` in that it returns `false` for empty strings and strings
/// that contain only underscores or other non-alphabetic characters.
///
/// ## Examples
///
/// ```rust
/// use ruff_python_stdlib::str::is_cased_uppercase;
///
/// assert!(is_cased_uppercase("ABC"));
/// assert!(is_cased_uppercase("A_B_C"));
/// assert!(is_cased_uppercase("A2C"));
/// assert!(!is_cased_uppercase("aBc"));
/// assert!(!is_cased_uppercase("abc"));
/// assert!(!is_cased_uppercase(""));
/// assert!(!is_cased_uppercase("_"));
/// ```
pub fn is_cased_uppercase(s: &str) -> bool {
    // Track whether we saw at least one uppercase character; bail out on any lowercase.
    let mut saw_uppercase = false;
    for ch in s.chars() {
        if ch.is_lowercase() {
            return false;
        }
        saw_uppercase = saw_uppercase || ch.is_uppercase();
    }
    saw_uppercase
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_stdlib/src/sys/builtin_modules.rs | crates/ruff_python_stdlib/src/sys/builtin_modules.rs | //! This file is generated by `scripts/generate_builtin_modules.py`
/// Return `true` if `module` is a [builtin module] on the given
/// Python 3 version.
///
/// "Builtin modules" are modules that are compiled directly into the
/// Python interpreter. These can never be shadowed by first-party
/// modules; the normal rules of module resolution do not apply to these
/// modules.
///
/// [builtin module]: https://docs.python.org/3/library/sys.html#sys.builtin_module_names
#[expect(clippy::unnested_or_patterns)]
pub fn is_builtin_module(minor_version: u8, module: &str) -> bool {
    matches!(
        (minor_version, module),
        (
            // Modules compiled into the interpreter on every supported minor version.
            _,
            "_abc"
                | "_ast"
                | "_codecs"
                | "_collections"
                | "_functools"
                | "_imp"
                | "_io"
                | "_locale"
                | "_operator"
                | "_signal"
                | "_sre"
                | "_stat"
                | "_string"
                | "_symtable"
                | "_thread"
                | "_tracemalloc"
                | "_warnings"
                | "_weakref"
                | "atexit"
                | "builtins"
                | "errno"
                | "faulthandler"
                | "gc"
                | "itertools"
                | "marshal"
                | "posix"
                | "pwd"
                | "sys"
                | "time"
        // Builtins that only exist on specific minor versions.
        ) | (7, "xxsubtype" | "zipimport")
            | (8, "xxsubtype")
            | (9, "_peg_parser" | "xxsubtype")
            | (10, "xxsubtype")
            | (11, "_tokenize" | "xxsubtype")
            | (12, "_tokenize" | "_typing")
            | (13, "_suggestions" | "_sysconfig" | "_tokenize" | "_typing")
    )
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_stdlib/src/sys/mod.rs | crates/ruff_python_stdlib/src/sys/mod.rs | mod builtin_modules;
mod known_stdlib;
pub use builtin_modules::is_builtin_module;
pub use known_stdlib::is_known_standard_library;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_stdlib/src/sys/known_stdlib.rs | crates/ruff_python_stdlib/src/sys/known_stdlib.rs | //! This file is generated by `scripts/generate_known_standard_library.py`
/// Returns `true` if `module` is part of the Python standard library on the given
/// Python 3 minor version.
pub fn is_known_standard_library(minor_version: u8, module: &str) -> bool {
    matches!(
        (minor_version, module),
        (
            // Modules present on every supported Python 3 minor version.
            _,
            "__hello__"
                | "__phello__"
                | "_abc"
                | "_ast"
                | "_asyncio"
                | "_bisect"
                | "_blake2"
                | "_bz2"
                | "_codecs"
                | "_codecs_cn"
                | "_codecs_hk"
                | "_codecs_iso2022"
                | "_codecs_jp"
                | "_codecs_kr"
                | "_codecs_tw"
                | "_collections"
                | "_collections_abc"
                | "_compat_pickle"
                | "_contextvars"
                | "_csv"
                | "_ctypes"
                | "_ctypes_test"
                | "_curses"
                | "_curses_panel"
                | "_datetime"
                | "_dbm"
                | "_decimal"
                | "_elementtree"
                | "_frozen_importlib"
                | "_frozen_importlib_external"
                | "_functools"
                | "_gdbm"
                | "_hashlib"
                | "_heapq"
                | "_imp"
                | "_io"
                | "_json"
                | "_locale"
                | "_lsprof"
                | "_lzma"
                | "_markupbase"
                | "_md5"
                | "_multibytecodec"
                | "_multiprocessing"
                | "_opcode"
                | "_operator"
                | "_osx_support"
                | "_overlapped"
                | "_pickle"
                | "_posixsubprocess"
                | "_py_abc"
                | "_pydecimal"
                | "_pyio"
                | "_queue"
                | "_random"
                | "_scproxy"
                | "_sha1"
                | "_sha3"
                | "_signal"
                | "_sitebuiltins"
                | "_socket"
                | "_sqlite3"
                | "_sre"
                | "_ssl"
                | "_stat"
                | "_string"
                | "_strptime"
                | "_struct"
                | "_symtable"
                | "_testbuffer"
                | "_testcapi"
                | "_testconsole"
                | "_testimportmultiple"
                | "_testmultiphase"
                | "_thread"
                | "_threading_local"
                | "_tkinter"
                | "_tracemalloc"
                | "_uuid"
                | "_warnings"
                | "_weakref"
                | "_weakrefset"
                | "_winapi"
                | "_xxtestfuzz"
                | "abc"
                | "antigravity"
                | "argparse"
                | "array"
                | "ast"
                | "asyncio"
                | "atexit"
                | "base64"
                | "bdb"
                | "binascii"
                | "bisect"
                | "builtins"
                | "bz2"
                | "cProfile"
                | "calendar"
                | "cmath"
                | "cmd"
                | "code"
                | "codecs"
                | "codeop"
                | "collections"
                | "colorsys"
                | "compileall"
                | "concurrent"
                | "configparser"
                | "contextlib"
                | "contextvars"
                | "copy"
                | "copyreg"
                | "csv"
                | "ctypes"
                | "curses"
                | "dataclasses"
                | "datetime"
                | "dbm"
                | "decimal"
                | "difflib"
                | "dis"
                | "doctest"
                | "email"
                | "encodings"
                | "ensurepip"
                | "enum"
                | "errno"
                | "faulthandler"
                | "fcntl"
                | "filecmp"
                | "fileinput"
                | "fnmatch"
                | "fractions"
                | "ftplib"
                | "functools"
                | "gc"
                | "genericpath"
                | "getopt"
                | "getpass"
                | "gettext"
                | "glob"
                | "grp"
                | "gzip"
                | "hashlib"
                | "heapq"
                | "hmac"
                | "html"
                | "http"
                | "idlelib"
                | "imaplib"
                | "importlib"
                | "inspect"
                | "io"
                | "ipaddress"
                | "itertools"
                | "json"
                | "keyword"
                | "linecache"
                | "locale"
                | "logging"
                | "lzma"
                | "mailbox"
                | "marshal"
                | "math"
                | "mimetypes"
                | "mmap"
                | "modulefinder"
                | "msvcrt"
                | "multiprocessing"
                | "netrc"
                | "nt"
                | "ntpath"
                | "nturl2path"
                | "numbers"
                | "opcode"
                | "operator"
                | "optparse"
                | "os"
                | "pathlib"
                | "pdb"
                | "pickle"
                | "pickletools"
                | "pkgutil"
                | "platform"
                | "plistlib"
                | "poplib"
                | "posix"
                | "posixpath"
                | "pprint"
                | "profile"
                | "pstats"
                | "pty"
                | "pwd"
                | "py_compile"
                | "pyclbr"
                | "pydoc"
                | "pydoc_data"
                | "pyexpat"
                | "queue"
                | "quopri"
                | "random"
                | "re"
                | "readline"
                | "reprlib"
                | "resource"
                | "rlcompleter"
                | "runpy"
                | "sched"
                | "secrets"
                | "select"
                | "selectors"
                | "shelve"
                | "shlex"
                | "shutil"
                | "signal"
                | "site"
                | "smtplib"
                | "socket"
                | "socketserver"
                | "sqlite3"
                | "sre_compile"
                | "sre_constants"
                | "sre_parse"
                | "ssl"
                | "stat"
                | "statistics"
                | "string"
                | "stringprep"
                | "struct"
                | "subprocess"
                | "symtable"
                | "sys"
                | "sysconfig"
                | "syslog"
                | "tabnanny"
                | "tarfile"
                | "tempfile"
                | "termios"
                | "textwrap"
                | "this"
                | "threading"
                | "time"
                | "timeit"
                | "tkinter"
                | "token"
                | "tokenize"
                | "trace"
                | "traceback"
                | "tracemalloc"
                | "tty"
                | "turtle"
                | "turtledemo"
                | "types"
                | "typing"
                | "unicodedata"
                | "unittest"
                | "urllib"
                | "uuid"
                | "venv"
                | "warnings"
                | "wave"
                | "weakref"
                | "webbrowser"
                | "winreg"
                | "winsound"
                | "wsgiref"
                | "xml"
                | "xmlrpc"
                | "xx"
                | "xxlimited"
                | "xxsubtype"
                | "zipapp"
                | "zipfile"
                | "zipimport"
                | "zlib"
        ) | (
            // Additional modules recognized on Python 3.7 only.
            7,
            "_bootlocale"
                | "_compression"
                | "_crypt"
                | "_dummy_thread"
                | "_msi"
                | "_sha256"
                | "_sha512"
                | "aifc"
                | "asynchat"
                | "asyncore"
                | "audioop"
                | "binhex"
                | "cgi"
                | "cgitb"
                | "chunk"
                | "crypt"
                | "distutils"
                | "dummy_threading"
                | "formatter"
                | "imghdr"
                | "imp"
                | "lib2to3"
                | "macpath"
                | "mailcap"
                | "msilib"
                | "nis"
                | "nntplib"
                | "ossaudiodev"
                | "parser"
                | "pipes"
                | "smtpd"
                | "sndhdr"
                | "spwd"
                | "sunau"
                | "symbol"
                | "telnetlib"
                | "uu"
                | "xdrlib"
        ) | (
            // Additional modules recognized on Python 3.8 only.
            8,
            "_bootlocale"
                | "_compression"
                | "_crypt"
                | "_dummy_thread"
                | "_msi"
                | "_posixshmem"
                | "_sha256"
                | "_sha512"
                | "_statistics"
                | "_testinternalcapi"
                | "_xxsubinterpreters"
                | "aifc"
                | "asynchat"
                | "asyncore"
                | "audioop"
                | "binhex"
                | "cgi"
                | "cgitb"
                | "chunk"
                | "crypt"
                | "distutils"
                | "dummy_threading"
                | "formatter"
                | "imghdr"
                | "imp"
                | "lib2to3"
                | "mailcap"
                | "msilib"
                | "nis"
                | "nntplib"
                | "ossaudiodev"
                | "parser"
                | "pipes"
                | "smtpd"
                | "sndhdr"
                | "spwd"
                | "sunau"
                | "symbol"
                | "telnetlib"
                | "uu"
                | "xdrlib"
        ) | (
            // Additional modules recognized on Python 3.9 only.
            9,
            "_aix_support"
                | "_bootlocale"
                | "_bootsubprocess"
                | "_compression"
                | "_crypt"
                | "_msi"
                | "_peg_parser"
                | "_posixshmem"
                | "_sha256"
                | "_sha512"
                | "_statistics"
                | "_testinternalcapi"
                | "_xxsubinterpreters"
                | "_zoneinfo"
                | "aifc"
                | "asynchat"
                | "asyncore"
                | "audioop"
                | "binhex"
                | "cgi"
                | "cgitb"
                | "chunk"
                | "crypt"
                | "distutils"
                | "formatter"
                | "graphlib"
                | "imghdr"
                | "imp"
                | "lib2to3"
                | "mailcap"
                | "msilib"
                | "nis"
                | "nntplib"
                | "ossaudiodev"
                | "parser"
                | "pipes"
                | "smtpd"
                | "sndhdr"
                | "spwd"
                | "sunau"
                | "symbol"
                | "telnetlib"
                | "uu"
                | "xdrlib"
                | "zoneinfo"
        ) | (
            // Additional modules recognized on Python 3.10 only.
            10,
            "_aix_support"
                | "_bootsubprocess"
                | "_compression"
                | "_crypt"
                | "_msi"
                | "_posixshmem"
                | "_sha256"
                | "_sha512"
                | "_statistics"
                | "_testclinic"
                | "_testinternalcapi"
                | "_xxsubinterpreters"
                | "_zoneinfo"
                | "aifc"
                | "asynchat"
                | "asyncore"
                | "audioop"
                | "binhex"
                | "cgi"
                | "cgitb"
                | "chunk"
                | "crypt"
                | "distutils"
                | "graphlib"
                | "imghdr"
                | "imp"
                | "lib2to3"
                | "mailcap"
                | "msilib"
                | "nis"
                | "nntplib"
                | "ossaudiodev"
                | "pipes"
                | "smtpd"
                | "sndhdr"
                | "spwd"
                | "sunau"
                | "telnetlib"
                | "uu"
                | "xdrlib"
                | "xxlimited_35"
                | "zoneinfo"
        ) | (
            // Additional modules recognized on Python 3.11 only.
            11,
            "__hello_alias__"
                | "__hello_only__"
                | "__phello_alias__"
                | "_aix_support"
                | "_bootsubprocess"
                | "_compression"
                | "_crypt"
                | "_msi"
                | "_posixshmem"
                | "_sha256"
                | "_sha512"
                | "_statistics"
                | "_testclinic"
                | "_testinternalcapi"
                | "_tokenize"
                | "_typing"
                | "_xxsubinterpreters"
                | "_zoneinfo"
                | "aifc"
                | "asynchat"
                | "asyncore"
                | "audioop"
                | "cgi"
                | "cgitb"
                | "chunk"
                | "crypt"
                | "distutils"
                | "graphlib"
                | "imghdr"
                | "imp"
                | "lib2to3"
                | "mailcap"
                | "msilib"
                | "nis"
                | "nntplib"
                | "ossaudiodev"
                | "pipes"
                | "smtpd"
                | "sndhdr"
                | "spwd"
                | "sunau"
                | "telnetlib"
                | "tomllib"
                | "uu"
                | "xdrlib"
                | "xxlimited_35"
                | "zoneinfo"
        ) | (
            // Additional modules recognized on Python 3.12 only.
            12,
            "__hello_alias__"
                | "__hello_only__"
                | "__phello_alias__"
                | "_aix_support"
                | "_compression"
                | "_crypt"
                | "_msi"
                | "_posixshmem"
                | "_pydatetime"
                | "_pylong"
                | "_sha2"
                | "_statistics"
                | "_testclinic"
                | "_testinternalcapi"
                | "_testsinglephase"
                | "_tokenize"
                | "_typing"
                | "_wmi"
                | "_xxinterpchannels"
                | "_xxsubinterpreters"
                | "_zoneinfo"
                | "aifc"
                | "audioop"
                | "cgi"
                | "cgitb"
                | "chunk"
                | "crypt"
                | "graphlib"
                | "imghdr"
                | "lib2to3"
                | "mailcap"
                | "msilib"
                | "nis"
                | "nntplib"
                | "ossaudiodev"
                | "pipes"
                | "sndhdr"
                | "spwd"
                | "sunau"
                | "telnetlib"
                | "tomllib"
                | "uu"
                | "xdrlib"
                | "xxlimited_35"
                | "zoneinfo"
        ) | (
            // Additional modules recognized on Python 3.13 only.
            13,
            "__hello_alias__"
                | "__hello_only__"
                | "__phello_alias__"
                | "_aix_support"
                | "_android_support"
                | "_apple_support"
                | "_colorize"
                | "_compression"
                | "_interpchannels"
                | "_interpqueues"
                | "_interpreters"
                | "_ios_support"
                | "_opcode_metadata"
                | "_posixshmem"
                | "_pydatetime"
                | "_pylong"
                | "_pyrepl"
                | "_sha2"
                | "_statistics"
                | "_suggestions"
                | "_sysconfig"
                | "_testcapi_datetime"
                | "_testclinic"
                | "_testclinic_limited"
                | "_testexternalinspection"
                | "_testinternalcapi"
                | "_testlimitedcapi"
                | "_testsinglephase"
                | "_tokenize"
                | "_typing"
                | "_wmi"
                | "_zoneinfo"
                | "graphlib"
                | "tomllib"
                | "xxlimited_35"
                | "zoneinfo"
        ) | (
            // Additional modules recognized on Python 3.14 only.
            14,
            "__hello_alias__"
                | "__hello_only__"
                | "__phello_alias__"
                | "_aix_support"
                | "_android_support"
                | "_apple_support"
                | "_ast_unparse"
                | "_colorize"
                | "_hmac"
                | "_interpchannels"
                | "_interpqueues"
                | "_interpreters"
                | "_ios_support"
                | "_opcode_metadata"
                | "_posixshmem"
                | "_py_warnings"
                | "_pydatetime"
                | "_pylong"
                | "_pyrepl"
                | "_remote_debugging"
                | "_sha2"
                | "_statistics"
                | "_suggestions"
                | "_sysconfig"
                | "_testcapi_datetime"
                | "_testclinic"
                | "_testclinic_limited"
                | "_testinternalcapi"
                | "_testlimitedcapi"
                | "_testsinglephase"
                | "_tokenize"
                | "_types"
                | "_typing"
                | "_wmi"
                | "_zoneinfo"
                | "_zstd"
                | "annotationlib"
                | "compression"
                | "graphlib"
                | "tomllib"
                | "xxlimited_35"
                | "zoneinfo"
        )
    )
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_index/src/slice.rs | crates/ruff_index/src/slice.rs | use crate::Idx;
use crate::vec::IndexVec;
use std::fmt::{Debug, Formatter};
use std::marker::PhantomData;
use std::ops::{Index, IndexMut, Range};
/// A view into contiguous `T`s, indexed by `I` rather than by `usize`.
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
pub struct IndexSlice<I, T> {
    /// Zero-sized marker tying the slice to its index type `I`.
    index: PhantomData<I>,
    /// The underlying data, indexable by plain `usize`.
    pub raw: [T],
}
impl<I: Idx, T> IndexSlice<I, T> {
    /// Returns an empty slice.
    #[inline]
    pub const fn empty() -> &'static Self {
        Self::from_raw(&[])
    }
    /// Wraps a plain slice as an `IndexSlice`.
    #[inline]
    pub const fn from_raw(raw: &[T]) -> &Self {
        let ptr: *const [T] = raw;
        #[expect(unsafe_code)]
        // SAFETY: `IndexSlice` is `repr(transparent)` over a normal slice
        unsafe {
            &*(ptr as *const Self)
        }
    }
    /// Wraps a plain mutable slice as an `IndexSlice`.
    #[inline]
    pub fn from_raw_mut(raw: &mut [T]) -> &mut Self {
        let ptr: *mut [T] = raw;
        #[expect(unsafe_code)]
        // SAFETY: `IndexSlice` is `repr(transparent)` over a normal slice
        unsafe {
            &mut *(ptr as *mut Self)
        }
    }
    /// Returns the first element, or `None` if the slice is empty.
    #[inline]
    pub const fn first(&self) -> Option<&T> {
        self.raw.first()
    }
    /// Returns the number of elements in the slice.
    #[inline]
    pub const fn len(&self) -> usize {
        self.raw.len()
    }
    /// Returns `true` if the slice contains no elements.
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.raw.is_empty()
    }
    /// Returns an iterator over the elements.
    #[inline]
    pub fn iter(&self) -> std::slice::Iter<'_, T> {
        self.raw.iter()
    }
    /// Returns an iterator over the indices
    #[inline]
    pub fn indices(
        &self,
    ) -> impl DoubleEndedIterator<Item = I> + ExactSizeIterator + Clone + 'static {
        (0..self.len()).map(|n| I::new(n))
    }
    /// Returns an iterator over `(index, &element)` pairs.
    #[inline]
    pub fn iter_enumerated(
        &self,
    ) -> impl DoubleEndedIterator<Item = (I, &T)> + ExactSizeIterator + '_ {
        self.raw.iter().enumerate().map(|(n, t)| (I::new(n), t))
    }
    /// Returns a mutable iterator over the elements.
    #[inline]
    pub fn iter_mut(&mut self) -> std::slice::IterMut<'_, T> {
        self.raw.iter_mut()
    }
    /// Returns a mutable iterator over `(index, &mut element)` pairs.
    #[inline]
    pub fn iter_mut_enumerated(
        &mut self,
    ) -> impl DoubleEndedIterator<Item = (I, &mut T)> + ExactSizeIterator + '_ {
        self.raw.iter_mut().enumerate().map(|(n, t)| (I::new(n), t))
    }
    /// Returns the index of the last element, or `None` if the slice is empty.
    #[inline]
    pub fn last_index(&self) -> Option<I> {
        self.len().checked_sub(1).map(I::new)
    }
    /// Swaps the elements at indices `a` and `b`.
    #[inline]
    pub fn swap(&mut self, a: I, b: I) {
        self.raw.swap(a.index(), b.index());
    }
    /// Returns the element at `index`, or `None` if it is out of bounds.
    #[inline]
    pub fn get(&self, index: I) -> Option<&T> {
        self.raw.get(index.index())
    }
    /// Returns a mutable reference to the element at `index`, or `None` if it is out of bounds.
    #[inline]
    pub fn get_mut(&mut self, index: I) -> Option<&mut T> {
        self.raw.get_mut(index.index())
    }
    /// Binary-searches a sorted slice for `value`, returning `Ok` with the matching
    /// index or `Err` with the index where `value` could be inserted.
    #[inline]
    pub fn binary_search(&self, value: &T) -> Result<I, I>
    where
        T: Ord,
    {
        match self.raw.binary_search(value) {
            Ok(i) => Ok(Idx::new(i)),
            Err(i) => Err(Idx::new(i)),
        }
    }
}
impl<I, T> Debug for IndexSlice<I, T>
where
    I: Idx,
    T: Debug,
{
    /// Delegates formatting to the underlying slice.
    fn fmt(&self, fmt: &mut Formatter<'_>) -> std::fmt::Result {
        self.raw.fmt(fmt)
    }
}
/// Indexing by a typed index. Panics if the index is out of bounds.
impl<I: Idx, T> Index<I> for IndexSlice<I, T> {
    type Output = T;
    #[inline]
    fn index(&self, index: I) -> &T {
        &self.raw[index.index()]
    }
}
/// Slicing by a typed index range. Panics if the range is out of bounds.
impl<I: Idx, T> Index<Range<I>> for IndexSlice<I, T> {
    type Output = [T];
    #[inline]
    fn index(&self, range: Range<I>) -> &[T] {
        &self.raw[range.start.index()..range.end.index()]
    }
}
/// Mutable indexing by a typed index. Panics if the index is out of bounds.
impl<I: Idx, T> IndexMut<I> for IndexSlice<I, T> {
    #[inline]
    fn index_mut(&mut self, index: I) -> &mut T {
        &mut self.raw[index.index()]
    }
}
/// Mutable slicing by a typed index range. Panics if the range is out of bounds.
impl<I: Idx, T> IndexMut<Range<I>> for IndexSlice<I, T> {
    #[inline]
    fn index_mut(&mut self, range: Range<I>) -> &mut [T] {
        &mut self.raw[range.start.index()..range.end.index()]
    }
}
/// Iterating a shared slice yields `&T`, in index order.
impl<'a, I: Idx, T> IntoIterator for &'a IndexSlice<I, T> {
    type IntoIter = std::slice::Iter<'a, T>;
    type Item = &'a T;
    #[inline]
    fn into_iter(self) -> std::slice::Iter<'a, T> {
        self.raw.iter()
    }
}
/// Iterating a mutable slice yields `&mut T`, in index order.
impl<'a, I: Idx, T> IntoIterator for &'a mut IndexSlice<I, T> {
    type IntoIter = std::slice::IterMut<'a, T>;
    type Item = &'a mut T;
    #[inline]
    fn into_iter(self) -> std::slice::IterMut<'a, T> {
        self.raw.iter_mut()
    }
}
/// The owned counterpart of an `IndexSlice` is an [`IndexVec`].
impl<I: Idx, T: Clone> ToOwned for IndexSlice<I, T> {
    fn to_owned(&self) -> IndexVec<I, T> {
        IndexVec::from_raw(self.raw.to_owned())
    }
    fn clone_into(&self, target: &mut IndexVec<I, T>) {
        // Reuses `target`'s allocation where possible.
        self.raw.clone_into(&mut target.raw);
    }
}
/// The default shared slice is empty.
impl<I: Idx, T> Default for &IndexSlice<I, T> {
    #[inline]
    fn default() -> Self {
        IndexSlice::from_raw(Default::default())
    }
}
/// The default mutable slice is empty.
impl<I: Idx, T> Default for &mut IndexSlice<I, T> {
    #[inline]
    fn default() -> Self {
        IndexSlice::from_raw_mut(Default::default())
    }
}
// Whether `IndexSlice` is `Send` depends only on the data,
// not the phantom data.
#[expect(unsafe_code)]
// SAFETY: `PhantomData<I>` is zero-sized and carries no data, so sendability
// is determined entirely by `T`.
unsafe impl<I: Idx, T> Send for IndexSlice<I, T> where T: Send {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_index/src/vec.rs | crates/ruff_index/src/vec.rs | use crate::Idx;
use crate::slice::IndexSlice;
use std::borrow::{Borrow, BorrowMut};
use std::fmt::{Debug, Formatter};
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut, RangeBounds};
/// An owned sequence of `T` indexed by `I`
#[derive(Clone, PartialEq, Eq, Hash, get_size2::GetSize)]
#[repr(transparent)]
pub struct IndexVec<I, T> {
    /// The underlying storage, indexable by plain `usize`.
    pub raw: Vec<T>,
    /// Zero-sized marker tying the vector to its index type `I`.
    index: PhantomData<I>,
}
impl<I: Idx, T> IndexVec<I, T> {
    /// Creates an empty vector.
    #[inline]
    pub fn new() -> Self {
        Self {
            raw: Vec::new(),
            index: PhantomData,
        }
    }
    /// Creates an empty vector with space reserved for `capacity` elements.
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            raw: Vec::with_capacity(capacity),
            index: PhantomData,
        }
    }
    /// Wraps a plain `Vec` as an `IndexVec`.
    #[inline]
    pub fn from_raw(raw: Vec<T>) -> Self {
        Self {
            raw,
            index: PhantomData,
        }
    }
    /// Removes and yields the elements in `range` (a plain `usize` range).
    #[inline]
    pub fn drain<R: RangeBounds<usize>>(&mut self, range: R) -> impl Iterator<Item = T> + '_ {
        self.raw.drain(range)
    }
    /// Shortens the vector to at most `a` elements.
    #[inline]
    pub fn truncate(&mut self, a: usize) {
        self.raw.truncate(a);
    }
    /// Borrows the vector as an [`IndexSlice`].
    #[inline]
    pub fn as_slice(&self) -> &IndexSlice<I, T> {
        IndexSlice::from_raw(&self.raw)
    }
    /// Mutably borrows the vector as an [`IndexSlice`].
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut IndexSlice<I, T> {
        IndexSlice::from_raw_mut(&mut self.raw)
    }
    /// Appends `data` and returns the index it was stored at.
    #[inline]
    pub fn push(&mut self, data: T) -> I {
        let index = self.next_index();
        self.raw.push(data);
        index
    }
    /// Returns the index that the next `push` would assign.
    #[inline]
    pub fn next_index(&self) -> I {
        I::new(self.raw.len())
    }
    /// Drops excess capacity.
    #[inline]
    pub fn shrink_to_fit(&mut self) {
        self.raw.shrink_to_fit();
    }
    /// Resizes the vector to `new_len`, filling new slots with clones of `value`.
    #[inline]
    pub fn resize(&mut self, new_len: usize, value: T)
    where
        T: Clone,
    {
        self.raw.resize(new_len, value);
    }
}
impl<I, T> Debug for IndexVec<I, T>
where
    T: Debug,
{
    /// Delegates formatting to the underlying `Vec`.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        self.raw.fmt(f)
    }
}
/// An `IndexVec` dereferences to its [`IndexSlice`], so slice methods are
/// available directly on the vector.
impl<I: Idx, T> Deref for IndexVec<I, T> {
    type Target = IndexSlice<I, T>;
    fn deref(&self) -> &Self::Target {
        self.as_slice()
    }
}
impl<I: Idx, T> DerefMut for IndexVec<I, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.as_mut_slice()
    }
}
/// Allows `IndexVec` to be used where a borrowed [`IndexSlice`] is expected
/// (e.g. as a map key via `Borrow`).
impl<I: Idx, T> Borrow<IndexSlice<I, T>> for IndexVec<I, T> {
    fn borrow(&self) -> &IndexSlice<I, T> {
        self
    }
}
impl<I: Idx, T> BorrowMut<IndexSlice<I, T>> for IndexVec<I, T> {
    fn borrow_mut(&mut self) -> &mut IndexSlice<I, T> {
        self
    }
}
/// Appends all elements of `iter`, assigning them consecutive indices.
impl<I, T> Extend<T> for IndexVec<I, T> {
    #[inline]
    fn extend<Iter: IntoIterator<Item = T>>(&mut self, iter: Iter) {
        self.raw.extend(iter);
    }
}
/// Collects an iterator into an `IndexVec`, assigning indices in iteration order.
impl<I: Idx, T> FromIterator<T> for IndexVec<I, T> {
    #[inline]
    fn from_iter<Iter: IntoIterator<Item = T>>(iter: Iter) -> Self {
        Self::from_raw(Vec::from_iter(iter))
    }
}
/// Consuming iteration yields owned `T`s in index order.
impl<I: Idx, T> IntoIterator for IndexVec<I, T> {
    type IntoIter = std::vec::IntoIter<T>;
    type Item = T;
    #[inline]
    fn into_iter(self) -> std::vec::IntoIter<T> {
        self.raw.into_iter()
    }
}
/// Shared iteration yields `&T` in index order.
impl<'a, I: Idx, T> IntoIterator for &'a IndexVec<I, T> {
    type IntoIter = std::slice::Iter<'a, T>;
    type Item = &'a T;
    #[inline]
    fn into_iter(self) -> std::slice::Iter<'a, T> {
        self.iter()
    }
}
/// Mutable iteration yields `&mut T` in index order.
impl<'a, I: Idx, T> IntoIterator for &'a mut IndexVec<I, T> {
    type IntoIter = std::slice::IterMut<'a, T>;
    type Item = &'a mut T;
    #[inline]
    fn into_iter(self) -> std::slice::IterMut<'a, T> {
        self.iter_mut()
    }
}
/// The default vector is empty.
impl<I: Idx, T> Default for IndexVec<I, T> {
    #[inline]
    fn default() -> Self {
        IndexVec::new()
    }
}
/// Converts a fixed-size array into an `IndexVec`, preserving element order.
impl<I: Idx, T, const N: usize> From<[T; N]> for IndexVec<I, T> {
    #[inline]
    fn from(array: [T; N]) -> Self {
        IndexVec::from_raw(array.into())
    }
}
// Whether `IndexVec` is `Send` depends only on the data,
// not the phantom data.
#[expect(unsafe_code)]
// SAFETY: `PhantomData<I>` is zero-sized and carries no data, so sendability
// is determined entirely by `T`.
unsafe impl<I: Idx, T> Send for IndexVec<I, T> where T: Send {}
#[expect(unsafe_code)]
#[cfg(feature = "salsa")]
// Delegates salsa's in-place update protocol to the inner `Vec<T>`; the
// `PhantomData` field never needs updating.
unsafe impl<I, T> salsa::Update for IndexVec<I, T>
where
    T: salsa::Update,
{
    #[expect(unsafe_code)]
    unsafe fn maybe_update(old_pointer: *mut Self, new_value: Self) -> bool {
        // SAFETY: the caller guarantees `old_pointer` is valid and uniquely
        // borrowed for the duration of this call (salsa's `Update` contract).
        let old_vec: &mut IndexVec<I, T> = unsafe { &mut *old_pointer };
        unsafe { salsa::Update::maybe_update(&raw mut old_vec.raw, new_value.raw) }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_index/src/lib.rs | crates/ruff_index/src/lib.rs | //! Provides new-type wrappers for collections that are indexed by a [`Idx`] rather
//! than `usize`.
//!
//! Inspired by [rustc_index](https://github.com/rust-lang/rust/blob/master/compiler/rustc_index/src/lib.rs).
mod idx;
mod slice;
mod vec;
pub use idx::Idx;
pub use ruff_macros::newtype_index;
pub use slice::IndexSlice;
pub use vec::IndexVec;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_index/src/idx.rs | crates/ruff_index/src/idx.rs | use std::hash::Hash;
/// Represents a newtype wrapper used to index into a Vec or a slice.
///
/// You can use the [`newtype_index`](crate::newtype_index) macro to define your own index.
pub trait Idx: Copy + PartialEq + Eq + Hash + std::fmt::Debug + 'static {
    /// Creates an index from a raw `usize` position.
    fn new(value: usize) -> Self;
    /// Returns the raw `usize` position of this index.
    fn index(self) -> usize;
}
#[cfg(test)]
mod tests {
    use crate::newtype_index;
    use static_assertions::{assert_eq_size, assert_impl_all};
    // Allows the macro invocation below to work
    use crate as ruff_index;
    #[newtype_index]
    #[derive(PartialOrd, Ord)]
    struct MyIndex;
    assert_impl_all!(MyIndex: Ord, PartialOrd);
    // `Option<MyIndex>` is the same size as `MyIndex` (niche optimization).
    assert_eq_size!(MyIndex, Option<MyIndex>);
    #[test]
    #[should_panic(expected = "assertion failed: value <= Self::MAX")]
    fn from_u32_panics_for_u32_max() {
        // `u32::MAX` is reserved (it provides the `Option` niche above).
        MyIndex::from_u32(u32::MAX);
    }
    #[test]
    #[should_panic(expected = "assertion failed: value <= Self::MAX")]
    fn from_usize_panics_for_u32_max() {
        MyIndex::from_usize(u32::MAX as usize);
    }
    #[test]
    fn max_value() {
        let max_value = MyIndex::from_u32(u32::MAX - 1);
        assert_eq!(max_value.as_u32(), u32::MAX - 1);
    }
    #[test]
    fn max_value_usize() {
        let max_value = MyIndex::from_usize((u32::MAX - 1) as usize);
        assert_eq!(max_value.as_u32(), u32::MAX - 1);
    }
    #[test]
    fn debug() {
        let output = format!("{:?}", MyIndex::from(10u32));
        assert_eq!(output, "MyIndex(10)");
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_module_resolver/src/db.rs | crates/ty_module_resolver/src/db.rs | use ruff_db::Db as SourceDb;
use crate::resolve::SearchPaths;
#[salsa::db]
pub trait Db: SourceDb {
/// Returns the search paths for module resolution.
fn search_paths(&self) -> &SearchPaths;
}
#[cfg(test)]
pub(crate) mod tests {
use std::sync::{Arc, Mutex};
use ruff_db::Db as SourceDb;
use ruff_db::files::Files;
use ruff_db::system::{DbWithTestSystem, TestSystem};
use ruff_db::vendored::VendoredFileSystem;
use ruff_python_ast::PythonVersion;
use super::Db;
use crate::resolve::SearchPaths;
type Events = Arc<Mutex<Vec<salsa::Event>>>;
#[salsa::db]
#[derive(Clone)]
pub(crate) struct TestDb {
storage: salsa::Storage<Self>,
files: Files,
system: TestSystem,
vendored: VendoredFileSystem,
search_paths: Arc<SearchPaths>,
python_version: PythonVersion,
events: Events,
}
impl TestDb {
pub(crate) fn new() -> Self {
let events = Events::default();
Self {
storage: salsa::Storage::new(Some(Box::new({
let events = events.clone();
move |event| {
tracing::trace!("event: {event:?}");
let mut events = events.lock().unwrap();
events.push(event);
}
}))),
system: TestSystem::default(),
vendored: ty_vendored::file_system().clone(),
files: Files::default(),
search_paths: Arc::new(SearchPaths::empty(ty_vendored::file_system())),
python_version: PythonVersion::default(),
events,
}
}
pub(crate) fn with_search_paths(mut self, search_paths: SearchPaths) -> Self {
self.set_search_paths(search_paths);
self
}
pub(crate) fn with_python_version(mut self, python_version: PythonVersion) -> Self {
self.python_version = python_version;
self
}
pub(crate) fn set_search_paths(&mut self, search_paths: SearchPaths) {
search_paths.try_register_static_roots(self);
self.search_paths = Arc::new(search_paths);
}
/// Takes the salsa events.
pub(crate) fn take_salsa_events(&mut self) -> Vec<salsa::Event> {
let mut events = self.events.lock().unwrap();
std::mem::take(&mut *events)
}
/// Clears the salsa events.
pub(crate) fn clear_salsa_events(&mut self) {
self.take_salsa_events();
}
}
impl DbWithTestSystem for TestDb {
fn test_system(&self) -> &TestSystem {
&self.system
}
fn test_system_mut(&mut self) -> &mut TestSystem {
&mut self.system
}
}
#[salsa::db]
impl SourceDb for TestDb {
fn vendored(&self) -> &VendoredFileSystem {
&self.vendored
}
fn system(&self) -> &dyn ruff_db::system::System {
&self.system
}
fn files(&self) -> &Files {
&self.files
}
fn python_version(&self) -> PythonVersion {
self.python_version
}
}
#[salsa::db]
impl Db for TestDb {
fn search_paths(&self) -> &SearchPaths {
&self.search_paths
}
}
#[salsa::db]
impl salsa::Database for TestDb {}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_module_resolver/src/settings.rs | crates/ty_module_resolver/src/settings.rs | //! Search path configuration settings.
use ruff_db::system::{System, SystemPathBuf};
use ruff_db::vendored::VendoredFileSystem;
use crate::path::SearchPathError;
use crate::resolve::SearchPaths;
use crate::typeshed::TypeshedVersionsParseError;
/// How to handle apparent misconfiguration
#[derive(PartialEq, Eq, Debug, Copy, Clone, Default, get_size2::GetSize)]
pub enum MisconfigurationMode {
/// Settings Failure Is Not An Error.
///
/// This is used by the default database, which we are incentivized to make infallible,
/// while still trying to "do our best" to set things up properly where we can.
UseDefault,
/// Settings Failure Is An Error.
#[default]
Fail,
}
/// Configures the search paths for module resolution.
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct SearchPathSettings {
/// List of user-provided paths that should take first priority in the module resolution.
/// Examples in other type checkers are mypy's MYPYPATH environment variable,
/// or pyright's stubPath configuration setting.
pub extra_paths: Vec<SystemPathBuf>,
/// The root of the project, used for finding first-party modules.
pub src_roots: Vec<SystemPathBuf>,
/// Optional path to a "custom typeshed" directory on disk for us to use for standard-library types.
/// If this is not provided, we will fallback to our vendored typeshed stubs for the stdlib,
/// bundled as a zip file in the binary
pub custom_typeshed: Option<SystemPathBuf>,
/// List of site packages paths to use.
pub site_packages_paths: Vec<SystemPathBuf>,
/// Option path to the real stdlib on the system, and not some instance of typeshed.
///
/// We should ideally only ever use this for things like goto-definition,
/// where typeshed isn't the right answer.
pub real_stdlib_path: Option<SystemPathBuf>,
/// How to handle apparent misconfiguration
pub misconfiguration_mode: MisconfigurationMode,
}
impl SearchPathSettings {
pub fn new(src_roots: Vec<SystemPathBuf>) -> Self {
Self {
src_roots,
..SearchPathSettings::empty()
}
}
pub fn empty() -> Self {
SearchPathSettings {
src_roots: vec![],
extra_paths: vec![],
custom_typeshed: None,
site_packages_paths: vec![],
real_stdlib_path: None,
misconfiguration_mode: MisconfigurationMode::Fail,
}
}
pub fn to_search_paths(
&self,
system: &dyn System,
vendored: &VendoredFileSystem,
) -> Result<SearchPaths, SearchPathSettingsError> {
SearchPaths::from_settings(self, system, vendored)
}
}
/// Enumeration describing the various ways in which validation of the search paths options might fail.
///
/// If validation fails for a search path derived from the user settings,
/// a message must be displayed to the user,
/// as type checking cannot be done reliably in these circumstances.
#[derive(Debug, thiserror::Error)]
pub enum SearchPathSettingsError {
#[error(transparent)]
InvalidSearchPath(#[from] SearchPathError),
/// The typeshed path provided by the user is a directory,
/// but `stdlib/VERSIONS` could not be read.
/// (This is only relevant for stdlib search paths.)
#[error("Failed to read the custom typeshed versions file '{path}'")]
FailedToReadVersionsFile {
path: SystemPathBuf,
#[source]
error: std::io::Error,
},
/// The path provided by the user is a directory,
/// and a `stdlib/VERSIONS` file exists, but it fails to parse.
/// (This is only relevant for stdlib search paths.)
#[error(transparent)]
VersionsParseError(#[from] TypeshedVersionsParseError),
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_module_resolver/src/path.rs | crates/ty_module_resolver/src/path.rs | //! Internal abstractions for differentiating between different kinds of search paths.
use std::fmt;
use std::sync::Arc;
use camino::{Utf8Path, Utf8PathBuf};
use ruff_db::files::{File, FileError, FilePath, system_path_to_file, vendored_path_to_file};
use ruff_db::system::{System, SystemPath, SystemPathBuf};
use ruff_db::vendored::{VendoredPath, VendoredPathBuf};
use crate::Db;
use crate::module_name::ModuleName;
use crate::resolve::{PyTyped, ResolverContext};
use crate::typeshed::{TypeshedVersionsQueryResult, typeshed_versions};
/// A path that points to a Python module.
///
/// A `ModulePath` is made up of two elements:
/// - The [`SearchPath`] that was used to find this module.
/// This could point to a directory on disk or a directory
/// in the vendored zip archive.
/// - A relative path from the search path to the file
/// that contains the source code of the Python module in question.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub(crate) struct ModulePath {
search_path: SearchPath,
relative_path: Utf8PathBuf,
}
impl ModulePath {
#[must_use]
pub(crate) fn is_standard_library(&self) -> bool {
matches!(
&*self.search_path.0,
SearchPathInner::StandardLibraryCustom(_) | SearchPathInner::StandardLibraryVendored(_)
)
}
/// Returns true if this is a path to a "stub file."
///
/// i.e., A module whose file extension is `pyi`.
#[must_use]
pub(crate) fn is_stub_file(&self) -> bool {
self.relative_path.extension() == Some("pyi")
}
/// Returns true if this is a path to a "stub package."
///
/// i.e., A module whose top-most parent package corresponds to a
/// directory with a `-stubs` suffix in its name.
#[must_use]
pub(crate) fn is_stub_package(&self) -> bool {
let Some(first) = self.relative_path.components().next() else {
return false;
};
first.as_str().ends_with("-stubs")
}
pub(crate) fn push(&mut self, component: &str) {
if let Some(component_extension) = camino::Utf8Path::new(component).extension() {
assert!(
self.relative_path.extension().is_none(),
"Cannot push part {component} to {self:?}, which already has an extension"
);
if self.is_standard_library() {
assert_eq!(
component_extension, "pyi",
"Extension must be `pyi`; got `{component_extension}`"
);
} else {
assert!(
matches!(component_extension, "pyi" | "py"),
"Extension must be `py` or `pyi`; got `{component_extension}`"
);
}
}
self.relative_path.push(component);
}
pub(crate) fn pop(&mut self) -> bool {
self.relative_path.pop()
}
pub(super) fn search_path(&self) -> &SearchPath {
&self.search_path
}
#[must_use]
pub(super) fn is_directory(&self, resolver: &ResolverContext) -> bool {
let ModulePath {
search_path,
relative_path,
} = self;
match &*search_path.0 {
SearchPathInner::Extra(search_path)
| SearchPathInner::FirstParty(search_path)
| SearchPathInner::SitePackages(search_path)
| SearchPathInner::Editable(search_path)
| SearchPathInner::StandardLibraryReal(search_path) => {
system_path_to_file(resolver.db, search_path.join(relative_path))
== Err(FileError::IsADirectory)
}
SearchPathInner::StandardLibraryCustom(stdlib_root) => {
match query_stdlib_version(relative_path, resolver) {
TypeshedVersionsQueryResult::DoesNotExist => false,
TypeshedVersionsQueryResult::Exists
| TypeshedVersionsQueryResult::MaybeExists => {
system_path_to_file(resolver.db, stdlib_root.join(relative_path))
== Err(FileError::IsADirectory)
}
}
}
SearchPathInner::StandardLibraryVendored(stdlib_root) => {
match query_stdlib_version(relative_path, resolver) {
TypeshedVersionsQueryResult::DoesNotExist => false,
TypeshedVersionsQueryResult::Exists
| TypeshedVersionsQueryResult::MaybeExists => resolver
.vendored()
.is_directory(stdlib_root.join(relative_path)),
}
}
}
}
#[must_use]
pub(super) fn is_regular_package(&self, resolver: &ResolverContext) -> bool {
let ModulePath {
search_path,
relative_path,
} = self;
match &*search_path.0 {
SearchPathInner::Extra(search_path)
| SearchPathInner::FirstParty(search_path)
| SearchPathInner::SitePackages(search_path)
| SearchPathInner::Editable(search_path) => {
let absolute_path = search_path.join(relative_path);
system_path_to_file(resolver.db, absolute_path.join("__init__.py")).is_ok()
|| system_path_to_file(resolver.db, absolute_path.join("__init__.pyi")).is_ok()
}
SearchPathInner::StandardLibraryReal(search_path) => {
let absolute_path = search_path.join(relative_path);
system_path_to_file(resolver.db, absolute_path.join("__init__.py")).is_ok()
}
SearchPathInner::StandardLibraryCustom(search_path) => {
match query_stdlib_version(relative_path, resolver) {
TypeshedVersionsQueryResult::DoesNotExist => false,
TypeshedVersionsQueryResult::Exists
| TypeshedVersionsQueryResult::MaybeExists => system_path_to_file(
resolver.db,
search_path.join(relative_path).join("__init__.pyi"),
)
.is_ok(),
}
}
SearchPathInner::StandardLibraryVendored(search_path) => {
match query_stdlib_version(relative_path, resolver) {
TypeshedVersionsQueryResult::DoesNotExist => false,
TypeshedVersionsQueryResult::Exists
| TypeshedVersionsQueryResult::MaybeExists => resolver
.vendored()
.exists(search_path.join(relative_path).join("__init__.pyi")),
}
}
}
}
/// Get the `py.typed` info for this package (not considering parent packages)
pub(super) fn py_typed(&self, resolver: &ResolverContext) -> PyTyped {
let Some(py_typed_contents) = self.to_system_path().and_then(|path| {
let py_typed_path = path.join("py.typed");
let py_typed_file = system_path_to_file(resolver.db, py_typed_path).ok()?;
// If we fail to read it let's say that's like it doesn't exist
// (right now the difference between Untyped and Full is academic)
py_typed_file.read_to_string(resolver.db).ok()
}) else {
return PyTyped::Untyped;
};
// The python typing spec says to look for "partial\n" but in the wild we've seen:
//
// * PARTIAL\n
// * partial\\n (as in they typed "\n")
// * partial/n
//
// since the py.typed file never really grew any other contents, let's be permissive
if py_typed_contents.to_ascii_lowercase().contains("partial") {
PyTyped::Partial
} else {
PyTyped::Full
}
}
pub(super) fn to_system_path(&self) -> Option<SystemPathBuf> {
let ModulePath {
search_path,
relative_path,
} = self;
match &*search_path.0 {
SearchPathInner::Extra(search_path)
| SearchPathInner::FirstParty(search_path)
| SearchPathInner::SitePackages(search_path)
| SearchPathInner::Editable(search_path) => Some(search_path.join(relative_path)),
SearchPathInner::StandardLibraryReal(stdlib_root)
| SearchPathInner::StandardLibraryCustom(stdlib_root) => {
Some(stdlib_root.join(relative_path))
}
SearchPathInner::StandardLibraryVendored(_) => None,
}
}
#[must_use]
pub(super) fn to_file(&self, resolver: &ResolverContext) -> Option<File> {
let db = resolver.db;
let ModulePath {
search_path,
relative_path,
} = self;
match &*search_path.0 {
SearchPathInner::Extra(search_path)
| SearchPathInner::FirstParty(search_path)
| SearchPathInner::SitePackages(search_path)
| SearchPathInner::Editable(search_path) => {
system_path_to_file(db, search_path.join(relative_path)).ok()
}
SearchPathInner::StandardLibraryReal(search_path) => {
system_path_to_file(db, search_path.join(relative_path)).ok()
}
SearchPathInner::StandardLibraryCustom(stdlib_root) => {
match query_stdlib_version(relative_path, resolver) {
TypeshedVersionsQueryResult::DoesNotExist => None,
TypeshedVersionsQueryResult::Exists
| TypeshedVersionsQueryResult::MaybeExists => {
system_path_to_file(db, stdlib_root.join(relative_path)).ok()
}
}
}
SearchPathInner::StandardLibraryVendored(stdlib_root) => {
match query_stdlib_version(relative_path, resolver) {
TypeshedVersionsQueryResult::DoesNotExist => None,
TypeshedVersionsQueryResult::Exists
| TypeshedVersionsQueryResult::MaybeExists => {
vendored_path_to_file(db, stdlib_root.join(relative_path)).ok()
}
}
}
}
}
#[must_use]
pub(crate) fn to_module_name(&self) -> Option<ModuleName> {
fn strip_stubs(component: &str) -> &str {
component.strip_suffix("-stubs").unwrap_or(component)
}
let ModulePath {
search_path: _,
relative_path,
} = self;
if self.is_standard_library() {
stdlib_path_to_module_name(relative_path)
} else {
let parent = relative_path.parent()?;
let name = relative_path.file_stem()?;
if parent.as_str().is_empty() {
// Stubs should only be stripped when there is no
// extension. e.g., `foo-stubs` should be stripped
// by not `foo-stubs.pyi`. In the latter case,
// `ModuleName::new` will fail (which is what we want).
return ModuleName::new(if relative_path.extension().is_some() {
name
} else {
strip_stubs(relative_path.as_str())
});
}
let parent_components = parent.components().enumerate().map(|(index, component)| {
let component = component.as_str();
// For stub packages, strip the `-stubs` suffix from
// the first component because it isn't a valid module
// name part AND the module name is the name without
// the `-stubs`.
if index == 0 {
strip_stubs(component)
} else {
component
}
});
let skip_final_part =
relative_path.ends_with("__init__.py") || relative_path.ends_with("__init__.pyi");
if skip_final_part {
ModuleName::from_components(parent_components)
} else {
ModuleName::from_components(parent_components.chain([name]))
}
}
}
#[must_use]
pub(crate) fn with_pyi_extension(&self) -> Self {
let ModulePath {
search_path,
relative_path,
} = self;
ModulePath {
search_path: search_path.clone(),
relative_path: relative_path.with_extension("pyi"),
}
}
#[must_use]
pub(crate) fn with_py_extension(&self) -> Option<Self> {
if self.is_standard_library() {
return None;
}
let ModulePath {
search_path,
relative_path,
} = self;
Some(ModulePath {
search_path: search_path.clone(),
relative_path: relative_path.with_extension("py"),
})
}
}
impl PartialEq<SystemPathBuf> for ModulePath {
fn eq(&self, other: &SystemPathBuf) -> bool {
let ModulePath {
search_path,
relative_path,
} = self;
search_path
.as_system_path()
.and_then(|search_path| other.strip_prefix(search_path).ok())
.is_some_and(|other_relative_path| other_relative_path.as_utf8_path() == relative_path)
}
}
impl PartialEq<ModulePath> for SystemPathBuf {
fn eq(&self, other: &ModulePath) -> bool {
other.eq(self)
}
}
impl PartialEq<VendoredPathBuf> for ModulePath {
fn eq(&self, other: &VendoredPathBuf) -> bool {
let ModulePath {
search_path,
relative_path,
} = self;
search_path
.as_vendored_path()
.and_then(|search_path| other.strip_prefix(search_path).ok())
.is_some_and(|other_relative_path| other_relative_path.as_utf8_path() == relative_path)
}
}
impl PartialEq<ModulePath> for VendoredPathBuf {
fn eq(&self, other: &ModulePath) -> bool {
other.eq(self)
}
}
#[must_use]
fn stdlib_path_to_module_name(relative_path: &Utf8Path) -> Option<ModuleName> {
let parent_components = relative_path
.parent()?
.components()
.map(|component| component.as_str());
let skip_final_part = relative_path.ends_with("__init__.pyi");
if skip_final_part {
ModuleName::from_components(parent_components)
} else {
ModuleName::from_components(parent_components.chain(relative_path.file_stem()))
}
}
#[must_use]
fn query_stdlib_version(
relative_path: &Utf8Path,
context: &ResolverContext,
) -> TypeshedVersionsQueryResult {
let Some(module_name) = stdlib_path_to_module_name(relative_path) else {
return TypeshedVersionsQueryResult::DoesNotExist;
};
let ResolverContext {
db,
python_version,
mode: _,
} = context;
typeshed_versions(*db).query_module(&module_name, *python_version)
}
#[derive(Debug, thiserror::Error)]
pub enum SearchPathError {
/// The path provided by the user was not a directory
#[error("{0} does not point to a directory")]
NotADirectory(SystemPathBuf),
/// The path provided by the user is a directory,
/// but no `stdlib/` subdirectory exists.
/// (This is only relevant for stdlib search paths.)
#[error("The directory at {0} has no `stdlib/` subdirectory")]
NoStdlibSubdirectory(SystemPathBuf),
}
type SearchPathResult<T> = Result<T, SearchPathError>;
#[derive(Debug, Clone, PartialEq, Eq, Hash, get_size2::GetSize)]
enum SearchPathInner {
Extra(SystemPathBuf),
FirstParty(SystemPathBuf),
StandardLibraryCustom(SystemPathBuf),
StandardLibraryVendored(VendoredPathBuf),
StandardLibraryReal(SystemPathBuf),
SitePackages(SystemPathBuf),
Editable(SystemPathBuf),
}
/// Unification of the various kinds of search paths
/// that can be used to locate Python modules.
///
/// The different kinds of search paths are:
/// - "Extra" search paths: these go at the start of the module resolution order
/// - First-party search paths: the user code that we are directly invoked on.
/// - Standard-library search paths: these come in three different forms:
/// - Custom standard-library search paths: paths provided by the user
/// pointing to a custom typeshed directory on disk
/// - Vendored standard-library search paths: paths pointing to a directory
/// in the vendored zip archive.
/// - Real standard-library search paths: path pointing to a directory
/// of the real python stdlib for the environment.
/// - Site-packages search paths: search paths that point to the `site-packages`
/// directory, in which packages are installed from ``PyPI``.
/// - Editable search paths: Additional search paths added to the end of the module
/// resolution order. We discover these by iterating through `.pth` files in
/// the `site-packages` directory and searching for lines in those `.pth` files
/// that point to existing directories on disk. Such lines indicate editable
/// installations, which will be appended to `sys.path` at runtime,
/// and thus should also be considered valid search paths for our purposes.
///
/// For some of the above categories, there may be an arbitrary number
/// in any given list of search paths: for example, the "Extra" category
/// or the "Editable" category. For the "First-party", "Site-packages"
/// and "Standard-library" categories, however, there will always be exactly
/// one search path from that category in any given list of search paths.
#[derive(Debug, Clone, PartialEq, Eq, Hash, get_size2::GetSize)]
pub struct SearchPath(Arc<SearchPathInner>);
impl SearchPath {
fn directory_path(system: &dyn System, root: SystemPathBuf) -> SearchPathResult<SystemPathBuf> {
if system.is_directory(&root) {
Ok(root)
} else {
Err(SearchPathError::NotADirectory(root))
}
}
/// Create a new "Extra" search path
pub(crate) fn extra(system: &dyn System, root: SystemPathBuf) -> SearchPathResult<Self> {
Ok(Self(Arc::new(SearchPathInner::Extra(
Self::directory_path(system, root)?,
))))
}
/// Create a new first-party search path, pointing to the user code we were directly invoked on
pub(crate) fn first_party(system: &dyn System, root: SystemPathBuf) -> SearchPathResult<Self> {
Ok(Self(Arc::new(SearchPathInner::FirstParty(
Self::directory_path(system, root)?,
))))
}
/// Create a new standard-library search path pointing to a custom directory on disk
pub(crate) fn custom_stdlib(
system: &dyn System,
typeshed: &SystemPath,
) -> SearchPathResult<Self> {
if !system.is_directory(typeshed) {
return Err(SearchPathError::NotADirectory(typeshed.to_path_buf()));
}
let stdlib =
Self::directory_path(system, typeshed.join("stdlib")).map_err(|err| match err {
SearchPathError::NotADirectory(_) => {
SearchPathError::NoStdlibSubdirectory(typeshed.to_path_buf())
}
SearchPathError::NoStdlibSubdirectory(_) => err,
})?;
Ok(Self(Arc::new(SearchPathInner::StandardLibraryCustom(
stdlib,
))))
}
/// Create a new search path pointing to the `stdlib/` subdirectory in the vendored zip archive
#[must_use]
pub(crate) fn vendored_stdlib() -> Self {
Self(Arc::new(SearchPathInner::StandardLibraryVendored(
VendoredPathBuf::from("stdlib"),
)))
}
/// Create a new search path pointing to the real stdlib of a python install
pub(crate) fn real_stdlib(system: &dyn System, root: SystemPathBuf) -> SearchPathResult<Self> {
Ok(Self(Arc::new(SearchPathInner::StandardLibraryReal(
Self::directory_path(system, root)?,
))))
}
/// Create a new search path pointing to the `site-packages` directory on disk
///
/// TODO: the validation done here is somewhat redundant given that `site-packages`
/// are already validated at a higher level by the time we get here.
/// However, removing the validation here breaks some file-watching tests -- and
/// ultimately we'll probably want all search paths to be validated before a
/// `Program` is instantiated, so it doesn't seem like a huge priority right now.
pub(crate) fn site_packages(
system: &dyn System,
root: SystemPathBuf,
) -> SearchPathResult<Self> {
Ok(Self(Arc::new(SearchPathInner::SitePackages(
Self::directory_path(system, root)?,
))))
}
/// Create a new search path pointing to an editable installation
pub(crate) fn editable(system: &dyn System, root: SystemPathBuf) -> SearchPathResult<Self> {
Ok(Self(Arc::new(SearchPathInner::Editable(
Self::directory_path(system, root)?,
))))
}
#[must_use]
pub(crate) fn to_module_path(&self) -> ModulePath {
ModulePath {
search_path: self.clone(),
relative_path: Utf8PathBuf::new(),
}
}
/// Does this search path point to the standard library?
#[must_use]
pub fn is_standard_library(&self) -> bool {
matches!(
&*self.0,
SearchPathInner::StandardLibraryCustom(_)
| SearchPathInner::StandardLibraryVendored(_)
| SearchPathInner::StandardLibraryReal(_)
)
}
pub fn is_first_party(&self) -> bool {
matches!(&*self.0, SearchPathInner::FirstParty(_))
}
fn is_valid_extension(&self, extension: &str) -> bool {
if self.is_standard_library() {
extension == "pyi"
} else {
matches!(extension, "pyi" | "py")
}
}
#[must_use]
pub(crate) fn relativize_system_path(&self, path: &SystemPath) -> Option<ModulePath> {
self.relativize_system_path_only(path)
.map(|relative_path| ModulePath {
search_path: self.clone(),
relative_path: relative_path.as_utf8_path().to_path_buf(),
})
}
#[must_use]
pub(crate) fn relativize_system_path_only<'a>(
&self,
path: &'a SystemPath,
) -> Option<&'a SystemPath> {
if path
.extension()
.is_some_and(|extension| !self.is_valid_extension(extension))
{
return None;
}
match &*self.0 {
SearchPathInner::Extra(search_path)
| SearchPathInner::FirstParty(search_path)
| SearchPathInner::StandardLibraryCustom(search_path)
| SearchPathInner::StandardLibraryReal(search_path)
| SearchPathInner::SitePackages(search_path)
| SearchPathInner::Editable(search_path) => path.strip_prefix(search_path).ok(),
SearchPathInner::StandardLibraryVendored(_) => None,
}
}
#[must_use]
pub(crate) fn relativize_vendored_path(&self, path: &VendoredPath) -> Option<ModulePath> {
if path
.extension()
.is_some_and(|extension| !self.is_valid_extension(extension))
{
return None;
}
match &*self.0 {
SearchPathInner::Extra(_)
| SearchPathInner::FirstParty(_)
| SearchPathInner::StandardLibraryCustom(_)
| SearchPathInner::StandardLibraryReal(_)
| SearchPathInner::SitePackages(_)
| SearchPathInner::Editable(_) => None,
SearchPathInner::StandardLibraryVendored(search_path) => path
.strip_prefix(search_path)
.ok()
.map(|relative_path| ModulePath {
search_path: self.clone(),
relative_path: relative_path.as_utf8_path().to_path_buf(),
}),
}
}
#[must_use]
pub(super) fn as_path(&self) -> SystemOrVendoredPathRef<'_> {
match *self.0 {
SearchPathInner::Extra(ref path)
| SearchPathInner::FirstParty(ref path)
| SearchPathInner::StandardLibraryCustom(ref path)
| SearchPathInner::StandardLibraryReal(ref path)
| SearchPathInner::SitePackages(ref path)
| SearchPathInner::Editable(ref path) => SystemOrVendoredPathRef::System(path),
SearchPathInner::StandardLibraryVendored(ref path) => {
SystemOrVendoredPathRef::Vendored(path)
}
}
}
#[must_use]
pub fn as_system_path(&self) -> Option<&SystemPath> {
self.as_path().as_system_path()
}
#[must_use]
pub(crate) fn as_vendored_path(&self) -> Option<&VendoredPath> {
self.as_path().as_vendored_path()
}
/// Returns a succinct string representing the *internal kind* of this
/// search path. This is useful in snapshot tests where one wants to
/// capture this specific detail about search paths.
#[cfg(test)]
#[must_use]
pub(crate) fn debug_kind(&self) -> &'static str {
match *self.0 {
SearchPathInner::Extra(_) => "extra",
SearchPathInner::FirstParty(_) => "first-party",
SearchPathInner::StandardLibraryCustom(_) => "std-custom",
SearchPathInner::StandardLibraryReal(_) => "std-real",
SearchPathInner::SitePackages(_) => "site-packages",
SearchPathInner::Editable(_) => "editable",
SearchPathInner::StandardLibraryVendored(_) => "std-vendored",
}
}
/// Returns a string suitable for describing what kind of search path this is
/// in user-facing diagnostics.
#[must_use]
pub fn describe_kind(&self) -> &'static str {
match *self.0 {
SearchPathInner::Extra(_) => {
"extra search path specified on the CLI or in your config file"
}
SearchPathInner::FirstParty(_) => "first-party code",
SearchPathInner::StandardLibraryCustom(_) => {
"custom stdlib stubs specified on the CLI or in your config file"
}
SearchPathInner::StandardLibraryReal(_) => "runtime stdlib source code",
SearchPathInner::SitePackages(_) => "site-packages",
SearchPathInner::Editable(_) => "editable install",
SearchPathInner::StandardLibraryVendored(_) => "stdlib typeshed stubs vendored by ty",
}
}
}
impl PartialEq<SystemPath> for SearchPath {
fn eq(&self, other: &SystemPath) -> bool {
self.as_system_path().is_some_and(|path| path == other)
}
}
impl PartialEq<SearchPath> for SystemPath {
fn eq(&self, other: &SearchPath) -> bool {
other.eq(self)
}
}
impl PartialEq<SystemPathBuf> for SearchPath {
fn eq(&self, other: &SystemPathBuf) -> bool {
self.eq(&**other)
}
}
impl PartialEq<SearchPath> for SystemPathBuf {
fn eq(&self, other: &SearchPath) -> bool {
other.eq(self)
}
}
impl PartialEq<VendoredPath> for SearchPath {
fn eq(&self, other: &VendoredPath) -> bool {
self.as_vendored_path().is_some_and(|path| path == other)
}
}
impl PartialEq<SearchPath> for VendoredPath {
fn eq(&self, other: &SearchPath) -> bool {
other.eq(self)
}
}
impl PartialEq<VendoredPathBuf> for SearchPath {
fn eq(&self, other: &VendoredPathBuf) -> bool {
self.eq(&**other)
}
}
impl PartialEq<SearchPath> for VendoredPathBuf {
fn eq(&self, other: &SearchPath) -> bool {
other.eq(self)
}
}
impl fmt::Display for SearchPath {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &*self.0 {
SearchPathInner::Extra(system_path_buf)
| SearchPathInner::FirstParty(system_path_buf)
| SearchPathInner::SitePackages(system_path_buf)
| SearchPathInner::Editable(system_path_buf)
| SearchPathInner::StandardLibraryReal(system_path_buf)
| SearchPathInner::StandardLibraryCustom(system_path_buf) => system_path_buf.fmt(f),
SearchPathInner::StandardLibraryVendored(vendored_path_buf) => vendored_path_buf.fmt(f),
}
}
}
#[derive(Debug, Clone, Copy)]
pub(super) enum SystemOrVendoredPathRef<'db> {
System(&'db SystemPath),
Vendored(&'db VendoredPath),
}
impl<'db> SystemOrVendoredPathRef<'db> {
pub(super) fn try_from_file(db: &'db dyn Db, file: File) -> Option<Self> {
match file.path(db) {
FilePath::System(system) => Some(Self::System(system)),
FilePath::Vendored(vendored) => Some(Self::Vendored(vendored)),
FilePath::SystemVirtual(_) => None,
}
}
pub(super) fn file_name(&self) -> Option<&str> {
match self {
Self::System(system) => system.file_name(),
Self::Vendored(vendored) => vendored.file_name(),
}
}
pub(super) fn extension(&self) -> Option<&str> {
match self {
Self::System(system) => system.extension(),
Self::Vendored(vendored) => vendored.extension(),
}
}
pub(super) fn parent<'a>(&'a self) -> Option<SystemOrVendoredPathRef<'a>>
where
'a: 'db,
{
match self {
Self::System(system) => system.parent().map(Self::System),
Self::Vendored(vendored) => vendored.parent().map(Self::Vendored),
}
}
fn as_system_path(&self) -> Option<&'db SystemPath> {
match self {
SystemOrVendoredPathRef::System(path) => Some(path),
SystemOrVendoredPathRef::Vendored(_) => None,
}
}
fn as_vendored_path(&self) -> Option<&'db VendoredPath> {
match self {
SystemOrVendoredPathRef::Vendored(path) => Some(path),
SystemOrVendoredPathRef::System(_) => None,
}
}
}
impl<'a> From<&'a SystemPath> for SystemOrVendoredPathRef<'a> {
fn from(path: &'a SystemPath) -> SystemOrVendoredPathRef<'a> {
SystemOrVendoredPathRef::System(path)
}
}
impl<'a> From<&'a VendoredPath> for SystemOrVendoredPathRef<'a> {
fn from(path: &'a VendoredPath) -> SystemOrVendoredPathRef<'a> {
SystemOrVendoredPathRef::Vendored(path)
}
}
impl std::fmt::Display for SystemOrVendoredPathRef<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
SystemOrVendoredPathRef::System(system) => system.fmt(f),
SystemOrVendoredPathRef::Vendored(vendored) => vendored.fmt(f),
}
}
}
#[cfg(test)]
mod tests {
use ruff_db::Db;
use ruff_python_ast::PythonVersion;
use crate::db::tests::TestDb;
use crate::resolve::ModuleResolveMode;
use crate::testing::{FileSpec, MockedTypeshed, TestCase, TestCaseBuilder};
use super::*;
impl ModulePath {
#[must_use]
fn join(&self, component: &str) -> ModulePath {
let mut result = self.clone();
result.push(component);
result
}
}
impl SearchPath {
fn join(&self, component: &str) -> ModulePath {
self.to_module_path().join(component)
}
}
/// Checks the `.py`/`.pyi` extension helpers on `ModulePath` for both
/// a custom stdlib search path and a first-party search path.
#[test]
fn with_extension_methods() {
    let TestCase {
        db, src, stdlib, ..
    } = TestCaseBuilder::new()
        .with_mocked_typeshed(MockedTypeshed::default())
        .build();
    // A custom-stdlib module path refuses a `.py` extension
    // (as asserted here, `with_py_extension` yields `None` for it).
    assert_eq!(
        SearchPath::custom_stdlib(db.system(), stdlib.parent().unwrap())
            .unwrap()
            .to_module_path()
            .with_py_extension(),
        None
    );
    // ...but `.pyi` works and resolves inside the stdlib directory.
    assert_eq!(
        &SearchPath::custom_stdlib(db.system(), stdlib.parent().unwrap())
            .unwrap()
            .join("foo")
            .with_pyi_extension(),
        &stdlib.join("foo.pyi")
    );
    // First-party paths accept `.py`.
    assert_eq!(
        &SearchPath::first_party(db.system(), src.clone())
            .unwrap()
            .join("foo/bar")
            .with_py_extension()
            .unwrap(),
        &src.join("foo/bar.py")
    );
}
#[test]
fn module_name_1_part() {
let TestCase { db, src, .. } = TestCaseBuilder::new().build();
let src_search_path = SearchPath::first_party(db.system(), src).unwrap();
let foo_module_name = ModuleName::new_static("foo").unwrap();
assert_eq!(
src_search_path
.to_module_path()
.join("foo")
.to_module_name()
.as_ref(),
Some(&foo_module_name)
);
assert_eq!(
src_search_path.join("foo.pyi").to_module_name().as_ref(),
Some(&foo_module_name)
);
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_module_resolver/src/lib.rs | crates/ty_module_resolver/src/lib.rs | use std::iter::FusedIterator;
use ruff_db::system::SystemPath;
pub use db::Db;
pub use module::KnownModule;
pub use module::Module;
pub use module_name::{ModuleName, ModuleNameResolutionError};
pub use path::{SearchPath, SearchPathError};
pub use resolve::{
SearchPaths, file_to_module, resolve_module, resolve_module_confident, resolve_real_module,
resolve_real_module_confident, resolve_real_shadowable_module,
};
pub use settings::{MisconfigurationMode, SearchPathSettings, SearchPathSettingsError};
pub use typeshed::{
PyVersionRange, TypeshedVersions, TypeshedVersionsParseError, vendored_typeshed_versions,
};
pub use list::{all_modules, list_modules};
pub use resolve::{ModuleResolveMode, SearchPathIterator, search_paths};
mod db;
mod list;
mod module;
mod module_name;
mod path;
mod resolve;
mod settings;
mod typeshed;
#[cfg(test)]
mod testing;
/// Returns an iterator over all search paths pointing to a system path.
pub fn system_module_search_paths(db: &dyn Db) -> SystemModuleSearchPathsIter<'_> {
    // Always run in `StubsAllowed` mode because we want to include as much
    // as possible and we don't care about the "real" stdlib.
    let inner = search_paths(db, ModuleResolveMode::StubsAllowed);
    SystemModuleSearchPathsIter { inner }
}
/// Iterator returned by [`system_module_search_paths`]: yields only the
/// search paths that resolve to a system path (vendored paths are skipped).
pub struct SystemModuleSearchPathsIter<'db> {
    inner: SearchPathIterator<'db>,
}
impl<'db> Iterator for SystemModuleSearchPathsIter<'db> {
    type Item = &'db SystemPath;

    /// Advances the inner search-path iterator, skipping every entry
    /// that does not point at a system path.
    fn next(&mut self) -> Option<Self::Item> {
        self.inner.find_map(|path| path.as_system_path())
    }
}
impl FusedIterator for SystemModuleSearchPathsIter<'_> {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_module_resolver/src/module_name.rs | crates/ty_module_resolver/src/module_name.rs | use std::fmt;
use std::num::NonZeroU32;
use std::ops::Deref;
use compact_str::{CompactString, ToCompactString};
use ruff_db::files::File;
use ruff_python_ast as ast;
use ruff_python_stdlib::identifiers::is_identifier;
use crate::db::Db;
use crate::resolve::file_to_module;
/// A module name, e.g. `foo.bar`.
///
/// Always normalized to the absolute form (never a relative module name, i.e., never `.foo`).
#[derive(Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord, get_size2::GetSize)]
pub struct ModuleName(
    // Invariant: a non-empty, dot-separated sequence of valid Python
    // identifiers (every constructor validates via `is_valid_name` or
    // `is_identifier`).
    compact_str::CompactString,
);
impl ModuleName {
/// Creates a new module name for `name`. Returns `Some` if `name` is a valid, absolute
/// module name and `None` otherwise.
///
/// The module name is invalid if:
///
/// * The name is empty
/// * The name is relative
/// * The name ends with a `.`
/// * The name contains a sequence of multiple dots
/// * A component of a name (the part between two dots) isn't a valid python identifier.
#[inline]
#[must_use]
pub fn new(name: &str) -> Option<Self> {
    if Self::is_valid_name(name) {
        Some(Self(CompactString::from(name)))
    } else {
        None
    }
}
/// Creates a new module name for `name` where `name` is a static string.
/// Returns `Some` if `name` is a valid, absolute module name and `None` otherwise.
///
/// The module name is invalid if:
///
/// * The name is empty
/// * The name is relative
/// * The name ends with a `.`
/// * The name contains a sequence of multiple dots
/// * A component of a name (the part between two dots) isn't a valid python identifier.
///
/// ## Examples
///
/// ```
/// use ty_module_resolver::ModuleName;
///
/// assert_eq!(ModuleName::new_static("foo.bar").as_deref(), Some("foo.bar"));
/// assert_eq!(ModuleName::new_static(""), None);
/// assert_eq!(ModuleName::new_static("..foo"), None);
/// assert_eq!(ModuleName::new_static(".foo"), None);
/// assert_eq!(ModuleName::new_static("foo."), None);
/// assert_eq!(ModuleName::new_static("foo..bar"), None);
/// assert_eq!(ModuleName::new_static("2000"), None);
/// ```
#[inline]
#[must_use]
pub fn new_static(name: &'static str) -> Option<Self> {
    if Self::is_valid_name(name) {
        // `const_new` can reuse the `&'static str` storage.
        Some(Self(CompactString::const_new(name)))
    } else {
        None
    }
}
/// A name is valid iff it is non-empty and every dot-separated
/// component is a legal Python identifier.
#[must_use]
fn is_valid_name(name: &str) -> bool {
    if name.is_empty() {
        return false;
    }
    name.split('.').all(is_identifier)
}
/// An iterator over the components of the module name:
///
/// # Examples
///
/// ```
/// use ty_module_resolver::ModuleName;
///
/// assert_eq!(ModuleName::new_static("foo.bar.baz").unwrap().components().collect::<Vec<_>>(), vec!["foo", "bar", "baz"]);
/// ```
#[must_use]
pub fn components(&self) -> impl DoubleEndedIterator<Item = &str> {
    // Invariant: the stored name is non-empty with no empty components,
    // so this yields at least one non-empty item.
    self.0.split('.')
}
/// The name of this module's immediate parent, if it has a parent.
///
/// # Examples
///
/// ```
/// use ty_module_resolver::ModuleName;
///
/// assert_eq!(ModuleName::new_static("foo.bar").unwrap().parent(), Some(ModuleName::new_static("foo").unwrap()));
/// assert_eq!(ModuleName::new_static("foo.bar.baz").unwrap().parent(), Some(ModuleName::new_static("foo.bar").unwrap()));
/// assert_eq!(ModuleName::new_static("root").unwrap().parent(), None);
/// ```
#[must_use]
pub fn parent(&self) -> Option<ModuleName> {
    // Everything before the last `.` is the parent; a top-level module
    // (no dot) has none.
    self.0
        .rsplit_once('.')
        .map(|(parent, _)| Self(parent.to_compact_string()))
}
/// Returns `true` if the name starts with `other`.
///
/// This is equivalent to checking if `self` is a sub-module of `other`.
///
/// # Examples
///
/// ```
/// use ty_module_resolver::ModuleName;
///
/// assert!(ModuleName::new_static("foo.bar").unwrap().starts_with(&ModuleName::new_static("foo").unwrap()));
///
/// assert!(!ModuleName::new_static("foo.bar").unwrap().starts_with(&ModuleName::new_static("bar").unwrap()));
/// assert!(!ModuleName::new_static("foo_bar").unwrap().starts_with(&ModuleName::new_static("foo").unwrap()));
/// ```
#[must_use]
pub fn starts_with(&self, other: &ModuleName) -> bool {
    // Compare component-wise (not textually) so that `foo_bar` does not
    // count as starting with `foo`.
    let mut own_components = self.components();
    other
        .components()
        .all(|component| own_components.next() == Some(component))
}
/// Given a parent module name of this module name, return the relative
/// portion of this module name.
///
/// For example, a parent module name of `importlib` with this module name
/// as `importlib.resources`, this returns `resources`.
///
/// If `parent` isn't a parent name of this module name, then this returns
/// `None`.
///
/// # Examples
///
/// This example shows some cases where `parent` is an actual parent of the
/// module name:
///
/// ```
/// use ty_module_resolver::ModuleName;
///
/// let this = ModuleName::new_static("importlib.resources").unwrap();
/// let parent = ModuleName::new_static("importlib").unwrap();
/// assert_eq!(this.relative_to(&parent), ModuleName::new_static("resources"));
///
/// let this = ModuleName::new_static("foo.bar.baz.quux").unwrap();
/// let parent = ModuleName::new_static("foo.bar").unwrap();
/// assert_eq!(this.relative_to(&parent), ModuleName::new_static("baz.quux"));
/// ```
///
/// This shows some cases where it isn't a parent:
///
/// ```
/// use ty_module_resolver::ModuleName;
///
/// let this = ModuleName::new_static("importliblib.resources").unwrap();
/// let parent = ModuleName::new_static("importlib").unwrap();
/// assert_eq!(this.relative_to(&parent), None);
///
/// let this = ModuleName::new_static("foo.bar.baz.quux").unwrap();
/// let parent = ModuleName::new_static("foo.barbaz").unwrap();
/// assert_eq!(this.relative_to(&parent), None);
///
/// let this = ModuleName::new_static("importlibbbbb.resources").unwrap();
/// let parent = ModuleName::new_static("importlib").unwrap();
/// assert_eq!(this.relative_to(&parent), None);
/// ```
#[must_use]
pub fn relative_to(&self, parent: &ModuleName) -> Option<ModuleName> {
    // The second `strip_prefix` also rejects `self == parent`: the first
    // strip would then leave an empty string with no leading `.`.
    let relative_name = self.0.strip_prefix(&*parent.0)?.strip_prefix('.')?;
    // At this point, `relative_name` *has* to be a
    // proper suffix of `self`. Otherwise, one of the two
    // `strip_prefix` calls above would return `None`.
    // (Notably, a valid `ModuleName` cannot end with a `.`.)
    assert!(!relative_name.is_empty());
    // This must also be true for this implementation to be
    // correct. That is, the parent must be a prefix of this
    // module name according to the rules of how module name
    // components are split up. This could technically trip if
    // the implementation of `starts_with` diverges from the
    // implementation in this routine. But that seems unlikely.
    debug_assert!(self.starts_with(parent));
    Some(ModuleName(CompactString::from(relative_name)))
}
/// The module name as a plain string slice.
#[must_use]
#[inline]
pub fn as_str(&self) -> &str {
    self.0.as_str()
}
/// Construct a [`ModuleName`] from a sequence of parts.
///
/// Returns `None` if the sequence is empty or any part is not a valid
/// Python identifier.
///
/// # Examples
///
/// ```
/// use ty_module_resolver::ModuleName;
///
/// assert_eq!(&*ModuleName::from_components(["a"]).unwrap(), "a");
/// assert_eq!(&*ModuleName::from_components(["a", "b"]).unwrap(), "a.b");
/// assert_eq!(&*ModuleName::from_components(["a", "b", "c"]).unwrap(), "a.b.c");
///
/// assert_eq!(ModuleName::from_components(["a-b"]), None);
/// assert_eq!(ModuleName::from_components(["a", "a-b"]), None);
/// assert_eq!(ModuleName::from_components(["a", "b", "a-b-c"]), None);
/// ```
#[must_use]
pub fn from_components<'a>(components: impl IntoIterator<Item = &'a str>) -> Option<Self> {
    let mut components = components.into_iter();
    // An empty sequence is invalid; so is any non-identifier component.
    let first_part = components.next()?;
    if !is_identifier(first_part) {
        return None;
    }
    // Build the dotted name directly in a `CompactString`. This avoids
    // the intermediate `String` (`format!` + `CompactString::from(&name)`)
    // that the previous implementation allocated for multi-part names,
    // and collapses the duplicated single/multi-part validation branches.
    let mut name = CompactString::from(first_part);
    for part in components {
        if !is_identifier(part) {
            return None;
        }
        name.push('.');
        name.push_str(part);
    }
    Some(Self(name))
}
/// Extend `self` with the components of `other`
///
/// # Examples
///
/// ```
/// use ty_module_resolver::ModuleName;
///
/// let mut module_name = ModuleName::new_static("foo").unwrap();
/// module_name.extend(&ModuleName::new_static("bar").unwrap());
/// assert_eq!(&module_name, "foo.bar");
/// module_name.extend(&ModuleName::new_static("baz.eggs.ham").unwrap());
/// assert_eq!(&module_name, "foo.bar.baz.eggs.ham");
/// ```
pub fn extend(&mut self, other: &ModuleName) {
    // Both names are valid, so joining them with a `.` preserves the
    // `ModuleName` invariant.
    self.0.push('.');
    self.0.push_str(other.as_str());
}
/// Returns an iterator of this module name and all of its parent modules.
///
/// # Examples
///
/// ```
/// use ty_module_resolver::ModuleName;
///
/// assert_eq!(
///     ModuleName::new_static("foo.bar.baz").unwrap().ancestors().collect::<Vec<_>>(),
///     vec![
///         ModuleName::new_static("foo.bar.baz").unwrap(),
///         ModuleName::new_static("foo.bar").unwrap(),
///         ModuleName::new_static("foo").unwrap(),
///     ],
/// );
/// ```
pub fn ancestors(&self) -> impl Iterator<Item = Self> {
    // Yields `self` first, then each successive `parent()` up to the
    // top-level module.
    std::iter::successors(Some(self.clone()), Self::parent)
}
/// Extracts a module name from the AST of a `from <module> import ...`
/// statement.
///
/// `importing_file` must be the [`File`] that contains the import
/// statement.
///
/// This handles relative import statements.
pub fn from_import_statement<'db>(
    db: &'db dyn Db,
    importing_file: File,
    node: &'db ast::StmtImportFrom,
) -> Result<Self, ModuleNameResolutionError> {
    // Exhaustive destructuring: adding a field to `StmtImportFrom`
    // surfaces here as a compile error so this code gets reviewed.
    let ast::StmtImportFrom {
        module,
        level,
        names: _,
        range: _,
        node_index: _,
    } = node;
    Self::from_identifier_parts(db, importing_file, module.as_deref(), *level)
}
/// Computes the absolute module name from the LHS components of `from LHS import RHS`
pub fn from_identifier_parts(
    db: &dyn Db,
    importing_file: File,
    module: Option<&str>,
    level: u32,
) -> Result<Self, ModuleNameResolutionError> {
    match NonZeroU32::new(level) {
        // One or more leading dots: resolve relative to the importing file.
        Some(level) => relative_module_name(db, importing_file, module, level),
        // No dots: the name must already be a valid absolute module name.
        None => module
            .and_then(Self::new)
            .ok_or(ModuleNameResolutionError::InvalidSyntax),
    }
}
/// Computes the absolute module name for the package this file belongs to.
///
/// i.e. this resolves `.`
pub fn package_for_file(
    db: &dyn Db,
    importing_file: File,
) -> Result<Self, ModuleNameResolutionError> {
    // Equivalent to resolving `from . import ...`: level 1, no tail.
    Self::from_identifier_parts(db, importing_file, None, 1)
}
}
impl Deref for ModuleName {
    type Target = str;

    // Lets a `ModuleName` be used wherever a `&str` is expected.
    #[inline]
    fn deref(&self) -> &Self::Target {
        self.as_str()
    }
}
// Allow direct comparison against string slices: `module_name == "foo.bar"`.
impl PartialEq<str> for ModuleName {
    fn eq(&self, other: &str) -> bool {
        self.as_str() == other
    }
}
// Mirror impl so the comparison also works with the operands flipped.
impl PartialEq<ModuleName> for str {
    fn eq(&self, other: &ModuleName) -> bool {
        self == other.as_str()
    }
}
// Displays the dotted name verbatim (e.g. `foo.bar`).
impl std::fmt::Display for ModuleName {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.0)
    }
}
/// Given a `from .foo import bar` relative import, resolve the relative module
/// we're importing `bar` from into an absolute [`ModuleName`]
/// using the name of the module we're currently analyzing.
///
/// - `level` is the number of dots at the beginning of the relative module name:
///   - `from .foo.bar import baz` => `level == 1`
///   - `from ...foo.bar import baz` => `level == 3`
/// - `tail` is the relative module name stripped of all leading dots:
///   - `from .foo import bar` => `tail == "foo"`
///   - `from ..foo.bar import baz` => `tail == "foo.bar"`
fn relative_module_name(
    db: &dyn Db,
    importing_file: File,
    tail: Option<&str>,
    level: NonZeroU32,
) -> Result<ModuleName, ModuleNameResolutionError> {
    let module = file_to_module(db, importing_file)
        .ok_or(ModuleNameResolutionError::UnknownCurrentModule)?;
    // Inside a package, the first `.` refers to the package itself, so
    // one fewer ancestor needs to be climbed.
    let dots_to_climb = if module.kind(db).is_package() {
        level.get().saturating_sub(1)
    } else {
        level.get()
    };
    // `ancestors()` yields the module itself first, so `nth(n)` climbs
    // exactly `n` levels; running out of ancestors means the import
    // escapes the search path.
    let mut module_name = module
        .name(db)
        .ancestors()
        .nth(dots_to_climb as usize)
        .ok_or(ModuleNameResolutionError::TooManyDots)?;
    if let Some(tail) = tail {
        let tail = ModuleName::new(tail).ok_or(ModuleNameResolutionError::InvalidSyntax)?;
        module_name.extend(&tail);
    }
    Ok(module_name)
}
/// Various ways in which resolving a [`ModuleName`]
/// from an [`ast::StmtImport`] or [`ast::StmtImportFrom`] node might fail
///
/// Returned by [`ModuleName::from_import_statement`] and
/// [`ModuleName::from_identifier_parts`].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ModuleNameResolutionError {
    /// The import statement has invalid syntax
    InvalidSyntax,

    /// We couldn't resolve the file we're currently analyzing back to a module
    /// (Only necessary for relative import statements)
    UnknownCurrentModule,

    /// The relative import statement seems to take us outside of the module search path
    /// (e.g. our current module is `foo.bar`, and the relative import statement in `foo.bar`
    /// is `from ....baz import spam`)
    TooManyDots,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_module_resolver/src/module.rs | crates/ty_module_resolver/src/module.rs | use std::fmt::Formatter;
use std::str::FromStr;
use ruff_db::files::{File, system_path_to_file, vendored_path_to_file};
use ruff_db::system::SystemPath;
use ruff_db::vendored::VendoredPath;
use salsa::Database;
use salsa::plumbing::AsId;
use crate::Db;
use crate::module_name::ModuleName;
use crate::path::{SearchPath, SystemOrVendoredPathRef};
/// Representation of a Python module.
#[derive(Clone, Copy, Eq, Hash, PartialEq, salsa::Supertype, salsa::Update)]
pub enum Module<'db> {
    /// A module backed by a file (`foo.py`, or `foo/__init__.py{,i}` for packages).
    File(FileModule<'db>),
    /// A namespace package: no file of its own; may span multiple
    /// directories across search paths.
    Namespace(NamespacePackage<'db>),
}
// The Salsa heap is tracked separately.
impl get_size2::GetSize for Module<'_> {}
#[salsa::tracked]
impl<'db> Module<'db> {
    /// Creates a module backed by a concrete file, detecting on the way
    /// whether it is one of the special-cased stdlib modules.
    pub(crate) fn file_module(
        db: &'db dyn Db,
        name: ModuleName,
        kind: ModuleKind,
        search_path: SearchPath,
        file: File,
    ) -> Self {
        let known = KnownModule::try_from_search_path_and_name(&search_path, &name);
        Self::File(FileModule::new(db, name, kind, search_path, file, known))
    }

    /// Creates a namespace package (no backing file).
    pub(crate) fn namespace_package(db: &'db dyn Db, name: ModuleName) -> Self {
        Self::Namespace(NamespacePackage::new(db, name))
    }

    /// The absolute name of the module (e.g. `foo.bar`)
    pub fn name(self, db: &'db dyn Database) -> &'db ModuleName {
        match self {
            Module::File(module) => module.name(db),
            Module::Namespace(ref package) => package.name(db),
        }
    }

    /// The file to the source code that defines this module
    ///
    /// This is `None` for namespace packages.
    pub fn file(self, db: &'db dyn Database) -> Option<File> {
        match self {
            Module::File(module) => Some(module.file(db)),
            Module::Namespace(_) => None,
        }
    }

    /// Is this a module that we special-case somehow? If so, which one?
    pub fn known(self, db: &'db dyn Database) -> Option<KnownModule> {
        match self {
            Module::File(module) => module.known(db),
            Module::Namespace(_) => None,
        }
    }

    /// Does this module represent the given known module?
    pub fn is_known(self, db: &'db dyn Database, known_module: KnownModule) -> bool {
        self.known(db) == Some(known_module)
    }

    /// The search path from which the module was resolved.
    ///
    /// It is guaranteed that if `None` is returned, then this is a namespace
    /// package. Otherwise, this is a regular package or file module.
    pub fn search_path(self, db: &'db dyn Database) -> Option<&'db SearchPath> {
        match self {
            Module::File(module) => Some(module.search_path(db)),
            Module::Namespace(_) => None,
        }
    }

    /// Determine whether this module is a single-file module or a package
    pub fn kind(self, db: &'db dyn Database) -> ModuleKind {
        match self {
            Module::File(module) => module.kind(db),
            // Namespace packages are packages by definition.
            Module::Namespace(_) => ModuleKind::Package,
        }
    }

    /// Return a list of all submodules of this module.
    ///
    /// Returns an empty list if the module is not a package, if it is an empty package,
    /// or if it is a namespace package (one without an `__init__.py` or `__init__.pyi` file).
    ///
    /// The names returned correspond to the "base" name of the module.
    /// That is, `{self.name}.{basename}` should give the full module name.
    pub fn all_submodules(self, db: &'db dyn Db) -> &'db [Module<'db>] {
        all_submodule_names_for_package(db, self)
            .as_deref()
            .unwrap_or_default()
    }
}
impl std::fmt::Debug for Module<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Rich output needs an attached salsa database to resolve the
        // interned fields; without one we fall back to the raw salsa id.
        salsa::with_attached_database(|db| {
            f.debug_struct("Module")
                .field("name", &self.name(db))
                .field("kind", &self.kind(db))
                .field("file", &self.file(db))
                .field("search_path", &self.search_path(db))
                .field("known", &self.known(db))
                .finish()
        })
        .unwrap_or_else(|| f.debug_tuple("Module").field(&self.as_id()).finish())
    }
}
/// Computes the direct submodules of `module` by listing the directory
/// that contains the package's `__init__.py{,i}`.
///
/// Returns `None` for non-packages and namespace packages (see the
/// comments inside for why namespace packages are excluded).
#[allow(clippy::ref_option)]
#[salsa::tracked(returns(ref), heap_size=ruff_memory_usage::heap_size)]
fn all_submodule_names_for_package<'db>(
    db: &'db dyn Db,
    module: Module<'db>,
) -> Option<Vec<Module<'db>>> {
    // A directory is always a submodule candidate; a file must be
    // `*.py`/`*.pyi` and not the package's own `__init__` file.
    fn is_submodule(
        is_dir: bool,
        is_file: bool,
        basename: Option<&str>,
        extension: Option<&str>,
    ) -> bool {
        is_dir
            || (is_file
                && matches!(extension, Some("py" | "pyi"))
                && !matches!(basename, Some("__init__.py" | "__init__.pyi")))
    }

    // Resolve a sub-package's `__init__` on the system, preferring the
    // `.pyi` stub over the `.py` source.
    fn find_package_init_system(db: &dyn Db, dir: &SystemPath) -> Option<File> {
        system_path_to_file(db, dir.join("__init__.pyi"))
            .or_else(|_| system_path_to_file(db, dir.join("__init__.py")))
            .ok()
    }

    // Same as above for the vendored (typeshed) file system.
    fn find_package_init_vendored(db: &dyn Db, dir: &VendoredPath) -> Option<File> {
        vendored_path_to_file(db, dir.join("__init__.pyi"))
            .or_else(|_| vendored_path_to_file(db, dir.join("__init__.py")))
            .ok()
    }

    // It would be complex and expensive to compute all submodules for
    // namespace packages, since a namespace package doesn't correspond
    // to a single file; it can span multiple directories across multiple
    // search paths. For now, we only compute submodules for traditional
    // packages that exist in a single directory on a single search path.
    let Module::File(module) = module else {
        return None;
    };
    if !matches!(module.kind(db), ModuleKind::Package) {
        return None;
    }
    let path = SystemOrVendoredPathRef::try_from_file(db, module.file(db))?;
    debug_assert!(
        matches!(path.file_name(), Some("__init__.py" | "__init__.pyi")),
        "expected package file `{:?}` to be `__init__.py` or `__init__.pyi`",
        path.file_name(),
    );
    Some(match path.parent()? {
        SystemOrVendoredPathRef::System(parent_directory) => {
            // Read the revision on the corresponding file root to
            // register an explicit dependency on this directory
            // tree. When the revision gets bumped, the cache
            // that Salsa creates does for this routine will be
            // invalidated.
            let root = db.files().expect_root(db, parent_directory);
            let _ = root.revision(db);
            db.system()
                .read_directory(parent_directory)
                .inspect_err(|err| {
                    tracing::debug!(
                        "Failed to read {parent_directory:?} when looking for \
                         its possible submodules: {err}"
                    );
                })
                .ok()?
                .flatten()
                .filter(|entry| {
                    let ty = entry.file_type();
                    let path = entry.path();
                    is_submodule(
                        ty.is_directory(),
                        ty.is_file(),
                        path.file_name(),
                        path.extension(),
                    )
                })
                .filter_map(|entry| {
                    let stem = entry.path().file_stem()?;
                    let mut name = module.name(db).clone();
                    name.extend(&ModuleName::new(stem)?);
                    let (kind, file) = if entry.file_type().is_directory() {
                        (
                            ModuleKind::Package,
                            // Directories without an `__init__` are skipped
                            // (the `?` bails out of this candidate only).
                            find_package_init_system(db, entry.path())?,
                        )
                    } else {
                        let file = system_path_to_file(db, entry.path()).ok()?;
                        (ModuleKind::Module, file)
                    };
                    Some(Module::file_module(
                        db,
                        name,
                        kind,
                        module.search_path(db).clone(),
                        file,
                    ))
                })
                .collect()
        }
        // Same walk as above, but over the vendored file system (which is
        // infallible to read).
        SystemOrVendoredPathRef::Vendored(parent_directory) => db
            .vendored()
            .read_directory(parent_directory)
            .into_iter()
            .filter(|entry| {
                let ty = entry.file_type();
                let path = entry.path();
                is_submodule(
                    ty.is_directory(),
                    ty.is_file(),
                    path.file_name(),
                    path.extension(),
                )
            })
            .filter_map(|entry| {
                let stem = entry.path().file_stem()?;
                let mut name = module.name(db).clone();
                name.extend(&ModuleName::new(stem)?);
                let (kind, file) = if entry.file_type().is_directory() {
                    (
                        ModuleKind::Package,
                        find_package_init_vendored(db, entry.path())?,
                    )
                } else {
                    let file = vendored_path_to_file(db, entry.path()).ok()?;
                    (ModuleKind::Module, file)
                };
                Some(Module::file_module(
                    db,
                    name,
                    kind,
                    module.search_path(db).clone(),
                    file,
                ))
            })
            .collect(),
    })
}
/// A module that resolves to a file (`lib.py` or `package/__init__.py`)
#[salsa::interned(debug, heap_size=ruff_memory_usage::heap_size)]
pub struct FileModule<'db> {
    /// Absolute dotted name of the module.
    #[returns(ref)]
    pub(super) name: ModuleName,
    /// Single-file module or package.
    pub(super) kind: ModuleKind,
    /// The search path this module was resolved from.
    #[returns(ref)]
    pub(super) search_path: SearchPath,
    /// The file containing the module's source (the `__init__` file for packages).
    pub(super) file: File,
    /// Set when this is a special-cased stdlib module.
    pub(super) known: Option<KnownModule>,
}
/// A namespace package.
///
/// Namespace packages are special because there are
/// multiple possible paths and they have no corresponding code file.
#[salsa::interned(debug, heap_size=ruff_memory_usage::heap_size)]
pub struct NamespacePackage<'db> {
    /// Absolute dotted name of the package (its only identity).
    #[returns(ref)]
    pub(super) name: ModuleName,
}
/// Whether a module is a single file or a directory-backed package.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, get_size2::GetSize)]
pub enum ModuleKind {
    /// A single-file module (e.g. `foo.py` or `foo.pyi`)
    Module,

    /// A python package (`foo/__init__.py` or `foo/__init__.pyi`)
    Package,
}
impl ModuleKind {
    /// `true` for directory-backed packages.
    pub const fn is_package(self) -> bool {
        match self {
            ModuleKind::Package => true,
            ModuleKind::Module => false,
        }
    }

    /// `true` for single-file modules.
    pub const fn is_module(self) -> bool {
        match self {
            ModuleKind::Module => true,
            ModuleKind::Package => false,
        }
    }
}
/// Enumeration of various core stdlib modules in which important types are located
///
/// NOTE: the string form of each variant (snake_case, or the explicit
/// `#[strum(serialize = ...)]` override) must agree with
/// `KnownModule::as_str`; the round-trip test below enforces this.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum_macros::EnumString, get_size2::GetSize)]
#[cfg_attr(test, derive(strum_macros::EnumIter))]
#[strum(serialize_all = "snake_case")]
pub enum KnownModule {
    Builtins,
    Enum,
    Types,
    #[strum(serialize = "_typeshed")]
    Typeshed,
    TypingExtensions,
    Typing,
    Sys,
    Os,
    Tempfile,
    Pathlib,
    Abc,
    Contextlib,
    Dataclasses,
    Collections,
    Inspect,
    #[strum(serialize = "string.templatelib")]
    Templatelib,
    #[strum(serialize = "_typeshed._type_checker_internals")]
    TypeCheckerInternals,
    TyExtensions,
    #[strum(serialize = "importlib")]
    ImportLib,
    #[strum(serialize = "unittest.mock")]
    UnittestMock,
    Uuid,
    Warnings,
}
impl KnownModule {
    /// The module's import name. Must stay in sync with the strum
    /// `EnumString` serializations on the enum (checked by
    /// `known_module_roundtrip_from_str`).
    pub const fn as_str(self) -> &'static str {
        match self {
            Self::Builtins => "builtins",
            Self::Enum => "enum",
            Self::Types => "types",
            Self::Typing => "typing",
            Self::Typeshed => "_typeshed",
            Self::TypingExtensions => "typing_extensions",
            Self::Sys => "sys",
            Self::Os => "os",
            Self::Tempfile => "tempfile",
            Self::Pathlib => "pathlib",
            Self::Abc => "abc",
            Self::Contextlib => "contextlib",
            Self::Dataclasses => "dataclasses",
            Self::Collections => "collections",
            Self::Inspect => "inspect",
            Self::TypeCheckerInternals => "_typeshed._type_checker_internals",
            Self::TyExtensions => "ty_extensions",
            Self::ImportLib => "importlib",
            Self::Warnings => "warnings",
            Self::UnittestMock => "unittest.mock",
            Self::Uuid => "uuid",
            Self::Templatelib => "string.templatelib",
        }
    }

    /// The module's name as a validated [`ModuleName`].
    ///
    /// Panics only if `as_str` returns an invalid name, which would be a bug.
    pub fn name(self) -> ModuleName {
        ModuleName::new_static(self.as_str())
            .unwrap_or_else(|| panic!("{self} should be a valid module name!"))
    }

    /// Classifies `name` as a known module — but only when it was resolved
    /// from a standard-library search path.
    fn try_from_search_path_and_name(search_path: &SearchPath, name: &ModuleName) -> Option<Self> {
        if search_path.is_standard_library() {
            Self::from_str(name.as_str()).ok()
        } else {
            None
        }
    }

    pub const fn is_builtins(self) -> bool {
        matches!(self, Self::Builtins)
    }

    pub const fn is_typing(self) -> bool {
        matches!(self, Self::Typing)
    }

    pub const fn is_ty_extensions(self) -> bool {
        matches!(self, Self::TyExtensions)
    }

    pub const fn is_inspect(self) -> bool {
        matches!(self, Self::Inspect)
    }

    pub const fn is_importlib(self) -> bool {
        matches!(self, Self::ImportLib)
    }
}
// Displays the module's import name (same as `as_str`).
impl std::fmt::Display for KnownModule {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.as_str())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use strum::IntoEnumIterator;

    /// Every `KnownModule` must round-trip through its string form:
    /// `as_str` and the strum-derived `EnumString` impl have to agree.
    #[test]
    fn known_module_roundtrip_from_str() {
        let stdlib_search_path = SearchPath::vendored_stdlib();
        for module in KnownModule::iter() {
            let module_name = module.name();
            assert_eq!(
                KnownModule::try_from_search_path_and_name(&stdlib_search_path, &module_name),
                Some(module),
                "The strum `EnumString` implementation appears to be incorrect for `{module_name}`"
            );
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_module_resolver/src/list.rs | crates/ty_module_resolver/src/list.rs | use std::collections::btree_map::{BTreeMap, Entry};
use ruff_python_ast::PythonVersion;
use crate::db::Db;
use crate::module::{Module, ModuleKind};
use crate::module_name::ModuleName;
use crate::path::{ModulePath, SearchPath, SystemOrVendoredPathRef};
use crate::resolve::{ModuleResolveMode, ResolverContext, resolve_file_module, search_paths};
/// List all available modules, including all sub-modules, sorted in lexicographic order.
pub fn all_modules(db: &dyn Db) -> Vec<Module<'_>> {
    let mut modules = list_modules(db);
    // Worklist expansion: every module appended here is itself visited
    // later (via the advancing index), so arbitrarily deep packages are
    // covered without a separate stack.
    let mut next = 0;
    while next < modules.len() {
        let module = modules[next];
        modules.extend_from_slice(module.all_submodules(db));
        next += 1;
    }
    modules.sort_by_key(|module| module.name(db));
    modules
}
/// List all available top-level modules.
///
/// Iterates the search paths in precedence order and deduplicates
/// modules by name.
#[salsa::tracked]
pub fn list_modules(db: &dyn Db) -> Vec<Module<'_>> {
    let mut modules = BTreeMap::new();
    for search_path in search_paths(db, ModuleResolveMode::StubsAllowed) {
        for module in list_modules_in(db, SearchPathIngredient::new(db, search_path.clone())) {
            match modules.entry(module.name(db)) {
                Entry::Vacant(entry) => {
                    entry.insert(module);
                }
                Entry::Occupied(mut entry) => {
                    // The only case where a module can override
                    // a module with the same name in a higher
                    // precedent search path is if the higher
                    // precedent search path contained a namespace
                    // package and the lower precedent search path
                    // contained a "regular" module.
                    //
                    // (`search_path(db)` is `None` exactly for
                    // namespace packages.)
                    if let (None, Some(_)) = (entry.get().search_path(db), module.search_path(db)) {
                        entry.insert(module);
                    }
                }
            }
        }
    }
    modules.into_values().collect()
}
// Wrapper ingredient so that `list_modules_in` can be a salsa-tracked
// query keyed by a single `SearchPath`.
#[salsa::tracked(debug, heap_size=ruff_memory_usage::heap_size)]
struct SearchPathIngredient<'db> {
    #[returns(ref)]
    path: SearchPath,
}
/// List all available top-level modules in the given `SearchPath`.
///
/// I/O errors while listing are treated as "no modules" (or the entry is
/// skipped), rather than propagated.
#[salsa::tracked]
fn list_modules_in<'db>(
    db: &'db dyn Db,
    search_path: SearchPathIngredient<'db>,
) -> Vec<Module<'db>> {
    tracing::debug!("Listing modules in search path '{}'", search_path.path(db));
    let mut lister = Lister::new(db, search_path.path(db));
    match search_path.path(db).as_path() {
        SystemOrVendoredPathRef::System(system_search_path) => {
            // Read the revision on the corresponding file root to
            // register an explicit dependency on this directory. When
            // the revision gets bumped, the cache that Salsa creates
            // for this routine will be invalidated.
            let root = db.files().expect_root(db, system_search_path);
            let _ = root.revision(db);
            let Ok(it) = db.system().read_directory(system_search_path) else {
                return vec![];
            };
            for result in it {
                let Ok(entry) = result else { continue };
                lister.add_path(&entry.path().into(), entry.file_type().into());
            }
        }
        SystemOrVendoredPathRef::Vendored(vendored_search_path) => {
            // The vendored file system is read infallibly.
            for entry in db.vendored().read_directory(vendored_search_path) {
                lister.add_path(&entry.path().into(), entry.file_type().into());
            }
        }
    }
    lister.into_modules()
}
/// An implementation helper for "list all modules."
///
/// This is responsible for accumulating modules indexed by
/// module name. It also handles precedence by implementing the
/// rules that determine which module gets priority when there is
/// otherwise ambiguity (e.g., `foo.py` versus `foo/__init__.py`
/// in the same directory).
struct Lister<'db> {
    db: &'db dyn Db,
    /// The single search path all candidate paths must come from.
    search_path: &'db SearchPath,
    /// Modules accumulated so far, keyed (and therefore sorted) by name.
    modules: BTreeMap<&'db ModuleName, Module<'db>>,
}
impl<'db> Lister<'db> {
/// Create new state that can accumulate modules from a list
/// of file paths.
fn new(db: &'db dyn Db, search_path: &'db SearchPath) -> Lister<'db> {
Lister {
db,
search_path,
modules: BTreeMap::new(),
}
}
/// Returns the modules collected, sorted by module name.
/// (The `BTreeMap` keeps them ordered by name already.)
fn into_modules(self) -> Vec<Module<'db>> {
    self.modules.into_values().collect()
}
/// Add the given `path` as a possible module to this lister. The
/// `file_type` should be the type of `path` (file, directory or
/// symlink).
///
/// This may decide that the given path does not correspond to
/// a valid Python module. In which case, it is dropped and this
/// is a no-op.
///
/// Callers must ensure that the path given came from the same
/// `SearchPath` used to create this `Lister`.
fn add_path(&mut self, path: &SystemOrVendoredPathRef<'_>, file_type: FileType) {
let mut has_py_extension = false;
// We must have no extension, a Python source file extension (`.py`)
// or a Python stub file extension (`.pyi`).
if let Some(ext) = path.extension() {
has_py_extension = is_python_extension(ext);
if !has_py_extension {
return;
}
}
let Some(name) = path.file_name() else { return };
let mut module_path = self.search_path.to_module_path();
module_path.push(name);
let Some(module_name) = module_path.to_module_name() else {
return;
};
// Some modules cannot shadow a subset of special
// modules from the standard library.
if !self.search_path.is_standard_library() && self.is_non_shadowable(&module_name) {
return;
}
if file_type.is_possibly_directory() {
if module_path.is_regular_package(&self.context()) {
module_path.push("__init__");
if let Some(file) = resolve_file_module(&module_path, &self.context()) {
self.add_module(
&module_path,
Module::file_module(
self.db,
module_name,
ModuleKind::Package,
self.search_path.clone(),
file,
),
);
return;
}
module_path.pop();
}
// Otherwise, we kind of have to assume that we have a
// namespace package, which can be any directory that
// *doesn't* contain an `__init__.{py,pyi}`. We do need to
// know if we have a real directory or not. If we have a
// symlink, then this requires hitting the file system.
//
// Note though that if we find a "regular" module in a
// lower priority search path, that will be allowed to
// overwrite this namespace package.
//
// We only do this when in a standard library search
// path, which matches how the "resolve this module"
// implementation works. In particular, typeshed doesn't
// use any namespace packages at time of writing
// (2025-08-08), so if we're in a standard library search
// path, we "know" this can't actually be a package.
//
// NOTE: Note that the
// `module_path.is_regular_package()` check above takes
// `VERSIONS` into consideration. Which means it can return
// `false` even when, say, `package/__init__.py` exists. In
// that case, outside of a standard library search path,
// we'd incorrectly report it here as a namespace package.
// HOWEVER, `VERSIONS` is only applicable for typeshed, so
// this ends up working okay. But if typeshed ever uses
// namespace packages, then this will need to be accounted
// for.
let is_dir =
file_type.is_definitely_directory() || module_path.is_directory(&self.context());
if is_dir {
if !self.search_path.is_standard_library() {
self.add_module(
&module_path,
Module::namespace_package(self.db, module_name),
);
}
return;
}
// At this point, we have a symlink that we know is not a
// directory, so press on as if it were a regular file...
}
// At this point, we're looking for a file module.
// For a file module, we require a `.py` or `.pyi`
// extension.
if !has_py_extension {
return;
}
// We also require stub packages to be packages, not
// single-file modules.
if module_path.is_stub_package() {
return;
}
let Some(file) = module_path.to_file(&self.context()) else {
return;
};
self.add_module(
&module_path,
Module::file_module(
self.db,
module_name,
ModuleKind::Module,
self.search_path.clone(),
file,
),
);
}
/// Adds the given module to the collection.
///
/// If the module had already been added and shouldn't override any
/// existing entry, then this is a no-op. That is, this assumes that the
/// caller looks for modules in search path priority order.
fn add_module(&mut self, path: &ModulePath, module: Module<'db>) {
let mut entry = match self.modules.entry(module.name(self.db)) {
Entry::Vacant(entry) => {
entry.insert(module);
return;
}
Entry::Occupied(entry) => entry,
};
let existing = entry.get();
match (existing.search_path(self.db), module.search_path(self.db)) {
// When we had a namespace package and now try to
// insert a non-namespace package, the latter always
// takes precedent, even if it's in a lower priority
// search path.
(None, Some(_)) => {
entry.insert(module);
}
(Some(_), Some(_)) => {
// Merging across search paths is only necessary for
// namespace packages. For all other modules, entries
// from earlier search paths take precedence. Thus, all
// of the cases below require that we're in the same
// directory. ... Which is true here, because a `Lister`
// only works for one specific search path.
// When we have a `foo/__init__.py` and a `foo.py` in
// the same directory, the former takes precedent.
// (This case can only occur when both have a search
// path.)
if existing.kind(self.db) == ModuleKind::Module
&& module.kind(self.db) == ModuleKind::Package
{
entry.insert(module);
return;
}
// Or if we have two file modules and the new one
// is a stub, then the stub takes priority.
if existing.kind(self.db) == ModuleKind::Module
&& module.kind(self.db) == ModuleKind::Module
&& path.is_stub_file()
{
entry.insert(module);
return;
}
// Or... if we have a stub package, the stub package
// always gets priority.
if path.is_stub_package() {
entry.insert(module);
}
}
_ => {}
}
}
/// Returns true if the given module name cannot be shadowable.
fn is_non_shadowable(&self, name: &ModuleName) -> bool {
ModuleResolveMode::StubsAllowed
.is_non_shadowable(self.python_version().minor, name.as_str())
}
/// Returns the Python version we want to perform module resolution
/// with.
fn python_version(&self) -> PythonVersion {
self.db.python_version()
}
/// Constructs a resolver context for use with some APIs that require it.
fn context(&self) -> ResolverContext<'db> {
ResolverContext {
db: self.db,
python_version: self.python_version(),
// We don't currently support listing modules
// in a "no stubs allowed" mode.
mode: ModuleResolveMode::StubsAllowed,
}
}
}
/// The type of a file.
#[derive(Clone, Copy, Debug)]
enum FileType {
File,
Directory,
Symlink,
}
impl FileType {
fn is_possibly_directory(self) -> bool {
matches!(self, FileType::Directory | FileType::Symlink)
}
fn is_definitely_directory(self) -> bool {
matches!(self, FileType::Directory)
}
}
impl From<ruff_db::vendored::FileType> for FileType {
fn from(ft: ruff_db::vendored::FileType) -> FileType {
match ft {
ruff_db::vendored::FileType::File => FileType::File,
ruff_db::vendored::FileType::Directory => FileType::Directory,
}
}
}
impl From<ruff_db::system::FileType> for FileType {
fn from(ft: ruff_db::system::FileType) -> FileType {
match ft {
ruff_db::system::FileType::File => FileType::File,
ruff_db::system::FileType::Directory => FileType::Directory,
ruff_db::system::FileType::Symlink => FileType::Symlink,
}
}
}
/// Returns true if and only if the given file extension corresponds
/// to a Python source or stub file.
fn is_python_extension(ext: &str) -> bool {
matches!(ext, "py" | "pyi")
}
#[cfg(test)]
mod tests {
#![expect(
clippy::disallowed_methods,
reason = "These are tests, so it's fine to do I/O by-passing System."
)]
use camino::{Utf8Component, Utf8Path};
use ruff_db::Db as _;
use ruff_db::files::{File, FilePath, FileRootKind};
use ruff_db::system::{DbWithTestSystem, DbWithWritableSystem, SystemPath, SystemPathBuf};
use ruff_db::testing::assert_function_query_was_not_run;
use ruff_python_ast::PythonVersion;
use crate::db::{Db, tests::TestDb};
use crate::module::Module;
use crate::resolve::{
ModuleResolveMode, ModuleResolveModeIngredient, dynamic_resolution_paths,
};
use crate::settings::SearchPathSettings;
use crate::testing::{FileSpec, MockedTypeshed, TestCase, TestCaseBuilder};
use super::list_modules;
struct ModuleDebugSnapshot<'db> {
db: &'db dyn Db,
module: Module<'db>,
}
impl std::fmt::Debug for ModuleDebugSnapshot<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self.module {
Module::Namespace(pkg) => {
write!(f, "Module::Namespace({name:?})", name = pkg.name(self.db))
}
Module::File(module) => {
// For snapshots, just normalize all paths to using
// Unix slashes for simplicity.
let path_components = match module.file(self.db).path(self.db) {
FilePath::System(path) => path.as_path().components(),
FilePath::Vendored(path) => path.as_path().components(),
FilePath::SystemVirtual(path) => Utf8Path::new(path.as_str()).components(),
};
let nice_path = path_components
// Avoid including a root component, since that
// results in a platform dependent separator.
// Convert to an empty string so that we get a
// path beginning with `/` regardless of platform.
.map(|component| {
if let Utf8Component::RootDir = component {
Utf8Component::Normal("")
} else {
component
}
})
.map(|component| component.as_str())
.collect::<Vec<&str>>()
.join("/");
write!(
f,
"Module::File({name:?}, {search_path:?}, {path:?}, {kind:?}, {known:?})",
name = module.name(self.db).as_str(),
search_path = module.search_path(self.db).debug_kind(),
path = nice_path,
kind = module.kind(self.db),
known = module.known(self.db),
)
}
}
}
}
fn sorted_list(db: &dyn Db) -> Vec<Module<'_>> {
let mut modules = list_modules(db);
modules.sort_by(|m1, m2| m1.name(db).cmp(m2.name(db)));
modules
}
fn list_snapshot(db: &dyn Db) -> Vec<ModuleDebugSnapshot<'_>> {
list_snapshot_filter(db, |_| true)
}
fn list_snapshot_filter<'db>(
db: &'db dyn Db,
predicate: impl Fn(&Module<'db>) -> bool,
) -> Vec<ModuleDebugSnapshot<'db>> {
sorted_list(db)
.into_iter()
.filter(predicate)
.map(|module| ModuleDebugSnapshot { db, module })
.collect()
}
#[test]
fn first_party_module() {
let TestCase { db, .. } = TestCaseBuilder::new()
.with_src_files(&[("foo.py", "")])
.build();
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("foo", "first-party", "/src/foo.py", Module, None),
]
"#,
);
}
#[test]
fn stubs_over_module_source() {
let TestCase { db, .. } = TestCaseBuilder::new()
.with_src_files(&[("foo.py", ""), ("foo.pyi", "")])
.build();
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("foo", "first-party", "/src/foo.pyi", Module, None),
]
"#,
);
}
#[test]
fn stubs_over_package_source() {
let TestCase { db, .. } = TestCaseBuilder::new()
.with_src_files(&[("foo/__init__.py", ""), ("foo.pyi", "")])
.build();
// NOTE: This matches the behavior of the "resolve this module"
// implementation, even though it seems inconsistent with the
// `stubs_over_module_source` test.
//
// TODO: Check what other type checkers do. It seems like this (and
// "resolve this module") should prefer the stub file, although the
// typing spec isn't perfectly clear on this point:
// https://typing.python.org/en/latest/spec/distributing.html#stub-files
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("foo", "first-party", "/src/foo/__init__.py", Package, None),
]
"#,
);
}
/// Tests that if we have a `foo.py` and a `foo/__init__.py`, then the
/// latter takes precedence.
///
/// This is somewhat difficult to test using the in-memory file system,
/// since it always returns directory entries in lexicographic order. This
/// in turn implies that `foo` will always appear before `foo.py`. But to
/// truly test this, we would like to also be correct in the case where
/// `foo.py` appears before `foo` (which can certainly happen in the real
/// world).
#[test]
fn package_over_module1() {
let TestCase { db, .. } = TestCaseBuilder::new()
.with_src_files(&[("foo.py", ""), ("foo/__init__.py", "")])
.build();
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("foo", "first-party", "/src/foo/__init__.py", Package, None),
]
"#,
);
}
/// Similar to `package_over_module1`, but flips the order of files.
///
/// (At time of writing, 2025-08-07, this doesn't actually make a
/// difference since the in-memory file system sorts directory entries.)
#[test]
fn package_over_module2() {
let TestCase { db, .. } = TestCaseBuilder::new()
.with_src_files(&[("foo/__init__.py", ""), ("foo.py", "")])
.build();
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("foo", "first-party", "/src/foo/__init__.py", Package, None),
]
"#,
);
}
#[test]
fn builtins_vendored() {
let TestCase { db, .. } = TestCaseBuilder::new()
.with_vendored_typeshed()
.with_src_files(&[("builtins.py", "FOOOO = 42")])
.build();
insta::assert_debug_snapshot!(
list_snapshot_filter(&db, |m| m.name(&db).as_str() == "builtins"),
@r#"
[
Module::File("builtins", "std-vendored", "stdlib/builtins.pyi", Module, Some(Builtins)),
]
"#,
);
}
#[test]
fn builtins_custom() {
const TYPESHED: MockedTypeshed = MockedTypeshed {
stdlib_files: &[("builtins.pyi", "def min(a, b): ...")],
versions: "builtins: 3.8-",
};
const SRC: &[FileSpec] = &[("builtins.py", "FOOOO = 42")];
let TestCase { db, .. } = TestCaseBuilder::new()
.with_src_files(SRC)
.with_mocked_typeshed(TYPESHED)
.with_python_version(PythonVersion::PY38)
.build();
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("builtins", "std-custom", "/typeshed/stdlib/builtins.pyi", Module, Some(Builtins)),
]
"#,
);
}
#[test]
fn stdlib() {
const TYPESHED: MockedTypeshed = MockedTypeshed {
stdlib_files: &[("functools.pyi", "def update_wrapper(): ...")],
versions: "functools: 3.8-",
};
let TestCase { db, .. } = TestCaseBuilder::new()
.with_mocked_typeshed(TYPESHED)
.with_python_version(PythonVersion::PY38)
.build();
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("functools", "std-custom", "/typeshed/stdlib/functools.pyi", Module, None),
]
"#,
);
}
#[test]
fn stdlib_resolution_respects_versions_file_py38_existing_modules() {
const VERSIONS: &str = "\
asyncio: 3.8- # 'Regular' package on py38+
asyncio.tasks: 3.9-3.11 # Submodule on py39+ only
functools: 3.8- # Top-level single-file module
random: 3.8- # 'Regular' file module on py38+
xml: 3.8-3.8 # Namespace package on py38 only
";
const STDLIB: &[FileSpec] = &[
("asyncio/__init__.pyi", ""),
("asyncio/tasks.pyi", ""),
("functools.pyi", ""),
("random.pyi", ""),
("xml/etree.pyi", ""),
];
const TYPESHED: MockedTypeshed = MockedTypeshed {
stdlib_files: STDLIB,
versions: VERSIONS,
};
let TestCase { db, .. } = TestCaseBuilder::new()
.with_mocked_typeshed(TYPESHED)
.with_python_version(PythonVersion::PY38)
.build();
// NOTE: This currently doesn't return `xml` since
// the implementation assumes that typeshed doesn't
// have namespace packages. But our test setup (copied
// from the "resolve this module" tests) does.
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("asyncio", "std-custom", "/typeshed/stdlib/asyncio/__init__.pyi", Package, None),
Module::File("functools", "std-custom", "/typeshed/stdlib/functools.pyi", Module, None),
Module::File("random", "std-custom", "/typeshed/stdlib/random.pyi", Module, None),
]
"#,
);
}
#[test]
fn stdlib_resolution_respects_versions_file_py38_nonexisting_modules() {
const VERSIONS: &str = "\
asyncio: 3.8- # 'Regular' package on py38+
asyncio.tasks: 3.9-3.11 # Submodule on py39+ only
collections: 3.9- # 'Regular' package on py39+
importlib: 3.9- # Namespace package on py39+
random: 3.9- # 'Regular' file module on py39+
xml: 3.8-3.8 # Namespace package on 3.8 only
foo: 3.9-
";
const STDLIB: &[FileSpec] = &[
("collections/__init__.pyi", ""),
("asyncio/__init__.pyi", ""),
("asyncio/tasks.pyi", ""),
("importlib/abc.pyi", ""),
("random.pyi", ""),
("xml/etree.pyi", ""),
];
const TYPESHED: MockedTypeshed = MockedTypeshed {
stdlib_files: STDLIB,
versions: VERSIONS,
};
let TestCase { db, .. } = TestCaseBuilder::new()
.with_mocked_typeshed(TYPESHED)
.with_python_version(PythonVersion::PY38)
.build();
// NOTE: This currently doesn't return any of the namespace
// packages defined above in our mock typeshed (that is,
// `importlib` and `xml`) because our implementation assumes
// namespace packages cannot occur in typeshed.
//
// Relatedly, `collections` and `random` should not appear
// because they are limited to 3.9+.
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("asyncio", "std-custom", "/typeshed/stdlib/asyncio/__init__.pyi", Package, None),
]
"#,
);
}
#[test]
fn stdlib_resolution_respects_versions_file_py39_existing_modules() {
const VERSIONS: &str = "\
asyncio: 3.8- # 'Regular' package on py38+
asyncio.tasks: 3.9-3.11 # Submodule on py39+ only
collections: 3.9- # 'Regular' package on py39+
functools: 3.8- # Top-level single-file module
importlib: 3.9- # Namespace package on py39+
";
const STDLIB: &[FileSpec] = &[
("asyncio/__init__.pyi", ""),
("asyncio/tasks.pyi", ""),
("collections/__init__.pyi", ""),
("functools.pyi", ""),
("importlib/abc.pyi", ""),
];
const TYPESHED: MockedTypeshed = MockedTypeshed {
stdlib_files: STDLIB,
versions: VERSIONS,
};
let TestCase { db, .. } = TestCaseBuilder::new()
.with_mocked_typeshed(TYPESHED)
.with_python_version(PythonVersion::PY39)
.build();
// NOTE: This currently doesn't return any of the namespace
// packages defined above in our mock typeshed (that is,
// `importlib`) because our implementation assumes namespace
// packages cannot occur in typeshed.
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("asyncio", "std-custom", "/typeshed/stdlib/asyncio/__init__.pyi", Package, None),
Module::File("collections", "std-custom", "/typeshed/stdlib/collections/__init__.pyi", Package, Some(Collections)),
Module::File("functools", "std-custom", "/typeshed/stdlib/functools.pyi", Module, None),
]
"#,
);
}
#[test]
fn stdlib_resolution_respects_versions_file_py39_nonexisting_modules() {
const VERSIONS: &str = "\
importlib: 3.9- # 'Regular' package on py39+
xml: 3.8-3.8 # 'Regular' package on 3.8 only
";
// Since our implementation assumes typeshed doesn't contain
// any namespace packages (as an optimization), this test case
// is modified from the corresponding test in the "resolve a
// file" implementation so that both namespace packages are
// just regular packages. ---AG
const STDLIB: &[FileSpec] = &[
("importlib/__init__.pyi", ""),
("importlib/abc.pyi", ""),
("xml/__init__.pyi", ""),
("xml/etree.pyi", ""),
];
const TYPESHED: MockedTypeshed = MockedTypeshed {
stdlib_files: STDLIB,
versions: VERSIONS,
};
let TestCase { db, .. } = TestCaseBuilder::new()
.with_mocked_typeshed(TYPESHED)
.with_python_version(PythonVersion::PY39)
.build();
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("importlib", "std-custom", "/typeshed/stdlib/importlib/__init__.pyi", Package, Some(ImportLib)),
]
"#,
);
}
#[test]
fn first_party_precedence_over_stdlib() {
const SRC: &[FileSpec] = &[("functools.py", "def update_wrapper(): ...")];
const TYPESHED: MockedTypeshed = MockedTypeshed {
stdlib_files: &[("functools.pyi", "def update_wrapper(): ...")],
versions: "functools: 3.8-",
};
let TestCase { db, .. } = TestCaseBuilder::new()
.with_src_files(SRC)
.with_mocked_typeshed(TYPESHED)
.with_python_version(PythonVersion::PY38)
.build();
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("functools", "first-party", "/src/functools.py", Module, None),
]
"#,
);
}
#[test]
fn stdlib_uses_vendored_typeshed_when_no_custom_typeshed_supplied() {
let TestCase { db, .. } = TestCaseBuilder::new().with_vendored_typeshed().build();
insta::assert_debug_snapshot!(
list_snapshot_filter(&db, |m| m.name(&db).as_str().contains("pydoc_data")),
@r#"
[
Module::File("pydoc_data", "std-vendored", "stdlib/pydoc_data/__init__.pyi", Package, None),
]
"#,
);
}
#[test]
fn resolve_package() {
let TestCase { db, .. } = TestCaseBuilder::new()
.with_src_files(&[("foo/__init__.py", "print('Hello, world!'")])
.build();
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("foo", "first-party", "/src/foo/__init__.py", Package, None),
]
"#,
);
}
#[test]
fn package_priority_over_module() {
const SRC: &[FileSpec] = &[
("foo/__init__.py", "print('Hello, world!')"),
("foo.py", "print('Hello, world!')"),
];
let TestCase { db, .. } = TestCaseBuilder::new().with_src_files(SRC).build();
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("foo", "first-party", "/src/foo/__init__.py", Package, None),
]
"#,
);
}
#[test]
fn typing_stub_over_module() {
const SRC: &[FileSpec] = &[("foo.py", "print('Hello, world!')"), ("foo.pyi", "x: int")];
let TestCase { db, .. } = TestCaseBuilder::new().with_src_files(SRC).build();
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("foo", "first-party", "/src/foo.pyi", Module, None),
]
"#,
);
}
#[test]
fn sub_packages() {
const SRC: &[FileSpec] = &[
("foo/__init__.py", ""),
("foo/bar/__init__.py", ""),
("foo/bar/baz.py", "print('Hello, world!)'"),
];
let TestCase { db, .. } = TestCaseBuilder::new().with_src_files(SRC).build();
insta::assert_debug_snapshot!(
list_snapshot(&db),
@r#"
[
Module::File("foo", "first-party", "/src/foo/__init__.py", Package, None),
]
"#,
);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_module_resolver/src/testing.rs | crates/ty_module_resolver/src/testing.rs | use ruff_db::Db as _;
use ruff_db::files::FileRootKind;
use ruff_db::system::{
DbWithTestSystem as _, DbWithWritableSystem as _, SystemPath, SystemPathBuf,
};
use ruff_db::vendored::VendoredPathBuf;
use ruff_python_ast::PythonVersion;
use crate::db::tests::TestDb;
use crate::settings::SearchPathSettings;
/// A test case for the module resolver.
///
/// You generally shouldn't construct instances of this struct directly;
/// instead, use the [`TestCaseBuilder`].
pub(crate) struct TestCase<T> {
pub(crate) db: TestDb,
pub(crate) src: SystemPathBuf,
pub(crate) stdlib: T,
// Most test cases only ever need a single `site-packages` directory,
// so this is a single directory instead of a `Vec` of directories,
// like it is in `SearchPaths`.
pub(crate) site_packages: SystemPathBuf,
pub(crate) python_version: PythonVersion,
}
/// A `(file_name, file_contents)` tuple
pub(crate) type FileSpec = (&'static str, &'static str);
/// Specification for a typeshed mock to be created as part of a test
#[derive(Debug, Clone, Copy, Default)]
pub(crate) struct MockedTypeshed {
/// The stdlib files to be created in the typeshed mock
pub(crate) stdlib_files: &'static [FileSpec],
/// The contents of the `stdlib/VERSIONS` file
/// to be created in the typeshed mock
pub(crate) versions: &'static str,
}
#[derive(Debug)]
pub(crate) struct VendoredTypeshed;
#[derive(Debug)]
pub(crate) struct UnspecifiedTypeshed;
/// A builder for a module-resolver test case.
///
/// The builder takes care of creating a [`TestDb`]
/// instance, applying the module resolver settings,
/// and creating mock directories for the stdlib, `site-packages`,
/// first-party code, etc.
///
/// For simple tests that do not involve typeshed,
/// test cases can be created as follows:
///
/// ```rs
/// let test_case = TestCaseBuilder::new()
/// .with_src_files(...)
/// .build();
///
/// let test_case2 = TestCaseBuilder::new()
/// .with_site_packages_files(...)
/// .build();
/// ```
///
/// Any tests can specify the target Python version that should be used
/// in the module resolver settings:
///
/// ```rs
/// let test_case = TestCaseBuilder::new()
/// .with_src_files(...)
/// .with_python_version(...)
/// .build();
/// ```
///
/// For tests checking that standard-library module resolution is working
/// correctly, you should usually create a [`MockedTypeshed`] instance
/// and pass it to the [`TestCaseBuilder::with_mocked_typeshed`] method.
/// If you need to check something that involves the vendored typeshed stubs
/// we include as part of the binary, you can instead use the
/// [`TestCaseBuilder::with_vendored_typeshed`] method.
/// For either of these, you should almost always try to be explicit
/// about the Python version you want to be specified in the module-resolver
/// settings for the test:
///
/// ```rs
/// const TYPESHED = MockedTypeshed { ... };
///
/// let test_case = resolver_test_case()
/// .with_mocked_typeshed(TYPESHED)
/// .with_python_version(...)
/// .build();
///
/// let test_case2 = resolver_test_case()
/// .with_vendored_typeshed()
/// .with_python_version(...)
/// .build();
/// ```
///
/// If you have not called one of those options, the `stdlib` field
/// on the [`TestCase`] instance created from `.build()` will be set
/// to `()`.
pub(crate) struct TestCaseBuilder<T> {
typeshed_option: T,
python_version: PythonVersion,
first_party_files: Vec<FileSpec>,
site_packages_files: Vec<FileSpec>,
// Additional file roots (beyond site_packages, src and stdlib)
// that should be registered with the `Db` abstraction.
//
// This is necessary to make testing "list modules" work. Namely,
// "list modules" relies on caching via a file root's revision,
// and if file roots aren't registered, then the implementation
// can't access the root's revision.
roots: Vec<SystemPathBuf>,
}
impl<T> TestCaseBuilder<T> {
/// Specify files to be created in the `src` mock directory
pub(crate) fn with_src_files(mut self, files: &[FileSpec]) -> Self {
self.first_party_files.extend(files.iter().copied());
self
}
/// Specify files to be created in the `site-packages` mock directory
pub(crate) fn with_site_packages_files(mut self, files: &[FileSpec]) -> Self {
self.site_packages_files.extend(files.iter().copied());
self
}
/// Specify the Python version the module resolver should assume
pub(crate) fn with_python_version(mut self, python_version: PythonVersion) -> Self {
self.python_version = python_version;
self
}
/// Add a "library" root to this test case.
pub(crate) fn with_library_root(mut self, root: impl AsRef<SystemPath>) -> Self {
self.roots.push(root.as_ref().to_path_buf());
self
}
fn write_mock_directory(
db: &mut TestDb,
location: impl AsRef<SystemPath>,
files: impl IntoIterator<Item = FileSpec>,
) -> SystemPathBuf {
let root = location.as_ref().to_path_buf();
// Make sure to create the directory even if the list of files is empty:
db.memory_file_system().create_directory_all(&root).unwrap();
db.write_files(
files
.into_iter()
.map(|(relative_path, contents)| (root.join(relative_path), contents)),
)
.unwrap();
root
}
}
impl TestCaseBuilder<UnspecifiedTypeshed> {
pub(crate) fn new() -> TestCaseBuilder<UnspecifiedTypeshed> {
Self {
typeshed_option: UnspecifiedTypeshed,
python_version: PythonVersion::default(),
first_party_files: vec![],
site_packages_files: vec![],
roots: vec![],
}
}
/// Use the vendored stdlib stubs included in the Ruff binary for this test case
pub(crate) fn with_vendored_typeshed(self) -> TestCaseBuilder<VendoredTypeshed> {
let TestCaseBuilder {
typeshed_option: _,
python_version,
first_party_files,
site_packages_files,
roots,
} = self;
TestCaseBuilder {
typeshed_option: VendoredTypeshed,
python_version,
first_party_files,
site_packages_files,
roots,
}
}
/// Use a mock typeshed directory for this test case
pub(crate) fn with_mocked_typeshed(
self,
typeshed: MockedTypeshed,
) -> TestCaseBuilder<MockedTypeshed> {
let TestCaseBuilder {
typeshed_option: _,
python_version,
first_party_files,
site_packages_files,
roots,
} = self;
TestCaseBuilder {
typeshed_option: typeshed,
python_version,
first_party_files,
site_packages_files,
roots,
}
}
pub(crate) fn build(self) -> TestCase<()> {
let TestCase {
db,
src,
stdlib: _,
site_packages,
python_version,
} = self.with_mocked_typeshed(MockedTypeshed::default()).build();
TestCase {
db,
src,
stdlib: (),
site_packages,
python_version,
}
}
}
impl TestCaseBuilder<MockedTypeshed> {
pub(crate) fn build(self) -> TestCase<SystemPathBuf> {
let TestCaseBuilder {
typeshed_option,
python_version,
first_party_files,
site_packages_files,
roots,
} = self;
let mut db = TestDb::new().with_python_version(python_version);
let site_packages =
Self::write_mock_directory(&mut db, "/site-packages", site_packages_files);
let src = Self::write_mock_directory(&mut db, "/src", first_party_files);
let typeshed = Self::build_typeshed_mock(&mut db, &typeshed_option);
let stdlib = typeshed.join("stdlib");
let search_paths = SearchPathSettings {
src_roots: vec![src.clone()],
custom_typeshed: Some(typeshed),
site_packages_paths: vec![site_packages.clone()],
..SearchPathSettings::empty()
}
.to_search_paths(db.system(), db.vendored())
.expect("Valid search path settings");
db = db.with_search_paths(search_paths);
// This root is needed for correct Salsa tracking.
// Namely, a `SearchPath` is treated as an input, and
// thus the revision number must be bumped accordingly
// when the directory tree changes. We rely on detecting
// this revision from the file root. If we don't add them
// here, they won't get added.
//
// Roots for other search paths are added as part of
// search path initialization in `SearchPaths::from_settings`,
// and any remaining are added below.
db.files()
.try_add_root(&db, SystemPath::new("/src"), FileRootKind::Project);
db.files()
.try_add_root(&db, &stdlib, FileRootKind::LibrarySearchPath);
for root in &roots {
db.files()
.try_add_root(&db, root, FileRootKind::LibrarySearchPath);
}
TestCase {
db,
src,
stdlib,
site_packages,
python_version,
}
}
fn build_typeshed_mock(db: &mut TestDb, typeshed_to_build: &MockedTypeshed) -> SystemPathBuf {
let typeshed = SystemPathBuf::from("/typeshed");
let MockedTypeshed {
stdlib_files,
versions,
} = typeshed_to_build;
Self::write_mock_directory(
db,
typeshed.join("stdlib"),
stdlib_files
.iter()
.copied()
.chain(std::iter::once(("VERSIONS", *versions))),
);
typeshed
}
}
impl TestCaseBuilder<VendoredTypeshed> {
pub(crate) fn build(self) -> TestCase<VendoredPathBuf> {
let TestCaseBuilder {
typeshed_option: VendoredTypeshed,
python_version,
first_party_files,
site_packages_files,
roots,
} = self;
let mut db = TestDb::new().with_python_version(python_version);
let site_packages =
Self::write_mock_directory(&mut db, "/site-packages", site_packages_files);
let src = Self::write_mock_directory(&mut db, "/src", first_party_files);
let search_paths = SearchPathSettings {
src_roots: vec![src.clone()],
site_packages_paths: vec![site_packages.clone()],
..SearchPathSettings::empty()
}
.to_search_paths(db.system(), db.vendored())
.expect("Valid search path settings");
db = db.with_search_paths(search_paths);
db.files()
.try_add_root(&db, SystemPath::new("/src"), FileRootKind::Project);
for root in &roots {
db.files()
.try_add_root(&db, root, FileRootKind::LibrarySearchPath);
}
TestCase {
db,
src,
stdlib: VendoredPathBuf::from("stdlib"),
site_packages,
python_version,
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_module_resolver/src/resolve.rs | crates/ty_module_resolver/src/resolve.rs | /*!
This module principally provides several routines for resolving a particular module
name to a `Module`:
* [`file_to_module`][]: resolves the module `.<self>` (often as the first step in resolving `.`)
* [`resolve_module`][]: resolves an absolute module name
You may notice that we actually provide `resolve_(real)_(shadowable)_module_(confident)`.
You almost certainly just want [`resolve_module`][]. The other variations represent
restrictions to answer specific kinds of questions, usually to empower IDE features.
* The `real` variation disallows all stub files, including the vendored typeshed.
This enables the goto-definition ("real") vs goto-declaration ("stub or real") distinction.
* The `confident` variation disallows "desperate resolution", which is a fallback
mode where we start trying to use ancestor directories of the importing file
as search-paths, but only if we failed to resolve it with the normal search-paths.
This is mostly just a convenience for cases where we don't want to try to define
the importing file (resolving a `KnownModule` and tests).
* The `shadowable` variation disables some guards that prevents third-party code
from shadowing any vendored non-stdlib `KnownModule`. In particular `typing_extensions`,
which we vendor and heavily assume the contents of (and so don't ever want to shadow).
This enables checking if the user *actually* has `typing_extensions` installed,
in which case it's ok to suggest it in features like auto-imports.
There is some awkwardness to the structure of the code to specifically enable caching
of queries, as module resolution happens a lot and involves a lot of disk access.
For implementors, see `import-resolution-diagram.svg` for a flow diagram that
specifies ty's implementation of Python's import resolution algorithm.
*/
use std::borrow::Cow;
use std::fmt;
use std::iter::FusedIterator;
use std::str::Split;
use compact_str::format_compact;
use rustc_hash::{FxBuildHasher, FxHashSet};
use ruff_db::files::{File, FilePath, FileRootKind};
use ruff_db::system::{DirectoryEntry, System, SystemPath, SystemPathBuf};
use ruff_db::vendored::VendoredFileSystem;
use ruff_python_ast::{
self as ast, PySourceType, PythonVersion,
visitor::{Visitor, walk_body},
};
use crate::db::Db;
use crate::module::{Module, ModuleKind};
use crate::module_name::ModuleName;
use crate::path::{ModulePath, SearchPath, SystemOrVendoredPathRef};
use crate::typeshed::{TypeshedVersions, vendored_typeshed_versions};
use crate::{MisconfigurationMode, SearchPathSettings, SearchPathSettingsError};
/// Resolves a module name to a module.
///
/// Falls back to "desperate" resolution (probing ancestor directories of
/// `importing_file`) when the normal search paths don't contain the module.
pub fn resolve_module<'db>(
    db: &'db dyn Db,
    importing_file: File,
    module_name: &ModuleName,
) -> Option<Module<'db>> {
    // Intern the name so it can be passed to the cached Salsa query.
    let name = ModuleNameIngredient::new(db, module_name, ModuleResolveMode::StubsAllowed);
    match resolve_module_query(db, name) {
        Some(module) => Some(module),
        None => desperately_resolve_module(db, importing_file, name),
    }
}
/// Resolves a module name to a module, without desperate resolution available.
///
/// This is appropriate for resolving a `KnownModule`, or cases where for whatever reason
/// we don't have a well-defined importing file.
pub fn resolve_module_confident<'db>(
    db: &'db dyn Db,
    module_name: &ModuleName,
) -> Option<Module<'db>> {
    resolve_module_query(
        db,
        ModuleNameIngredient::new(db, module_name, ModuleResolveMode::StubsAllowed),
    )
}
/// Resolves a module name to a module (stubs not allowed).
///
/// Like [`resolve_module`] but ignores stub files entirely, so the result is the
/// "real" implementation (goto-definition semantics). Also falls back to
/// desperate resolution using ancestors of `importing_file`.
pub fn resolve_real_module<'db>(
    db: &'db dyn Db,
    importing_file: File,
    module_name: &ModuleName,
) -> Option<Module<'db>> {
    let name = ModuleNameIngredient::new(db, module_name, ModuleResolveMode::StubsNotAllowed);
    match resolve_module_query(db, name) {
        Some(module) => Some(module),
        None => desperately_resolve_module(db, importing_file, name),
    }
}
/// Resolves a module name to a module, without desperate resolution available (stubs not allowed).
///
/// This is appropriate for resolving a `KnownModule`, or cases where for whatever reason
/// we don't have a well-defined importing file.
pub fn resolve_real_module_confident<'db>(
    db: &'db dyn Db,
    module_name: &ModuleName,
) -> Option<Module<'db>> {
    resolve_module_query(
        db,
        ModuleNameIngredient::new(db, module_name, ModuleResolveMode::StubsNotAllowed),
    )
}
/// Resolves a module name to a module (stubs not allowed, some shadowing is
/// allowed).
///
/// In particular, this allows `typing_extensions` to be shadowed by a
/// non-standard library module. This is useful in the context of the LSP
/// where we don't want to pretend as if these modules are always available at
/// runtime.
///
/// This should generally only be used within the context of the LSP. Using it
/// within ty proper risks being unable to resolve builtin modules since they
/// are involved in an import cycle with `builtins`.
pub fn resolve_real_shadowable_module<'db>(
    db: &'db dyn Db,
    importing_file: File,
    module_name: &ModuleName,
) -> Option<Module<'db>> {
    let name = ModuleNameIngredient::new(
        db,
        module_name,
        ModuleResolveMode::StubsNotAllowedSomeShadowingAllowed,
    );
    match resolve_module_query(db, name) {
        Some(module) => Some(module),
        None => desperately_resolve_module(db, importing_file, name),
    }
}
/// Which files should be visible when doing a module query.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, get_size2::GetSize)]
#[allow(clippy::enum_variant_names)]
pub enum ModuleResolveMode {
    /// Stubs are allowed to appear.
    ///
    /// This is the "normal" mode almost everything uses, as type checkers are in fact supposed
    /// to *prefer* stubs over the actual implementations.
    StubsAllowed,
    /// Stubs are not allowed to appear.
    ///
    /// This is the "goto definition" mode, where we need to ignore the typing spec and find actual
    /// implementations. When querying search paths this also notably replaces typeshed with
    /// the "real" stdlib.
    StubsNotAllowed,
    /// Like `StubsNotAllowed`, but permits some modules to be shadowed.
    ///
    /// In particular, this allows `typing_extensions` to be shadowed by a
    /// non-standard library module. This is useful in the context of the LSP
    /// where we don't want to pretend as if these modules are always available
    /// at runtime. See [`ModuleResolveMode::is_non_shadowable`].
    StubsNotAllowedSomeShadowingAllowed,
}
/// Interned wrapper around a [`ModuleResolveMode`].
///
/// Exists so the mode can be carried inside Salsa-tracked state (e.g.
/// `SearchPathIterator`), since Salsa works in terms of ingredients.
#[salsa::interned(heap_size=ruff_memory_usage::heap_size)]
#[derive(Debug)]
pub(crate) struct ModuleResolveModeIngredient<'db> {
    mode: ModuleResolveMode,
}
impl ModuleResolveMode {
    /// Whether stub files may satisfy a resolution performed in this mode.
    fn stubs_allowed(self) -> bool {
        matches!(self, Self::StubsAllowed)
    }

    /// Returns `true` if the module name refers to a standard library module
    /// which can't be shadowed by a first-party module.
    ///
    /// This includes "builtin" modules, which can never be shadowed at runtime
    /// either. Additionally, certain other modules that are involved in an
    /// import cycle with `builtins` (`types`, `typing_extensions`, etc.) are
    /// also considered non-shadowable, unless the module resolution mode
    /// specifically opts into allowing some of them to be shadowed. This
    /// latter set of modules cannot be allowed to be shadowed by first-party
    /// or "extra-path" modules in ty proper, or we risk panics in unexpected
    /// places due to being unable to resolve builtin symbols. This is similar
    /// behaviour to other type checkers such as mypy:
    /// <https://github.com/python/mypy/blob/3807423e9d98e678bf16b13ec8b4f909fe181908/mypy/build.py#L104-L117>
    pub(super) fn is_non_shadowable(self, minor_version: u8, module_name: &str) -> bool {
        // Builtin modules can never be shadowed at runtime, and `types` is
        // always available at runtime; both are unconditionally protected.
        if ruff_python_stdlib::sys::is_builtin_module(minor_version, module_name)
            || module_name == "types"
        {
            return true;
        }

        // `typing_extensions` is only shadowable in modes that explicitly opt
        // into "some shadowing".
        match self {
            Self::StubsAllowed | Self::StubsNotAllowed => module_name == "typing_extensions",
            Self::StubsNotAllowedSomeShadowingAllowed => false,
        }
    }
}
/// Salsa query that resolves an interned [`ModuleNameIngredient`] to a module.
///
/// This query should not be called directly. Instead, use [`resolve_module`]. It only exists
/// because Salsa requires the module name to be an ingredient.
#[salsa::tracked(heap_size=ruff_memory_usage::heap_size)]
fn resolve_module_query<'db>(
    db: &'db dyn Db,
    module_name: ModuleNameIngredient<'db>,
) -> Option<Module<'db>> {
    let name = module_name.name(db);
    let mode = module_name.mode(db);
    let _span = tracing::trace_span!("resolve_module", %name).entered();

    let Some(resolved) = resolve_name(db, name, mode) else {
        tracing::debug!("Module `{name}` not found in search paths");
        return None;
    };

    // Convert the raw resolution result into a `Module`.
    Some(match resolved {
        ResolvedName::FileModule(found) => {
            tracing::trace!(
                "Resolved module `{name}` to `{path}`",
                path = found.file.path(db)
            );
            Module::file_module(db, name.clone(), found.kind, found.search_path, found.file)
        }
        ResolvedName::NamespacePackage => {
            tracing::trace!("Module `{name}` is a namespace package");
            Module::namespace_package(db, name.clone())
        }
    })
}
/// Like `resolve_module_query` but for cases where it failed to resolve the module
/// and we are now Getting Desperate and willing to try the ancestor directories of
/// the `importing_file` as potential temporary search paths that are private
/// to this import.
///
/// The reason this is split out is because in 99.9% of cases `resolve_module_query`
/// will find the right answer (or no valid answer exists), and we want it to be
/// aggressively cached. Including the `importing_file` as part of that query would
/// trash the caching of import resolution between files.
///
/// TODO: should (some) of this also be cached? If an entire directory of python files
/// is misunderstood we'll end up in here a lot.
fn desperately_resolve_module<'db>(
    db: &'db dyn Db,
    importing_file: File,
    module_name: ModuleNameIngredient<'db>,
) -> Option<Module<'db>> {
    let name = module_name.name(db);
    let mode = module_name.mode(db);
    let _span = tracing::trace_span!("desperately_resolve_module", %name).entered();

    match desperately_resolve_name(db, importing_file, name, mode) {
        Some(ResolvedName::FileModule(found)) => {
            tracing::trace!(
                "Resolved module `{name}` to `{path}`",
                path = found.file.path(db)
            );
            Some(Module::file_module(
                db,
                name.clone(),
                found.kind,
                found.search_path,
                found.file,
            ))
        }
        Some(ResolvedName::NamespacePackage) => {
            tracing::trace!("Module `{name}` is a namespace package");
            Some(Module::namespace_package(db, name.clone()))
        }
        None => {
            // Describe the mode in the failure log to aid debugging.
            let extra = match mode {
                ModuleResolveMode::StubsAllowed => "neither stub nor real module file",
                ModuleResolveMode::StubsNotAllowed => "stubs not allowed",
                ModuleResolveMode::StubsNotAllowedSomeShadowingAllowed => {
                    "stubs not allowed but some shadowing allowed"
                }
            };
            tracing::debug!("Module `{name}` not found while looking in parent dirs ({extra})");
            None
        }
    }
}
/// Resolves the module for the given path.
///
/// Returns `None` if the path is not a module locatable via any of the known search paths.
#[allow(unused)]
pub(crate) fn path_to_module<'db>(db: &'db dyn Db, path: &FilePath) -> Option<Module<'db>> {
    // `file_to_module` is a tracked Salsa query, and Salsa queries require all
    // arguments to be Salsa ingredients. `Path`s aren't ingredients but files
    // are, so we look the path's file up first — that's why this delegates to
    // `file_to_module` rather than the other way around, and it lets us benefit
    // from Salsa's caching and invalidation.
    file_to_module(db, path.to_file(db)?)
}
/// Resolves the module for the file with the given id.
///
/// Returns `None` if the file is not a module locatable via any of the known search paths.
///
/// This function can be understood as essentially resolving `import .<self>` in the file itself,
/// and indeed, one of its primary jobs is resolving `.<self>` to derive the module name of `.`.
/// This intuition is particularly useful for understanding why it's correct that we pass
/// the file itself as `importing_file` to various subroutines.
#[salsa::tracked(heap_size=ruff_memory_usage::heap_size)]
pub fn file_to_module(db: &dyn Db, file: File) -> Option<Module<'_>> {
    let _span = tracing::trace_span!("file_to_module", ?file).entered();

    let path = SystemOrVendoredPathRef::try_from_file(db, file)?;

    // First try the regular search paths...
    let resolved = file_to_module_impl(
        db,
        file,
        path,
        search_paths(db, ModuleResolveMode::StubsAllowed),
    );
    if resolved.is_some() {
        return resolved;
    }

    // ...and only then fall back to the file's desperate search paths.
    file_to_module_impl(
        db,
        file,
        path,
        relative_desperate_search_paths(db, file).iter(),
    )
}
/// Shared implementation for [`file_to_module`]: derive a module name for `path`
/// from the first search path that contains it, then confirm that resolving that
/// name actually lands back on this file (i.e. that this file wins the
/// search-path priority contest for its own name).
fn file_to_module_impl<'db, 'a>(
    db: &'db dyn Db,
    file: File,
    path: SystemOrVendoredPathRef<'a>,
    mut search_paths: impl Iterator<Item = &'a SearchPath>,
) -> Option<Module<'db>> {
    // Derive a candidate module name from the first search path `path` is under.
    let module_name = search_paths.find_map(|candidate: &SearchPath| {
        let relative_path = match path {
            SystemOrVendoredPathRef::System(path) => candidate.relativize_system_path(path),
            SystemOrVendoredPathRef::Vendored(path) => candidate.relativize_vendored_path(path),
        }?;
        relative_path.to_module_name()
    })?;

    // Resolve the module name to see if Python would resolve the name to the same path.
    // If it doesn't, then that means that multiple modules have the same name in different
    // root paths, but that the module corresponding to `path` is in a lower priority search path,
    // in which case we ignore it.
    let module = resolve_module(db, file, &module_name)?;
    let module_file = module.file(db)?;

    if file.path(db) == module_file.path(db) {
        return Some(module);
    } else if file.source_type(db) == PySourceType::Python
        && module_file.source_type(db) == PySourceType::Stub
    {
        // If a .py and .pyi are both defined, the .pyi will be the one returned by `resolve_module().file`,
        // which would make us erroneously believe the `.py` is *not* also this module (breaking things
        // like relative imports). So here we try `resolve_real_module().file` to cover both cases.
        let module = resolve_real_module(db, file, &module_name)?;
        let module_file = module.file(db)?;
        if file.path(db) == module_file.path(db) {
            return Some(module);
        }
    }

    // This path is for a module with the same name but with a different precedence. For example:
    // ```
    // src/foo.py
    // src/foo/__init__.py
    // ```
    // The module name of `src/foo.py` is `foo`, but the module loaded by Python is `src/foo/__init__.py`.
    // That means we need to ignore `src/foo.py` even though it resolves to the same module name.
    None
}
/// Iterate over the configured search paths, in priority order, as visible
/// under the given [`ModuleResolveMode`].
pub fn search_paths(db: &dyn Db, resolve_mode: ModuleResolveMode) -> SearchPathIterator<'_> {
    db.search_paths().iter(db, resolve_mode)
}
/// Get the search-paths for desperate resolution of absolute imports in this file.
///
/// Currently this is "all ancestor directories that don't contain an `__init__.py(i)`"
/// (from closest-to-importing-file to farthest).
///
/// (For paranoia purposes, all relative desperate search-paths are also absolute
/// valid desperate search-paths, but don't worry about that.)
///
/// We exclude `__init__.py(i)` dirs to avoid truncating packages.
#[salsa::tracked(heap_size=ruff_memory_usage::heap_size)]
fn absolute_desperate_search_paths(db: &dyn Db, importing_file: File) -> Option<Vec<SearchPath>> {
    let system = db.system();
    // Desperate resolution only makes sense for files on the host system.
    let importing_path = importing_file.path(db).as_system_path()?;

    // Only allow this if the importing_file is under the first-party search path
    let (base_path, rel_path) =
        search_paths(db, ModuleResolveMode::StubsAllowed).find_map(|search_path| {
            if !search_path.is_first_party() {
                return None;
            }
            Some((
                search_path.as_system_path()?,
                search_path.relativize_system_path_only(importing_path)?,
            ))
        })?;

    // Read the revision on the corresponding file root to
    // register an explicit dependency on this directory. When
    // the revision gets bumped, the cache that Salsa creates
    // for this routine will be invalidated.
    //
    // (This is conditional because ruff uses this code too and doesn't set roots)
    if let Some(root) = db.files().root(db, base_path) {
        let _ = root.revision(db);
    }

    // Only allow searching up to the first-party path's root
    let mut search_paths = Vec::new();
    for rel_dir in rel_path.ancestors() {
        let candidate_path = base_path.join(rel_dir);
        if !system.is_directory(&candidate_path) {
            continue;
        }
        // Any dir that isn't a proper package is plausibly some test/script dir that could be
        // added as a search-path at runtime. Notably this reflects pytest's default mode where
        // it adds every dir with a .py to the search-paths (making all test files root modules),
        // unless they see an `__init__.py`, in which case they assume you don't want that.
        let isnt_regular_package = !system.is_file(&candidate_path.join("__init__.py"))
            && !system.is_file(&candidate_path.join("__init__.pyi"));
        // Any dir with a pyproject.toml or ty.toml is a valid relative desperate search-path and
        // we want all of those to also be valid absolute desperate search-paths. It doesn't
        // make any sense for a folder to have `pyproject.toml` and `__init__.py` but let's
        // not let something cursed and spooky happen, ok?
        if isnt_regular_package
            || system.is_file(&candidate_path.join("pyproject.toml"))
            || system.is_file(&candidate_path.join("ty.toml"))
        {
            // Note: if constructing the search path fails we bail out of the whole
            // query (via `ok()?`) rather than just skipping this directory.
            let search_path = SearchPath::first_party(system, candidate_path).ok()?;
            search_paths.push(search_path);
        }
    }
    // `None` (rather than an empty Vec) signals "no desperate paths available".
    if search_paths.is_empty() {
        None
    } else {
        Some(search_paths)
    }
}
/// Get the search-paths for desperate resolution of relative imports in this file.
///
/// Currently this is "the closest ancestor dir that contains a pyproject.toml (or ty.toml)",
/// which is a completely arbitrary decision. However it's fairly important that relative
/// desperate search-paths pick a single "best" answer because every one is *valid* but one
/// that's too long or too short may cause problems.
///
/// For now this works well in common cases where we have some larger workspace that contains
/// one or more python projects in sub-directories, and those python projects assume that
/// absolute imports resolve relative to the pyproject.toml they live under.
///
/// Being so strict minimizes concerns about this going off a lot and doing random
/// chaotic things. In particular, all files under a given pyproject.toml will currently
/// agree on this being their desperate search-path, which is really nice.
#[salsa::tracked(heap_size=ruff_memory_usage::heap_size)]
fn relative_desperate_search_paths(db: &dyn Db, importing_file: File) -> Option<SearchPath> {
    let system = db.system();
    let importing_path = importing_file.path(db).as_system_path()?;

    // Only allow this if the importing_file is under the first-party search path
    let (base_path, rel_path) =
        search_paths(db, ModuleResolveMode::StubsAllowed).find_map(|search_path| {
            if search_path.is_first_party() {
                Some((
                    search_path.as_system_path()?,
                    search_path.relativize_system_path_only(importing_path)?,
                ))
            } else {
                None
            }
        })?;

    // Read the revision on the corresponding file root to
    // register an explicit dependency on this directory. When
    // the revision gets bumped, the cache that Salsa creates
    // for this routine will be invalidated.
    //
    // (This is conditional because ruff uses this code too and doesn't set roots)
    if let Some(root) = db.files().root(db, base_path) {
        let _ = root.revision(db);
    }

    // Walk the ancestors (closest first, never escaping the first-party root)
    // and take the first directory that looks like a project root.
    let project_root = rel_path
        .ancestors()
        .map(|rel_dir| base_path.join(rel_dir))
        .find(|candidate| {
            system.is_file(&candidate.join("pyproject.toml"))
                || system.is_file(&candidate.join("ty.toml"))
        })?;

    SearchPath::first_party(system, project_root).ok()
}
/// The full set of search paths used for module resolution, in priority order.
#[derive(Clone, Debug, PartialEq, Eq, get_size2::GetSize)]
pub struct SearchPaths {
    /// Search paths that have been statically determined purely from reading
    /// ty's configuration settings. These shouldn't ever change unless the
    /// config settings themselves change.
    static_paths: Vec<SearchPath>,

    /// Path to typeshed, which should come immediately after static paths.
    ///
    /// This can currently only be None if the `SystemPath` this points to is already in `static_paths`.
    stdlib_path: Option<SearchPath>,

    /// Path to the real stdlib, this replaces typeshed (`stdlib_path`) for goto-definition searches
    /// ([`ModuleResolveMode::StubsNotAllowed`]).
    real_stdlib_path: Option<SearchPath>,

    /// site-packages paths are not included in the above fields:
    /// if there are multiple site-packages paths, editable installations can appear
    /// *between* the site-packages paths on `sys.path` at runtime.
    /// That means we can't know where a second or third `site-packages` path should sit
    /// in terms of module-resolution priority until we've discovered the editable installs
    /// for the first `site-packages` path
    site_packages: Vec<SearchPath>,

    /// Parsed contents of typeshed's `stdlib/VERSIONS` file for the configured stdlib.
    typeshed_versions: TypeshedVersions,
}
impl SearchPaths {
/// Validate and normalize the raw settings given by the user
/// into settings we can use for module resolution
///
/// This method also implements the typing spec's [module resolution order].
///
/// [module resolution order]: https://typing.python.org/en/latest/spec/distributing.html#import-resolution-ordering
pub fn from_settings(
settings: &SearchPathSettings,
system: &dyn System,
vendored: &VendoredFileSystem,
) -> Result<Self, SearchPathSettingsError> {
fn canonicalize(path: &SystemPath, system: &dyn System) -> SystemPathBuf {
system
.canonicalize_path(path)
.unwrap_or_else(|_| path.to_path_buf())
}
let SearchPathSettings {
extra_paths,
src_roots,
custom_typeshed: typeshed,
site_packages_paths,
real_stdlib_path,
misconfiguration_mode,
} = settings;
let mut static_paths = vec![];
for path in extra_paths {
let path = canonicalize(path, system);
tracing::debug!("Adding extra search-path `{path}`");
match SearchPath::extra(system, path) {
Ok(path) => static_paths.push(path),
Err(err) => {
if *misconfiguration_mode == MisconfigurationMode::UseDefault {
tracing::debug!("Skipping invalid extra search-path: {err}");
} else {
return Err(err.into());
}
}
}
}
for src_root in src_roots {
tracing::debug!("Adding first-party search path `{src_root}`");
match SearchPath::first_party(system, src_root.to_path_buf()) {
Ok(path) => static_paths.push(path),
Err(err) => {
if *misconfiguration_mode == MisconfigurationMode::UseDefault {
tracing::debug!("Skipping invalid first-party search-path: {err}");
} else {
return Err(err.into());
}
}
}
}
let (typeshed_versions, stdlib_path) = if let Some(typeshed) = typeshed {
let typeshed = canonicalize(typeshed, system);
tracing::debug!("Adding custom-stdlib search path `{typeshed}`");
let versions_path = typeshed.join("stdlib/VERSIONS");
let results = system
.read_to_string(&versions_path)
.map_err(|error| SearchPathSettingsError::FailedToReadVersionsFile {
path: versions_path,
error,
})
.and_then(|versions_content| Ok(versions_content.parse()?))
.and_then(|parsed| Ok((parsed, SearchPath::custom_stdlib(system, &typeshed)?)));
match results {
Ok(results) => results,
Err(err) => {
if settings.misconfiguration_mode == MisconfigurationMode::UseDefault {
tracing::debug!("Skipping custom-stdlib search-path: {err}");
(
vendored_typeshed_versions(vendored),
SearchPath::vendored_stdlib(),
)
} else {
return Err(err);
}
}
}
} else {
tracing::debug!("Using vendored stdlib");
(
vendored_typeshed_versions(vendored),
SearchPath::vendored_stdlib(),
)
};
let real_stdlib_path = if let Some(path) = real_stdlib_path {
match SearchPath::real_stdlib(system, path.clone()) {
Ok(path) => Some(path),
Err(err) => {
if *misconfiguration_mode == MisconfigurationMode::UseDefault {
tracing::debug!("Skipping invalid real-stdlib search-path: {err}");
None
} else {
return Err(err.into());
}
}
}
} else {
None
};
let mut site_packages: Vec<_> = Vec::with_capacity(site_packages_paths.len());
for path in site_packages_paths {
tracing::debug!("Adding site-packages search path `{path}`");
match SearchPath::site_packages(system, path.clone()) {
Ok(path) => site_packages.push(path),
Err(err) => {
if settings.misconfiguration_mode == MisconfigurationMode::UseDefault {
tracing::debug!("Skipping invalid site-packages search-path: {err}");
} else {
return Err(err.into());
}
}
}
}
// TODO vendor typeshed's third-party stubs as well as the stdlib and
// fallback to them as a final step?
//
// See: <https://github.com/astral-sh/ruff/pull/19620#discussion_r2240609135>
// Filter out module resolution paths that point to the same directory
// on disk (the same invariant maintained by [`sys.path` at runtime]).
// (Paths may, however, *overlap* -- e.g. you could have both `src/`
// and `src/foo` as module resolution paths simultaneously.)
//
// This code doesn't use an `IndexSet` because the key is the system
// path and not the search root.
//
// [`sys.path` at runtime]: https://docs.python.org/3/library/site.html#module-site
let mut seen_paths = FxHashSet::with_capacity_and_hasher(static_paths.len(), FxBuildHasher);
static_paths.retain(|path| {
if let Some(path) = path.as_system_path() {
seen_paths.insert(path.to_path_buf())
} else {
true
}
});
// Users probably shouldn't do this but... if they've shadowed their stdlib we should deduplicate it away.
// This notably will mess up anything that checks if a search path "is the standard library" as we won't
// "remember" that fact for static paths.
//
// (We used to shove these into static_paths, so the above retain implicitly did this. I am opting to
// preserve this behaviour to avoid getting into the weeds of corner cases.)
let stdlib_path_is_shadowed = stdlib_path
.as_system_path()
.is_some_and(|path| seen_paths.contains(path));
let real_stdlib_path_is_shadowed = real_stdlib_path
.as_ref()
.and_then(SearchPath::as_system_path)
.is_some_and(|path| seen_paths.contains(path));
let stdlib_path = if stdlib_path_is_shadowed {
None
} else {
Some(stdlib_path)
};
let real_stdlib_path = if real_stdlib_path_is_shadowed {
None
} else {
real_stdlib_path
};
Ok(SearchPaths {
static_paths,
stdlib_path,
real_stdlib_path,
site_packages,
typeshed_versions,
})
}
    /// Returns a new `SearchPaths` with no search paths configured.
    ///
    /// Only the vendored typeshed stdlib (and its parsed VERSIONS data) is set.
    ///
    /// This is primarily useful for testing.
    pub fn empty(vendored: &VendoredFileSystem) -> Self {
        Self {
            static_paths: vec![],
            stdlib_path: Some(SearchPath::vendored_stdlib()),
            real_stdlib_path: None,
            site_packages: vec![],
            typeshed_versions: vendored_typeshed_versions(vendored),
        }
    }
/// Registers the file roots for all non-dynamically discovered search paths that aren't first-party.
pub fn try_register_static_roots(&self, db: &dyn Db) {
let files = db.files();
for path in self
.static_paths
.iter()
.chain(self.site_packages.iter())
.chain(&self.stdlib_path)
{
if let Some(system_path) = path.as_system_path() {
if !path.is_first_party() {
files.try_add_root(db, system_path, FileRootKind::LibrarySearchPath);
}
}
}
}
    /// Iterate over the search paths in priority order, as visible under `mode`.
    pub(super) fn iter<'a>(
        &'a self,
        db: &'a dyn Db,
        mode: ModuleResolveMode,
    ) -> SearchPathIterator<'a> {
        // Pick typeshed vs. the real stdlib depending on the mode.
        let stdlib_path = self.stdlib(mode);
        SearchPathIterator {
            db,
            static_paths: self.static_paths.iter(),
            stdlib_path,
            // Presumably populated lazily by the iterator (site-packages /
            // editable installs) — see `SearchPathIterator`.
            dynamic_paths: None,
            mode: ModuleResolveModeIngredient::new(db, mode),
        }
    }
pub(crate) fn stdlib(&self, mode: ModuleResolveMode) -> Option<&SearchPath> {
match mode {
ModuleResolveMode::StubsAllowed => self.stdlib_path.as_ref(),
ModuleResolveMode::StubsNotAllowed
| ModuleResolveMode::StubsNotAllowedSomeShadowingAllowed => {
self.real_stdlib_path.as_ref()
}
}
}
    /// Returns the stdlib search path as a system path, if any.
    ///
    /// Returns `None` when the stdlib path is the vendored (non-system) typeshed
    /// or when the stdlib path was deduplicated away.
    pub fn custom_stdlib(&self) -> Option<&SystemPath> {
        self.stdlib_path
            .as_ref()
            .and_then(SearchPath::as_system_path)
    }
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_module_resolver/src/typeshed.rs | crates/ty_module_resolver/src/typeshed.rs | use std::collections::BTreeMap;
use std::fmt;
use std::num::{NonZeroU16, NonZeroUsize};
use std::ops::{RangeFrom, RangeInclusive};
use std::str::FromStr;
use ruff_db::vendored::VendoredFileSystem;
use ruff_python_ast::{PythonVersion, PythonVersionDeserializationError};
use rustc_hash::FxHashMap;
use crate::db::Db;
use crate::module_name::ModuleName;
pub fn vendored_typeshed_versions(vendored: &VendoredFileSystem) -> TypeshedVersions {
TypeshedVersions::from_str(
&vendored
.read_to_string("stdlib/VERSIONS")
.expect("The vendored typeshed stubs should contain a VERSIONS file"),
)
.expect("The VERSIONS file in the vendored typeshed stubs should be well-formed")
}
/// Returns the parsed typeshed `VERSIONS` data for the configured search paths.
pub(crate) fn typeshed_versions(db: &dyn Db) -> &TypeshedVersions {
    db.search_paths().typeshed_versions()
}
/// Error raised when parsing typeshed's `VERSIONS` file fails.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct TypeshedVersionsParseError {
    /// 1-based line on which the error occurred; `None` when the error isn't
    /// tied to a numberable line (e.g. the file has too many lines).
    line_number: Option<NonZeroU16>,
    /// What went wrong on that line.
    reason: TypeshedVersionsParseErrorKind,
}
impl fmt::Display for TypeshedVersionsParseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let TypeshedVersionsParseError {
line_number,
reason,
} = self;
if let Some(line_number) = line_number {
write!(
f,
"Error while parsing line {line_number} of typeshed's VERSIONS file: {reason}"
)
} else {
write!(f, "Error while parsing typeshed's VERSIONS file: {reason}")
}
}
}
impl std::error::Error for TypeshedVersionsParseError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // Only version-parse failures wrap an underlying error. Note that this
        // forwards that error's own `source()` rather than returning the error
        // itself (whose message is already included in our `Display` output).
        match &self.reason {
            TypeshedVersionsParseErrorKind::VersionParseError(err) => err.source(),
            _ => None,
        }
    }
}
/// The specific reason a line (or the whole file) of typeshed's `VERSIONS`
/// file failed to parse.
#[derive(Debug, PartialEq, Eq, Clone, thiserror::Error)]
pub enum TypeshedVersionsParseErrorKind {
    #[error("File has too many lines ({0}); maximum allowed is {max_allowed}", max_allowed = NonZeroU16::MAX)]
    TooManyLines(NonZeroUsize),
    #[error("Expected every non-comment line to have exactly one colon")]
    UnexpectedNumberOfColons,
    #[error("Expected all components of '{0}' to be valid Python identifiers")]
    InvalidModuleName(String),
    #[error("Expected every non-comment line to have exactly one '-' character")]
    UnexpectedNumberOfHyphens,
    // Underlying error from parsing an `X.Y` version component.
    #[error("{0}")]
    VersionParseError(#[from] PythonVersionDeserializationError),
}
/// Parsed contents of typeshed's `VERSIONS` file: a map from stdlib module name
/// to the range of Python versions in which typeshed says the module exists.
#[derive(Clone, Debug, PartialEq, Eq, get_size2::GetSize)]
pub struct TypeshedVersions(FxHashMap<ModuleName, PyVersionRange>);
impl TypeshedVersions {
    /// The version range recorded for exactly this module name, if any.
    #[must_use]
    pub fn exact(&self, module_name: &ModuleName) -> Option<&PyVersionRange> {
        self.0.get(module_name)
    }

    /// Answer whether `module` exists in the stdlib at `python_version`,
    /// according to typeshed's `VERSIONS` data.
    #[must_use]
    pub(crate) fn query_module(
        &self,
        module: &ModuleName,
        python_version: PythonVersion,
    ) -> TypeshedVersionsQueryResult {
        // An exact entry gives a definitive answer.
        if let Some(range) = self.exact(module) {
            return if range.contains(python_version) {
                TypeshedVersionsQueryResult::Exists
            } else {
                TypeshedVersionsQueryResult::DoesNotExist
            };
        }

        // Otherwise, walk up the parent packages: VERSIONS is only guaranteed
        // to cover top-level modules, so a match on an ancestor is at best a
        // "maybe" for the submodule.
        let mut ancestor = module.parent();
        while let Some(package) = ancestor {
            if let Some(range) = self.exact(&package) {
                return if range.contains(python_version) {
                    TypeshedVersionsQueryResult::MaybeExists
                } else {
                    TypeshedVersionsQueryResult::DoesNotExist
                };
            }
            ancestor = package.parent();
        }

        TypeshedVersionsQueryResult::DoesNotExist
    }
}
/// Possible answers [`TypeshedVersions::query_module()`] could give to the question:
/// "Does this module exist in the stdlib at runtime on a certain target version?"
#[derive(Debug, Copy, PartialEq, Eq, Clone, Hash)]
pub(crate) enum TypeshedVersionsQueryResult {
    /// The module definitely exists in the stdlib at runtime on the user-specified target version.
    ///
    /// Only returned when the VERSIONS file has an entry for the exact module name.
    ///
    /// For example:
    /// - The target version is Python 3.8
    /// - We're querying whether the `asyncio.tasks` module exists in the stdlib
    /// - The VERSIONS file contains the line `asyncio.tasks: 3.8-`
    Exists,

    /// The module definitely does not exist in the stdlib on the user-specified target version.
    ///
    /// For example:
    /// - We're querying whether the `foo` module exists in the stdlib
    /// - There is no top-level `foo` module in VERSIONS
    ///
    /// OR:
    /// - The target version is Python 3.8
    /// - We're querying whether the module `importlib.abc` exists in the stdlib
    /// - The VERSIONS file contains the line `importlib.abc: 3.10-`,
    ///   indicating that the module was added in 3.10
    ///
    /// OR:
    /// - The target version is Python 3.8
    /// - We're querying whether the module `collections.abc` exists in the stdlib
    /// - The VERSIONS file does not contain any information about the `collections.abc` submodule,
    ///   but *does* contain the line `collections: 3.10-`,
    ///   indicating that the entire `collections` package was added in Python 3.10.
    DoesNotExist,

    /// The module potentially exists in the stdlib and, if it does,
    /// it definitely exists on the user-specified target version.
    ///
    /// This variant is only relevant for submodules,
    /// for which the typeshed VERSIONS file does not provide comprehensive information.
    /// (The VERSIONS file is guaranteed to provide information about all top-level stdlib modules and packages,
    /// but not necessarily about all submodules within each top-level package.)
    ///
    /// For example:
    /// - The target version is Python 3.8
    /// - We're querying whether the `asyncio.staggered` module exists in the stdlib
    /// - The typeshed VERSIONS file contains the line `asyncio: 3.8`,
    ///   indicating that the `asyncio` package was added in Python 3.8,
    ///   but does not contain any explicit information about the `asyncio.staggered` submodule.
    MaybeExists,
}
impl FromStr for TypeshedVersions {
    type Err = TypeshedVersionsParseError;

    /// Parse the contents of typeshed's `VERSIONS` file.
    ///
    /// Each non-comment line has the form `module.name: <version range>`;
    /// `#` starts a comment that runs to the end of the line.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut map = FxHashMap::default();

        for (line_index, line) in s.lines().enumerate() {
            // humans expect line numbers to be 1-indexed
            let line_number = NonZeroUsize::new(line_index.saturating_add(1)).unwrap();

            // Line numbers are stored as `NonZeroU16` in errors; report a file
            // that's too long to number rather than silently wrapping.
            let Ok(line_number) = NonZeroU16::try_from(line_number) else {
                return Err(TypeshedVersionsParseError {
                    line_number: None,
                    reason: TypeshedVersionsParseErrorKind::TooManyLines(line_number),
                });
            };

            // Strip a trailing comment. `split('#')` always yields at least one
            // element, so the `else` branch can't actually be taken.
            let Some(content) = line.split('#').map(str::trim).next() else {
                continue;
            };
            if content.is_empty() {
                continue;
            }

            // Expect exactly one `:` separating the module name from the version range.
            let mut parts = content.split(':').map(str::trim);
            let (Some(module_name), Some(rest), None) = (parts.next(), parts.next(), parts.next())
            else {
                return Err(TypeshedVersionsParseError {
                    line_number: Some(line_number),
                    reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfColons,
                });
            };

            let Some(module_name) = ModuleName::new(module_name) else {
                return Err(TypeshedVersionsParseError {
                    line_number: Some(line_number),
                    reason: TypeshedVersionsParseErrorKind::InvalidModuleName(
                        module_name.to_string(),
                    ),
                });
            };

            match PyVersionRange::from_str(rest) {
                // Note: a duplicate module name silently overwrites the earlier entry.
                Ok(version) => map.insert(module_name, version),
                Err(reason) => {
                    return Err(TypeshedVersionsParseError {
                        line_number: Some(line_number),
                        reason,
                    });
                }
            };
        }

        Ok(Self(map))
    }
}
impl fmt::Display for TypeshedVersions {
    // Render the map as `VERSIONS`-file-style lines, sorted by module name so
    // the output is deterministic regardless of hash-map iteration order.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut entries: Vec<(&ModuleName, &PyVersionRange)> = self.0.iter().collect();
        entries.sort_unstable_by_key(|(module_name, _)| *module_name);
        for (module_name, range) in entries {
            writeln!(f, "{module_name}: {range}")?;
        }
        Ok(())
    }
}
/// The range of Python versions in which a stdlib module is available,
/// as recorded in a typeshed `VERSIONS` file.
#[derive(Debug, Clone, Eq, PartialEq, Hash, get_size2::GetSize)]
pub enum PyVersionRange {
/// The module was added in the given version and still exists (`X.Y-` syntax).
AvailableFrom(RangeFrom<PythonVersion>),
/// The module existed only between the two versions, inclusive (`X.Y-Z.W` syntax).
AvailableWithin(RangeInclusive<PythonVersion>),
}
impl PyVersionRange {
    /// Whether `version` falls within this availability range.
    #[must_use]
    pub fn contains(&self, version: PythonVersion) -> bool {
        match self {
            Self::AvailableFrom(range) => range.contains(&version),
            Self::AvailableWithin(range) => range.contains(&version),
        }
    }

    /// Display the version range in a way that is suitable for rendering in user-facing diagnostics.
    pub fn diagnostic_display(&self) -> impl std::fmt::Display {
        struct Renderer<'a>(&'a PyVersionRange);

        impl fmt::Display for Renderer<'_> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                match self.0 {
                    PyVersionRange::AvailableFrom(from) => write!(f, "{}+", from.start),
                    PyVersionRange::AvailableWithin(within) => {
                        // Don't trust the start Python version if it's 3.0 or lower.
                        // Typeshed doesn't attempt to give accurate start versions if a
                        // module was added in the Python 2 era.
                        let python2_era_floor = PythonVersion { major: 3, minor: 0 };
                        if within.start() <= &python2_era_floor {
                            write!(f, "<={}", within.end())
                        } else {
                            write!(f, "{}-{}", within.start(), within.end())
                        }
                    }
                }
            }
        }

        Renderer(self)
    }
}
impl FromStr for PyVersionRange {
    type Err = TypeshedVersionsParseErrorKind;

    /// Parse a version range as it appears in a typeshed `VERSIONS` file:
    /// either `X.Y-` (available from `X.Y` onwards) or `X.Y-Z.W` (inclusive range).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut pieces = s.split('-').map(str::trim);
        let lower = pieces.next();
        let upper = pieces.next();

        // More than one hyphen (three or more pieces) is malformed.
        if pieces.next().is_some() {
            return Err(TypeshedVersionsParseErrorKind::UnexpectedNumberOfHyphens);
        }
        // Exactly one hyphen is required: no hyphen at all is also malformed.
        let (Some(lower), Some(upper)) = (lower, upper) else {
            return Err(TypeshedVersionsParseErrorKind::UnexpectedNumberOfHyphens);
        };

        let lower = PythonVersion::from_str(lower)?;
        if upper.is_empty() {
            // `X.Y-`: a trailing hyphen leaves an empty upper bound.
            Ok(Self::AvailableFrom(lower..))
        } else {
            Ok(Self::AvailableWithin(lower..=PythonVersion::from_str(upper)?))
        }
    }
}
impl fmt::Display for PyVersionRange {
    // Round-trips the `VERSIONS`-file syntax: `X.Y-` or `X.Y-Z.W`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::AvailableFrom(from) => write!(f, "{}-", from.start),
            Self::AvailableWithin(within) => {
                let (lower, upper) = (within.start(), within.end());
                write!(f, "{lower}-{upper}")
            }
        }
    }
}
#[cfg(test)]
mod tests {
#![expect(
clippy::disallowed_methods,
reason = "These are tests, so it's fine to do I/O by-passing System."
)]
use std::fmt::Write as _;
use std::num::{IntErrorKind, NonZeroU16};
use std::path::Path;
use super::*;
use insta::assert_snapshot;
// Name of the directory that holds stdlib stubs inside the vendored typeshed.
const TYPESHED_STDLIB_DIR: &str = "stdlib";
// Line number 1: the expected `line_number` for parse errors on one-line inputs.
const ONE: Option<NonZeroU16> = Some(NonZeroU16::new(1).unwrap());
// Test-only convenience accessors over the parsed map.
impl TypeshedVersions {
/// Whether this exact module name has its own entry
/// (not merely coverage via a parent package's entry).
#[must_use]
fn contains_exact(&self, module: &ModuleName) -> bool {
self.exact(module).is_some()
}
/// Number of entries parsed from the VERSIONS file.
#[must_use]
fn len(&self) -> usize {
self.0.len()
}
}
#[test]
fn can_parse_vendored_versions_file() {
let versions = vendored_typeshed_versions(ty_vendored::file_system());
// Sanity-check the overall size rather than pinning an exact count,
// since typeshed is updated regularly.
assert!(versions.len() > 100);
assert!(versions.len() < 1000);
let asyncio = ModuleName::new_static("asyncio").unwrap();
let asyncio_staggered = ModuleName::new_static("asyncio.staggered").unwrap();
let audioop = ModuleName::new_static("audioop").unwrap();
assert!(versions.contains_exact(&asyncio));
assert_eq!(
versions.query_module(&asyncio, PythonVersion::PY310),
TypeshedVersionsQueryResult::Exists
);
assert!(versions.contains_exact(&asyncio_staggered));
assert_eq!(
versions.query_module(&asyncio_staggered, PythonVersion::PY38),
TypeshedVersionsQueryResult::Exists
);
assert_eq!(
versions.query_module(&asyncio_staggered, PythonVersion::PY37),
TypeshedVersionsQueryResult::DoesNotExist
);
assert!(versions.contains_exact(&audioop));
assert_eq!(
versions.query_module(&audioop, PythonVersion::PY312),
TypeshedVersionsQueryResult::Exists
);
assert_eq!(
versions.query_module(&audioop, PythonVersion::PY313),
TypeshedVersionsQueryResult::DoesNotExist
);
}
// Every top-level stub file/directory vendored under `stdlib/` must have an
// exact entry in the vendored VERSIONS file.
#[test]
fn typeshed_versions_consistent_with_vendored_stubs() {
let vendored_typeshed_versions = vendored_typeshed_versions(ty_vendored::file_system());
let vendored_typeshed_dir =
Path::new(env!("CARGO_MANIFEST_DIR")).join("../ty_vendored/vendor/typeshed");
let mut empty_iterator = true;
let stdlib_stubs_path = vendored_typeshed_dir.join(TYPESHED_STDLIB_DIR);
for entry in std::fs::read_dir(&stdlib_stubs_path).unwrap() {
empty_iterator = false;
let entry = entry.unwrap();
let absolute_path = entry.path();
let relative_path = absolute_path
.strip_prefix(&stdlib_stubs_path)
.unwrap_or_else(|_| panic!("Expected path to be a child of {stdlib_stubs_path:?} but found {absolute_path:?}"));
let relative_path_str = relative_path.as_os_str().to_str().unwrap_or_else(|| {
panic!("Expected all typeshed paths to be valid UTF-8; got {relative_path:?}")
});
if relative_path_str == "VERSIONS" {
continue;
}
let top_level_module = if let Some(extension) = relative_path.extension() {
// It was a file; strip off the file extension to get the module name:
let extension = extension
.to_str()
.unwrap_or_else(||panic!("Expected all file extensions to be UTF-8; was not true for {relative_path:?}"));
relative_path_str
.strip_suffix(extension)
.and_then(|string| string.strip_suffix('.')).unwrap_or_else(|| {
panic!("Expected path {relative_path_str:?} to end with computed extension {extension:?}")
})
} else {
// It was a directory; no need to do anything to get the module name
relative_path_str
};
let top_level_module = ModuleName::new(top_level_module)
.unwrap_or_else(|| panic!("{top_level_module:?} was not a valid module name!"));
assert!(vendored_typeshed_versions.contains_exact(&top_level_module));
}
assert!(
!empty_iterator,
"Expected there to be at least one file or directory in the vendored typeshed stubs"
);
}
#[test]
fn can_parse_mock_versions_file() {
const VERSIONS: &str = "\
# a comment
# some more comment
# yet more comment
# and some more comment
bar: 2.7-3.10
# more comment
bar.baz: 3.1-3.9
foo: 3.8- # trailing comment
";
let parsed_versions = TypeshedVersions::from_str(VERSIONS).unwrap();
assert_eq!(parsed_versions.len(), 3);
// `Display` sorts entries by module name, so the snapshot is deterministic.
assert_snapshot!(parsed_versions.to_string(), @r"
bar: 2.7-3.10
bar.baz: 3.1-3.9
foo: 3.8-
"
);
}
#[test]
fn version_within_range_parsed_correctly() {
let parsed_versions = TypeshedVersions::from_str("bar: 2.7-3.10").unwrap();
let bar = ModuleName::new_static("bar").unwrap();
assert!(parsed_versions.contains_exact(&bar));
assert_eq!(
parsed_versions.query_module(&bar, PythonVersion::PY37),
TypeshedVersionsQueryResult::Exists
);
assert_eq!(
parsed_versions.query_module(&bar, PythonVersion::PY310),
TypeshedVersionsQueryResult::Exists
);
assert_eq!(
parsed_versions.query_module(&bar, PythonVersion::PY311),
TypeshedVersionsQueryResult::DoesNotExist
);
}
#[test]
fn version_from_range_parsed_correctly() {
let parsed_versions = TypeshedVersions::from_str("foo: 3.8-").unwrap();
let foo = ModuleName::new_static("foo").unwrap();
assert!(parsed_versions.contains_exact(&foo));
assert_eq!(
parsed_versions.query_module(&foo, PythonVersion::PY37),
TypeshedVersionsQueryResult::DoesNotExist
);
assert_eq!(
parsed_versions.query_module(&foo, PythonVersion::PY38),
TypeshedVersionsQueryResult::Exists
);
assert_eq!(
parsed_versions.query_module(&foo, PythonVersion::PY311),
TypeshedVersionsQueryResult::Exists
);
}
#[test]
fn explicit_submodule_parsed_correctly() {
let parsed_versions = TypeshedVersions::from_str("bar.baz: 3.1-3.9").unwrap();
let bar_baz = ModuleName::new_static("bar.baz").unwrap();
assert!(parsed_versions.contains_exact(&bar_baz));
assert_eq!(
parsed_versions.query_module(&bar_baz, PythonVersion::PY37),
TypeshedVersionsQueryResult::Exists
);
assert_eq!(
parsed_versions.query_module(&bar_baz, PythonVersion::PY39),
TypeshedVersionsQueryResult::Exists
);
assert_eq!(
parsed_versions.query_module(&bar_baz, PythonVersion::PY310),
TypeshedVersionsQueryResult::DoesNotExist
);
}
// A submodule with no explicit entry falls back to its parent package's range,
// answering `MaybeExists` while the parent exists.
#[test]
fn implicit_submodule_queried_correctly() {
let parsed_versions = TypeshedVersions::from_str("bar: 2.7-3.10").unwrap();
let bar_eggs = ModuleName::new_static("bar.eggs").unwrap();
assert!(!parsed_versions.contains_exact(&bar_eggs));
assert_eq!(
parsed_versions.query_module(&bar_eggs, PythonVersion::PY37),
TypeshedVersionsQueryResult::MaybeExists
);
assert_eq!(
parsed_versions.query_module(&bar_eggs, PythonVersion::PY310),
TypeshedVersionsQueryResult::MaybeExists
);
assert_eq!(
parsed_versions.query_module(&bar_eggs, PythonVersion::PY311),
TypeshedVersionsQueryResult::DoesNotExist
);
}
#[test]
fn nonexistent_module_queried_correctly() {
let parsed_versions = TypeshedVersions::from_str("eggs: 3.8-").unwrap();
let spam = ModuleName::new_static("spam").unwrap();
assert!(!parsed_versions.contains_exact(&spam));
assert_eq!(
parsed_versions.query_module(&spam, PythonVersion::PY37),
TypeshedVersionsQueryResult::DoesNotExist
);
assert_eq!(
parsed_versions.query_module(&spam, PythonVersion::PY313),
TypeshedVersionsQueryResult::DoesNotExist
);
}
// Files with more than `u16::MAX` lines are rejected; the error reports the
// first line number that didn't fit (as a `NonZeroUsize`).
#[test]
fn invalid_huge_versions_file() {
let offset = 100;
let too_many = u16::MAX as usize + offset;
let mut massive_versions_file = String::new();
for i in 0..too_many {
let _ = writeln!(&mut massive_versions_file, "x{i}: 3.8-");
}
assert_eq!(
TypeshedVersions::from_str(&massive_versions_file),
Err(TypeshedVersionsParseError {
line_number: None,
reason: TypeshedVersionsParseErrorKind::TooManyLines(
NonZeroUsize::new(too_many + 1 - offset).unwrap()
)
})
);
}
#[test]
fn invalid_typeshed_versions_bad_colon_number() {
assert_eq!(
TypeshedVersions::from_str("foo 3.7"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfColons
})
);
assert_eq!(
TypeshedVersions::from_str("foo:: 3.7"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfColons
})
);
}
#[test]
fn invalid_typeshed_versions_non_identifier_modules() {
assert_eq!(
TypeshedVersions::from_str("not!an!identifier!: 3.7"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::InvalidModuleName(
"not!an!identifier!".to_string()
)
})
);
assert_eq!(
TypeshedVersions::from_str("(also_not).(an_identifier): 3.7"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::InvalidModuleName(
"(also_not).(an_identifier)".to_string()
)
})
);
}
#[test]
fn invalid_typeshed_versions_bad_hyphen_number() {
assert_eq!(
TypeshedVersions::from_str("foo: 3.8"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfHyphens
})
);
assert_eq!(
TypeshedVersions::from_str("foo: 3.8--"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfHyphens
})
);
assert_eq!(
TypeshedVersions::from_str("foo: 3.8--3.9"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfHyphens
})
);
}
#[test]
fn invalid_typeshed_versions_bad_period_number() {
assert_eq!(
TypeshedVersions::from_str("foo: 38-"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::VersionParseError(
PythonVersionDeserializationError::WrongPeriodNumber(Box::from("38"))
)
})
);
assert_eq!(
TypeshedVersions::from_str("foo: 3..8-"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::VersionParseError(
PythonVersionDeserializationError::WrongPeriodNumber(Box::from("3..8"))
)
})
);
assert_eq!(
TypeshedVersions::from_str("foo: 3.8-3..11"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::VersionParseError(
PythonVersionDeserializationError::WrongPeriodNumber(Box::from("3..11"))
)
})
);
}
#[test]
fn invalid_typeshed_versions_non_digits() {
let err = TypeshedVersions::from_str("foo: 1.two-").unwrap_err();
assert_eq!(err.line_number, ONE);
let TypeshedVersionsParseErrorKind::VersionParseError(
PythonVersionDeserializationError::InvalidMinorVersion(invalid_minor, parse_error),
) = err.reason
else {
panic!(
"Expected an invalid-minor-version parse error, got `{}`",
err.reason
)
};
assert_eq!(&*invalid_minor, "two");
assert_eq!(*parse_error.kind(), IntErrorKind::InvalidDigit);
let err = TypeshedVersions::from_str("foo: 3.8-four.9").unwrap_err();
assert_eq!(err.line_number, ONE);
let TypeshedVersionsParseErrorKind::VersionParseError(
PythonVersionDeserializationError::InvalidMajorVersion(invalid_major, parse_error),
) = err.reason
else {
panic!(
"Expected an invalid-major-version parse error, got `{}`",
err.reason
)
};
assert_eq!(&*invalid_major, "four");
assert_eq!(*parse_error.kind(), IntErrorKind::InvalidDigit);
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_importer/src/lib.rs | crates/ruff_python_importer/src/lib.rs | /*!
Low-level helpers for manipulating Python import statements.
*/
pub use self::insertion::Insertion;
mod insertion;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_importer/src/insertion.rs | crates/ruff_python_importer/src/insertion.rs | //! Insert statements into Python code.
use std::ops::Add;
use ruff_diagnostics::Edit;
use ruff_python_ast::Stmt;
use ruff_python_ast::helpers::is_docstring_stmt;
use ruff_python_ast::token::{TokenKind, Tokens};
use ruff_python_codegen::Stylist;
use ruff_python_trivia::is_python_whitespace;
use ruff_python_trivia::{PythonWhitespace, textwrap::indent};
use ruff_source_file::{LineRanges, UniversalNewlineIterator};
use ruff_text_size::{Ranged, TextRange, TextSize};
/// Where an [`Insertion`]'s content lands relative to the surrounding code.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) enum Placement<'a> {
/// The content will be inserted inline with the existing code (i.e., within semicolon-delimited
/// statements).
Inline,
/// The content will be inserted on its own line.
OwnLine,
/// The content will be inserted as an indented block.
///
/// The payload is the indentation to prepend to each inserted line.
Indented(&'a str),
}
/// A location in the source at which new content (e.g., an import statement)
/// can be inserted, together with the text to place around it.
///
/// Build one with the constructors on [`Insertion`], then materialize it as an
/// edit via [`Insertion::into_edit`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Insertion<'a> {
/// The content to add before the insertion.
prefix: &'a str,
/// The location at which to insert.
location: TextSize,
/// The content to add after the insertion.
suffix: &'a str,
/// The line placement of insertion.
placement: Placement<'a>,
}
impl<'a> Insertion<'a> {
/// Create an [`Insertion`] to insert (e.g.) an import statement at the start of a given
/// file or cell, along with a prefix and suffix to use for the insertion.
///
/// For example, given the following code:
///
/// ```python
/// """Hello, world!"""
///
/// import os
/// ```
///
/// The insertion returned will begin at the start of the `import os` statement, and will
/// include a trailing newline.
///
/// If `within_range` is set, the insertion will be limited to the specified range. That is,
/// the insertion is constrained to the given range rather than the start of the file.
/// This is used for insertions in notebook cells where the source code and AST are for
/// the entire notebook but the insertion should be constrained to a specific cell.
pub fn start_of_file(
body: &[Stmt],
contents: &str,
stylist: &Stylist,
within_range: Option<TextRange>,
) -> Insertion<'static> {
// Constrain `body` to the statements that fall entirely within
// `within_range`, if one was given.
let body = within_range
.map(|range| {
let start = body.partition_point(|stmt| stmt.start() < range.start());
let end = body.partition_point(|stmt| stmt.end() <= range.end());
&body[start..end]
})
.unwrap_or(body);
// Skip over any docstrings.
let mut location = if let Some(mut location) = match_docstring_end(body) {
// If the first token after the docstring is a semicolon, insert after the semicolon as
// an inline statement.
if let Some(offset) = match_semicolon(&contents[location.to_usize()..]) {
return Insertion::inline(" ", location.add(offset).add(TextSize::of(';')), ";");
}
// While the first token after the docstring is a continuation character (i.e. "\"), advance
// additional rows to prevent inserting in the same logical line.
while match_continuation(&contents[location.to_usize()..]).is_some() {
location = contents.full_line_end(location);
}
// Otherwise, advance to the next row.
contents.full_line_end(location)
} else if let Some(range) = within_range
&& range.start() != TextSize::ZERO
{
range.start()
} else {
contents.bom_start_offset()
};
// Skip over commented lines, with whitespace separation.
for line in
UniversalNewlineIterator::with_offset(&contents[location.to_usize()..], location)
{
let trimmed_line = line.trim_whitespace_start();
if trimmed_line.is_empty() {
continue;
}
if trimmed_line.starts_with('#') {
location = line.full_end();
} else {
break;
}
}
Insertion::own_line("", location, stylist.line_ending().as_str())
}
/// Create an [`Insertion`] to insert (e.g.) an import after the end of the given
/// [`Stmt`], along with a prefix and suffix to use for the insertion.
///
/// For example, given the following code:
///
/// ```python
/// """Hello, world!"""
///
/// import os
/// import math
///
///
/// def foo():
///     pass
/// ```
///
/// The insertion returned will begin after the newline after the last import statement, which
/// in this case is the line after `import math`, and will include a trailing newline.
///
/// The statement itself is assumed to be at the top-level of the module.
pub fn end_of_statement(stmt: &Stmt, contents: &str, stylist: &Stylist) -> Insertion<'static> {
let location = stmt.end();
if let Some(offset) = match_semicolon(&contents[location.to_usize()..]) {
// If the first token after the statement is a semicolon, insert after the semicolon as
// an inline statement.
Insertion::inline(" ", location.add(offset).add(TextSize::of(';')), ";")
} else if match_continuation(&contents[location.to_usize()..]).is_some() {
// If the first token after the statement is a continuation, insert after the statement
// with a semicolon.
Insertion::inline("; ", location, "")
} else {
// Otherwise, insert on the next line.
Insertion::own_line(
"",
contents.full_line_end(location),
stylist.line_ending().as_str(),
)
}
}
/// Create an [`Insertion`] to insert an additional member to import
/// into a `from <module> import member1, member2, ...` statement.
///
/// For example, given the following code:
///
/// ```python
/// """Hello, world!"""
///
/// from collections import Counter
///
///
/// def foo():
///     pass
/// ```
///
/// The insertion returned will begin after `Counter` but before the
/// newline terminator. Callers can then call [`Insertion::into_edit`]
/// with the additional member to add. A comma delimiter is handled
/// automatically.
///
/// The statement itself is assumed to be at the top-level of the module.
///
/// This returns `None` when `stmt` isn't a `from ... import ...`
/// statement.
pub fn existing_import(stmt: &Stmt, tokens: &Tokens) -> Option<Insertion<'static>> {
let Stmt::ImportFrom(ref import_from) = *stmt else {
return None;
};
// Common case: append `, <member>` after the last existing member.
if let Some(at) = import_from.names.last().map(Ranged::end) {
return Some(Insertion::inline(", ", at, ""));
}
// Our AST can deal with partial `from ... import`
// statements, so we might not have any members
// yet. In this case, we don't need the comma.
//
// ... however, unless we can be certain that
// inserting this name leads to a valid AST, we
// give up.
let at = import_from.end();
if !matches!(
tokens
.before(at)
.last()
.map(ruff_python_ast::token::Token::kind),
Some(TokenKind::Import)
) {
return None;
}
Some(Insertion::inline(" ", at, ""))
}
/// Create an [`Insertion`] to insert (e.g.) an import statement at the start of a given
/// block, along with a prefix and suffix to use for the insertion.
///
/// For example, given the following code:
///
/// ```python
/// if TYPE_CHECKING:
///     import os
/// ```
///
/// The insertion returned will begin at the start of the `import os` statement, and will
/// include a trailing newline.
///
/// The block itself is assumed to be at the top-level of the module.
pub fn start_of_block(
mut location: TextSize,
contents: &'a str,
stylist: &Stylist,
tokens: &Tokens,
) -> Insertion<'a> {
// Scanner state: which token we're waiting to see next.
enum Awaiting {
// Waiting for the `:` that opens the block body; the payload tracks
// bracket nesting depth so colons inside `()`/`[]`/`{}` are ignored.
Colon(u32),
// Saw the colon; waiting for a newline (otherwise the body is inline).
Newline,
// Saw the newline; waiting for the body's indentation token.
Indent,
}
let mut state = Awaiting::Colon(0);
for token in tokens.after(location) {
match state {
// Iterate until we find the colon indicating the start of the block body.
Awaiting::Colon(depth) => match token.kind() {
TokenKind::Colon if depth == 0 => {
state = Awaiting::Newline;
}
TokenKind::Lpar | TokenKind::Lbrace | TokenKind::Lsqb => {
state = Awaiting::Colon(depth.saturating_add(1));
}
TokenKind::Rpar | TokenKind::Rbrace | TokenKind::Rsqb => {
state = Awaiting::Colon(depth.saturating_sub(1));
}
_ => {}
},
// Once we've seen the colon, we're looking for a newline; otherwise, there's no
// block body (e.g. `if True: pass`).
Awaiting::Newline => match token.kind() {
TokenKind::Comment => {}
TokenKind::Newline => {
state = Awaiting::Indent;
}
_ => {
location = token.start();
break;
}
},
// Once we've seen the newline, we're looking for the indentation of the block body.
Awaiting::Indent => match token.kind() {
TokenKind::Comment => {}
TokenKind::NonLogicalNewline => {}
TokenKind::Indent => {
// This is like:
// ```python
// if True:
//     pass
// ```
// Where `range` is the indentation before the `pass` token.
return Insertion::indented(
"",
token.start(),
stylist.line_ending().as_str(),
&contents[token.range()],
);
}
_ => {
location = token.start();
break;
}
},
}
}
// This is like: `if True: pass`, where `location` is the start of the `pass` token.
Insertion::inline("", location, "; ")
}
/// Convert this [`Insertion`] into an [`Edit`] that inserts the given content.
pub fn into_edit(self, content: &str) -> Edit {
let Insertion {
prefix,
location,
suffix,
placement,
} = self;
let content = format!("{prefix}{content}{suffix}");
Edit::insertion(
// Indent the rendered content only for non-empty indented placements.
match placement {
Placement::Indented(indentation) if !indentation.is_empty() => {
indent(&content, indentation).to_string()
}
_ => content,
},
location,
)
}
/// Returns `true` if this [`Insertion`] is inline.
pub fn is_inline(&self) -> bool {
matches!(self.placement, Placement::Inline)
}
/// Create an [`Insertion`] that inserts content inline (i.e., within semicolon-delimited
/// statements).
fn inline(prefix: &'a str, location: TextSize, suffix: &'a str) -> Self {
Self {
prefix,
location,
suffix,
placement: Placement::Inline,
}
}
/// Create an [`Insertion`] that starts on its own line.
fn own_line(prefix: &'a str, location: TextSize, suffix: &'a str) -> Self {
Self {
prefix,
location,
suffix,
placement: Placement::OwnLine,
}
}
/// Create an [`Insertion`] that starts on its own line, with the given indentation.
fn indented(
prefix: &'a str,
location: TextSize,
suffix: &'a str,
indentation: &'a str,
) -> Self {
Self {
prefix,
location,
suffix,
placement: Placement::Indented(indentation),
}
}
}
/// Find the end of the docstring (first string statement).
fn match_docstring_end(body: &[Stmt]) -> Option<TextSize> {
    match body.first() {
        // Only a leading docstring statement counts; anything else means
        // there's no docstring to skip over.
        Some(stmt) if is_docstring_stmt(stmt) => Some(stmt.end()),
        _ => None,
    }
}
/// If the next token is a semicolon, return its offset.
fn match_semicolon(s: &str) -> Option<TextSize> {
    // Locate the first character that isn't Python whitespace; bail if none.
    let (offset, c) = s.char_indices().find(|&(_, c)| !is_python_whitespace(c))?;
    (c == ';').then(|| TextSize::try_from(offset).unwrap())
}
/// If the next token is a continuation (`\`), return its offset.
fn match_continuation(s: &str) -> Option<TextSize> {
    // Locate the first character that isn't Python whitespace; bail if none.
    let (offset, c) = s.char_indices().find(|&(_, c)| !is_python_whitespace(c))?;
    (c == '\\').then(|| TextSize::try_from(offset).unwrap())
}
#[cfg(test)]
mod tests {
use anyhow::Result;
use ruff_python_codegen::Stylist;
use ruff_python_parser::parse_module;
use ruff_source_file::LineEnding;
use ruff_text_size::{Ranged, TextSize};
use super::Insertion;
#[test]
fn start_of_file() -> Result<()> {
// Parse `contents` and compute the start-of-file insertion point.
fn insert(contents: &str) -> Result<Insertion<'_>> {
let parsed = parse_module(contents)?;
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
Ok(Insertion::start_of_file(
parsed.suite(),
contents,
&stylist,
None,
))
}
let contents = "";
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(0), LineEnding::default().as_str())
);
let contents = r#"
"""Hello, world!""""#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(19), LineEnding::default().as_str())
);
let contents = r#"
"""Hello, world!"""
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(20), "\n")
);
// Only the first string statement counts as the docstring.
let contents = r#"
"""Hello, world!"""
"""Hello, world!"""
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(20), "\n")
);
let contents = r#"
"""Hello, world!"""\
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(22), "\n")
);
let contents = r#"
"""Hello, world!"""\
\
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(24), "\n")
);
let contents = r"
x = 1
"
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(0), "\n")
);
// Leading comments (e.g. shebangs) are skipped over.
let contents = r"
#!/usr/bin/env python3
"
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(23), "\n")
);
let contents = r#"
#!/usr/bin/env python3
"""Hello, world!"""
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(43), "\n")
);
let contents = r#"
"""Hello, world!"""
#!/usr/bin/env python3
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(43), "\n")
);
// A string expression used in a larger statement is not a docstring.
let contents = r#"
"""%s""" % "Hello, world!"
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(0), "\n")
);
let contents = r#"
"""Hello, world!"""; x = 1
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::inline(" ", TextSize::from(20), ";")
);
let contents = r#"
"""Hello, world!"""; x = 1; y = \
    2
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::inline(" ", TextSize::from(20), ";")
);
Ok(())
}
#[test]
fn start_of_block() {
// Compute the start-of-block insertion at `offset` within `contents`.
fn insert(contents: &str, offset: TextSize) -> Insertion<'_> {
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
Insertion::start_of_block(offset, contents, &stylist, parsed.tokens())
}
let contents = "if True: pass";
assert_eq!(
insert(contents, TextSize::from(0)),
Insertion::inline("", TextSize::from(9), "; ")
);
let contents = r"
if True:
    pass
"
.trim_start();
assert_eq!(
insert(contents, TextSize::from(0)),
Insertion::indented("", TextSize::from(9), "\n", "    ")
);
}
#[test]
fn existing_import_works() {
// Apply the `existing_import` edit for `member` to `content` and
// return the patched source for snapshotting.
fn snapshot(content: &str, member: &str) -> String {
let parsed = parse_module(content).unwrap();
let edit = Insertion::existing_import(parsed.suite().first().unwrap(), parsed.tokens())
.unwrap()
.into_edit(member);
let insert_text = edit.content().expect("edit should be non-empty");
let mut content = content.to_string();
content.replace_range(edit.range().to_std_range(), insert_text);
content
}
let source = r#"
from collections import Counter
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@r"
from collections import Counter, defaultdict
",
);
let source = r#"
from collections import Counter, OrderedDict
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@r"
from collections import Counter, OrderedDict, defaultdict
",
);
let source = r#"
from collections import (Counter)
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@"from collections import (Counter, defaultdict)",
);
let source = r#"
from collections import (Counter, OrderedDict)
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@"from collections import (Counter, OrderedDict, defaultdict)",
);
let source = r#"
from collections import (Counter,)
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@"from collections import (Counter, defaultdict,)",
);
let source = r#"
from collections import (Counter, OrderedDict,)
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@"from collections import (Counter, OrderedDict, defaultdict,)",
);
let source = r#"
from collections import (
    Counter
)
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@r"
from collections import (
    Counter, defaultdict
)
",
);
let source = r#"
from collections import (
    Counter,
)
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@r"
from collections import (
    Counter, defaultdict,
)
",
);
let source = r#"
from collections import (
    Counter,
    OrderedDict
)
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@r"
from collections import (
    Counter,
    OrderedDict, defaultdict
)
",
);
let source = r#"
from collections import (
    Counter,
    OrderedDict,
)
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@r"
from collections import (
    Counter,
    OrderedDict, defaultdict,
)
",
);
let source = r#"
from collections import \
    Counter
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@r"
from collections import \
    Counter, defaultdict
",
);
let source = r#"
from collections import \
    Counter, OrderedDict
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@r"
from collections import \
    Counter, OrderedDict, defaultdict
",
);
let source = r#"
from collections import \
    Counter, \
    OrderedDict
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@r"
from collections import \
    Counter, \
    OrderedDict, defaultdict
",
);
/*
from collections import (
    Collector  # comment
)
from collections import (
    Collector,  # comment
)
from collections import (
    Collector  # comment
    ,
)
from collections import (
    Collector
    # comment
    ,
)
*/
let source = r#"
from collections import (
    Counter  # comment
)
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@r"
from collections import (
    Counter, defaultdict  # comment
)
",
);
let source = r#"
from collections import (
    Counter,  # comment
)
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@r"
from collections import (
    Counter, defaultdict,  # comment
)
",
);
let source = r#"
from collections import (
    Counter  # comment
    ,
)
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@r"
from collections import (
    Counter, defaultdict  # comment
    ,
)
",
);
let source = r#"
from collections import (
    Counter
    # comment
    ,
)
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@r"
from collections import (
    Counter, defaultdict
    # comment
    ,
)
",
);
let source = r#"
from collections import (
    # comment 1
    Counter  # comment 2
    # comment 3
)
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@r"
from collections import (
    # comment 1
    Counter, defaultdict  # comment 2
    # comment 3
)
",
);
let source = r#"
from collections import Counter  # comment
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@"from collections import Counter, defaultdict  # comment",
);
let source = r#"
from collections import Counter, OrderedDict  # comment
"#;
insta::assert_snapshot!(
snapshot(source, "defaultdict"),
@"from collections import Counter, OrderedDict, defaultdict  # comment",
);
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_literal/src/cformat.rs | crates/ruff_python_literal/src/cformat.rs | //! Implementation of Printf-Style string formatting
//! as per the [Python Docs](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting).
use std::{
fmt,
iter::{Enumerate, Peekable},
str::FromStr,
};
use bitflags::bitflags;
use crate::Case;
/// Categories of failure while parsing a `%`-style conversion specifier.
#[derive(Debug, PartialEq)]
pub enum CFormatErrorType {
    // `%(key` with no matching `)`.
    UnmatchedKeyParentheses,
    // Input did not begin with `%` (raised by `CFormatSpec::from_str`).
    MissingModuloSign,
    // Conversion character outside the supported set (see `parse_format_type`).
    UnsupportedFormatChar(char),
    // Input ended before the conversion character.
    IncompleteFormat,
    // Width/precision overflowed `i32` (see `parse_quantity`).
    IntTooBig,
    // Unimplemented,
}
// also contains how many chars the parsing function consumed
pub type ParsingError = (CFormatErrorType, usize);
/// A parse failure together with the byte index it occurred at; the
/// error type surfaced by the top-level `CFormatString`/`CFormatBytes` parsers.
#[derive(Debug, PartialEq)]
pub struct CFormatError {
    pub typ: CFormatErrorType, // FIXME
    pub index: usize,
}
impl fmt::Display for CFormatError {
    // Mirrors CPython's error wording for printf-style format strings.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use CFormatErrorType::{
            IncompleteFormat, IntTooBig, UnmatchedKeyParentheses, UnsupportedFormatChar,
        };
        match self.typ {
            UnmatchedKeyParentheses => write!(f, "incomplete format key"),
            IncompleteFormat => write!(f, "incomplete format"),
            UnsupportedFormatChar(c) => write!(
                f,
                "unsupported format character '{}' ({:#x}) at index {}",
                c, c as u32, self.index
            ),
            IntTooBig => write!(f, "width/precision too big"),
            CFormatErrorType::MissingModuloSign => {
                write!(f, "unexpected error parsing format string")
            }
        }
    }
}
// Conversion kind (`s`/`r`/`a`/`b`) shared with the new-style `format` module.
pub type CFormatConversion = super::format::FormatConversion;
/// Integer radix selected by the conversion char: `d`/`i`/`u`, `o`, `x`/`X`.
#[derive(Debug, PartialEq)]
pub enum CNumberType {
    Decimal,
    Octal,
    Hex(Case),
}
/// Float style selected by the conversion char: `e`/`E`, `f`/`F`, `g`/`G`.
#[derive(Debug, PartialEq)]
pub enum CFloatType {
    Exponent(Case),
    PointDecimal(Case),
    General(Case),
}
/// The conversion type clause of a specifier (its final character).
#[derive(Debug, PartialEq)]
pub enum CFormatType {
    Number(CNumberType),
    Float(CFloatType),
    Character,
    String(CFormatConversion),
}
/// Precision clause: either `.<n>`/`.*`, or a bare `.` with no digits.
#[derive(Debug, PartialEq)]
pub enum CFormatPrecision {
    Quantity(CFormatQuantity),
    Dot,
}
impl From<CFormatQuantity> for CFormatPrecision {
    fn from(quantity: CFormatQuantity) -> Self {
        CFormatPrecision::Quantity(quantity)
    }
}
bitflags! {
    /// Flag characters accepted between `%` and the width: `#0- +`.
    #[derive(Copy, Clone, Debug, PartialEq)]
    pub struct CConversionFlags: u32 {
        const ALTERNATE_FORM = 1 << 0;
        const ZERO_PAD = 1 << 1;
        const LEFT_ADJUST = 1 << 2;
        const BLANK_SIGN = 1 << 3;
        const SIGN_CHAR = 1 << 4;
    }
}
/// A width or precision: a literal number, or `*` (taken from the values tuple).
#[derive(Debug, PartialEq)]
pub enum CFormatQuantity {
    Amount(usize),
    FromValuesTuple,
}
/// One fully parsed `%` conversion specifier.
#[derive(Debug, PartialEq)]
pub struct CFormatSpec {
    pub mapping_key: Option<String>,
    pub flags: CConversionFlags,
    pub min_field_width: Option<CFormatQuantity>,
    pub precision: Option<CFormatPrecision>,
    pub format_type: CFormatType,
    pub format_char: char,
    // chars_consumed: usize,
}
impl FromStr for CFormatSpec {
    type Err = ParsingError;

    /// Parses a single specifier such as `"%(key)-10.2f"`; the text must
    /// start with the `%` sigil.
    fn from_str(text: &str) -> Result<Self, Self::Err> {
        let mut chars = text.chars().enumerate().peekable();
        match chars.next() {
            Some((_, '%')) => CFormatSpec::parse(&mut chars),
            _ => Err((CFormatErrorType::MissingModuloSign, 1)),
        }
    }
}
// A peekable iterator that also yields each item's position in the input.
pub type ParseIter<I> = Peekable<Enumerate<I>>;
impl CFormatSpec {
    /// Parses one conversion specifier, assuming the leading `%` has already
    /// been consumed. Clauses are read in printf order: mapping key, flags,
    /// minimum field width, precision, length modifier (accepted but
    /// ignored), then the conversion character.
    pub fn parse<T, I>(iter: &mut ParseIter<I>) -> Result<Self, ParsingError>
    where
        T: Into<char> + Copy,
        I: Iterator<Item = T>,
    {
        let mapping_key = parse_spec_mapping_key(iter)?;
        let flags = parse_flags(iter);
        let min_field_width = parse_quantity(iter)?;
        let precision = parse_precision(iter)?;
        consume_length(iter);
        let (format_type, format_char) = parse_format_type(iter)?;
        Ok(CFormatSpec {
            mapping_key,
            flags,
            min_field_width,
            precision,
            format_type,
            format_char,
        })
    }
}
/// Parses the optional `(key)` clause. Returns `Ok(None)` when the spec does
/// not open with `(`, and an `UnmatchedKeyParentheses` error (at the opening
/// parenthesis) when the key is never closed.
fn parse_spec_mapping_key<T, I>(iter: &mut ParseIter<I>) -> Result<Option<String>, ParsingError>
where
    T: Into<char> + Copy,
    I: Iterator<Item = T>,
{
    let Some(&(index, c)) = iter.peek() else {
        return Ok(None);
    };
    if c.into() != '(' {
        return Ok(None);
    }
    iter.next().unwrap(); // consume the opening parenthesis
    parse_text_inside_parentheses(iter)
        .map(Some)
        .ok_or((CFormatErrorType::UnmatchedKeyParentheses, index))
}
/// Consumes any run of flag characters (`#0- +`), stopping at the first
/// non-flag. Repeats and ordering are irrelevant, as in C.
fn parse_flags<T, I>(iter: &mut ParseIter<I>) -> CConversionFlags
where
    T: Into<char> + Copy,
    I: Iterator<Item = T>,
{
    let mut flags = CConversionFlags::empty();
    loop {
        let Some(&(_, c)) = iter.peek() else { break };
        let flag = match c.into() {
            '#' => CConversionFlags::ALTERNATE_FORM,
            '0' => CConversionFlags::ZERO_PAD,
            '-' => CConversionFlags::LEFT_ADJUST,
            ' ' => CConversionFlags::BLANK_SIGN,
            '+' => CConversionFlags::SIGN_CHAR,
            _ => break,
        };
        iter.next().unwrap();
        flags.insert(flag);
    }
    flags
}
fn consume_length<T, I>(iter: &mut ParseIter<I>)
where
T: Into<char> + Copy,
I: Iterator<Item = T>,
{
if let Some(&(_, c)) = iter.peek() {
let c = c.into();
if c == 'h' || c == 'l' || c == 'L' {
iter.next().unwrap();
}
}
}
/// Reads the mandatory conversion character and maps it to a `CFormatType`.
/// Returns the type together with the raw character (kept for error display
/// and round-tripping).
fn parse_format_type<T, I>(iter: &mut ParseIter<I>) -> Result<(CFormatType, char), ParsingError>
where
    T: Into<char>,
    I: Iterator<Item = T>,
{
    use CFloatType::{Exponent, General, PointDecimal};
    use CNumberType::{Decimal, Hex, Octal};
    let (index, c) = match iter.next() {
        Some((index, c)) => (index, c.into()),
        None => {
            // NOTE(review): after `next()` returned `None`, `peek()` is also
            // `None`, so this error index is always 0 — confirm whether a
            // real position was intended here.
            return Err((
                CFormatErrorType::IncompleteFormat,
                iter.peek().map_or(0, |x| x.0),
            ));
        }
    };
    let format_type = match c {
        'd' | 'i' | 'u' => CFormatType::Number(Decimal),
        'o' => CFormatType::Number(Octal),
        'x' => CFormatType::Number(Hex(Case::Lower)),
        'X' => CFormatType::Number(Hex(Case::Upper)),
        'e' => CFormatType::Float(Exponent(Case::Lower)),
        'E' => CFormatType::Float(Exponent(Case::Upper)),
        'f' => CFormatType::Float(PointDecimal(Case::Lower)),
        'F' => CFormatType::Float(PointDecimal(Case::Upper)),
        'g' => CFormatType::Float(General(Case::Lower)),
        'G' => CFormatType::Float(General(Case::Upper)),
        'c' => CFormatType::Character,
        'r' => CFormatType::String(CFormatConversion::Repr),
        's' => CFormatType::String(CFormatConversion::Str),
        'b' => CFormatType::String(CFormatConversion::Bytes),
        'a' => CFormatType::String(CFormatConversion::Ascii),
        _ => return Err((CFormatErrorType::UnsupportedFormatChar(c), index)),
    };
    Ok((format_type, c))
}
// Parses an optional width/precision quantity: `*` (take it from the values
// tuple) or a decimal number. The number is accumulated in an `i32` with
// checked arithmetic, so anything above `i32::MAX` raises `IntTooBig` at the
// offending digit's index; `num` is never negative, so `unsigned_abs` below
// is just a lossless i32 -> u32 conversion.
#[expect(clippy::cast_possible_wrap)]
fn parse_quantity<T, I>(iter: &mut ParseIter<I>) -> Result<Option<CFormatQuantity>, ParsingError>
where
    T: Into<char> + Copy,
    I: Iterator<Item = T>,
{
    if let Some(&(_, c)) = iter.peek() {
        let c: char = c.into();
        if c == '*' {
            iter.next().unwrap();
            return Ok(Some(CFormatQuantity::FromValuesTuple));
        }
        if let Some(i) = c.to_digit(10) {
            let mut num = i as i32;
            iter.next().unwrap();
            // Fold in subsequent digits until a non-digit is peeked.
            while let Some(&(index, c)) = iter.peek() {
                if let Some(i) = c.into().to_digit(10) {
                    num = num
                        .checked_mul(10)
                        .and_then(|num| num.checked_add(i as i32))
                        .ok_or((CFormatErrorType::IntTooBig, index))?;
                    iter.next().unwrap();
                } else {
                    break;
                }
            }
            return Ok(Some(CFormatQuantity::Amount(num.unsigned_abs() as usize)));
        }
    }
    Ok(None)
}
fn parse_precision<T, I>(iter: &mut ParseIter<I>) -> Result<Option<CFormatPrecision>, ParsingError>
where
T: Into<char> + Copy,
I: Iterator<Item = T>,
{
if let Some(&(_, c)) = iter.peek() {
if c.into() == '.' {
iter.next().unwrap();
let quantity = parse_quantity(iter)?;
let precision = quantity.map_or(CFormatPrecision::Dot, CFormatPrecision::Quantity);
return Ok(Some(precision));
}
}
Ok(None)
}
/// Collects everything up to the parenthesis matching an already-consumed
/// `(`, allowing balanced nested pairs inside. Returns `None` if the input
/// ends before the matching `)` is found.
fn parse_text_inside_parentheses<T, I>(iter: &mut ParseIter<I>) -> Option<String>
where
    T: Into<char>,
    I: Iterator<Item = T>,
{
    let mut depth: i32 = 1;
    let mut contents = String::new();
    loop {
        let (_, c) = iter.next()?;
        let c = c.into();
        match c {
            '(' => depth += 1,
            ')' => depth -= 1,
            _ => {}
        }
        if depth == 0 {
            // The final `)` is not part of the key text.
            return Some(contents);
        }
        contents.push(c);
    }
}
/// One piece of a parsed format string: literal text or a `%` specifier.
#[derive(Debug, PartialEq)]
pub enum CFormatPart<T> {
    Literal(T),
    Spec(CFormatSpec),
}
/// A fully parsed printf-style format string, generic over `String` (str
/// formatting) or `Vec<u8>` (bytes formatting). Each part is stored with its
/// starting index in the original input.
#[derive(Debug, PartialEq)]
pub struct CFormatStrOrBytes<S> {
    parts: Vec<(usize, CFormatPart<S>)>,
}
impl<S> CFormatStrOrBytes<S> {
    /// Iterates `(start_index, part)` pairs in source order.
    #[inline]
    pub fn iter(&self) -> impl Iterator<Item = &(usize, CFormatPart<S>)> {
        self.parts.iter()
    }
    /// Mutable variant of [`Self::iter`].
    #[inline]
    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut (usize, CFormatPart<S>)> {
        self.parts.iter_mut()
    }
}
pub type CFormatBytes = CFormatStrOrBytes<Vec<u8>>;
impl CFormatBytes {
    /// Splits a bytes format string into literal runs and `%` specifiers.
    /// `%%` is unescaped into a literal `%`; a trailing lone `%` is an
    /// `IncompleteFormat` error.
    pub fn parse<I: Iterator<Item = u8>>(iter: &mut ParseIter<I>) -> Result<Self, CFormatError> {
        let mut parts = vec![];
        // Literal bytes accumulated since the last flushed part.
        let mut literal = vec![];
        // Start index of the literal currently being accumulated.
        let mut part_index = 0;
        while let Some((index, c)) = iter.next() {
            if c == b'%' {
                if let Some(&(_, second)) = iter.peek() {
                    if second == b'%' {
                        // `%%` escapes to a single literal `%`.
                        iter.next().unwrap();
                        literal.push(b'%');
                        continue;
                    }
                    // Flush the pending literal before the spec.
                    if !literal.is_empty() {
                        parts.push((
                            part_index,
                            CFormatPart::Literal(std::mem::take(&mut literal)),
                        ));
                    }
                    let spec = CFormatSpec::parse(iter).map_err(|err| CFormatError {
                        typ: err.0,
                        index: err.1,
                    })?;
                    parts.push((index, CFormatPart::Spec(spec)));
                    // The next literal (if any) starts right after the spec.
                    if let Some(&(index, _)) = iter.peek() {
                        part_index = index;
                    }
                } else {
                    return Err(CFormatError {
                        typ: CFormatErrorType::IncompleteFormat,
                        index: index + 1,
                    });
                }
            } else {
                literal.push(c);
            }
        }
        // Flush any trailing literal.
        if !literal.is_empty() {
            parts.push((part_index, CFormatPart::Literal(literal)));
        }
        Ok(Self { parts })
    }
}
pub type CFormatString = CFormatStrOrBytes<String>;
impl FromStr for CFormatString {
    type Err = CFormatError;

    /// Parses a complete printf-style format string, e.g. `"%(name)s: %d"`.
    fn from_str(text: &str) -> Result<Self, Self::Err> {
        Self::parse(&mut text.chars().enumerate().peekable())
    }
}
impl CFormatString {
    /// Splits a format string into literal runs and `%` specifiers. `%%` is
    /// unescaped into a literal `%`; a trailing lone `%` is an
    /// `IncompleteFormat` error. Mirrors `CFormatBytes::parse` over `char`s.
    pub fn parse<I: Iterator<Item = char>>(iter: &mut ParseIter<I>) -> Result<Self, CFormatError> {
        let mut parts = vec![];
        // Literal text accumulated since the last flushed part.
        let mut literal = String::new();
        // Start index of the literal currently being accumulated.
        let mut part_index = 0;
        while let Some((index, c)) = iter.next() {
            if c == '%' {
                if let Some(&(_, second)) = iter.peek() {
                    if second == '%' {
                        // `%%` escapes to a single literal `%`.
                        iter.next().unwrap();
                        literal.push('%');
                        continue;
                    }
                    // Flush the pending literal before the spec.
                    if !literal.is_empty() {
                        parts.push((
                            part_index,
                            CFormatPart::Literal(std::mem::take(&mut literal)),
                        ));
                    }
                    let spec = CFormatSpec::parse(iter).map_err(|err| CFormatError {
                        typ: err.0,
                        index: err.1,
                    })?;
                    parts.push((index, CFormatPart::Spec(spec)));
                    // The next literal (if any) starts right after the spec.
                    if let Some(&(index, _)) = iter.peek() {
                        part_index = index;
                    }
                } else {
                    return Err(CFormatError {
                        typ: CFormatErrorType::IncompleteFormat,
                        index: index + 1,
                    });
                }
            } else {
                literal.push(c);
            }
        }
        // Flush any trailing literal.
        if !literal.is_empty() {
            parts.push((part_index, CFormatPart::Literal(literal)));
        }
        Ok(Self { parts })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Mapping keys: plain, and with balanced nested parentheses.
    #[test]
    fn test_parse_key() {
        let expected = Ok(CFormatSpec {
            mapping_key: Some("amount".to_owned()),
            format_type: CFormatType::Number(CNumberType::Decimal),
            format_char: 'd',
            min_field_width: None,
            precision: None,
            flags: CConversionFlags::empty(),
        });
        assert_eq!("%(amount)d".parse::<CFormatSpec>(), expected);
        let expected = Ok(CFormatSpec {
            mapping_key: Some("m((u(((l((((ti))))p)))l))e".to_owned()),
            format_type: CFormatType::Number(CNumberType::Decimal),
            format_char: 'd',
            min_field_width: None,
            precision: None,
            flags: CConversionFlags::empty(),
        });
        assert_eq!(
            "%(m((u(((l((((ti))))p)))l))e)d".parse::<CFormatSpec>(),
            expected
        );
    }
    // An unclosed `(` reports UnmatchedKeyParentheses at the paren's index.
    #[test]
    fn test_format_parse_key_fail() {
        assert_eq!(
            "%(aged".parse::<CFormatString>(),
            Err(CFormatError {
                typ: CFormatErrorType::UnmatchedKeyParentheses,
                index: 1
            })
        );
    }
    // An unknown conversion char reports its own index.
    #[test]
    fn test_format_parse_type_fail() {
        assert_eq!(
            "Hello %n".parse::<CFormatString>(),
            Err(CFormatError {
                typ: CFormatErrorType::UnsupportedFormatChar('n'),
                index: 7
            })
        );
    }
    // A trailing lone `%` is incomplete; index points past the `%`.
    #[test]
    fn test_incomplete_format_fail() {
        assert_eq!(
            "Hello %".parse::<CFormatString>(),
            Err(CFormatError {
                typ: CFormatErrorType::IncompleteFormat,
                index: 7
            })
        );
    }
    // Repeated/reordered flags all accumulate into the same flag set.
    #[test]
    fn test_parse_flags() {
        let expected = Ok(CFormatSpec {
            format_type: CFormatType::Number(CNumberType::Decimal),
            format_char: 'd',
            min_field_width: Some(CFormatQuantity::Amount(10)),
            precision: None,
            mapping_key: None,
            flags: CConversionFlags::all(),
        });
        let parsed = "% 0 -+++###10d".parse::<CFormatSpec>();
        assert_eq!(parsed, expected);
    }
    // Full split into literals and specs, with part start indices.
    #[test]
    fn test_format_parse() {
        let fmt = "Hello, my name is %s and I'm %d years old";
        let expected = Ok(CFormatString {
            parts: vec![
                (0, CFormatPart::Literal("Hello, my name is ".to_owned())),
                (
                    18,
                    CFormatPart::Spec(CFormatSpec {
                        format_type: CFormatType::String(CFormatConversion::Str),
                        format_char: 's',
                        mapping_key: None,
                        min_field_width: None,
                        precision: None,
                        flags: CConversionFlags::empty(),
                    }),
                ),
                (20, CFormatPart::Literal(" and I'm ".to_owned())),
                (
                    29,
                    CFormatPart::Spec(CFormatSpec {
                        format_type: CFormatType::Number(CNumberType::Decimal),
                        format_char: 'd',
                        mapping_key: None,
                        min_field_width: None,
                        precision: None,
                        flags: CConversionFlags::empty(),
                    }),
                ),
                (31, CFormatPart::Literal(" years old".to_owned())),
            ],
        });
        let result = fmt.parse::<CFormatString>();
        assert_eq!(
            result, expected,
            "left = {result:#?} \n\n\n right = {expected:#?}"
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_literal/src/lib.rs | crates/ruff_python_literal/src/lib.rs | pub mod cformat;
pub mod char;
pub mod escape;
pub mod float;
pub mod format;
/// Letter case selector shared by the numeric/float conversion types
/// (e.g. `x` vs `X`, `e` vs `E`).
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum Case {
    Lower,
    Upper,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_literal/src/char.rs | crates/ruff_python_literal/src/char.rs | use unic_ucd_category::GeneralCategory;
/// According to python following categories aren't printable:
/// * Cc (Other, Control)
/// * Cf (Other, Format)
/// * Cs (Other, Surrogate)
/// * Co (Other, Private Use)
/// * Cn (Other, Not Assigned)
/// * Zl Separator, Line ('\u2028', LINE SEPARATOR)
/// * Zp Separator, Paragraph ('\u2029', PARAGRAPH SEPARATOR)
/// * Zs (Separator, Space) other than ASCII space('\x20').
pub fn is_printable(c: char) -> bool {
    let category = GeneralCategory::of(c);
    // Printable = neither an "Other" (C*) nor a "Separator" (Z*) category.
    !category.is_other() && !category.is_separator()
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_literal/src/format.rs | crates/ruff_python_literal/src/format.rs | use itertools::{Itertools, PeekingNext};
use std::error::Error;
use std::str::FromStr;
use crate::Case;
/// Incremental-parsing interface: each implementor tries to read its clause
/// from the front of `text`, returning the parsed value (or `None`) together
/// with the remaining input.
trait FormatParse {
    fn parse(text: &str) -> (Option<Self>, &str)
    where
        Self: Sized;
}
/// Conversion marker in a replacement field: `!s`, `!r`, `!a`, or `!b`.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum FormatConversion {
    Str,
    Repr,
    Ascii,
    Bytes,
}
impl FormatParse for FormatConversion {
    fn parse(text: &str) -> (Option<Self>, &str) {
        match Self::from_string(text) {
            // `from_string` only succeeds when `text` starts with `!` followed
            // by a valid conversion char; both are ASCII, i.e. two bytes.
            Some(conversion) => (Some(conversion), &text[2..]),
            None => (None, text),
        }
    }
}
impl FormatConversion {
    /// Maps a conversion character to its variant, `None` for anything else.
    pub fn from_char(c: char) -> Option<FormatConversion> {
        match c {
            's' => Some(Self::Str),
            'r' => Some(Self::Repr),
            'a' => Some(Self::Ascii),
            'b' => Some(Self::Bytes),
            _ => None,
        }
    }

    /// Recognizes a leading `!c` conversion marker at the start of `text`.
    fn from_string(text: &str) -> Option<FormatConversion> {
        let rest = text.strip_prefix('!')?;
        FormatConversion::from_char(rest.chars().next()?)
    }
}
/// Alignment clause of a format spec: `<`, `>`, `=`, or `^`.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum FormatAlign {
    Left,
    Right,
    AfterSign,
    Center,
}
impl FormatAlign {
    // Maps an alignment character to its variant, `None` for anything else.
    fn from_char(c: char) -> Option<FormatAlign> {
        match c {
            '<' => Some(FormatAlign::Left),
            '>' => Some(FormatAlign::Right),
            '=' => Some(FormatAlign::AfterSign),
            '^' => Some(FormatAlign::Center),
            _ => None,
        }
    }
}
impl FormatParse for FormatAlign {
    fn parse(text: &str) -> (Option<Self>, &str) {
        let mut chars = text.chars();
        match chars.next().and_then(Self::from_char) {
            Some(align) => (Some(align), chars.as_str()),
            None => (None, text),
        }
    }
}
/// Sign clause of a format spec: `+`, `-`, or ` ` (space).
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum FormatSign {
    Plus,
    Minus,
    MinusOrSpace,
}
impl FormatParse for FormatSign {
    fn parse(text: &str) -> (Option<Self>, &str) {
        let mut chars = text.chars();
        match chars.next() {
            Some('-') => (Some(Self::Minus), chars.as_str()),
            Some('+') => (Some(Self::Plus), chars.as_str()),
            Some(' ') => (Some(Self::MinusOrSpace), chars.as_str()),
            _ => (None, text),
        }
    }
}
/// Thousands-grouping clause of a format spec: `,` or `_`.
#[derive(Debug, PartialEq)]
pub enum FormatGrouping {
    Comma,
    Underscore,
}
impl FormatParse for FormatGrouping {
    fn parse(text: &str) -> (Option<Self>, &str) {
        let mut chars = text.chars();
        match chars.next() {
            Some('_') => (Some(Self::Underscore), chars.as_str()),
            Some(',') => (Some(Self::Comma), chars.as_str()),
            _ => (None, text),
        }
    }
}
/// Presentation type of a format spec (the final character, e.g. `d`, `x`,
/// `e`, `%`). `Case` distinguishes the lower/upper-case pairs.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum FormatType {
    String,
    Binary,
    Character,
    Decimal,
    Octal,
    Number(Case),
    Hex(Case),
    Exponent(Case),
    GeneralFormat(Case),
    FixedPoint(Case),
    Percentage,
}
// Round-trips a `FormatType` back to its spec character.
impl From<&FormatType> for char {
    fn from(from: &FormatType) -> char {
        match from {
            FormatType::String => 's',
            FormatType::Binary => 'b',
            FormatType::Character => 'c',
            FormatType::Decimal => 'd',
            FormatType::Octal => 'o',
            FormatType::Number(Case::Lower) => 'n',
            FormatType::Number(Case::Upper) => 'N',
            FormatType::Hex(Case::Lower) => 'x',
            FormatType::Hex(Case::Upper) => 'X',
            FormatType::Exponent(Case::Lower) => 'e',
            FormatType::Exponent(Case::Upper) => 'E',
            FormatType::GeneralFormat(Case::Lower) => 'g',
            FormatType::GeneralFormat(Case::Upper) => 'G',
            FormatType::FixedPoint(Case::Lower) => 'f',
            FormatType::FixedPoint(Case::Upper) => 'F',
            FormatType::Percentage => '%',
        }
    }
}
impl FormatParse for FormatType {
    fn parse(text: &str) -> (Option<Self>, &str) {
        let mut chars = text.chars();
        match chars.next() {
            Some('s') => (Some(Self::String), chars.as_str()),
            Some('b') => (Some(Self::Binary), chars.as_str()),
            Some('c') => (Some(Self::Character), chars.as_str()),
            Some('d') => (Some(Self::Decimal), chars.as_str()),
            Some('o') => (Some(Self::Octal), chars.as_str()),
            Some('n') => (Some(Self::Number(Case::Lower)), chars.as_str()),
            Some('N') => (Some(Self::Number(Case::Upper)), chars.as_str()),
            Some('x') => (Some(Self::Hex(Case::Lower)), chars.as_str()),
            Some('X') => (Some(Self::Hex(Case::Upper)), chars.as_str()),
            Some('e') => (Some(Self::Exponent(Case::Lower)), chars.as_str()),
            Some('E') => (Some(Self::Exponent(Case::Upper)), chars.as_str()),
            Some('f') => (Some(Self::FixedPoint(Case::Lower)), chars.as_str()),
            Some('F') => (Some(Self::FixedPoint(Case::Upper)), chars.as_str()),
            Some('g') => (Some(Self::GeneralFormat(Case::Lower)), chars.as_str()),
            Some('G') => (Some(Self::GeneralFormat(Case::Upper)), chars.as_str()),
            Some('%') => (Some(Self::Percentage), chars.as_str()),
            // NOTE(review): unlike the other `FormatParse` impls, this arm
            // returns the remainder with the unrecognized char *consumed*.
            // `FormatSpec::parse` errors out in that case without using the
            // remainder, so this appears harmless — confirm before relying
            // on the returned slice.
            Some(_) => (None, chars.as_str()),
            _ => (None, text),
        }
    }
}
/// The format specification component of a format field
///
/// For example the content would be parsed from `<20` in:
/// ```python
/// "hello {name:<20}".format(name="test")
/// ```
///
/// Format specifications allow nested placeholders for dynamic formatting.
/// For example, the following statements are equivalent:
/// ```python
/// "hello {name:{fmt}}".format(name="test", fmt="<20")
/// "hello {name:{align}{width}}".format(name="test", align="<", width="20")
/// "hello {name:<20{empty}>}".format(name="test", empty="")
/// ```
///
/// Nested placeholders can include additional format specifiers.
/// ```python
/// "hello {name:{fmt:*>}}".format(name="test", fmt="<20")
/// ```
///
/// However, placeholders can only be singly nested (preserving our sanity).
/// A [`FormatSpecError::PlaceholderRecursionExceeded`] will be raised while parsing in this case.
/// ```python
/// "hello {name:{fmt:{not_allowed}}}".format(name="test", fmt="<20") # Syntax error
/// ```
///
/// When placeholders are present in a format specification, parsing will return a [`DynamicFormatSpec`]
/// and avoid attempting to parse any of the clauses. Otherwise, a [`StaticFormatSpec`] will be used.
#[derive(Debug, PartialEq)]
pub enum FormatSpec {
    Static(StaticFormatSpec),
    Dynamic(DynamicFormatSpec),
}
/// A fully-resolved format spec with each clause parsed eagerly.
#[derive(Debug, PartialEq)]
pub struct StaticFormatSpec {
    // Ex) `!s` in `'{!s}'`
    conversion: Option<FormatConversion>,
    // Ex) `*` in `'{:*^30}'`
    fill: Option<char>,
    // Ex) `<` in `'{:<30}'`
    align: Option<FormatAlign>,
    // Ex) `+` in `'{:+f}'`
    sign: Option<FormatSign>,
    // Ex) `#` in `'{:#x}'`
    alternate_form: bool,
    // Ex) `30` in `'{:<30}'`
    width: Option<usize>,
    // Ex) `,` in `'{:,}'`
    grouping_option: Option<FormatGrouping>,
    // Ex) `2` in `'{:.2}'`
    precision: Option<usize>,
    // Ex) `f` in `'{:+f}'`
    format_type: Option<FormatType>,
}
/// A spec containing nested placeholders; clause parsing is deferred until
/// the placeholders are substituted.
#[derive(Debug, PartialEq)]
pub struct DynamicFormatSpec {
    // Ex) `x` and `y` in `'{:*{x},{y}b}'`
    pub placeholders: Vec<FormatPart>,
}
/// Whether a nested `{...}` placeholder is permitted while parsing a spec.
#[derive(Copy, Clone, Debug, PartialEq, Default)]
pub enum AllowPlaceholderNesting {
    #[default]
    Yes,
    No,
    // NOTE(review): a variant named after its own enum; no use of it is
    // visible in this file — possibly vestigial. Confirm before removing.
    AllowPlaceholderNesting,
}
/// Returns the byte length of the leading run of ASCII digits in `text`
/// (0 if the first character is not a digit; `text.len()` if all are).
fn get_num_digits(text: &str) -> usize {
    // `find` returns the byte index of the first non-digit char, which is
    // exactly the length of the digit prefix (ASCII digits are one byte).
    text.find(|c: char| !c.is_ascii_digit())
        .unwrap_or(text.len())
}
// Parses the optional `[[fill]align]` prefix of a spec. The fill may be any
// character, but only counts as a fill when the *second* character is a
// valid alignment; otherwise a lone leading alignment char is tried.
fn parse_fill_and_align(text: &str) -> (Option<char>, Option<FormatAlign>, &str) {
    // Only the first two chars matter; take(3) just bounds the scan.
    let char_indices: Vec<(usize, char)> = text.char_indices().take(3).collect();
    if char_indices.is_empty() {
        (None, None, text)
    } else if char_indices.len() == 1 {
        // Single char: it can only be an alignment, never a fill.
        let (maybe_align, remaining) = FormatAlign::parse(text);
        (None, maybe_align, remaining)
    } else {
        // Try "fill + align" first (align at the second char)...
        let (maybe_align, remaining) = FormatAlign::parse(&text[char_indices[1].0..]);
        if maybe_align.is_some() {
            (Some(char_indices[0].1), maybe_align, remaining)
        } else {
            // ...then fall back to a bare alignment at the first char.
            let (only_align, only_align_remaining) = FormatAlign::parse(text);
            (None, only_align, only_align_remaining)
        }
    }
}
/// Parses an optional leading decimal number, returning it with the rest of
/// the input. A digit run too large for `usize` is `DecimalDigitsTooMany`.
fn parse_number(text: &str) -> Result<(Option<usize>, &str), FormatSpecError> {
    let num_digits = get_num_digits(text);
    if num_digits == 0 {
        return Ok((None, text));
    }
    // `num_digits` is a byte count of ASCII digits, so this split is safe.
    let (digits, rest) = text.split_at(num_digits);
    match digits.parse::<usize>() {
        Ok(num) => Ok((Some(num), rest)),
        // NOTE: this condition is different from CPython
        Err(_) => Err(FormatSpecError::DecimalDigitsTooMany),
    }
}
/// Consumes an optional `#` (alternate-form flag) from the front of `text`.
fn parse_alternate_form(text: &str) -> (bool, &str) {
    match text.strip_prefix('#') {
        Some(rest) => (true, rest),
        None => (false, text),
    }
}
/// Consumes an optional leading `0` (zero-padding flag) from `text`.
fn parse_zero(text: &str) -> (bool, &str) {
    match text.strip_prefix('0') {
        Some(rest) => (true, rest),
        None => (false, text),
    }
}
/// Parses an optional `.N` precision clause. Precision is capped at
/// `i32::MAX`; a dot with no digits is treated as if absent (the original
/// text, dot included, is returned).
fn parse_precision(text: &str) -> Result<(Option<usize>, &str), FormatSpecError> {
    let Some(after_dot) = text.strip_prefix('.') else {
        return Ok((None, text));
    };
    match parse_number(after_dot)? {
        (Some(size), remaining) => {
            if size > i32::MAX as usize {
                return Err(FormatSpecError::PrecisionTooBig);
            }
            Ok((Some(size), remaining))
        }
        // Bare `.`: leave the dot unconsumed.
        (None, _) => Ok((None, text)),
    }
}
/// Parses a placeholder format part within a format specification
fn parse_nested_placeholder(text: &str) -> Result<Option<(FormatPart, &str)>, FormatSpecError> {
    // Nesting is disallowed here: a placeholder inside a placeholder is a
    // recursion error surfaced via `InvalidPlaceholder`.
    match FormatString::parse_spec(text, AllowPlaceholderNesting::No) {
        // Not a nested placeholder, OK
        Err(FormatParseError::MissingStartBracket) => Ok(None),
        Err(err) => Err(FormatSpecError::InvalidPlaceholder(err)),
        Ok((format_part, text)) => Ok(Some((format_part, text))),
    }
}
/// Parse all placeholders in a format specification
/// If no placeholders are present, an empty vector will be returned
fn parse_nested_placeholders(mut text: &str) -> Result<Vec<FormatPart>, FormatSpecError> {
    let mut placeholders = vec![];
    // Scan for each `{`; a successful parse advances past the placeholder,
    // a `MissingStartBracket` miss advances one byte past the brace.
    while let Some(bracket) = text.find('{') {
        if let Some((format_part, rest)) = parse_nested_placeholder(&text[bracket..])? {
            text = rest;
            placeholders.push(format_part);
        } else {
            text = &text[bracket + 1..];
        }
    }
    Ok(placeholders)
}
impl FormatSpec {
    /// Parses a format specification such as `<20`, `+#010.2f`, or `{width}`.
    /// If the spec contains any nested placeholder, clause parsing is skipped
    /// entirely and a `Dynamic` spec is returned.
    pub fn parse(text: &str) -> Result<Self, FormatSpecError> {
        let placeholders = parse_nested_placeholders(text)?;
        if !placeholders.is_empty() {
            return Ok(FormatSpec::Dynamic(DynamicFormatSpec { placeholders }));
        }
        // Clauses are parsed left-to-right in the grammar's fixed order.
        let (conversion, text) = FormatConversion::parse(text);
        let (mut fill, mut align, text) = parse_fill_and_align(text);
        let (sign, text) = FormatSign::parse(text);
        let (alternate_form, text) = parse_alternate_form(text);
        let (zero, text) = parse_zero(text);
        let (width, text) = parse_number(text)?;
        let (grouping_option, text) = FormatGrouping::parse(text);
        let (precision, text) = parse_precision(text)?;
        let (format_type, _text) = if text.is_empty() {
            (None, text)
        } else {
            // If there's any remaining text, we should yield a valid format type and consume it
            // all.
            let (format_type, text) = FormatType::parse(text);
            if format_type.is_none() {
                return Err(FormatSpecError::InvalidFormatType);
            }
            if !text.is_empty() {
                return Err(FormatSpecError::InvalidFormatSpecifier);
            }
            (format_type, text)
        };
        // A `0` flag with no explicit fill implies zero-fill aligned after
        // the sign (e.g. `010d` == `0=10d`).
        if zero && fill.is_none() {
            fill.replace('0');
            align = align.or(Some(FormatAlign::AfterSign));
        }
        Ok(FormatSpec::Static(StaticFormatSpec {
            conversion,
            fill,
            align,
            sign,
            alternate_form,
            width,
            grouping_option,
            precision,
            format_type,
        }))
    }
}
/// Errors raised while parsing or applying a format *specification*
/// (the part after `:` in a replacement field).
#[derive(Debug, PartialEq)]
pub enum FormatSpecError {
    DecimalDigitsTooMany,
    PrecisionTooBig,
    InvalidFormatSpecifier,
    InvalidFormatType,
    // Wraps the parse error of a nested placeholder.
    InvalidPlaceholder(FormatParseError),
    PlaceholderRecursionExceeded,
    UnspecifiedFormat(char, char),
    UnknownFormatCode(char, &'static str),
    PrecisionNotAllowed,
    NotAllowed(&'static str),
    UnableToConvert,
    CodeNotInRange,
    NotImplemented(char, &'static str),
}
/// Errors raised while splitting a format *string* into literals and fields.
#[derive(Debug, PartialEq)]
pub enum FormatParseError {
    UnmatchedBracket,
    MissingStartBracket,
    UnescapedStartBracketInLiteral,
    PlaceholderRecursionExceeded,
    UnknownConversion,
    EmptyAttribute,
    MissingRightBracket,
    InvalidCharacterAfterRightBracket,
}
impl std::fmt::Display for FormatParseError {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Every variant maps to a fixed message; pick it, then emit once.
        let message = match self {
            Self::UnmatchedBracket => "unmatched bracket in format string",
            Self::MissingStartBracket => "missing start bracket in format string",
            Self::UnescapedStartBracketInLiteral => "unescaped start bracket in literal",
            Self::PlaceholderRecursionExceeded => "multiply nested placeholder not allowed",
            Self::UnknownConversion => "unknown conversion",
            Self::EmptyAttribute => "empty attribute",
            Self::MissingRightBracket => "missing right bracket",
            Self::InvalidCharacterAfterRightBracket => "invalid character after right bracket",
        };
        fmt.write_str(message)
    }
}
impl Error for FormatParseError {}
impl FromStr for FormatSpec {
    type Err = FormatSpecError;

    /// Delegates to [`FormatSpec::parse`].
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        FormatSpec::parse(s)
    }
}
/// One accessor step inside a field name: `.attr`, `[0]`, or `[key]`.
#[derive(Debug, PartialEq)]
pub enum FieldNamePart {
    Attribute(String),
    Index(usize),
    StringIndex(String),
}
impl FieldNamePart {
    // Parses the next `.attr` or `[index]` step, or `None` at end of input.
    fn parse_part(
        chars: &mut impl PeekingNext<Item = char>,
    ) -> Result<Option<FieldNamePart>, FormatParseError> {
        chars
            .next()
            .map(|ch| match ch {
                '.' => {
                    // Attribute name runs until the next `.` or `[`.
                    let mut attribute = String::new();
                    for ch in chars.peeking_take_while(|ch| *ch != '.' && *ch != '[') {
                        attribute.push(ch);
                    }
                    if attribute.is_empty() {
                        Err(FormatParseError::EmptyAttribute)
                    } else {
                        Ok(FieldNamePart::Attribute(attribute))
                    }
                }
                '[' => {
                    // Everything up to `]` is the index; numeric indices
                    // become `Index`, anything else `StringIndex`.
                    let mut index = String::new();
                    for ch in chars {
                        if ch == ']' {
                            return if index.is_empty() {
                                Err(FormatParseError::EmptyAttribute)
                            } else if let Ok(index) = index.parse::<usize>() {
                                Ok(FieldNamePart::Index(index))
                            } else {
                                Ok(FieldNamePart::StringIndex(index))
                            };
                        }
                        index.push(ch);
                    }
                    Err(FormatParseError::MissingRightBracket)
                }
                // Any other char here means the previous step ended badly.
                _ => Err(FormatParseError::InvalidCharacterAfterRightBracket),
            })
            .transpose()
    }
}
/// How a field names its argument: implicitly (`{}`), by position (`{0}`),
/// or by keyword (`{name}`).
#[derive(Debug, PartialEq)]
pub enum FieldType {
    Auto,
    Index(usize),
    Keyword(String),
}
/// A parsed field name: the base argument plus its accessor chain.
#[derive(Debug, PartialEq)]
pub struct FieldName {
    pub field_type: FieldType,
    pub parts: Vec<FieldNamePart>,
}
impl FieldName {
    /// Parses a field name like `0.attr[key]` into its base argument
    /// reference and trailing accessor parts.
    pub fn parse(text: &str) -> Result<FieldName, FormatParseError> {
        let mut chars = text.chars().peekable();
        // Everything before the first `.` or `[` names the argument itself.
        let first: String = chars
            .peeking_take_while(|ch| *ch != '.' && *ch != '[')
            .collect();
        let field_type = if first.is_empty() {
            FieldType::Auto
        } else if let Ok(index) = first.parse::<usize>() {
            FieldType::Index(index)
        } else {
            FieldType::Keyword(first)
        };
        // Remaining input is a chain of `.attr` / `[index]` accessors.
        let mut parts = Vec::new();
        while let Some(part) = FieldNamePart::parse_part(&mut chars)? {
            parts.push(part);
        }
        Ok(FieldName { field_type, parts })
    }
}
/// One piece of a new-style format string: a `{...}` replacement field
/// (split into its name, `!` conversion, and `:` spec) or literal text.
#[derive(Debug, PartialEq)]
pub enum FormatPart {
    Field {
        field_name: String,
        conversion_spec: Option<char>,
        format_spec: String,
    },
    Literal(String),
}
/// A fully parsed new-style format string (`str.format` template).
#[derive(Debug, PartialEq)]
pub struct FormatString {
    pub format_parts: Vec<FormatPart>,
}
impl FormatString {
/// Reads one literal character, unescaping doubled braces (`{{` / `}}`).
/// A lone `{` or `}` in literal position is an error.
fn parse_literal_single(text: &str) -> Result<(char, &str), FormatParseError> {
    let mut chars = text.chars();
    // This should never be called with an empty str
    let first_char = chars.next().unwrap();
    if first_char == '{' || first_char == '}' {
        // A brace must be escaped by doubling to appear in a literal.
        return match chars.next() {
            Some(second) if second == first_char => Ok((first_char, chars.as_str())),
            _ => Err(FormatParseError::UnescapedStartBracketInLiteral),
        };
    }
    Ok((first_char, chars.as_str()))
}
// Accumulates a maximal literal run. A `\` sets `pending_escape` so a
// following `N{NAME}` unicode-name escape is copied through verbatim rather
// than tripping the brace-escaping rules. Stops (without consuming) at an
// unescaped `{`/`}`, returning what was gathered so far.
fn parse_literal(text: &str) -> Result<(FormatPart, &str), FormatParseError> {
    let mut cur_text = text;
    let mut result_string = String::new();
    let mut pending_escape = false;
    while !cur_text.is_empty() {
        if pending_escape
            && let Some((unicode_string, remaining)) =
                FormatString::parse_escaped_unicode_string(cur_text)
        {
            // Copy the whole `N{...}` body through untouched.
            result_string.push_str(unicode_string);
            cur_text = remaining;
            pending_escape = false;
            continue;
        }
        match FormatString::parse_literal_single(cur_text) {
            Ok((next_char, remaining)) => {
                result_string.push(next_char);
                cur_text = remaining;
                // `\\` must not arm the escape for the char after it.
                pending_escape = next_char == '\\' && !pending_escape;
            }
            Err(err) => {
                // An unescaped brace ends the literal; only error if we
                // gathered nothing at all.
                return if result_string.is_empty() {
                    Err(err)
                } else {
                    Ok((FormatPart::Literal(result_string), cur_text))
                };
            }
        }
    }
    Ok((FormatPart::Literal(result_string), ""))
}
/// Parses the interior of a `{...}` field: `name[!conversion][:spec]`.
fn parse_part_in_brackets(text: &str) -> Result<FormatPart, FormatParseError> {
    // Text before the first `:` is the (possibly converted) field name;
    // everything after it is the raw format spec.
    let (name_part, format_spec) = match text.split_once(':') {
        Some((name, spec)) => (name, spec.to_owned()),
        None => (text, String::new()),
    };
    // The name may still carry a `!r` / `!s` / `!a` conversion suffix.
    let (field_name, conversion_spec) = match name_part.split_once('!') {
        Some((name, conversion)) => {
            // Conversions are only ever one character.
            let spec = conversion
                .chars()
                .exactly_one()
                .map_err(|_| FormatParseError::UnknownConversion)?;
            (name, Some(spec))
        }
        None => (name_part, None),
    };
    Ok(FormatPart::Field {
        field_name: field_name.to_owned(),
        conversion_spec,
        format_spec,
    })
}
// Parses one `{...}` replacement field starting at `text`, returning the
// field and the input after its closing `}`. At most one level of `{...}`
// nesting is tolerated inside the field (for dynamic spec placeholders),
// and only when `allow_nesting` permits it.
fn parse_spec(
    text: &str,
    allow_nesting: AllowPlaceholderNesting,
) -> Result<(FormatPart, &str), FormatParseError> {
    let Some(text) = text.strip_prefix('{') else {
        return Err(FormatParseError::MissingStartBracket);
    };
    // True while we are inside a single nested `{...}`.
    let mut nested = false;
    // Everything between the outer braces, accumulated verbatim.
    let mut left = String::new();
    for (idx, c) in text.char_indices() {
        if c == '{' {
            // There may be one layer nesting brackets in spec
            if nested || allow_nesting == AllowPlaceholderNesting::No {
                return Err(FormatParseError::PlaceholderRecursionExceeded);
            }
            nested = true;
            left.push(c);
            continue;
        } else if c == '}' {
            if nested {
                // Closes the nested placeholder, not the field itself.
                nested = false;
                left.push(c);
                continue;
            }
            let (_, right) = text.split_at(idx + 1);
            let format_part = FormatString::parse_part_in_brackets(&left)?;
            return Ok((format_part, right));
        }
        left.push(c);
    }
    Err(FormatParseError::UnmatchedBracket)
}
/// Recognizes a `N{NAME}` unicode-name escape body at the start of `text`
/// (the preceding `\` was already consumed by the caller). Returns the
/// matched span and the remainder, or `None` if there is no match.
fn parse_escaped_unicode_string(text: &str) -> Option<(&str, &str)> {
    let body = text.strip_prefix("N{")?;
    let close = body.find('}')?;
    // 2 bytes for `N{` plus 1 for `}` maps `close` back to an index in `text`.
    let split = close + 3;
    Some((&text[..split], &text[split..]))
}
}
/// Like `FromStr`, but allows the parsed value to borrow from the input.
pub trait FromTemplate<'a>: Sized {
    type Err;
    fn from_str(s: &'a str) -> Result<Self, Self::Err>;
}
impl<'a> FromTemplate<'a> for FormatString {
    type Err = FormatParseError;
    /// Splits a full `str.format` template into literal and field parts.
    fn from_str(text: &'a str) -> Result<Self, Self::Err> {
        let mut cur_text: &str = text;
        let mut parts: Vec<FormatPart> = Vec::new();
        while !cur_text.is_empty() {
            // Try to parse both literals and bracketed format parts until we
            // run out of text
            cur_text = FormatString::parse_literal(cur_text)
                .or_else(|_| FormatString::parse_spec(cur_text, AllowPlaceholderNesting::Yes))
                .map(|(part, new_text)| {
                    parts.push(part);
                    new_text
                })?;
        }
        Ok(FormatString {
            format_parts: parts,
        })
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_fill_and_align() {
assert_eq!(
parse_fill_and_align(" <"),
(Some(' '), Some(FormatAlign::Left), "")
);
assert_eq!(
parse_fill_and_align(" <22"),
(Some(' '), Some(FormatAlign::Left), "22")
);
assert_eq!(
parse_fill_and_align("<22"),
(None, Some(FormatAlign::Left), "22")
);
assert_eq!(
parse_fill_and_align(" ^^"),
(Some(' '), Some(FormatAlign::Center), "^")
);
assert_eq!(
parse_fill_and_align("==="),
(Some('='), Some(FormatAlign::AfterSign), "=")
);
}
#[test]
fn test_width_only() {
let expected = Ok(FormatSpec::Static(StaticFormatSpec {
conversion: None,
fill: None,
align: None,
sign: None,
alternate_form: false,
width: Some(33),
grouping_option: None,
precision: None,
format_type: None,
}));
assert_eq!(FormatSpec::parse("33"), expected);
}
#[test]
fn test_fill_and_width() {
let expected = Ok(FormatSpec::Static(StaticFormatSpec {
conversion: None,
fill: Some('<'),
align: Some(FormatAlign::Right),
sign: None,
alternate_form: false,
width: Some(33),
grouping_option: None,
precision: None,
format_type: None,
}));
assert_eq!(FormatSpec::parse("<>33"), expected);
}
#[test]
fn test_format_part() {
let expected = Ok(FormatSpec::Dynamic(DynamicFormatSpec {
placeholders: vec![FormatPart::Field {
field_name: "x".to_string(),
conversion_spec: None,
format_spec: String::new(),
}],
}));
assert_eq!(FormatSpec::parse("{x}"), expected);
}
#[test]
fn test_dynamic_format_spec() {
let expected = Ok(FormatSpec::Dynamic(DynamicFormatSpec {
placeholders: vec![
FormatPart::Field {
field_name: "x".to_string(),
conversion_spec: None,
format_spec: String::new(),
},
FormatPart::Field {
field_name: "y".to_string(),
conversion_spec: None,
format_spec: "<2".to_string(),
},
FormatPart::Field {
field_name: "z".to_string(),
conversion_spec: None,
format_spec: String::new(),
},
],
}));
assert_eq!(FormatSpec::parse("{x}{y:<2}{z}"), expected);
}
#[test]
fn test_dynamic_format_spec_with_others() {
let expected = Ok(FormatSpec::Dynamic(DynamicFormatSpec {
placeholders: vec![FormatPart::Field {
field_name: "x".to_string(),
conversion_spec: None,
format_spec: String::new(),
}],
}));
assert_eq!(FormatSpec::parse("<{x}20b"), expected);
}
#[test]
fn test_all() {
let expected = Ok(FormatSpec::Static(StaticFormatSpec {
conversion: None,
fill: Some('<'),
align: Some(FormatAlign::Right),
sign: Some(FormatSign::Minus),
alternate_form: true,
width: Some(23),
grouping_option: Some(FormatGrouping::Comma),
precision: Some(11),
format_type: Some(FormatType::Binary),
}));
assert_eq!(FormatSpec::parse("<>-#23,.11b"), expected);
}
#[test]
fn test_format_parse() {
let expected = Ok(FormatString {
format_parts: vec![
FormatPart::Literal("abcd".to_owned()),
FormatPart::Field {
field_name: "1".to_owned(),
conversion_spec: None,
format_spec: String::new(),
},
FormatPart::Literal(":".to_owned()),
FormatPart::Field {
field_name: "key".to_owned(),
conversion_spec: None,
format_spec: String::new(),
},
],
});
assert_eq!(FormatString::from_str("abcd{1}:{key}"), expected);
}
#[test]
fn test_format_parse_nested_placeholder() {
let expected = Ok(FormatString {
format_parts: vec![
FormatPart::Literal("abcd".to_owned()),
FormatPart::Field {
field_name: "1".to_owned(),
conversion_spec: None,
format_spec: "{a}".to_owned(),
},
],
});
assert_eq!(FormatString::from_str("abcd{1:{a}}"), expected);
}
#[test]
fn test_format_parse_multi_byte_char() {
assert!(FormatString::from_str("{a:%ЫйЯЧ}").is_ok());
}
#[test]
fn test_format_parse_fail() {
assert_eq!(
FormatString::from_str("{s"),
Err(FormatParseError::UnmatchedBracket)
);
}
#[test]
fn test_format_parse_escape() {
let expected = Ok(FormatString {
format_parts: vec![
FormatPart::Literal("{".to_owned()),
FormatPart::Field {
field_name: "key".to_owned(),
conversion_spec: None,
format_spec: String::new(),
},
FormatPart::Literal("}ddfe".to_owned()),
],
});
assert_eq!(FormatString::from_str("{{{key}}}ddfe"), expected);
}
#[test]
fn test_format_invalid_specification() {
assert_eq!(
FormatSpec::parse("%3"),
Err(FormatSpecError::InvalidFormatSpecifier)
);
assert_eq!(
FormatSpec::parse(".2fa"),
Err(FormatSpecError::InvalidFormatSpecifier)
);
assert_eq!(
FormatSpec::parse("ds"),
Err(FormatSpecError::InvalidFormatSpecifier)
);
assert_eq!(
FormatSpec::parse("x+"),
Err(FormatSpecError::InvalidFormatSpecifier)
);
assert_eq!(
FormatSpec::parse("b4"),
Err(FormatSpecError::InvalidFormatSpecifier)
);
assert_eq!(
FormatSpec::parse("o!"),
Err(FormatSpecError::InvalidFormatSpecifier)
);
assert_eq!(
FormatSpec::parse("{"),
Err(FormatSpecError::InvalidPlaceholder(
FormatParseError::UnmatchedBracket
))
);
assert_eq!(
FormatSpec::parse("{x"),
Err(FormatSpecError::InvalidPlaceholder(
FormatParseError::UnmatchedBracket
))
);
assert_eq!(
FormatSpec::parse("}"),
Err(FormatSpecError::InvalidFormatType)
);
assert_eq!(
FormatSpec::parse("{}}"),
// Note this should be an `InvalidFormatType` but we give up
// on all other parsing validation when we see a placeholder
Ok(FormatSpec::Dynamic(DynamicFormatSpec {
placeholders: vec![FormatPart::Field {
field_name: String::new(),
conversion_spec: None,
format_spec: String::new()
}]
}))
);
assert_eq!(
FormatSpec::parse("{{x}}"),
Err(FormatSpecError::InvalidPlaceholder(
FormatParseError::PlaceholderRecursionExceeded
))
);
assert_eq!(
FormatSpec::parse("d "),
Err(FormatSpecError::InvalidFormatSpecifier)
);
assert_eq!(
FormatSpec::parse("z"),
Err(FormatSpecError::InvalidFormatType)
);
}
#[test]
fn test_parse_field_name() {
assert_eq!(
FieldName::parse(""),
Ok(FieldName {
field_type: FieldType::Auto,
parts: Vec::new(),
})
);
assert_eq!(
FieldName::parse("0"),
Ok(FieldName {
field_type: FieldType::Index(0),
parts: Vec::new(),
})
);
assert_eq!(
FieldName::parse("key"),
Ok(FieldName {
field_type: FieldType::Keyword("key".to_owned()),
parts: Vec::new(),
})
);
assert_eq!(
FieldName::parse("key.attr[0][string]"),
Ok(FieldName {
field_type: FieldType::Keyword("key".to_owned()),
parts: vec![
FieldNamePart::Attribute("attr".to_owned()),
FieldNamePart::Index(0),
FieldNamePart::StringIndex("string".to_owned())
],
})
);
assert_eq!(
FieldName::parse("key.."),
Err(FormatParseError::EmptyAttribute)
);
assert_eq!(
FieldName::parse("key[]"),
Err(FormatParseError::EmptyAttribute)
);
assert_eq!(
FieldName::parse("key["),
Err(FormatParseError::MissingRightBracket)
);
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_literal/src/float.rs | crates/ruff_python_literal/src/float.rs | use std::f64;
/// Returns `true` when `v` is (to within `f64::EPSILON`) a whole number.
fn is_integer(v: f64) -> bool {
    let nearest_whole = v.round();
    (v - nearest_whole).abs() < f64::EPSILON
}
// TODO: rewrite using format_general
/// Render `value` the way Python's `repr()` renders a float: integral values
/// keep one fractional digit (`1.0`), magnitudes outside the plain-notation
/// window use scientific notation with a forced, zero-padded exponent sign
/// (`1e+16`, `1e-05`), and non-finite values are lowercased (`nan`, `inf`).
pub fn to_string(value: f64) -> String {
    let scientific = format!("{value:e}");
    let Some(e_pos) = scientific.find('e') else {
        // No `e` marker means a non-finite value (nan/inf); Python spells
        // these in lowercase.
        let mut result = value.to_string();
        result.make_ascii_lowercase();
        return result;
    };
    let exponent: i32 = scientific[e_pos + 1..].parse().unwrap();
    if exponent < 16 && exponent > -5 {
        // Magnitude falls inside the plain-notation window.
        let is_whole = (value - value.round()).abs() < f64::EPSILON;
        if is_whole {
            // Keep one fractional digit so e.g. `2` renders as `2.0`.
            format!("{value:.1?}")
        } else {
            value.to_string()
        }
    } else {
        // Scientific notation with explicit sign and at least two exponent
        // digits, matching CPython's float repr (`1e+16`, `1e-05`).
        let significand = &scientific[..e_pos];
        format!("{significand}e{exponent:+#03}")
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_literal/src/escape.rs | crates/ruff_python_literal/src/escape.rs | use ruff_python_ast::{
BytesLiteralFlags, StringFlags, StringLiteralFlags,
str::{Quote, TripleQuotes},
};
/// Pre-computed layout for escaping a string/bytes literal.
pub struct EscapeLayout {
    /// The quote character chosen to wrap the escaped body.
    pub quote: Quote,
    /// Length of the escaped body in bytes, or `None` when the length
    /// computation overflowed.
    pub len: Option<usize>,
}
/// Common interface for writing an escaped ("repr"-style) rendering of some
/// source: either the original text verbatim (fast path, when escaping would
/// change nothing) or a fully escaped copy (slow path).
pub trait Escape {
    /// Length of the unescaped source, in bytes.
    fn source_len(&self) -> usize;
    /// The pre-computed output layout (quote choice and output length).
    fn layout(&self) -> &EscapeLayout;
    /// Whether escaping changes the source at all. Also `true` when the
    /// layout length overflowed (`len` is `None`).
    fn changed(&self) -> bool {
        self.layout().len != Some(self.source_len())
    }
    /// Write the source verbatim. Only valid when `changed()` is `false`.
    fn write_source(&self, formatter: &mut impl std::fmt::Write) -> std::fmt::Result;
    /// Write the body element by element, escaping as needed.
    fn write_body_slow(&self, formatter: &mut impl std::fmt::Write) -> std::fmt::Result;
    /// Write the escaped body, taking the verbatim fast path when possible.
    fn write_body(&self, formatter: &mut impl std::fmt::Write) -> std::fmt::Result {
        if self.changed() {
            self.write_body_slow(formatter)
        } else {
            self.write_source(formatter)
        }
    }
}
/// Returns the outer quotes to use and the number of quotes that need to be
/// escaped.
pub(crate) const fn choose_quote(
    single_count: usize,
    double_count: usize,
    preferred_quote: Quote,
) -> (Quote, usize) {
    // Tally the occurrences of the preferred quote and of its opposite.
    let (preferred_count, opposite_count) = match preferred_quote {
        Quote::Single => (single_count, double_count),
        Quote::Double => (double_count, single_count),
    };
    // Switch away from the preferred quote only when doing so removes all
    // escaping: the preferred quote occurs, but its opposite never does.
    if preferred_count > 0 && opposite_count == 0 {
        (preferred_quote.opposite(), opposite_count)
    } else {
        (preferred_quote, preferred_count)
    }
}
pub struct UnicodeEscape<'a> {
source: &'a str,
layout: EscapeLayout,
}
impl<'a> UnicodeEscape<'a> {
#[inline]
pub fn with_preferred_quote(source: &'a str, quote: Quote) -> Self {
let layout = Self::repr_layout(source, quote);
Self { source, layout }
}
#[inline]
pub fn new_repr(source: &'a str) -> Self {
Self::with_preferred_quote(source, Quote::Single)
}
#[inline]
pub fn str_repr<'r>(&'a self, triple_quotes: TripleQuotes) -> StrRepr<'r, 'a> {
StrRepr {
escape: self,
triple_quotes,
}
}
}
pub struct StrRepr<'r, 'a> {
escape: &'r UnicodeEscape<'a>,
triple_quotes: TripleQuotes,
}
impl StrRepr<'_, '_> {
pub fn write(&self, formatter: &mut impl std::fmt::Write) -> std::fmt::Result {
let flags = StringLiteralFlags::empty()
.with_quote_style(self.escape.layout().quote)
.with_triple_quotes(self.triple_quotes);
formatter.write_str(flags.quote_str())?;
self.escape.write_body(formatter)?;
formatter.write_str(flags.quote_str())?;
Ok(())
}
pub fn to_string(&self) -> Option<String> {
let mut s = String::with_capacity(self.escape.layout().len?);
self.write(&mut s).unwrap();
Some(s)
}
}
impl std::fmt::Display for StrRepr<'_, '_> {
fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.write(formatter)
}
}
impl UnicodeEscape<'_> {
    /// Bytes reserved in the running total for the two surrounding quotes.
    const REPR_RESERVED_LEN: usize = 2; // for quotes
    /// Compute the output layout (quote choice and escaped body length) for
    /// rendering `source` as a string repr.
    ///
    /// The casts are deliberate: lengths are summed as `isize` so overflow
    /// surfaces as `None` rather than wrapping.
    #[expect(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
    pub fn repr_layout(source: &str, preferred_quote: Quote) -> EscapeLayout {
        Self::output_layout_with_checker(source, preferred_quote, |a, b| {
            Some((a as isize).checked_add(b as isize)? as usize)
        })
    }
    /// Walk `source` once, tallying quote characters and accumulating the
    /// escaped output length via `length_add` (which returns `None` on
    /// overflow, aborting the length computation but still choosing a quote).
    fn output_layout_with_checker(
        source: &str,
        preferred_quote: Quote,
        length_add: impl Fn(usize, usize) -> Option<usize>,
    ) -> EscapeLayout {
        let mut out_len = Self::REPR_RESERVED_LEN;
        let mut single_count = 0;
        let mut double_count = 0;
        for ch in source.chars() {
            let incr = match ch {
                '\'' => {
                    single_count += 1;
                    1
                }
                '"' => {
                    double_count += 1;
                    1
                }
                c => Self::escaped_char_len(c),
            };
            let Some(new_len) = length_add(out_len, incr) else {
                // Length overflowed: give up on the length but still report
                // which quote to use. `#[cold]` keeps this off the hot path.
                #[cold]
                fn stop(
                    single_count: usize,
                    double_count: usize,
                    preferred_quote: Quote,
                ) -> EscapeLayout {
                    EscapeLayout {
                        quote: choose_quote(single_count, double_count, preferred_quote).0,
                        len: None,
                    }
                }
                return stop(single_count, double_count, preferred_quote);
            };
            out_len = new_len;
        }
        let (quote, num_escaped_quotes) = choose_quote(single_count, double_count, preferred_quote);
        // we'll be adding backslashes in front of the existing inner quotes
        let Some(out_len) = length_add(out_len, num_escaped_quotes) else {
            return EscapeLayout { quote, len: None };
        };
        EscapeLayout {
            quote,
            // Report the body length only: subtract the quote reservation.
            len: Some(out_len - Self::REPR_RESERVED_LEN),
        }
    }
    /// Length, in output bytes, of `ch` once escaped. Quote characters are
    /// excluded here; the caller tallies those separately.
    fn escaped_char_len(ch: char) -> usize {
        match ch {
            '\\' | '\t' | '\r' | '\n' => 2,
            ch if ch < ' ' || ch as u32 == 0x7f => 4, // \xHH
            ch if ch.is_ascii() => 1,
            ch if crate::char::is_printable(ch) => {
                // Printable non-ASCII is emitted as-is, UTF-8 encoded.
                ch.len_utf8()
            }
            ch if (ch as u32) < 0x100 => 4, // \xHH
            ch if (ch as u32) < 0x10000 => 6, // \uHHHH
            _ => 10, // \uHHHHHHHH
        }
    }
    /// Write a single escaped character. `quote` is the chosen outer quote,
    /// which (along with backslash) must itself be escaped.
    fn write_char(
        ch: char,
        quote: Quote,
        formatter: &mut impl std::fmt::Write,
    ) -> std::fmt::Result {
        match ch {
            '\n' => formatter.write_str("\\n"),
            '\t' => formatter.write_str("\\t"),
            '\r' => formatter.write_str("\\r"),
            // these 2 branches *would* be handled below, but we shouldn't have to do a
            // unicodedata lookup just for ascii characters
            '\x20'..='\x7e' => {
                // printable ascii range
                if ch == quote.as_char() || ch == '\\' {
                    formatter.write_char('\\')?;
                }
                formatter.write_char(ch)
            }
            ch if ch.is_ascii() => {
                // Remaining ASCII is non-printable control characters.
                write!(formatter, "\\x{:02x}", ch as u8)
            }
            ch if crate::char::is_printable(ch) => formatter.write_char(ch),
            '\0'..='\u{ff}' => {
                write!(formatter, "\\x{:02x}", ch as u32)
            }
            '\0'..='\u{ffff}' => {
                // Non-printable BMP characters get a 4-digit `\u` escape.
                write!(formatter, "\\u{:04x}", ch as u32)
            }
            _ => {
                // Non-printable astral-plane characters get an 8-digit `\U` escape.
                write!(formatter, "\\U{:08x}", ch as u32)
            }
        }
    }
}
impl Escape for UnicodeEscape<'_> {
    fn source_len(&self) -> usize {
        self.source.len()
    }
    fn layout(&self) -> &EscapeLayout {
        &self.layout
    }
    /// Fast path: nothing needs escaping, emit the source verbatim.
    fn write_source(&self, formatter: &mut impl std::fmt::Write) -> std::fmt::Result {
        formatter.write_str(self.source)
    }
    /// Slow path: escape character by character. `#[cold]` keeps this out of
    /// the common, verbatim case.
    #[cold]
    fn write_body_slow(&self, formatter: &mut impl std::fmt::Write) -> std::fmt::Result {
        for ch in self.source.chars() {
            Self::write_char(ch, self.layout().quote, formatter)?;
        }
        Ok(())
    }
}
pub struct AsciiEscape<'a> {
source: &'a [u8],
layout: EscapeLayout,
}
impl<'a> AsciiEscape<'a> {
#[inline]
pub fn new(source: &'a [u8], layout: EscapeLayout) -> Self {
Self { source, layout }
}
#[inline]
pub fn with_preferred_quote(source: &'a [u8], quote: Quote) -> Self {
let layout = Self::repr_layout(source, quote);
Self { source, layout }
}
#[inline]
pub fn new_repr(source: &'a [u8]) -> Self {
Self::with_preferred_quote(source, Quote::Single)
}
#[inline]
pub fn bytes_repr<'r>(&'a self, triple_quotes: TripleQuotes) -> BytesRepr<'r, 'a> {
BytesRepr {
escape: self,
triple_quotes,
}
}
}
impl AsciiEscape<'_> {
    /// Compute the output layout for rendering `source` as a bytes repr.
    ///
    /// Reserves 3 bytes up front: the `b` prefix plus the two quotes. The
    /// casts are deliberate: lengths are summed as `isize` so overflow
    /// surfaces as `None` rather than wrapping.
    #[expect(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
    pub fn repr_layout(source: &[u8], preferred_quote: Quote) -> EscapeLayout {
        Self::output_layout_with_checker(source, preferred_quote, 3, |a, b| {
            Some((a as isize).checked_add(b as isize)? as usize)
        })
    }
    /// Walk `source` once, tallying quote bytes and accumulating the escaped
    /// output length via `length_add` (which returns `None` on overflow,
    /// aborting the length computation but still choosing a quote).
    fn output_layout_with_checker(
        source: &[u8],
        preferred_quote: Quote,
        reserved_len: usize,
        length_add: impl Fn(usize, usize) -> Option<usize>,
    ) -> EscapeLayout {
        let mut out_len = reserved_len;
        let mut single_count = 0;
        let mut double_count = 0;
        for ch in source {
            let incr = match ch {
                b'\'' => {
                    single_count += 1;
                    1
                }
                b'"' => {
                    double_count += 1;
                    1
                }
                c => Self::escaped_char_len(*c),
            };
            let Some(new_len) = length_add(out_len, incr) else {
                // Length overflowed: give up on the length but still report
                // which quote to use. `#[cold]` keeps this off the hot path.
                #[cold]
                fn stop(
                    single_count: usize,
                    double_count: usize,
                    preferred_quote: Quote,
                ) -> EscapeLayout {
                    EscapeLayout {
                        quote: choose_quote(single_count, double_count, preferred_quote).0,
                        len: None,
                    }
                }
                return stop(single_count, double_count, preferred_quote);
            };
            out_len = new_len;
        }
        let (quote, num_escaped_quotes) = choose_quote(single_count, double_count, preferred_quote);
        // we'll be adding backslashes in front of the existing inner quotes
        let Some(out_len) = length_add(out_len, num_escaped_quotes) else {
            return EscapeLayout { quote, len: None };
        };
        EscapeLayout {
            quote,
            // Report the body length only: subtract the prefix/quote reservation.
            len: Some(out_len - reserved_len),
        }
    }
    /// Length, in output bytes, of `ch` once escaped. Quote bytes are
    /// excluded here; the caller tallies those separately.
    fn escaped_char_len(ch: u8) -> usize {
        match ch {
            b'\\' | b'\t' | b'\r' | b'\n' => 2,
            // Printable ASCII passes through unescaped.
            0x20..=0x7e => 1,
            _ => 4, // \xHH
        }
    }
    /// Write a single escaped byte. `quote` is the chosen outer quote, which
    /// (along with backslash) must itself be escaped.
    fn write_char(ch: u8, quote: Quote, formatter: &mut impl std::fmt::Write) -> std::fmt::Result {
        match ch {
            b'\t' => formatter.write_str("\\t"),
            b'\n' => formatter.write_str("\\n"),
            b'\r' => formatter.write_str("\\r"),
            0x20..=0x7e => {
                // printable ascii range
                if ch == quote.as_byte() || ch == b'\\' {
                    formatter.write_char('\\')?;
                }
                formatter.write_char(ch as char)
            }
            ch => write!(formatter, "\\x{ch:02x}"),
        }
    }
}
impl Escape for AsciiEscape<'_> {
fn source_len(&self) -> usize {
self.source.len()
}
fn layout(&self) -> &EscapeLayout {
&self.layout
}
fn write_source(&self, formatter: &mut impl std::fmt::Write) -> std::fmt::Result {
// OK because function must be called only when source is printable ascii characters.
let string = std::str::from_utf8(self.source).expect("ASCII bytes");
formatter.write_str(string)
}
#[cold]
fn write_body_slow(&self, formatter: &mut impl std::fmt::Write) -> std::fmt::Result {
for ch in self.source {
Self::write_char(*ch, self.layout().quote, formatter)?;
}
Ok(())
}
}
pub struct BytesRepr<'r, 'a> {
escape: &'r AsciiEscape<'a>,
triple_quotes: TripleQuotes,
}
impl BytesRepr<'_, '_> {
pub fn write(&self, formatter: &mut impl std::fmt::Write) -> std::fmt::Result {
let flags = BytesLiteralFlags::empty()
.with_quote_style(self.escape.layout().quote)
.with_triple_quotes(self.triple_quotes);
formatter.write_char('b')?;
formatter.write_str(flags.quote_str())?;
self.escape.write_body(formatter)?;
formatter.write_str(flags.quote_str())?;
Ok(())
}
pub fn to_string(&self) -> Option<String> {
let mut s = String::with_capacity(self.escape.layout().len?);
self.write(&mut s).unwrap();
Some(s)
}
}
impl std::fmt::Display for BytesRepr<'_, '_> {
fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.write(formatter)
}
}
#[cfg(test)]
mod unicode_escape_tests {
use super::*;
#[test]
fn changed() {
fn test(s: &str) -> bool {
UnicodeEscape::new_repr(s).changed()
}
assert!(!test("hello"));
assert!(!test("'hello'"));
assert!(!test("\"hello\""));
assert!(test("'\"hello"));
assert!(test("hello\n"));
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_graph/src/db.rs | crates/ruff_graph/src/db.rs | use anyhow::{Context, Result};
use std::sync::Arc;
use zip::CompressionMethod;
use ruff_db::Db as SourceDb;
use ruff_db::files::{File, Files};
use ruff_db::system::{OsSystem, System, SystemPathBuf};
use ruff_db::vendored::{VendoredFileSystem, VendoredFileSystemBuilder};
use ruff_python_ast::PythonVersion;
use ty_module_resolver::{SearchPathSettings, SearchPaths};
use ty_python_semantic::lint::{LintRegistry, RuleSelection};
use ty_python_semantic::{
AnalysisSettings, Db, Program, ProgramSettings, PythonEnvironment, PythonPlatform,
PythonVersionSource, PythonVersionWithSource, SysPrefixPathOrigin, default_lint_registry,
};
static EMPTY_VENDORED: std::sync::LazyLock<VendoredFileSystem> = std::sync::LazyLock::new(|| {
let mut builder = VendoredFileSystemBuilder::new(CompressionMethod::Stored);
builder.add_file("stdlib/VERSIONS", "\n").unwrap();
builder.finish().unwrap()
});
#[salsa::db]
#[derive(Default, Clone)]
pub struct ModuleDb {
storage: salsa::Storage<Self>,
files: Files,
system: OsSystem,
rule_selection: Arc<RuleSelection>,
analysis_settings: Arc<AnalysisSettings>,
}
impl ModuleDb {
    /// Initialize a [`ModuleDb`] from the given source root.
    ///
    /// `src_roots` become the first-party search paths; `python_version`
    /// selects the target interpreter version; `venv_path`, if provided,
    /// points at a virtual environment whose `site-packages` directories are
    /// added to the search paths.
    ///
    /// # Errors
    ///
    /// Returns an error if the virtual environment or its `site-packages`
    /// directory cannot be resolved, or if the search path settings are
    /// invalid.
    pub fn from_src_roots(
        src_roots: Vec<SystemPathBuf>,
        python_version: PythonVersion,
        venv_path: Option<SystemPathBuf>,
    ) -> Result<Self> {
        let db = Self::default();
        let mut search_paths = SearchPathSettings::new(src_roots);
        // TODO: Consider calling `PythonEnvironment::discover` if the `venv_path` is not provided.
        if let Some(venv_path) = venv_path {
            let environment =
                PythonEnvironment::new(venv_path, SysPrefixPathOrigin::PythonCliFlag, db.system())?;
            search_paths.site_packages_paths = environment
                .site_packages_paths(db.system())
                .context("Failed to discover the site-packages directory")?
                .into_vec();
        }
        let search_paths = search_paths
            .to_search_paths(db.system(), db.vendored())
            .context("Invalid search path settings")?;
        // Program settings live inside the Salsa database itself; the later
        // `SourceDb`/`Db` impls read them back out via `Program::get`.
        Program::from_settings(
            &db,
            ProgramSettings {
                python_version: PythonVersionWithSource {
                    version: python_version,
                    source: PythonVersionSource::default(),
                },
                python_platform: PythonPlatform::default(),
                search_paths,
            },
        );
        Ok(db)
    }
}
#[salsa::db]
impl SourceDb for ModuleDb {
fn vendored(&self) -> &VendoredFileSystem {
&EMPTY_VENDORED
}
fn system(&self) -> &dyn System {
&self.system
}
fn files(&self) -> &Files {
&self.files
}
fn python_version(&self) -> PythonVersion {
Program::get(self).python_version(self)
}
}
#[salsa::db]
impl ty_module_resolver::Db for ModuleDb {
fn search_paths(&self) -> &SearchPaths {
Program::get(self).search_paths(self)
}
}
#[salsa::db]
impl Db for ModuleDb {
fn should_check_file(&self, file: File) -> bool {
!file.path(self).is_vendored_path()
}
fn rule_selection(&self, _file: File) -> &RuleSelection {
&self.rule_selection
}
fn lint_registry(&self) -> &LintRegistry {
default_lint_registry()
}
fn verbose(&self) -> bool {
false
}
fn analysis_settings(&self) -> &AnalysisSettings {
&self.analysis_settings
}
}
#[salsa::db]
impl salsa::Database for ModuleDb {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_graph/src/settings.rs | crates/ruff_graph/src/settings.rs | use ruff_linter::display_settings;
use ruff_linter::settings::types::{ExtensionMapping, FilePatternSet, PreviewMode};
use ruff_macros::CacheKey;
use ruff_python_ast::PythonVersion;
use std::collections::BTreeMap;
use std::fmt;
use std::path::PathBuf;
#[derive(Debug, Clone, CacheKey)]
pub struct AnalyzeSettings {
pub exclude: FilePatternSet,
pub preview: PreviewMode,
pub target_version: PythonVersion,
pub string_imports: StringImports,
pub include_dependencies: BTreeMap<PathBuf, (PathBuf, Vec<String>)>,
pub extension: ExtensionMapping,
pub type_checking_imports: bool,
}
impl Default for AnalyzeSettings {
fn default() -> Self {
Self {
exclude: FilePatternSet::default(),
preview: PreviewMode::default(),
target_version: PythonVersion::default(),
string_imports: StringImports::default(),
include_dependencies: BTreeMap::default(),
extension: ExtensionMapping::default(),
type_checking_imports: true,
}
}
}
impl fmt::Display for AnalyzeSettings {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "\n# Analyze Settings")?;
display_settings! {
formatter = f,
namespace = "analyze",
fields = [
self.exclude,
self.preview,
self.target_version,
self.string_imports,
self.extension | debug,
self.include_dependencies | debug,
self.type_checking_imports,
]
}
Ok(())
}
}
#[derive(Debug, Copy, Clone, CacheKey)]
pub struct StringImports {
pub enabled: bool,
pub min_dots: usize,
}
impl Default for StringImports {
fn default() -> Self {
Self {
enabled: false,
min_dots: 2,
}
}
}
impl fmt::Display for StringImports {
    /// Render the setting as a human-readable value for settings dumps.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if !self.enabled {
            return write!(f, "disabled");
        }
        write!(f, "enabled (min_dots: {})", self.min_dots)
    }
}
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, CacheKey)]
#[cfg_attr(
feature = "serde",
derive(serde::Serialize, serde::Deserialize),
serde(rename_all = "kebab-case")
)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
pub enum Direction {
/// Construct a map from module to its dependencies (i.e., the modules that it imports).
#[default]
#[cfg_attr(feature = "serde", serde(alias = "Dependencies"))]
Dependencies,
/// Construct a map from module to its dependents (i.e., the modules that import it).
#[cfg_attr(feature = "serde", serde(alias = "Dependents"))]
Dependents,
}
impl fmt::Display for Direction {
    /// Render the direction quoted, matching how it appears in settings dumps.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let label = match self {
            Self::Dependencies => "\"dependencies\"",
            Self::Dependents => "\"dependents\"",
        };
        f.write_str(label)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_graph/src/lib.rs | crates/ruff_graph/src/lib.rs | use std::collections::{BTreeMap, BTreeSet};
use anyhow::Result;
use ruff_db::system::{SystemPath, SystemPathBuf};
use ruff_python_ast::PySourceType;
use ruff_python_ast::helpers::to_module_path;
use ruff_python_parser::{ParseOptions, parse};
use crate::collector::Collector;
pub use crate::db::ModuleDb;
use crate::resolver::Resolver;
pub use crate::settings::{AnalyzeSettings, Direction, StringImports};
mod collector;
mod db;
mod resolver;
mod settings;
#[derive(Debug, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ModuleImports(BTreeSet<SystemPathBuf>);
impl ModuleImports {
/// Detect the [`ModuleImports`] for a given Python file.
pub fn detect(
db: &ModuleDb,
source: &str,
source_type: PySourceType,
path: &SystemPath,
package: Option<&SystemPath>,
string_imports: StringImports,
type_checking_imports: bool,
) -> Result<Self> {
// Parse the source code.
let parsed = parse(source, ParseOptions::from(source_type))?;
let module_path =
package.and_then(|package| to_module_path(package.as_std_path(), path.as_std_path()));
// Collect the imports.
let imports = Collector::new(
module_path.as_deref(),
string_imports,
type_checking_imports,
)
.collect(parsed.syntax());
// Resolve the imports.
let mut resolved_imports = ModuleImports::default();
for import in imports {
for resolved in Resolver::new(db, path).resolve(import) {
if let Some(path) = resolved.as_system_path() {
resolved_imports.insert(path.to_path_buf());
}
}
}
Ok(resolved_imports)
}
/// Insert a file path into the module imports.
pub fn insert(&mut self, path: SystemPathBuf) {
self.0.insert(path);
}
/// Extend the module imports with additional file paths.
pub fn extend(&mut self, paths: impl IntoIterator<Item = SystemPathBuf>) {
self.0.extend(paths);
}
/// Returns `true` if the module imports are empty.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Returns the number of module imports.
pub fn len(&self) -> usize {
self.0.len()
}
/// Convert the file paths to be relative to a given path.
#[must_use]
pub fn relative_to(self, path: &SystemPath) -> Self {
Self(
self.0
.into_iter()
.map(|import| {
import
.strip_prefix(path)
.map(SystemPath::to_path_buf)
.unwrap_or(import)
})
.collect(),
)
}
}
#[derive(Debug, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ImportMap(BTreeMap<SystemPathBuf, ModuleImports>);
impl ImportMap {
    /// Create an [`ImportMap`] of file to its dependencies.
    ///
    /// Assumes that the input is a collection of unique file paths and their imports.
    pub fn dependencies(imports: impl IntoIterator<Item = (SystemPathBuf, ModuleImports)>) -> Self {
        let mut map = ImportMap::default();
        for (path, imports) in imports {
            map.0.insert(path, imports);
        }
        map
    }
    /// Create an [`ImportMap`] of file to its dependents (i.e., the inverse
    /// of [`ImportMap::dependencies`]).
    ///
    /// Assumes that the input is a collection of unique file paths and their imports.
    pub fn dependents(imports: impl IntoIterator<Item = (SystemPathBuf, ModuleImports)>) -> Self {
        let mut reverse = ImportMap::default();
        for (path, imports) in imports {
            // Record `path` as a dependent of everything it imports.
            for import in imports.0 {
                reverse.0.entry(import).or_default().insert(path.clone());
            }
            // Ensure `path` itself has an entry even if nothing imports it.
            reverse.0.entry(path).or_default();
        }
        reverse
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_graph/src/collector.rs | crates/ruff_graph/src/collector.rs | use crate::StringImports;
use ruff_python_ast::visitor::source_order::{
SourceOrderVisitor, walk_expr, walk_module, walk_stmt,
};
use ruff_python_ast::{self as ast, Expr, Mod, Stmt};
use ty_module_resolver::ModuleName;
/// Collect all imports for a given Python file.
#[derive(Default, Debug)]
pub(crate) struct Collector<'a> {
/// The path to the current module.
module_path: Option<&'a [String]>,
/// Whether to detect imports from string literals.
string_imports: StringImports,
/// The collected imports from the Python AST.
imports: Vec<CollectedImport>,
/// Whether to detect type checking imports
type_checking_imports: bool,
}
impl<'a> Collector<'a> {
    /// Create a collector for a single module.
    ///
    /// `module_path` is the dotted path of the module being analyzed (as
    /// components), used to resolve relative imports; `None` when unknown.
    /// `string_imports` controls whether string literals that look like
    /// module paths are also collected, and `type_checking_imports` whether
    /// `if TYPE_CHECKING:` blocks are traversed.
    pub(crate) fn new(
        module_path: Option<&'a [String]>,
        string_imports: StringImports,
        type_checking_imports: bool,
    ) -> Self {
        Self {
            module_path,
            string_imports,
            imports: Vec::new(),
            type_checking_imports,
        }
    }
    /// Walk `module` in source order and return all collected imports.
    #[must_use]
    pub(crate) fn collect(mut self, module: &Mod) -> Vec<CollectedImport> {
        walk_module(&mut self, module);
        self.imports
    }
}
impl<'ast> SourceOrderVisitor<'ast> for Collector<'_> {
fn visit_stmt(&mut self, stmt: &'ast Stmt) {
match stmt {
Stmt::ImportFrom(ast::StmtImportFrom {
names,
module,
level,
range: _,
node_index: _,
}) => {
let module = module.as_deref();
let level = *level;
for alias in names {
let mut components = vec![];
if level > 0 {
// If we're resolving a relative import, we must have a module path.
let Some(module_path) = self.module_path else {
return;
};
// Start with the containing module.
components.extend(module_path.iter().map(String::as_str));
// Remove segments based on the number of dots.
for _ in 0..level {
if components.is_empty() {
return;
}
components.pop();
}
}
// Add the module path.
if let Some(module) = module {
components.extend(module.split('.'));
}
// Add the alias name, unless it's a wildcard import.
if alias.name.as_str() != "*" {
components.push(alias.name.as_str());
}
if let Some(module_name) = ModuleName::from_components(components) {
self.imports.push(CollectedImport::ImportFrom(module_name));
}
}
}
Stmt::Import(ast::StmtImport {
names,
range: _,
node_index: _,
}) => {
for alias in names {
if let Some(module_name) = ModuleName::new(alias.name.as_str()) {
self.imports.push(CollectedImport::Import(module_name));
}
}
}
Stmt::If(ast::StmtIf {
test,
body,
elif_else_clauses,
range: _,
node_index: _,
}) => {
// Skip TYPE_CHECKING blocks if not requested
if self.type_checking_imports || !is_type_checking_condition(test) {
self.visit_body(body);
}
for clause in elif_else_clauses {
self.visit_elif_else_clause(clause);
}
}
Stmt::FunctionDef(_)
| Stmt::ClassDef(_)
| Stmt::While(_)
| Stmt::With(_)
| Stmt::Match(_)
| Stmt::Try(_)
| Stmt::For(_) => {
// Always traverse into compound statements.
walk_stmt(self, stmt);
}
Stmt::Return(_)
| Stmt::Delete(_)
| Stmt::Assign(_)
| Stmt::AugAssign(_)
| Stmt::AnnAssign(_)
| Stmt::TypeAlias(_)
| Stmt::Raise(_)
| Stmt::Assert(_)
| Stmt::Global(_)
| Stmt::Nonlocal(_)
| Stmt::Expr(_)
| Stmt::Pass(_)
| Stmt::Break(_)
| Stmt::Continue(_)
| Stmt::IpyEscapeCommand(_) => {
// Only traverse simple statements when string imports is enabled.
if self.string_imports.enabled {
walk_stmt(self, stmt);
}
}
}
}
fn visit_expr(&mut self, expr: &'ast Expr) {
if self.string_imports.enabled {
if let Expr::StringLiteral(ast::ExprStringLiteral {
value,
range: _,
node_index: _,
}) = expr
{
let value = value.to_str();
// Determine whether the string literal "looks like" an import statement: contains
// the requisite number of dots, and consists solely of valid Python identifiers.
if self.string_imports.min_dots == 0
|| memchr::memchr_iter(b'.', value.as_bytes()).count()
>= self.string_imports.min_dots
{
if let Some(module_name) = ModuleName::new(value) {
self.imports.push(CollectedImport::Import(module_name));
}
}
}
walk_expr(self, expr);
}
}
}
/// Check if an expression is a `TYPE_CHECKING` condition.
///
/// Returns `true` for:
/// - `TYPE_CHECKING`
/// - `typing.TYPE_CHECKING`
///
/// NOTE: Aliased `TYPE_CHECKING`, i.e. `import typing.TYPE_CHECKING as TC; if TC: ...`
/// will not be detected!
fn is_type_checking_condition(expr: &Expr) -> bool {
    match expr {
        // `if TYPE_CHECKING:`
        Expr::Name(ast::ExprName { id, .. }) => id.as_str() == "TYPE_CHECKING",
        // `if typing.TYPE_CHECKING:`
        Expr::Attribute(ast::ExprAttribute { value, attr, .. }) => {
            if attr.as_str() != "TYPE_CHECKING" {
                return false;
            }
            matches!(
                value.as_ref(),
                Expr::Name(ast::ExprName { id, .. }) if id.as_str() == "typing"
            )
        }
        _ => false,
    }
}
#[derive(Debug)]
pub(crate) enum CollectedImport {
/// The import was part of an `import` statement.
Import(ModuleName),
/// The import was part of an `import from` statement.
ImportFrom(ModuleName),
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_graph/src/resolver.rs | crates/ruff_graph/src/resolver.rs | use ruff_db::files::{File, FilePath, system_path_to_file};
use ruff_db::system::SystemPath;
use ty_module_resolver::{
ModuleName, resolve_module, resolve_module_confident, resolve_real_module,
resolve_real_module_confident,
};
use crate::ModuleDb;
use crate::collector::CollectedImport;
/// Collect all imports for a given Python file.
pub(crate) struct Resolver<'a> {
    db: &'a ModuleDb,
    // The importing file, when it could be resolved from the given path.
    // Knowing the importing file enables context-sensitive resolution.
    file: Option<File>,
}
impl<'a> Resolver<'a> {
    /// Initialize a [`Resolver`] with a given [`ModuleDb`].
    ///
    /// `path` is the path of the importing file; resolving it to a [`File`]
    /// up front lets later lookups use it as context.
    pub(crate) fn new(db: &'a ModuleDb, path: &SystemPath) -> Self {
        // If we know the importing file we can potentially resolve more imports
        let file = system_path_to_file(db, path).ok();
        Self { db, file }
    }
    /// Resolve the [`CollectedImport`] into a [`FilePath`].
    ///
    /// Yields up to two paths per import: the resolved module itself and, when
    /// that module is a stub (`.pyi`), the corresponding source file as well.
    pub(crate) fn resolve(&self, import: CollectedImport) -> impl Iterator<Item = &'a FilePath> {
        match import {
            CollectedImport::Import(import) => {
                // Attempt to resolve the module (e.g., given `import foo`, look for `foo`).
                let file = self.resolve_module(&import);
                // If the file is a stub, look for the corresponding source file.
                let source_file = file
                    .is_some_and(|file| file.extension() == Some("pyi"))
                    .then(|| self.resolve_real_module(&import))
                    .flatten();
                std::iter::once(file)
                    .chain(std::iter::once(source_file))
                    .flatten()
            }
            CollectedImport::ImportFrom(import) => {
                // Attempt to resolve the member (e.g., given `from foo import bar`, look for `foo.bar`).
                if let Some(file) = self.resolve_module(&import) {
                    // If the file is a stub, look for the corresponding source file.
                    let source_file = (file.extension() == Some("pyi"))
                        .then(|| self.resolve_real_module(&import))
                        .flatten();
                    return std::iter::once(Some(file))
                        .chain(std::iter::once(source_file))
                        .flatten();
                }
                // Attempt to resolve the module (e.g., given `from foo import bar`, look for `foo`).
                let parent = import.parent();
                let file = parent
                    .as_ref()
                    .and_then(|parent| self.resolve_module(parent));
                // If the file is a stub, look for the corresponding source file.
                let source_file = file
                    .is_some_and(|file| file.extension() == Some("pyi"))
                    .then(|| {
                        parent
                            .as_ref()
                            .and_then(|parent| self.resolve_real_module(parent))
                    })
                    .flatten();
                std::iter::once(file)
                    .chain(std::iter::once(source_file))
                    .flatten()
            }
        }
    }
    /// Resolves a module name to a module.
    ///
    /// Uses the importing file as context when available, otherwise falls back
    /// to context-free ("confident") resolution.
    pub(crate) fn resolve_module(&self, module_name: &ModuleName) -> Option<&'a FilePath> {
        let module = if let Some(file) = self.file {
            resolve_module(self.db, file, module_name)?
        } else {
            resolve_module_confident(self.db, module_name)?
        };
        Some(module.file(self.db)?.path(self.db))
    }
    /// Resolves a module name to a module (stubs not allowed).
    fn resolve_real_module(&self, module_name: &ModuleName) -> Option<&'a FilePath> {
        let module = if let Some(file) = self.file {
            resolve_real_module(self.db, file, module_name)?
        } else {
            resolve_real_module_confident(self.db, module_name)?
        };
        Some(module.file(self.db)?.path(self.db))
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_wasm/src/lib.rs | crates/ruff_wasm/src/lib.rs | use std::path::Path;
use js_sys::Error;
use ruff_linter::settings::types::PythonVersion;
use ruff_linter::suppression::Suppressions;
use serde::{Deserialize, Serialize};
use wasm_bindgen::prelude::*;
use ruff_formatter::printer::SourceMapGeneration;
use ruff_formatter::{FormatResult, Formatted, IndentStyle};
use ruff_linter::Locator;
use ruff_linter::directives;
use ruff_linter::line_width::{IndentWidth, LineLength};
use ruff_linter::linter::check_path;
use ruff_linter::settings::{DEFAULT_SELECTORS, DUMMY_VARIABLE_RGX, flags};
use ruff_linter::source_kind::SourceKind;
use ruff_python_ast::{Mod, PySourceType};
use ruff_python_codegen::Stylist;
use ruff_python_formatter::{PyFormatContext, QuoteStyle, format_module_ast, pretty_comments};
use ruff_python_index::Indexer;
use ruff_python_parser::{Mode, ParseOptions, Parsed, parse, parse_unchecked};
use ruff_python_trivia::CommentRanges;
use ruff_source_file::{OneIndexed, PositionEncoding as SourcePositionEncoding, SourceLocation};
use ruff_text_size::Ranged;
use ruff_workspace::Settings;
use ruff_workspace::configuration::Configuration;
use ruff_workspace::options::{FormatOptions, LintCommonOptions, LintOptions, Options};
// TypeScript declarations injected into the generated bindings, describing the
// JSON payload produced by `Workspace::check`.
#[wasm_bindgen(typescript_custom_section)]
const TYPES: &'static str = r#"
export interface Diagnostic {
    code: string | null;
    message: string;
    start_location: {
        row: number;
        column: number;
    };
    end_location: {
        row: number;
        column: number;
    };
    fix: {
        message: string | null;
        edits: {
            content: string | null;
            location: {
                row: number;
                column: number;
            };
            end_location: {
                row: number;
                column: number;
            };
        }[];
    } | null;
}
"#;
/// A diagnostic in the JS-facing shape produced by [`Workspace::check`].
#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)]
pub struct ExpandedMessage {
    pub code: String,
    pub message: String,
    pub start_location: Location,
    pub end_location: Location,
    pub fix: Option<ExpandedFix>,
}
/// A suggested fix attached to an [`ExpandedMessage`].
#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)]
pub struct ExpandedFix {
    message: Option<String>,
    edits: Vec<ExpandedEdit>,
}
/// A single text edit belonging to an [`ExpandedFix`].
#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)]
struct ExpandedEdit {
    location: Location,
    end_location: Location,
    content: Option<String>,
}
/// Perform global constructor initialization.
#[cfg(target_family = "wasm")]
#[expect(unsafe_code)]
pub fn before_main() {
    unsafe extern "C" {
        fn __wasm_call_ctors();
    }
    // Salsa uses the `inventory` crate, which registers global constructors that may need to be
    // called explicitly on WASM. See <https://github.com/dtolnay/inventory/blob/master/src/lib.rs#L105>
    // for details.
    // SAFETY: `__wasm_call_ctors` is the linker-provided constructor entry
    // point; calling it during startup is its documented use (see link above).
    unsafe {
        __wasm_call_ctors();
    }
}
/// Perform global constructor initialization (no-op outside WASM).
#[cfg(not(target_family = "wasm"))]
pub fn before_main() {}
/// wasm-bindgen start function: runs global constructors and initializes
/// panic/log hooks when the module is instantiated.
#[wasm_bindgen(start)]
pub fn run() {
    use log::Level;
    before_main();
    // When the `console_error_panic_hook` feature is enabled, we can call the
    // `set_panic_hook` function at least once during initialization, and then
    // we will get better error messages if our code ever panics.
    //
    // For more details see
    // https://github.com/rustwasm/console_error_panic_hook#readme
    #[cfg(feature = "console_error_panic_hook")]
    console_error_panic_hook::set_once();
    console_log::init_with_level(Level::Debug).expect("Initializing logger went wrong.");
}
/// A Ruff workspace exposed to JavaScript: resolved settings plus the position
/// encoding used when reporting row/column locations.
#[wasm_bindgen]
pub struct Workspace {
    settings: Settings,
    position_encoding: SourcePositionEncoding,
}
#[wasm_bindgen]
impl Workspace {
    /// Returns the version of the bundled `ruff_linter` crate.
    pub fn version() -> String {
        ruff_linter::VERSION.to_string()
    }

    /// Creates a workspace from a JS `Options` object and the position
    /// encoding to use when reporting locations.
    ///
    /// # Errors
    /// Returns a JS error if the options fail to deserialize or cannot be
    /// resolved into settings.
    #[wasm_bindgen(constructor)]
    pub fn new(options: JsValue, position_encoding: PositionEncoding) -> Result<Workspace, Error> {
        let options: Options = serde_wasm_bindgen::from_value(options).map_err(into_error)?;
        let configuration =
            Configuration::from_options(options, Some(Path::new(".")), Path::new("."))
                .map_err(into_error)?;
        let settings = configuration
            .into_settings(Path::new("."))
            .map_err(into_error)?;
        Ok(Workspace {
            settings,
            position_encoding: position_encoding.into(),
        })
    }

    /// Returns the default configuration as a JS object, with the most
    /// relevant defaults spelled out explicitly instead of omitted.
    #[wasm_bindgen(js_name = defaultSettings)]
    pub fn default_settings() -> Result<JsValue, Error> {
        serde_wasm_bindgen::to_value(&Options {
            preview: Some(false),
            // Propagate defaults.
            builtins: Some(Vec::default()),
            line_length: Some(LineLength::default()),
            indent_width: Some(IndentWidth::default()),
            target_version: Some(PythonVersion::default()),
            lint: Some(LintOptions {
                common: LintCommonOptions {
                    allowed_confusables: Some(Vec::default()),
                    dummy_variable_rgx: Some(DUMMY_VARIABLE_RGX.as_str().to_string()),
                    ignore: Some(Vec::default()),
                    select: Some(DEFAULT_SELECTORS.to_vec()),
                    extend_fixable: Some(Vec::default()),
                    extend_select: Some(Vec::default()),
                    external: Some(Vec::default()),
                    ..LintCommonOptions::default()
                },
                ..LintOptions::default()
            }),
            format: Some(FormatOptions {
                indent_style: Some(IndentStyle::Space),
                quote_style: Some(QuoteStyle::Double),
                ..FormatOptions::default()
            }),
            ..Options::default()
        })
        .map_err(into_error)
    }

    /// Lints `contents` and returns the diagnostics as a JS array of
    /// `Diagnostic` objects (see the `TYPES` declaration).
    pub fn check(&self, contents: &str) -> Result<JsValue, Error> {
        let source_type = PySourceType::default();
        // TODO(dhruvmanila): Support Jupyter Notebooks
        let source_kind = SourceKind::Python(contents.to_string());
        // Use the unresolved version because we don't have a file path.
        let target_version = self.settings.linter.unresolved_target_version;
        // Parse once.
        let options =
            ParseOptions::from(source_type).with_target_version(target_version.parser_version());
        let parsed = parse_unchecked(source_kind.source_code(), options)
            .try_into_module()
            .expect("`PySourceType` always parses to a `ModModule`.");
        // Map row and column locations to byte slices (lazily).
        let locator = Locator::new(contents);
        // Detect the current code style (lazily).
        let stylist = Stylist::from_tokens(parsed.tokens(), locator.contents());
        // Extra indices from the code.
        let indexer = Indexer::from_tokens(parsed.tokens(), locator.contents());
        // Extract the `# noqa` and `# isort: skip` directives from the source.
        let directives = directives::extract_directives(
            parsed.tokens(),
            directives::Flags::from_settings(&self.settings.linter),
            &locator,
            &indexer,
        );
        let suppressions =
            Suppressions::from_tokens(&self.settings.linter, locator.contents(), parsed.tokens());
        // Generate checks.
        let diagnostics = check_path(
            Path::new("<filename>"),
            None,
            &locator,
            &stylist,
            &indexer,
            &directives,
            &self.settings.linter,
            flags::Noqa::Enabled,
            &source_kind,
            source_type,
            &parsed,
            target_version,
            &suppressions,
        );
        let source_code = locator.to_source_code();
        // Convert internal diagnostics to the JS-facing shape, translating
        // byte offsets into row/column pairs using the configured encoding.
        let messages: Vec<ExpandedMessage> = diagnostics
            .into_iter()
            .map(|msg| {
                let range = msg.range().unwrap_or_default();
                ExpandedMessage {
                    code: msg.secondary_code_or_id().to_string(),
                    message: msg.concise_message().to_string(),
                    start_location: source_code
                        .source_location(range.start(), self.position_encoding)
                        .into(),
                    end_location: source_code
                        .source_location(range.end(), self.position_encoding)
                        .into(),
                    fix: msg.fix().map(|fix| ExpandedFix {
                        message: msg.first_help_text().map(ToString::to_string),
                        edits: fix
                            .edits()
                            .iter()
                            .map(|edit| ExpandedEdit {
                                location: source_code
                                    .source_location(edit.start(), self.position_encoding)
                                    .into(),
                                end_location: source_code
                                    .source_location(edit.end(), self.position_encoding)
                                    .into(),
                                content: edit.content().map(ToString::to_string),
                            })
                            .collect(),
                    }),
                }
            })
            .collect();
        serde_wasm_bindgen::to_value(&messages).map_err(into_error)
    }

    /// Formats `contents` and returns the formatted source code.
    pub fn format(&self, contents: &str) -> Result<String, Error> {
        let parsed = ParsedModule::from_source(contents)?;
        let formatted = parsed.format(&self.settings).map_err(into_error)?;
        let printed = formatted.print().map_err(into_error)?;
        Ok(printed.into_code())
    }

    /// Returns the formatter's intermediate representation for `contents`
    /// (useful for debugging the formatter itself).
    pub fn format_ir(&self, contents: &str) -> Result<String, Error> {
        let parsed = ParsedModule::from_source(contents)?;
        let formatted = parsed.format(&self.settings).map_err(into_error)?;
        Ok(format!("{formatted}"))
    }

    /// Returns a debug representation of the comments attached to the AST.
    pub fn comments(&self, contents: &str) -> Result<String, Error> {
        let parsed = ParsedModule::from_source(contents)?;
        // Reuse the comment ranges already computed by
        // `ParsedModule::from_source` instead of re-deriving them from the
        // token stream.
        let comments = pretty_comments(parsed.parsed.syntax(), &parsed.comment_ranges, contents);
        Ok(comments)
    }

    /// Parses the content and returns its AST
    pub fn parse(&self, contents: &str) -> Result<String, Error> {
        let parsed = parse_unchecked(contents, ParseOptions::from(Mode::Module));
        Ok(format!("{:#?}", parsed.into_syntax()))
    }

    /// Returns a debug representation of the token stream of `contents`.
    pub fn tokens(&self, contents: &str) -> Result<String, Error> {
        let parsed = parse_unchecked(contents, ParseOptions::from(Mode::Module));
        Ok(format!("{:#?}", parsed.tokens().as_ref()))
    }
}
pub(crate) fn into_error<E: std::fmt::Display>(err: E) -> Error {
Error::new(&err.to_string())
}
/// Source code parsed once and shared by the formatting entry points.
struct ParsedModule<'a> {
    source_code: &'a str,
    parsed: Parsed<Mod>,
    // Comment ranges derived from the token stream at parse time.
    comment_ranges: CommentRanges,
}
impl<'a> ParsedModule<'a> {
    /// Parses `source_code` as a Python module, failing on syntax errors.
    fn from_source(source_code: &'a str) -> Result<Self, Error> {
        let parsed = parse(source_code, ParseOptions::from(Mode::Module)).map_err(into_error)?;
        let comment_ranges = CommentRanges::from(parsed.tokens());
        Ok(Self {
            source_code,
            parsed,
            comment_ranges,
        })
    }
    /// Formats the parsed module using the given workspace settings, with
    /// source-map generation enabled.
    fn format(&self, settings: &Settings) -> FormatResult<Formatted<PyFormatContext<'_>>> {
        // TODO(konstin): Add an options for py/pyi to the UI (2/2)
        let options = settings
            .formatter
            .to_format_options(PySourceType::default(), self.source_code, None)
            .with_source_map_generation(SourceMapGeneration::Enabled);
        format_module_ast(
            &self.parsed,
            &self.comment_ranges,
            self.source_code,
            options,
        )
    }
}
/// A one-indexed row/column position in the source.
#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)]
pub struct Location {
    pub row: OneIndexed,
    /// The character offset from the start of the line.
    ///
    /// The semantic of the offset depends on the [`PositionEncoding`] used when creating
    /// the [`Workspace`].
    pub column: OneIndexed,
}
impl From<SourceLocation> for Location {
    // Maps the internal line/character-offset pair onto the JS-facing shape.
    fn from(value: SourceLocation) -> Self {
        Self {
            row: value.line,
            column: value.character_offset,
        }
    }
}
/// Position encoding used to compute the `column` of reported locations.
#[derive(Default, Copy, Clone)]
#[wasm_bindgen]
pub enum PositionEncoding {
    #[default]
    Utf8,
    Utf16,
    Utf32,
}
impl From<PositionEncoding> for SourcePositionEncoding {
    // One-to-one mapping onto the internal position-encoding enum.
    fn from(value: PositionEncoding) -> Self {
        match value {
            PositionEncoding::Utf8 => Self::Utf8,
            PositionEncoding::Utf16 => Self::Utf16,
            PositionEncoding::Utf32 => Self::Utf32,
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_wasm/tests/api.rs | crates/ruff_wasm/tests/api.rs | #![cfg(target_arch = "wasm32")]
use wasm_bindgen_test::wasm_bindgen_test;
use ruff_linter::registry::Rule;
use ruff_source_file::OneIndexed;
use ruff_wasm::{ExpandedMessage, Location, PositionEncoding, Workspace};
// Runs `Workspace::check` on `$source` with the JSON configuration `$config`
// and asserts that the produced diagnostics equal `$expected`.
macro_rules! check {
    ($source:expr, $config:expr, $expected:expr) => {{
        let config = js_sys::JSON::parse($config).unwrap();
        match Workspace::new(config, PositionEncoding::Utf8)
            .unwrap()
            .check($source)
        {
            Ok(output) => {
                let result: Vec<ExpandedMessage> = serde_wasm_bindgen::from_value(output).unwrap();
                assert_eq!(result, $expected);
            }
            Err(e) => assert!(false, "{:#?}", e),
        }
    }};
}
// With an empty configuration, the default rule set flags the always-true
// tuple condition.
#[wasm_bindgen_test]
fn empty_config() {
    ruff_wasm::before_main();
    check!(
        "if (1, 2):\n    pass",
        r#"{}"#,
        [ExpandedMessage {
            code: Rule::IfTuple.noqa_code().to_string(),
            message: "If test is a tuple, which is always `True`".to_string(),
            start_location: Location {
                row: OneIndexed::from_zero_indexed(0),
                column: OneIndexed::from_zero_indexed(3)
            },
            end_location: Location {
                row: OneIndexed::from_zero_indexed(0),
                column: OneIndexed::from_zero_indexed(9)
            },
            fix: None,
        }]
    );
}
// A syntax error is reported as an `invalid-syntax` diagnostic.
#[wasm_bindgen_test]
fn syntax_error() {
    ruff_wasm::before_main();
    check!(
        "x =\ny = 1\n",
        r#"{}"#,
        [ExpandedMessage {
            code: "invalid-syntax".to_string(),
            message: "Expected an expression".to_string(),
            start_location: Location {
                row: OneIndexed::from_zero_indexed(0),
                column: OneIndexed::from_zero_indexed(3)
            },
            end_location: Location {
                row: OneIndexed::from_zero_indexed(1),
                column: OneIndexed::from_zero_indexed(0)
            },
            fix: None,
        }]
    );
}
// Syntax valid only on a newer Python than the configured target version is
// reported as `invalid-syntax`.
#[wasm_bindgen_test]
fn unsupported_syntax_error() {
    ruff_wasm::before_main();
    check!(
        "match 2:\n    case 1: ...",
        r#"{"target-version": "py39"}"#,
        [ExpandedMessage {
            code: "invalid-syntax".to_string(),
            message: "Cannot use `match` statement on Python 3.9 (syntax was added in Python 3.10)"
                .to_string(),
            start_location: Location {
                row: OneIndexed::from_zero_indexed(0),
                column: OneIndexed::from_zero_indexed(0)
            },
            end_location: Location {
                row: OneIndexed::from_zero_indexed(0),
                column: OneIndexed::from_zero_indexed(5)
            },
            fix: None,
        }]
    );
}
// Ignoring the `F` rules suppresses the if-tuple diagnostic.
#[wasm_bindgen_test]
fn partial_config() {
    ruff_wasm::before_main();
    check!("if (1, 2):\n    pass", r#"{"ignore": ["F"]}"#, []);
}
// Nested plugin options (here, flake8-quotes) are honored from a partial config.
#[wasm_bindgen_test]
fn partial_nested_config() {
    ruff_wasm::before_main();
    let config = r#"{
      "select": ["Q"],
      "flake8-quotes": {
        "inline-quotes": "single"
      }
    }"#;
    check!(r#"print('hello world')"#, config, []);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/source.rs | crates/ruff_db/src/source.rs | use std::ops::Deref;
use std::sync::Arc;
use ruff_notebook::Notebook;
use ruff_python_ast::PySourceType;
use ruff_source_file::LineIndex;
use crate::Db;
use crate::files::{File, FilePath};
use crate::system::System;
/// Reads the source text of a python text file (must be valid UTF8) or notebook.
///
/// Read failures are recorded on the returned [`SourceText`] rather than
/// propagated; the content falls back to an empty string/notebook.
#[salsa::tracked(heap_size=ruff_memory_usage::heap_size)]
pub fn source_text(db: &dyn Db, file: File) -> SourceText {
    let path = file.path(db);
    let _span = tracing::trace_span!("source_text", file = %path).entered();
    let mut read_error = None;
    let kind = if is_notebook(db.system(), path) {
        file.read_to_notebook(db)
            .unwrap_or_else(|error| {
                tracing::debug!("Failed to read notebook '{path}': {error}");
                read_error = Some(SourceTextError::FailedToReadNotebook(error.to_string()));
                Notebook::empty()
            })
            .into()
    } else {
        file.read_to_string(db)
            .unwrap_or_else(|error| {
                tracing::debug!("Failed to read file '{path}': {error}");
                read_error = Some(SourceTextError::FailedToReadFile(error.to_string()));
                String::new()
            })
            .into()
    };
    SourceText {
        inner: Arc::new(SourceTextInner { kind, read_error }),
    }
}
/// Returns `true` when `path` should be read as a Jupyter notebook.
fn is_notebook(system: &dyn System, path: &FilePath) -> bool {
    // Ask the system for an explicitly configured source type first.
    let configured = match path {
        FilePath::System(path) => system.source_type(path),
        FilePath::SystemVirtual(virtual_path) => system.virtual_path_source_type(virtual_path),
        // Vendored files are never notebooks.
        FilePath::Vendored(_) => return false,
    };
    // Fall back to guessing from the file extension when nothing is configured.
    matches!(
        configured.or_else(|| PySourceType::try_from_extension(path.extension()?)),
        Some(PySourceType::Ipynb)
    )
}
/// The source text of a file containing python code.
///
/// The file containing the source text can either be a text file or a notebook.
///
/// Cheap cloneable in `O(1)` (the content is behind an [`Arc`]).
#[derive(Clone, Eq, PartialEq, get_size2::GetSize)]
pub struct SourceText {
    inner: Arc<SourceTextInner>,
}
impl SourceText {
    /// Returns the python code as a `str`.
    ///
    /// For notebooks, delegates to [`Notebook::source_code`].
    pub fn as_str(&self) -> &str {
        match &self.inner.kind {
            SourceTextKind::Text(source) => source,
            SourceTextKind::Notebook { notebook } => notebook.source_code(),
        }
    }
    /// Returns the underlying notebook if this is a notebook file.
    pub fn as_notebook(&self) -> Option<&Notebook> {
        match &self.inner.kind {
            SourceTextKind::Notebook { notebook } => Some(notebook),
            SourceTextKind::Text(_) => None,
        }
    }
    /// Returns `true` if this is a notebook source file.
    pub fn is_notebook(&self) -> bool {
        matches!(&self.inner.kind, SourceTextKind::Notebook { .. })
    }
    /// Returns `true` if there was an error when reading the content of the file.
    pub fn read_error(&self) -> Option<&SourceTextError> {
        self.inner.read_error.as_ref()
    }
}
impl Deref for SourceText {
    type Target = str;
    // Lets a `SourceText` be used wherever a `&str` is expected.
    fn deref(&self) -> &str {
        self.as_str()
    }
}
impl std::fmt::Debug for SourceText {
    /// Debug-formats as a one-field tuple: either the raw text or the notebook.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut tuple = f.debug_tuple("SourceText");
        match &self.inner.kind {
            SourceTextKind::Text(text) => tuple.field(text),
            SourceTextKind::Notebook { notebook } => tuple.field(notebook),
        };
        tuple.finish()
    }
}
/// Shared state behind [`SourceText`]'s `Arc`.
#[derive(Eq, PartialEq, get_size2::GetSize)]
struct SourceTextInner {
    kind: SourceTextKind,
    // Error recorded when reading the file failed (content is then empty).
    read_error: Option<SourceTextError>,
}
/// The content of a source file: plain text or a notebook.
#[derive(Eq, PartialEq, get_size2::GetSize)]
enum SourceTextKind {
    Text(String),
    Notebook {
        // Jupyter notebooks are not very relevant for memory profiling, and contain
        // arbitrary JSON values that do not implement the `GetSize` trait.
        #[get_size(ignore)]
        notebook: Box<Notebook>,
    },
}
impl From<String> for SourceTextKind {
    // Plain text content wraps directly into the `Text` variant.
    fn from(value: String) -> Self {
        SourceTextKind::Text(value)
    }
}
impl From<Notebook> for SourceTextKind {
    // The notebook is boxed, keeping the `Notebook` payload on the heap.
    fn from(notebook: Notebook) -> Self {
        SourceTextKind::Notebook {
            notebook: Box::new(notebook),
        }
    }
}
#[derive(Debug, thiserror::Error, PartialEq, Eq, Clone, get_size2::GetSize)]
pub enum SourceTextError {
#[error("Failed to read notebook: {0}`")]
FailedToReadNotebook(String),
#[error("Failed to read file: {0}")]
FailedToReadFile(String),
}
/// Computes the [`LineIndex`] for `file`.
#[salsa::tracked(heap_size=ruff_memory_usage::heap_size)]
pub fn line_index(db: &dyn Db, file: File) -> LineIndex {
    let _span = tracing::trace_span!("line_index", ?file).entered();
    // Derives the index from the (salsa-cached) source text of the file.
    let source = source_text(db, file);
    LineIndex::from_source_text(&source)
}
#[cfg(test)]
mod tests {
    use salsa::EventKind;
    use salsa::Setter as _;
    use ruff_source_file::OneIndexed;
    use ruff_text_size::TextSize;
    use crate::files::system_path_to_file;
    use crate::source::{line_index, source_text};
    use crate::system::{DbWithWritableSystem as _, SystemPath};
    use crate::tests::TestDb;
    // Writing new content changes the file revision, which must invalidate
    // the cached `source_text` result.
    #[test]
    fn re_runs_query_when_file_revision_changes() -> crate::system::Result<()> {
        let mut db = TestDb::new();
        let path = SystemPath::new("test.py");
        db.write_file(path, "x = 10")?;
        let file = system_path_to_file(&db, path).unwrap();
        assert_eq!(source_text(&db, file).as_str(), "x = 10");
        db.write_file(path, "x = 20").unwrap();
        assert_eq!(source_text(&db, file).as_str(), "x = 20");
        Ok(())
    }
    // Changing metadata (permissions) without touching the content must not
    // re-execute the `source_text` query.
    #[test]
    fn text_is_cached_if_revision_is_unchanged() -> crate::system::Result<()> {
        let mut db = TestDb::new();
        let path = SystemPath::new("test.py");
        db.write_file(path, "x = 10")?;
        let file = system_path_to_file(&db, path).unwrap();
        assert_eq!(source_text(&db, file).as_str(), "x = 10");
        // Change the file permission only
        file.set_permissions(&mut db).to(Some(0o777));
        db.clear_salsa_events();
        assert_eq!(source_text(&db, file).as_str(), "x = 10");
        let events = db.take_salsa_events();
        // No `WillExecute` event means the cached result was reused.
        assert!(
            !events
                .iter()
                .any(|event| matches!(event.kind, EventKind::WillExecute { .. }))
        );
        Ok(())
    }
    #[test]
    fn line_index_for_source() -> crate::system::Result<()> {
        let mut db = TestDb::new();
        let path = SystemPath::new("test.py");
        db.write_file(path, "x = 10\ny = 20")?;
        let file = system_path_to_file(&db, path).unwrap();
        let index = line_index(&db, file);
        let source = source_text(&db, file);
        assert_eq!(index.line_count(), 2);
        assert_eq!(
            index.line_start(OneIndexed::from_zero_indexed(0), source.as_str()),
            TextSize::new(0)
        );
        Ok(())
    }
    // A `.ipynb` file is read as a notebook.
    #[test]
    fn notebook() -> crate::system::Result<()> {
        let mut db = TestDb::new();
        let path = SystemPath::new("test.ipynb");
        db.write_file(
            path,
            r#"
        {
            "cells": [{"cell_type": "code", "source": ["x = 10"], "metadata": {}, "outputs": []}],
            "metadata": {
                "kernelspec": {
                    "display_name": "Python (ruff)",
                    "language": "python",
                    "name": "ruff"
                },
                "language_info": {
                    "file_extension": ".py",
                    "mimetype": "text/x-python",
                    "name": "python",
                    "nbconvert_exporter": "python",
                    "pygments_lexer": "ipython3",
                    "version": "3.11.3"
                }
            },
            "nbformat": 4,
            "nbformat_minor": 4
        }"#,
        )?;
        let file = system_path_to_file(&db, path).unwrap();
        let source = source_text(&db, file);
        assert!(source.is_notebook());
        assert_eq!(source.as_str(), "x = 10\n");
        assert!(source.as_notebook().is_some());
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/lib.rs | crates/ruff_db/src/lib.rs | #![warn(
clippy::disallowed_methods,
reason = "Prefer System trait methods over std methods"
)]
use crate::files::Files;
use crate::system::System;
use crate::vendored::VendoredFileSystem;
use ruff_python_ast::PythonVersion;
use rustc_hash::FxHasher;
use std::hash::BuildHasherDefault;
use std::num::NonZeroUsize;
use ty_static::EnvVars;
pub mod cancellation;
pub mod diagnostic;
pub mod display;
pub mod file_revision;
pub mod files;
pub mod panic;
pub mod parsed;
pub mod source;
pub mod system;
#[cfg(feature = "testing")]
pub mod testing;
pub mod vendored;
#[cfg(not(target_arch = "wasm32"))]
pub use std::time::{Instant, SystemTime, SystemTimeError};
#[cfg(target_arch = "wasm32")]
pub use web_time::{Instant, SystemTime, SystemTimeError};
/// A [`dashmap::DashMap`] using the fast, non-cryptographic `FxHasher`.
pub type FxDashMap<K, V> = dashmap::DashMap<K, V, BuildHasherDefault<FxHasher>>;
/// A [`dashmap::DashSet`] using the fast, non-cryptographic `FxHasher`.
pub type FxDashSet<K> = dashmap::DashSet<K, BuildHasherDefault<FxHasher>>;
// Process-wide program version, settable exactly once.
static VERSION: std::sync::OnceLock<String> = std::sync::OnceLock::new();

/// Returns the version of the executing program if set.
pub fn program_version() -> Option<&'static str> {
    VERSION.get().map(String::as_str)
}

/// Sets the version of the executing program.
///
/// ## Errors
/// If the version has already been initialized (can only be set once); the
/// rejected value is returned in the `Err`.
pub fn set_program_version(version: String) -> Result<(), String> {
    VERSION.set(version)
}
/// Most basic database that gives access to files, the host system, source code, and parsed AST.
#[salsa::db]
pub trait Db: salsa::Database {
    /// Returns the vendored (bundled, read-only) file system.
    fn vendored(&self) -> &VendoredFileSystem;
    /// Returns the host system abstraction used for file-system access.
    fn system(&self) -> &dyn System;
    /// Returns the file registry of this database.
    fn files(&self) -> &Files;
    /// Returns the Python version to analyze for.
    fn python_version(&self) -> PythonVersion;
}
/// Returns the maximum number of tasks that ty is allowed
/// to process in parallel.
///
/// Returns [`std::thread::available_parallelism`], unless the environment
/// variable `TY_MAX_PARALLELISM` or `RAYON_NUM_THREADS` is set. `TY_MAX_PARALLELISM` takes
/// precedence over `RAYON_NUM_THREADS`.
///
/// Falls back to `1` if `available_parallelism` is not available.
///
/// Setting `TY_MAX_PARALLELISM` to `2` only restricts the number of threads that ty spawns
/// to process work in parallel. For example, to index a directory or checking the files of a project.
/// ty can still spawn more threads for other tasks, e.g. to wait for a Ctrl+C signal or
/// watching the files for changes.
#[expect(
    clippy::disallowed_methods,
    reason = "We don't have access to System here, but this is also only used by the CLI and the server which always run on a real system."
)]
pub fn max_parallelism() -> NonZeroUsize {
    // An explicit override takes precedence; `TY_MAX_PARALLELISM` wins over
    // `RAYON_NUM_THREADS`, and unparsable values are ignored.
    let override_value = std::env::var(EnvVars::TY_MAX_PARALLELISM)
        .or_else(|_| std::env::var(EnvVars::RAYON_NUM_THREADS))
        .ok()
        .and_then(|value| value.parse().ok());
    match override_value {
        Some(parallelism) => parallelism,
        // Otherwise use the system's available parallelism, with one worker
        // as the last-resort fallback.
        None => std::thread::available_parallelism()
            .unwrap_or_else(|_| NonZeroUsize::new(1).unwrap()),
    }
}
/// Trait for types that can provide Rust documentation.
///
/// Use `derive(RustDoc)` to automatically implement this trait for types that have a static string documentation.
pub trait RustDoc {
    /// Returns the documentation string associated with the implementing type.
    fn rust_doc() -> &'static str;
}
#[cfg(test)]
mod tests {
    use std::sync::{Arc, Mutex};
    use crate::Db;
    use crate::files::Files;
    use crate::system::TestSystem;
    use crate::system::{DbWithTestSystem, System};
    use crate::vendored::VendoredFileSystem;
    // Shared log of salsa events, appended to by the database callback below.
    type Events = Arc<Mutex<Vec<salsa::Event>>>;
    /// Database that can be used for testing.
    ///
    /// Uses an in memory filesystem and it stubs out the vendored files by default.
    #[salsa::db]
    #[derive(Default, Clone)]
    pub(crate) struct TestDb {
        storage: salsa::Storage<Self>,
        files: Files,
        system: TestSystem,
        vendored: VendoredFileSystem,
        events: Events,
    }
    impl TestDb {
        pub(crate) fn new() -> Self {
            let events = Events::default();
            Self {
                // Register a callback that records every salsa event so that
                // tests can assert on query execution.
                storage: salsa::Storage::new(Some(Box::new({
                    let events = events.clone();
                    move |event| {
                        tracing::trace!("event: {:?}", event);
                        let mut events = events.lock().unwrap();
                        events.push(event);
                    }
                }))),
                system: TestSystem::default(),
                vendored: VendoredFileSystem::default(),
                events,
                files: Files::default(),
            }
        }
        /// Empties the internal store of salsa events that have been emitted,
        /// and returns them as a `Vec` (equivalent to [`std::mem::take`]).
        pub(crate) fn take_salsa_events(&mut self) -> Vec<salsa::Event> {
            let mut events = self.events.lock().unwrap();
            std::mem::take(&mut *events)
        }
        /// Clears the emitted salsa events.
        pub(crate) fn clear_salsa_events(&mut self) {
            self.take_salsa_events();
        }
        /// Replaces the vendored file system used by this database.
        pub(crate) fn with_vendored(&mut self, vendored_file_system: VendoredFileSystem) {
            self.vendored = vendored_file_system;
        }
    }
    #[salsa::db]
    impl Db for TestDb {
        fn vendored(&self) -> &VendoredFileSystem {
            &self.vendored
        }
        fn system(&self) -> &dyn System {
            &self.system
        }
        fn files(&self) -> &Files {
            &self.files
        }
        fn python_version(&self) -> ruff_python_ast::PythonVersion {
            ruff_python_ast::PythonVersion::latest_ty()
        }
    }
    impl DbWithTestSystem for TestDb {
        fn test_system(&self) -> &TestSystem {
            &self.system
        }
        fn test_system_mut(&mut self) -> &mut TestSystem {
            &mut self.system
        }
    }
    #[salsa::db]
    impl salsa::Database for TestDb {}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/cancellation.rs | crates/ruff_db/src/cancellation.rs | use std::sync::Arc;
use std::sync::atomic::AtomicBool;
/// Signals a [`CancellationToken`] that it should be canceled.
#[derive(Debug, Clone)]
pub struct CancellationTokenSource {
    // Shared flag; tokens hold clones of this `Arc`.
    cancelled: Arc<AtomicBool>,
}
impl Default for CancellationTokenSource {
    // Equivalent to `CancellationTokenSource::new()`.
    fn default() -> Self {
        Self::new()
    }
}
impl CancellationTokenSource {
    /// Creates a source whose tokens have not yet been canceled.
    pub fn new() -> Self {
        let cancelled = Arc::new(AtomicBool::new(false));
        Self { cancelled }
    }

    /// Returns `true` once [`cancel`](Self::cancel) has been called.
    pub fn is_cancellation_requested(&self) -> bool {
        self.cancelled.load(std::sync::atomic::Ordering::Relaxed)
    }

    /// Creates a new token that observes this source.
    pub fn token(&self) -> CancellationToken {
        CancellationToken {
            cancelled: Arc::clone(&self.cancelled),
        }
    }

    /// Requests cancellation for operations using this token.
    pub fn cancel(&self) {
        self.cancelled
            .store(true, std::sync::atomic::Ordering::Relaxed);
    }
}
/// Token signals whether an operation should be canceled.
#[derive(Debug, Clone)]
pub struct CancellationToken {
    // Shared with the owning `CancellationTokenSource`.
    cancelled: Arc<AtomicBool>,
}
impl CancellationToken {
    /// Returns `true` once the owning source has requested cancellation.
    pub fn is_cancelled(&self) -> bool {
        self.cancelled.load(std::sync::atomic::Ordering::Relaxed)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/file_revision.rs | crates/ruff_db/src/file_revision.rs | use crate::system::file_time_now;
/// A number representing the revision of a file.
///
/// Two revisions that don't compare equal signify that the file has been modified.
/// Revisions aren't guaranteed to be monotonically increasing or in any specific order.
///
/// Possible revisions are:
/// * The last modification time of the file.
/// * The hash of the file's content.
/// * The revision as it comes from an external system, for example the LSP.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default, get_size2::GetSize)]
pub struct FileRevision(u128);
impl FileRevision {
    /// Creates a revision from a raw 128-bit value.
    pub fn new(value: u128) -> Self {
        Self(value)
    }
    /// Creates a revision from the current file-system time.
    pub fn now() -> Self {
        Self::from(file_time_now())
    }
    /// The zero revision.
    pub const fn zero() -> Self {
        Self(0)
    }
    /// Returns the raw 128-bit revision value.
    #[must_use]
    pub fn as_u128(self) -> u128 {
        self.0
    }
}
impl From<u128> for FileRevision {
    // Wraps the raw value directly (e.g. a content hash).
    fn from(value: u128) -> Self {
        FileRevision(value)
    }
}
impl From<u64> for FileRevision {
    // Zero-extends the 64-bit value into the 128-bit revision.
    fn from(value: u64) -> Self {
        FileRevision(u128::from(value))
    }
}
impl From<filetime::FileTime> for FileRevision {
    // Packs seconds into the high 64 bits and nanoseconds into the low 64 bits.
    fn from(value: filetime::FileTime) -> Self {
        // NOTE(review): a negative `seconds` (pre-1970 timestamp) sign-extends
        // through the `as u128` cast; the round-trip back via `as i64` still
        // recovers the original value (see the test in this file).
        let seconds = value.seconds() as u128;
        let seconds = seconds << 64;
        let nanos = u128::from(value.nanoseconds());
        FileRevision(seconds | nanos)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Round-trips a `FileTime` through `FileRevision`: seconds land in the
    // high 64 bits, nanoseconds in the low 64 bits.
    #[test]
    fn revision_from_file_time() {
        let file_time = file_time_now();
        let revision = FileRevision::from(file_time);
        let revision = revision.as_u128();
        let nano = revision & 0xFFFF_FFFF_FFFF_FFFF;
        let seconds = revision >> 64;
        assert_eq!(file_time.nanoseconds(), nano as u32);
        assert_eq!(file_time.seconds(), seconds as i64);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/panic.rs | crates/ruff_db/src/panic.rs | use std::any::Any;
use std::backtrace::BacktraceStatus;
use std::cell::Cell;
use std::panic::Location;
use std::sync::OnceLock;
/// Information about a panic captured by the custom panic hook.
#[derive(Debug)]
pub struct PanicError {
    /// The panic location (as formatted by `std::panic::Location`), if known.
    pub location: Option<String>,
    /// The panic payload, typically the panic message.
    pub payload: Payload,
    /// The captured Rust backtrace, if any.
    pub backtrace: Option<std::backtrace::Backtrace>,
    /// The salsa query backtrace captured at panic time, if any.
    pub salsa_backtrace: Option<salsa::Backtrace>,
}
/// A panic payload, wrapping the boxed value produced by `panic!`.
#[derive(Debug)]
pub struct Payload(Box<dyn std::any::Any + Send>);

impl Payload {
    /// Returns the payload as a string when it is a `String` or `&str`
    /// (the two types `panic!` produces for message payloads).
    pub fn as_str(&self) -> Option<&str> {
        self.0
            .downcast_ref::<String>()
            .map(String::as_str)
            .or_else(|| self.0.downcast_ref::<&str>().copied())
    }

    /// Attempts to downcast the payload to a concrete type.
    pub fn downcast_ref<R: Any>(&self) -> Option<&R> {
        self.0.downcast_ref::<R>()
    }
}
impl PanicError {
    /// Renders a one-line, human-readable description of the panic, suitable
    /// for embedding in a diagnostic, optionally naming the file being checked.
    pub fn to_diagnostic_message(&self, path: Option<impl std::fmt::Display>) -> String {
        use std::fmt::Write;

        let mut out = String::from("Panicked");
        if let Some(location) = &self.location {
            // Writing to a `String` cannot fail; ignore the `fmt::Result`.
            let _ = write!(&mut out, " at {location}");
        }
        if let Some(path) = path {
            let _ = write!(&mut out, " when checking `{path}`");
        }
        if let Some(payload) = self.payload.as_str() {
            let _ = write!(&mut out, ": `{payload}`");
        }
        out
    }
}
impl std::fmt::Display for PanicError {
    /// Multi-line rendering: location, payload, salsa query stack, and (when
    /// captured or disabled) the backtrace or a hint on how to enable it.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "panicked at")?;
        if let Some(location) = &self.location {
            write!(f, " {location}")?;
        }
        if let Some(payload) = self.payload.as_str() {
            write!(f, ":\n{payload}")?;
        }
        if let Some(query_trace) = self.salsa_backtrace.as_ref() {
            // Propagate formatting errors like every other branch instead of
            // silently discarding them (was `let _ = writeln!(..)`).
            writeln!(f, "{query_trace}")?;
        }
        if let Some(backtrace) = &self.backtrace {
            match backtrace.status() {
                BacktraceStatus::Disabled => {
                    writeln!(
                        f,
                        "\nrun with `RUST_BACKTRACE=1` environment variable to display a backtrace"
                    )?;
                }
                BacktraceStatus::Captured => {
                    writeln!(f, "\nBacktrace: {backtrace}")?;
                }
                // `Unsupported` and future statuses: nothing useful to print.
                _ => {}
            }
        }
        Ok(())
    }
}
/// Per-thread scratch storage filled in by the custom panic hook and consumed
/// by `catch_unwind` when it builds a `PanicError`.
#[derive(Default)]
struct CapturedPanicInfo {
    backtrace: Option<std::backtrace::Backtrace>,
    location: Option<String>,
    salsa_backtrace: Option<salsa::Backtrace>,
}
thread_local! {
    // Whether the custom panic hook should record panic info for this thread.
    // Only threads currently inside `catch_unwind` set this to `true`.
    static CAPTURE_PANIC_INFO: Cell<bool> = const { Cell::new(false) };
    // Info recorded by the custom panic hook for the most recent panic on this
    // thread; taken (and reset) by `catch_unwind` when building the error.
    static LAST_BACKTRACE: Cell<CapturedPanicInfo> = const {
        Cell::new(CapturedPanicInfo { backtrace: None, location: None, salsa_backtrace: None })
    };
}
/// Installs the process-wide panic hook exactly once.
///
/// The hook records panic info into thread-locals for threads that opted in
/// via `CAPTURE_PANIC_INFO`; all other threads fall through to the previously
/// installed hook (typically the default one that prints to stderr).
fn install_hook() {
    static ONCE: OnceLock<()> = OnceLock::new();
    ONCE.get_or_init(|| {
        // Keep the previous hook alive so non-capturing threads still use it.
        let prev = std::panic::take_hook();
        std::panic::set_hook(Box::new(move |info| {
            let should_capture = CAPTURE_PANIC_INFO.with(Cell::get);
            if !should_capture {
                return (*prev)(info);
            }
            let location = info.location().map(Location::to_string);
            let backtrace = Some(std::backtrace::Backtrace::capture());
            LAST_BACKTRACE.set(CapturedPanicInfo {
                backtrace,
                location,
                salsa_backtrace: salsa::Backtrace::capture(),
            });
        }));
    });
}
/// Invokes a closure, capturing and returning the cause of an unwinding panic if one occurs.
///
/// ### Thread safety
///
/// This is implemented by installing a custom [panic hook](std::panic::set_hook). This panic hook
/// is a global resource. The hook that we install captures panic info in a thread-safe manner,
/// and also ensures that any threads that are _not_ currently using this `catch_unwind` wrapper
/// still use the previous hook (typically the default hook, which prints out panic information to
/// stderr).
///
/// We assume that there is nothing else running in this process that needs to install a competing
/// panic hook. We are careful to install our custom hook only once, and we do not ever restore
/// the previous hook (since you can always retain the previous hook's behavior by not calling this
/// wrapper).
pub fn catch_unwind<F, R>(f: F) -> Result<R, PanicError>
where
    F: FnOnce() -> R + std::panic::UnwindSafe,
{
    install_hook();
    // Opt this thread into capture, remembering the previous state so nested
    // `catch_unwind` calls restore it correctly on the way out.
    let prev_should_capture = CAPTURE_PANIC_INFO.replace(true);
    let result = std::panic::catch_unwind(f).map_err(|payload| {
        // Try to get the backtrace and location from our custom panic hook.
        // The custom panic hook only runs once when `panic!` is called (or similar). It doesn't
        // run when the panic is propagated with `std::panic::resume_unwind`, as is the
        // case for salsa unwinds (see the ignored test below).
        // Because of that, always take the payload from `catch_unwind` because it may have been
        // transformed by an inner `std::panic::catch_unwind` handler, and only use the information
        // from the custom hook to enrich the error with the backtrace and location.
        let CapturedPanicInfo {
            location,
            backtrace,
            salsa_backtrace,
        } = LAST_BACKTRACE.with(Cell::take);
        PanicError {
            location,
            payload: Payload(payload),
            backtrace,
            salsa_backtrace,
        }
    });
    CAPTURE_PANIC_INFO.set(prev_should_capture);
    result
}
#[cfg(test)]
mod tests {
    use salsa::{Database, Durability};

    #[test]
    #[ignore = "super::catch_unwind installs a custom panic handler, which could effect test isolation"]
    fn no_backtrace_for_salsa_cancelled() {
        #[salsa::input]
        struct Input {
            value: u32,
        }

        #[salsa::tracked]
        fn test_query(db: &dyn Database, input: Input) -> u32 {
            loop {
                // This should throw a cancelled error
                let _ = input.value(db);
            }
        }

        let db = salsa::DatabaseImpl::new();
        let input = Input::new(&db, 42);
        let result = std::thread::scope(move |scope| {
            {
                let mut db = db.clone();
                scope.spawn(move || {
                    // This will cancel the other thread by throwing a `salsa::Cancelled` error.
                    db.synthetic_write(Durability::MEDIUM);
                });
            }
            {
                // Run the looping query under our wrapper; it should unwind
                // with `salsa::Cancelled` rather than loop forever.
                scope.spawn(move || {
                    super::catch_unwind(|| {
                        test_query(&db, input);
                    })
                })
            }
            .join()
            .unwrap()
        });
        match result {
            Ok(_) => panic!("Expected query to panic"),
            Err(err) => {
                // Panics triggered with `resume_unwind` have no backtrace.
                assert!(err.backtrace.is_none());
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/vendored.rs | crates/ruff_db/src/vendored.rs | use std::borrow::Cow;
use std::collections::BTreeMap;
use std::fmt::{self, Debug};
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex, MutexGuard};
use zip::result::ZipResult;
use zip::write::FileOptions;
use zip::{CompressionMethod, ZipArchive, ZipWriter, read::ZipFile};
pub use self::path::{VendoredPath, VendoredPathBuf};
use crate::file_revision::FileRevision;
mod path;
/// All fallible vendored-filesystem operations surface `io::Error`.
type Result<T> = io::Result<T>;
/// The zip archive with its mutex held for the lifetime of the guard.
type LockedZipArchive<'a> = MutexGuard<'a, VendoredZipArchive>;
/// File system that stores all content in a static zip archive
/// bundled as part of the Ruff binary.
///
/// "Files" in the `VendoredFileSystem` are read-only and immutable.
/// Directories are supported, but symlinks and hardlinks cannot exist.
///
/// # Path separators
///
/// At time of writing (2025-07-11), this implementation always uses `/` as a
/// path separator, even in Windows environments where `\` is traditionally
/// used as a file path separator. Namely, this is only currently used with zip
/// files built by `crates/ty_vendored/build.rs`.
///
/// Callers using this may provide paths that use a `\` as a separator. It will
/// be transparently normalized to `/`.
///
/// This is particularly important because the presence of a trailing separator
/// in a zip file is conventionally used to indicate a directory entry.
#[derive(Clone)]
pub struct VendoredFileSystem {
    // All clones share one archive; the mutex serializes access to the zip
    // reader, whose entry lookups require `&mut self`.
    inner: Arc<Mutex<VendoredZipArchive>>,
}
impl VendoredFileSystem {
    /// Creates a filesystem backed by a zip archive living in static memory
    /// (e.g. bytes embedded in the binary).
    pub fn new_static(raw_bytes: &'static [u8]) -> Result<Self> {
        Self::new_impl(Cow::Borrowed(raw_bytes))
    }

    /// Creates a filesystem backed by an owned, in-memory zip archive.
    pub fn new(raw_bytes: Vec<u8>) -> Result<Self> {
        Self::new_impl(Cow::Owned(raw_bytes))
    }

    // Shared constructor: parse the archive once and wrap it so that clones
    // share the same reader behind a mutex.
    fn new_impl(data: Cow<'static, [u8]>) -> Result<Self> {
        Ok(Self {
            inner: Arc::new(Mutex::new(VendoredZipArchive::new(data)?)),
        })
    }

    /// Returns `true` if `path` exists in the archive as a file or directory.
    pub fn exists(&self, path: impl AsRef<VendoredPath>) -> bool {
        // Non-generic inner fn: keeps the monomorphized outer shim tiny.
        fn exists(fs: &VendoredFileSystem, path: &VendoredPath) -> bool {
            let normalized = NormalizedVendoredPath::from(path);
            let mut archive = fs.lock_archive();
            // Must probe the zipfile twice, as "stdlib" and "stdlib/" are considered
            // different paths in a zip file, but we want to abstract over that difference here
            // so that paths relative to the `VendoredFileSystem`
            // work the same as other paths in Ruff.
            archive.lookup_path(&normalized).is_ok()
                || archive
                    .lookup_path(&normalized.with_trailing_slash())
                    .is_ok()
        }
        exists(self, path.as_ref())
    }

    /// Returns the metadata (kind + content-derived revision) for `path`, or
    /// an error if no such entry exists.
    pub fn metadata(&self, path: impl AsRef<VendoredPath>) -> Result<Metadata> {
        fn metadata(fs: &VendoredFileSystem, path: &VendoredPath) -> Result<Metadata> {
            let normalized = NormalizedVendoredPath::from(path);
            let mut archive = fs.lock_archive();
            // Must probe the zipfile twice, as "stdlib" and "stdlib/" are considered
            // different paths in a zip file, but we want to abstract over that difference here
            // so that paths relative to the `VendoredFileSystem`
            // work the same as other paths in Ruff.
            if let Ok(zip_file) = archive.lookup_path(&normalized) {
                return Ok(Metadata::from_zip_file(zip_file));
            }
            let zip_file = archive.lookup_path(&normalized.with_trailing_slash())?;
            Ok(Metadata::from_zip_file(zip_file))
        }
        metadata(self, path.as_ref())
    }

    /// Returns `true` if `path` exists and is a directory entry.
    pub fn is_directory(&self, path: impl AsRef<VendoredPath>) -> bool {
        self.metadata(path)
            .is_ok_and(|metadata| metadata.kind().is_directory())
    }

    /// Returns `true` if `path` exists and is a file entry.
    pub fn is_file(&self, path: impl AsRef<VendoredPath>) -> bool {
        self.metadata(path)
            .is_ok_and(|metadata| metadata.kind().is_file())
    }

    /// Read the entire contents of the zip file at `path` into a string
    ///
    /// Returns an Err() if any of the following are true:
    /// - The path does not exist in the underlying zip archive
    /// - The path exists in the underlying zip archive, but represents a directory
    /// - The contents of the zip file at `path` contain invalid UTF-8
    pub fn read_to_string(&self, path: impl AsRef<VendoredPath>) -> Result<String> {
        fn read_to_string(fs: &VendoredFileSystem, path: &VendoredPath) -> Result<String> {
            let mut archive = fs.lock_archive();
            let mut zip_file = archive.lookup_path(&NormalizedVendoredPath::from(path))?;
            // Pre-allocate the buffer with the size specified in the ZIP file metadata
            // because `read_to_string` passes `None` as the size hint.
            // But let's not trust the zip file metadata (even though it's vendored)
            // and limit it to a reasonable size.
            let mut buffer = String::with_capacity(
                usize::try_from(zip_file.size())
                    .unwrap_or(usize::MAX)
                    .min(10_000_000),
            );
            zip_file.read_to_string(&mut buffer)?;
            Ok(buffer)
        }
        read_to_string(self, path.as_ref())
    }

    /// Read the direct children of the directory
    /// identified by `path`.
    ///
    /// If `path` is not a directory, then this will
    /// return an empty `Vec`.
    pub fn read_directory(&self, dir: impl AsRef<VendoredPath>) -> Vec<DirectoryEntry> {
        // N.B. We specifically do not return an iterator here to avoid
        // holding a lock for the lifetime of the iterator returned.
        // That is, it seems like a footgun to keep the zip archive
        // locked during iteration, since the unit of work for each
        // item in the iterator could be arbitrarily long. Allocating
        // up front and stuffing all entries into it is probably the
        // simplest solution and what we do here. If this becomes
        // a problem, there are other strategies we could pursue.
        // (Amortizing allocs, using a different synchronization
        // behavior or even exposing additional APIs.) ---AG
        fn read_directory(fs: &VendoredFileSystem, dir: &VendoredPath) -> Vec<DirectoryEntry> {
            // Directory entries in the archive always carry a trailing slash.
            let mut normalized = NormalizedVendoredPath::from(dir);
            if !normalized.as_str().ends_with('/') {
                normalized = normalized.with_trailing_slash();
            }
            let archive = fs.lock_archive();
            let mut entries = vec![];
            for name in archive.0.file_names() {
                // Any entry that doesn't have the `path` (with a
                // trailing slash) as a prefix cannot possibly be in
                // the directory referenced by `path`.
                let Some(without_dir_prefix) = name.strip_prefix(normalized.as_str()) else {
                    continue;
                };
                // Filter out an entry equivalent to the path given
                // since we only want children of the directory.
                if without_dir_prefix.is_empty() {
                    continue;
                }
                // We only want *direct* children. Files that are
                // direct children cannot have any slashes (or else
                // they are not direct children). Directories that
                // are direct children can only have one slash and
                // it must be at the end.
                //
                // (We do this manually ourselves to avoid doing a
                // full file lookup and metadata retrieval via the
                // `zip` crate.)
                let file_type = FileType::from_zip_file_name(without_dir_prefix);
                let slash_count = without_dir_prefix.matches('/').count();
                match file_type {
                    FileType::File if slash_count > 0 => continue,
                    FileType::Directory if slash_count > 1 => continue,
                    _ => {}
                }
                entries.push(DirectoryEntry {
                    path: VendoredPathBuf::from(name),
                    file_type,
                });
            }
            entries
        }
        read_directory(self, dir.as_ref())
    }

    /// Acquire a lock on the underlying zip archive.
    /// The call will block until it is able to acquire the lock.
    ///
    /// ## Panics:
    /// If the current thread already holds the lock.
    fn lock_archive(&self) -> LockedZipArchive<'_> {
        self.inner.lock().unwrap()
    }
}
impl fmt::Debug for VendoredFileSystem {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut archive = self.lock_archive();
        if f.alternate() {
            // `{:#?}`: dump every entry with its per-file zip metadata, sorted
            // so that the output is deterministic.
            let mut paths: Vec<String> = archive.0.file_names().map(String::from).collect();
            paths.sort();
            let debug_info: BTreeMap<String, ZipFileDebugInfo> = paths
                .iter()
                .map(|path| {
                    (
                        path.to_owned(),
                        ZipFileDebugInfo::from(archive.0.by_name(path).unwrap()),
                    )
                })
                .collect();
            f.debug_struct("VendoredFileSystem")
                .field("inner_mutex_poisoned", &self.inner.is_poisoned())
                .field("paths", &paths)
                .field("data_by_path", &debug_info)
                .finish()
        } else {
            // `{:?}`: just the entry count.
            write!(f, "VendoredFileSystem(<{} paths>)", archive.len())
        }
    }
}
impl Default for VendoredFileSystem {
    /// An empty filesystem: a freshly finalized zip archive with no entries.
    fn default() -> Self {
        let mut raw = Vec::new();
        {
            let mut cursor = io::Cursor::new(&mut raw);
            // Finishing immediately writes a valid, empty central directory.
            ZipWriter::new(&mut cursor).finish().unwrap();
        }
        VendoredFileSystem::new(raw).unwrap()
    }
}
/// Private struct only used in `Debug` implementations
///
/// This could possibly be unified with the `Metadata` struct,
/// but that is deliberately kept small, and only exposes metadata
/// that users of the `VendoredFileSystem` could realistically need.
/// For debugging purposes, however, we want to have all information
/// available.
#[expect(unused)]
#[derive(Debug)]
struct ZipFileDebugInfo {
    // CRC-32 of the uncompressed contents, as stored in the zip entry.
    crc32_hash: u32,
    compressed_size: u64,
    uncompressed_size: u64,
    kind: FileType,
}
impl<'a> From<ZipFile<'a>> for ZipFileDebugInfo {
    /// Snapshots the debug-relevant metadata of a zip entry.
    fn from(file: ZipFile<'a>) -> Self {
        let kind = if file.is_dir() {
            FileType::Directory
        } else {
            FileType::File
        };
        Self {
            crc32_hash: file.crc32(),
            compressed_size: file.compressed_size(),
            uncompressed_size: file.size(),
            kind,
        }
    }
}
/// The kind of an entry in the vendored zip archive.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum FileType {
    /// The path exists in the zip archive and represents a vendored file
    File,
    /// The path exists in the zip archive and represents a vendored directory of files
    Directory,
}
impl FileType {
    /// Classifies a zip entry name: a trailing `/` marks a directory entry.
    fn from_zip_file_name(name: &str) -> FileType {
        match name.ends_with('/') {
            true => FileType::Directory,
            false => FileType::File,
        }
    }

    /// Returns `true` for [`FileType::File`].
    pub const fn is_file(self) -> bool {
        matches!(self, FileType::File)
    }

    /// Returns `true` for [`FileType::Directory`].
    pub const fn is_directory(self) -> bool {
        matches!(self, FileType::Directory)
    }
}
/// Minimal metadata exposed for a vendored entry: its kind and a revision.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Metadata {
    kind: FileType,
    // Derived from the entry's CRC-32, so it changes only with the contents.
    revision: FileRevision,
}
impl Metadata {
    // Builds metadata from a zip entry; the CRC-32 of the contents serves as
    // the revision, so the revision changes only when the contents change.
    fn from_zip_file(zip_file: ZipFile) -> Self {
        Self {
            kind: if zip_file.is_dir() {
                FileType::Directory
            } else {
                FileType::File
            },
            revision: FileRevision::new(u128::from(zip_file.crc32())),
        }
    }

    /// Whether this entry is a file or a directory.
    pub fn kind(&self) -> FileType {
        self.kind
    }

    /// Content-derived revision of the entry.
    pub fn revision(&self) -> FileRevision {
        self.revision
    }
}
/// A single child produced by [`VendoredFileSystem::read_directory`].
#[derive(Debug, PartialEq, Eq)]
pub struct DirectoryEntry {
    path: VendoredPathBuf,
    file_type: FileType,
}
impl DirectoryEntry {
    /// Creates an entry from its path and kind.
    pub fn new(path: VendoredPathBuf, file_type: FileType) -> Self {
        Self { path, file_type }
    }

    /// Borrows the entry's path.
    pub fn path(&self) -> &VendoredPath {
        &self.path
    }

    /// Consumes the entry, yielding its owned path.
    pub fn into_path(self) -> VendoredPathBuf {
        self.path
    }

    /// The entry's kind (file or directory).
    pub fn file_type(&self) -> FileType {
        self.file_type
    }
}
/// Newtype wrapper around a ZipArchive.
///
/// Reads from an in-memory cursor; lookups need `&mut self` because the
/// underlying reader seeks within the archive.
#[derive(Debug)]
struct VendoredZipArchive(ZipArchive<io::Cursor<Cow<'static, [u8]>>>);
impl VendoredZipArchive {
    /// Parses the zip central directory from the raw archive bytes.
    fn new(data: Cow<'static, [u8]>) -> Result<Self> {
        let archive = ZipArchive::new(io::Cursor::new(data))?;
        Ok(Self(archive))
    }

    /// Looks up a single entry; the name must match exactly, including any
    /// trailing slash that marks a directory entry.
    fn lookup_path(&mut self, path: &NormalizedVendoredPath) -> Result<ZipFile<'_>> {
        let entry = self.0.by_name(path.as_str())?;
        Ok(entry)
    }

    /// Number of entries in the archive.
    fn len(&self) -> usize {
        self.0.len()
    }
}
/// A path that has been normalized via the `normalize_vendored_path` function.
///
/// Trailing slashes are normalized away by `camino::Utf8PathBuf`s,
/// but trailing slashes are crucial for distinguishing between
/// files and directories inside zip archives.
#[derive(Debug, Clone, PartialEq, Eq)]
struct NormalizedVendoredPath<'a>(Cow<'a, str>);
impl NormalizedVendoredPath<'_> {
fn with_trailing_slash(self) -> Self {
debug_assert!(!self.0.ends_with('/'));
let mut data = self.0.into_owned();
data.push('/');
Self(Cow::Owned(data))
}
fn as_str(&self) -> &str {
&self.0
}
}
impl<'a> From<&'a VendoredPath> for NormalizedVendoredPath<'a> {
    /// Normalize the path.
    ///
    /// The normalizations are:
    /// - Remove `.` and `..` components
    /// - Strip trailing slashes
    /// - Normalize `\\` separators to `/`
    /// - Validate that the path does not have any unsupported components
    ///
    /// ## Panics:
    /// If a path with an unsupported component for vendored paths is passed.
    /// Unsupported components are path prefixes and path root directories.
    fn from(path: &'a VendoredPath) -> Self {
        /// Remove `.` and `..` components, and validate that unsupported components are not present.
        ///
        /// This inner routine also strips trailing slashes,
        /// and normalizes paths to use Unix `/` separators.
        /// However, it always allocates, so avoid calling it if possible.
        /// In most cases, the path should already be normalized.
        fn normalize_unnormalized_path(path: &VendoredPath) -> String {
            let mut normalized_parts = Vec::new();
            for component in path.components() {
                match component {
                    camino::Utf8Component::Normal(part) => normalized_parts.push(part),
                    camino::Utf8Component::CurDir => continue,
                    camino::Utf8Component::ParentDir => {
                        // `VendoredPath("")`, `VendoredPath("..")` and `VendoredPath("../..")`
                        // all resolve to the same path relative to the zip archive
                        // (see https://github.com/astral-sh/ruff/pull/11991#issuecomment-2185278014)
                        normalized_parts.pop();
                    }
                    unsupported => {
                        panic!("Unsupported component in a vendored path: {unsupported}")
                    }
                }
            }
            normalized_parts.join("/")
        }

        let path_str = path.as_str();
        if std::path::MAIN_SEPARATOR == '\\' && path_str.contains('\\') {
            // Normalize paths so that they always use Unix path separators
            NormalizedVendoredPath(Cow::Owned(normalize_unnormalized_path(path)))
        } else if !path
            .components()
            .all(|component| matches!(component, camino::Utf8Component::Normal(_)))
        {
            // Remove non-`Normal` components
            NormalizedVendoredPath(Cow::Owned(normalize_unnormalized_path(path)))
        } else {
            // Fast path: already normalized, so borrow instead of allocating.
            // Strip trailing slashes from the path
            NormalizedVendoredPath(Cow::Borrowed(path_str.trim_end_matches('/')))
        }
    }
}
/// Incrementally builds an in-memory zip archive that can be opened as a
/// [`VendoredFileSystem`] via [`VendoredFileSystemBuilder::finish`].
pub struct VendoredFileSystemBuilder {
    writer: ZipWriter<io::Cursor<Vec<u8>>>,
    // Compression applied to every entry added to the archive.
    compression_method: CompressionMethod,
}
impl VendoredFileSystemBuilder {
    /// Starts an empty archive that compresses entries with `compression_method`.
    pub fn new(compression_method: CompressionMethod) -> Self {
        Self {
            writer: ZipWriter::new(io::Cursor::new(Vec::new())),
            compression_method,
        }
    }

    /// Appends a file entry with the given UTF-8 contents.
    pub fn add_file(
        &mut self,
        path: impl AsRef<VendoredPath>,
        content: &str,
    ) -> std::io::Result<()> {
        let options = self.options();
        self.writer.start_file(path.as_ref().as_str(), options)?;
        self.writer.write_all(content.as_bytes())
    }

    /// Appends a directory entry.
    pub fn add_directory(&mut self, path: impl AsRef<VendoredPath>) -> ZipResult<()> {
        let options = self.options();
        self.writer.add_directory(path.as_ref().as_str(), options)
    }

    /// Finalizes the archive and opens it as a read-only [`VendoredFileSystem`].
    pub fn finish(mut self) -> Result<VendoredFileSystem> {
        let cursor = self.writer.finish()?;
        VendoredFileSystem::new(cursor.into_inner())
    }

    // Entry options shared by files and directories.
    fn options(&self) -> FileOptions {
        FileOptions::default()
            .compression_method(self.compression_method)
            .unix_permissions(0o644)
    }
}
#[cfg(test)]
pub(crate) mod tests {
    use insta::assert_snapshot;

    use super::*;

    const FUNCTOOLS_CONTENTS: &str = "def update_wrapper(): ...";
    const ASYNCIO_TASKS_CONTENTS: &str = "class Task: ...";

    // Builds a tiny typeshed-shaped archive with two directories and two files.
    fn mock_typeshed() -> VendoredFileSystem {
        let mut builder = VendoredFileSystemBuilder::new(CompressionMethod::Stored);
        builder.add_directory("stdlib/").unwrap();
        builder
            .add_file("stdlib/functools.pyi", FUNCTOOLS_CONTENTS)
            .unwrap();
        builder.add_directory("stdlib/asyncio/").unwrap();
        builder
            .add_file("stdlib/asyncio/tasks.pyi", ASYNCIO_TASKS_CONTENTS)
            .unwrap();
        builder.finish().unwrap()
    }

    #[test]
    fn filesystem_debug_implementation() {
        assert_snapshot!(
            format!("{:?}", mock_typeshed()),
            @"VendoredFileSystem(<4 paths>)"
        );
    }

    #[test]
    fn filesystem_debug_implementation_alternate() {
        assert_snapshot!(format!("{:#?}", mock_typeshed()), @r#"
        VendoredFileSystem {
            inner_mutex_poisoned: false,
            paths: [
                "stdlib/",
                "stdlib/asyncio/",
                "stdlib/asyncio/tasks.pyi",
                "stdlib/functools.pyi",
            ],
            data_by_path: {
                "stdlib/": ZipFileDebugInfo {
                    crc32_hash: 0,
                    compressed_size: 0,
                    uncompressed_size: 0,
                    kind: Directory,
                },
                "stdlib/asyncio/": ZipFileDebugInfo {
                    crc32_hash: 0,
                    compressed_size: 0,
                    uncompressed_size: 0,
                    kind: Directory,
                },
                "stdlib/asyncio/tasks.pyi": ZipFileDebugInfo {
                    crc32_hash: 2826547428,
                    compressed_size: 15,
                    uncompressed_size: 15,
                    kind: File,
                },
                "stdlib/functools.pyi": ZipFileDebugInfo {
                    crc32_hash: 1099005079,
                    compressed_size: 25,
                    uncompressed_size: 25,
                    kind: File,
                },
            },
        }
        "#);
    }

    // Asserts that `dirname` exists, can't be read as a file, and reports
    // itself as a directory.
    fn test_directory(dirname: &str) {
        let mock_typeshed = mock_typeshed();

        let path = VendoredPath::new(dirname);

        assert!(mock_typeshed.exists(path));
        assert!(mock_typeshed.read_to_string(path).is_err());
        let metadata = mock_typeshed.metadata(path).unwrap();
        assert!(metadata.kind().is_directory());
    }

    #[test]
    fn stdlib_dir_no_trailing_slash() {
        test_directory("stdlib")
    }

    #[test]
    fn stdlib_dir_trailing_slash() {
        test_directory("stdlib/")
    }

    #[test]
    fn asyncio_dir_no_trailing_slash() {
        test_directory("stdlib/asyncio")
    }

    #[test]
    fn asyncio_dir_trailing_slash() {
        test_directory("stdlib/asyncio/")
    }

    #[test]
    fn stdlib_dir_parent_components() {
        test_directory("stdlib/asyncio/../../stdlib")
    }

    #[test]
    fn asyncio_dir_odd_components() {
        test_directory("./stdlib/asyncio/../asyncio/")
    }

    // Renders the sorted child paths of `path` for snapshot comparison.
    fn readdir_snapshot(fs: &VendoredFileSystem, path: &str) -> String {
        let mut paths = fs
            .read_directory(VendoredPath::new(path))
            .into_iter()
            .map(|entry| entry.path().to_string())
            .collect::<Vec<String>>();
        paths.sort();
        paths.join("\n")
    }

    #[test]
    fn read_directory_stdlib() {
        let mock_typeshed = mock_typeshed();
        assert_snapshot!(readdir_snapshot(&mock_typeshed, "stdlib"), @r"
        vendored://stdlib/asyncio/
        vendored://stdlib/functools.pyi
        ");
        assert_snapshot!(readdir_snapshot(&mock_typeshed, "stdlib/"), @r"
        vendored://stdlib/asyncio/
        vendored://stdlib/functools.pyi
        ");
        assert_snapshot!(readdir_snapshot(&mock_typeshed, "./stdlib"), @r"
        vendored://stdlib/asyncio/
        vendored://stdlib/functools.pyi
        ");
        assert_snapshot!(readdir_snapshot(&mock_typeshed, "./stdlib/"), @r"
        vendored://stdlib/asyncio/
        vendored://stdlib/functools.pyi
        ");
    }

    #[test]
    fn read_directory_asyncio() {
        let mock_typeshed = mock_typeshed();
        assert_snapshot!(
            readdir_snapshot(&mock_typeshed, "stdlib/asyncio"),
            @"vendored://stdlib/asyncio/tasks.pyi",
        );
        assert_snapshot!(
            readdir_snapshot(&mock_typeshed, "./stdlib/asyncio"),
            @"vendored://stdlib/asyncio/tasks.pyi",
        );
        assert_snapshot!(
            readdir_snapshot(&mock_typeshed, "stdlib/asyncio/"),
            @"vendored://stdlib/asyncio/tasks.pyi",
        );
        assert_snapshot!(
            readdir_snapshot(&mock_typeshed, "./stdlib/asyncio/"),
            @"vendored://stdlib/asyncio/tasks.pyi",
        );
    }

    // Asserts that all lookup APIs agree that `path` does not exist.
    fn test_nonexistent_path(path: &str) {
        let mock_typeshed = mock_typeshed();
        let path = VendoredPath::new(path);
        assert!(!mock_typeshed.exists(path));
        assert!(mock_typeshed.metadata(path).is_err());
        assert!(
            mock_typeshed
                .read_to_string(path)
                .is_err_and(|err| err.to_string().contains("file not found"))
        );
    }

    #[test]
    fn simple_nonexistent_path() {
        test_nonexistent_path("foo")
    }

    #[test]
    fn nonexistent_path_with_extension() {
        test_nonexistent_path("foo.pyi")
    }

    #[test]
    fn nonexistent_path_with_trailing_slash() {
        test_nonexistent_path("foo/")
    }

    #[test]
    fn nonexistent_path_with_fancy_components() {
        test_nonexistent_path("./foo/../../../foo")
    }

    // Asserts that `path` exists and reports itself as a file.
    fn test_file(mock_typeshed: &VendoredFileSystem, path: &VendoredPath) {
        assert!(mock_typeshed.exists(path));
        let metadata = mock_typeshed.metadata(path).unwrap();
        assert!(metadata.kind().is_file());
    }

    #[test]
    fn functools_file_contents() {
        let mock_typeshed = mock_typeshed();
        let path = VendoredPath::new("stdlib/functools.pyi");
        test_file(&mock_typeshed, path);
        let functools_stub = mock_typeshed.read_to_string(path).unwrap();
        assert_eq!(functools_stub.as_str(), FUNCTOOLS_CONTENTS);
        // Test that using the RefCell doesn't mutate
        // the internal state of the underlying zip archive incorrectly:
        let functools_stub_again = mock_typeshed.read_to_string(path).unwrap();
        assert_eq!(functools_stub_again.as_str(), FUNCTOOLS_CONTENTS);
    }

    #[test]
    fn functools_file_other_path() {
        test_file(
            &mock_typeshed(),
            VendoredPath::new("stdlib/../stdlib/../stdlib/functools.pyi"),
        )
    }

    #[test]
    fn asyncio_file_contents() {
        let mock_typeshed = mock_typeshed();
        let path = VendoredPath::new("stdlib/asyncio/tasks.pyi");
        test_file(&mock_typeshed, path);
        let asyncio_stub = mock_typeshed.read_to_string(path).unwrap();
        assert_eq!(asyncio_stub.as_str(), ASYNCIO_TASKS_CONTENTS);
    }

    #[test]
    fn asyncio_file_other_path() {
        test_file(
            &mock_typeshed(),
            VendoredPath::new("./stdlib/asyncio/../asyncio/tasks.pyi"),
        )
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/display.rs | crates/ruff_db/src/display.rs | use std::fmt::{self, Display, Formatter};
/// Extension trait adding a `join` builder to [`Formatter`]: write a sequence
/// of `Display` items separated by a fixed string, without allocating.
pub trait FormatterJoinExtension<'b> {
    fn join<'a>(&'a mut self, separator: &'static str) -> Join<'a, 'b>;
}
impl<'b> FormatterJoinExtension<'b> for Formatter<'b> {
    /// Starts a join over this formatter; nothing is written until `entry`.
    fn join<'a>(&'a mut self, separator: &'static str) -> Join<'a, 'b> {
        Join {
            fmt: self,
            separator,
            result: Ok(()),
            seen_first: false,
        }
    }
}
/// Builder returned by [`FormatterJoinExtension::join`].
pub struct Join<'a, 'b> {
    fmt: &'a mut Formatter<'b>,
    separator: &'static str,
    // First write error encountered, if any; reported by `finish`.
    result: fmt::Result,
    // Whether at least one entry has been written (controls the separator).
    seen_first: bool,
}
impl Join<'_, '_> {
    /// Writes `item`, preceded by the separator for every entry after the
    /// first. Write errors are latched and reported by [`Join::finish`].
    pub fn entry(&mut self, item: &dyn Display) -> &mut Self {
        let is_first = !std::mem::replace(&mut self.seen_first, true);
        if !is_first {
            self.result = self
                .result
                .and_then(|()| self.fmt.write_str(self.separator));
        }
        self.result = self.result.and_then(|()| item.fmt(self.fmt));
        self
    }

    /// Writes each item of `items` as an entry.
    pub fn entries<I, F>(&mut self, items: I) -> &mut Self
    where
        I: IntoIterator<Item = F>,
        F: Display,
    {
        items.into_iter().for_each(|item| {
            self.entry(&item);
        });
        self
    }

    /// Returns the first error encountered, if any.
    pub fn finish(&mut self) -> fmt::Result {
        self.result
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/parsed.rs | crates/ruff_db/src/parsed.rs | use std::fmt::Formatter;
use std::sync::Arc;
use arc_swap::ArcSwapOption;
use get_size2::GetSize;
use ruff_python_ast::{AnyRootNodeRef, ModModule, NodeIndex};
use ruff_python_parser::{ParseOptions, Parsed, parse_unchecked};
use crate::Db;
use crate::files::File;
use crate::source::source_text;
/// Returns the parsed AST of `file`, including its token stream.
///
/// The query uses Ruff's error-resilient parser. That means that the parser always succeeds to produce an
/// AST even if the file contains syntax errors. The parse errors
/// are then accessible through [`Parsed::errors`].
///
/// The query is only cached when the [`source_text()`] hasn't changed. This is because
/// comparing two ASTs is a non-trivial operation and every offset change is directly
/// reflected in the changed AST offsets.
/// The other reason is that Ruff's AST doesn't implement `Eq` which Salsa requires
/// for determining if a query result is unchanged.
///
/// The LRU capacity of 200 was picked without any empirical evidence that it's optimal,
/// instead it's a wild guess that it should be unlikely that incremental changes involve
/// more than 200 modules. Parsed ASTs within the same revision are never evicted by Salsa.
#[salsa::tracked(returns(ref), no_eq, heap_size=ruff_memory_usage::heap_size, lru=200)]
pub fn parsed_module(db: &dyn Db, file: File) -> ParsedModule {
    let _span = tracing::trace_span!("parsed_module", ?file).entered();
    // Delegate to the untracked helper so `ParsedModule::load` can reparse
    // outside of this salsa query after garbage collection.
    let parsed = parsed_module_impl(db, file);
    ParsedModule::new(file, parsed)
}
/// Parses `file` with options derived from its source type and the project's
/// target Python version. Always yields a module (possibly with parse errors).
pub fn parsed_module_impl(db: &dyn Db, file: File) -> Parsed<ModModule> {
    let source = source_text(db, file);
    let options =
        ParseOptions::from(file.source_type(db)).with_target_version(db.python_version());

    parse_unchecked(&source, options)
        .try_into_module()
        .expect("PySourceType always parses into a module")
}
/// A wrapper around a parsed module.
///
/// This type manages instances of the module AST. A particular instance of the AST
/// is represented with the [`ParsedModuleRef`] type.
#[derive(Clone, get_size2::GetSize)]
pub struct ParsedModule {
    // The file this AST was parsed from; used to reparse after collection.
    file: File,
    // `None` after `clear()`; `load` repopulates it on demand.
    #[get_size(size_fn = arc_swap_size)]
    inner: Arc<ArcSwapOption<indexed::IndexedModule>>,
}
impl ParsedModule {
    /// Wraps a freshly parsed module for `file`.
    pub fn new(file: File, parsed: Parsed<ModModule>) -> Self {
        let indexed = indexed::IndexedModule::new(parsed);
        Self {
            file,
            inner: Arc::new(ArcSwapOption::new(Some(indexed))),
        }
    }

    /// Loads a reference to the parsed module.
    ///
    /// Note that holding on to the reference will prevent garbage collection
    /// of the AST. This method will reparse the module if it has been collected.
    pub fn load(&self, db: &dyn Db) -> ParsedModuleRef {
        let indexed = self.inner.load_full().unwrap_or_else(|| {
            // The AST was garbage-collected: parse the file again and cache it.
            let reparsed = indexed::IndexedModule::new(parsed_module_impl(db, self.file));
            tracing::debug!(
                "File `{}` was reparsed after being collected in the current Salsa revision",
                self.file.path(db)
            );
            self.inner.store(Some(reparsed.clone()));
            reparsed
        });

        ParsedModuleRef {
            module: self.clone(),
            indexed,
        }
    }

    /// Clear the parsed module, dropping the AST once all references to it are dropped.
    pub fn clear(&self) {
        self.inner.store(None);
    }

    /// Returns the file to which this module belongs.
    pub fn file(&self) -> File {
        self.file
    }
}
impl std::fmt::Debug for ParsedModule {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Only the swap cell is shown; the AST itself is too large to dump.
        f.debug_tuple("ParsedModule").field(&self.inner).finish()
    }
}
impl PartialEq for ParsedModule {
    fn eq(&self, other: &Self) -> bool {
        // Identity comparison: two `ParsedModule`s are equal only when they
        // share the same `ArcSwap` allocation. AST contents are not compared.
        Arc::ptr_eq(&self.inner, &other.inner)
    }
}
impl Eq for ParsedModule {}
/// Cheap cloneable wrapper around an instance of a module AST.
#[derive(Clone)]
pub struct ParsedModuleRef {
    module: ParsedModule,
    // Holding this `Arc` keeps the AST alive even if `module.clear()` runs.
    indexed: Arc<indexed::IndexedModule>,
}
impl ParsedModuleRef {
    /// Returns a reference to the [`ParsedModule`] that this instance was loaded from.
    pub fn module(&self) -> &ParsedModule {
        &self.module
    }

    /// Returns a reference to the AST node at the given index.
    ///
    /// NOTE(review): delegates to `IndexedModule::get_by_index`, which appears
    /// to expect an index assigned during AST indexing — confirm it panics on
    /// uninitialized indices before relying on that.
    pub fn get_by_index<'ast>(&'ast self, index: NodeIndex) -> AnyRootNodeRef<'ast> {
        self.indexed.get_by_index(index)
    }
}
impl std::ops::Deref for ParsedModuleRef {
    type Target = Parsed<ModModule>;

    // Lets callers use a `ParsedModuleRef` wherever a `Parsed<ModModule>` is
    // expected (e.g. `.syntax()`, `.errors()`).
    fn deref(&self) -> &Self::Target {
        &self.indexed.parsed
    }
}
/// Returns the heap-size of the currently stored `T` in the `ArcSwap`,
/// or `0` when the cell is empty.
fn arc_swap_size<T>(arc_swap: &Arc<ArcSwapOption<T>>) -> usize
where
    T: GetSize,
{
    // `load` yields a guard derefing to `Option<Arc<T>>`; measure the payload
    // if present.
    arc_swap.load().as_deref().map_or(0, T::get_heap_size)
}
mod indexed {
use std::sync::Arc;
use ruff_python_ast::visitor::source_order::*;
use ruff_python_ast::*;
use ruff_python_parser::Parsed;
/// A wrapper around the AST that allows access to AST nodes by index.
#[derive(Debug, get_size2::GetSize)]
pub struct IndexedModule {
index: Box<[AnyRootNodeRef<'static>]>,
pub parsed: Parsed<ModModule>,
}
impl IndexedModule {
/// Create a new [`IndexedModule`] from the given AST.
#[allow(clippy::unnecessary_cast)]
pub fn new(parsed: Parsed<ModModule>) -> Arc<Self> {
let mut visitor = Visitor {
nodes: Vec::new(),
index: 0,
};
let mut inner = Arc::new(IndexedModule {
parsed,
index: Box::new([]),
});
AnyNodeRef::from(inner.parsed.syntax()).visit_source_order(&mut visitor);
let index: Box<[AnyRootNodeRef<'_>]> = visitor.nodes.into_boxed_slice();
// SAFETY: We cast from `Box<[AnyRootNodeRef<'_>]>` to `Box<[AnyRootNodeRef<'static>]>`,
// faking the 'static lifetime to create the self-referential struct. The node references
// are into the `Arc<Parsed<ModModule>>`, so are valid for as long as the `IndexedModule`
// is alive. We make sure to restore the correct lifetime in `get_by_index`.
//
// Note that we can never move the data within the `Arc` after this point.
Arc::get_mut(&mut inner).unwrap().index =
unsafe { Box::from_raw(Box::into_raw(index) as *mut [AnyRootNodeRef<'static>]) };
inner
}
/// Returns the node at the given index.
pub fn get_by_index<'ast>(&'ast self, index: NodeIndex) -> AnyRootNodeRef<'ast> {
let index = index
.as_u32()
.expect("attempted to access uninitialized `NodeIndex`");
// Note that this method restores the correct lifetime: the nodes are valid for as
// long as the reference to `IndexedModule` is alive.
self.index[index as usize]
}
}
/// A visitor that collects nodes in source order.
pub struct Visitor<'a> {
pub index: u32,
pub nodes: Vec<AnyRootNodeRef<'a>>,
}
impl<'a> Visitor<'a> {
fn visit_node<T>(&mut self, node: &'a T)
where
T: HasNodeIndex + std::fmt::Debug,
AnyRootNodeRef<'a>: From<&'a T>,
{
node.node_index().set(NodeIndex::from(self.index));
self.nodes.push(AnyRootNodeRef::from(node));
self.index += 1;
}
}
impl<'a> SourceOrderVisitor<'a> for Visitor<'a> {
#[inline]
fn visit_mod(&mut self, module: &'a Mod) {
self.visit_node(module);
walk_module(self, module);
}
#[inline]
fn visit_stmt(&mut self, stmt: &'a Stmt) {
self.visit_node(stmt);
walk_stmt(self, stmt);
}
#[inline]
fn visit_annotation(&mut self, expr: &'a Expr) {
self.visit_node(expr);
walk_annotation(self, expr);
}
#[inline]
fn visit_expr(&mut self, expr: &'a Expr) {
self.visit_node(expr);
walk_expr(self, expr);
}
#[inline]
fn visit_decorator(&mut self, decorator: &'a Decorator) {
self.visit_node(decorator);
walk_decorator(self, decorator);
}
#[inline]
fn visit_comprehension(&mut self, comprehension: &'a Comprehension) {
self.visit_node(comprehension);
walk_comprehension(self, comprehension);
}
#[inline]
fn visit_except_handler(&mut self, except_handler: &'a ExceptHandler) {
self.visit_node(except_handler);
walk_except_handler(self, except_handler);
}
#[inline]
fn visit_arguments(&mut self, arguments: &'a Arguments) {
self.visit_node(arguments);
walk_arguments(self, arguments);
}
#[inline]
fn visit_parameters(&mut self, parameters: &'a Parameters) {
self.visit_node(parameters);
walk_parameters(self, parameters);
}
#[inline]
fn visit_parameter(&mut self, arg: &'a Parameter) {
self.visit_node(arg);
walk_parameter(self, arg);
}
fn visit_parameter_with_default(
&mut self,
parameter_with_default: &'a ParameterWithDefault,
) {
self.visit_node(parameter_with_default);
walk_parameter_with_default(self, parameter_with_default);
}
#[inline]
fn visit_keyword(&mut self, keyword: &'a Keyword) {
self.visit_node(keyword);
walk_keyword(self, keyword);
}
#[inline]
fn visit_alias(&mut self, alias: &'a Alias) {
self.visit_node(alias);
walk_alias(self, alias);
}
#[inline]
fn visit_with_item(&mut self, with_item: &'a WithItem) {
self.visit_node(with_item);
walk_with_item(self, with_item);
}
#[inline]
fn visit_type_params(&mut self, type_params: &'a TypeParams) {
self.visit_node(type_params);
walk_type_params(self, type_params);
}
#[inline]
fn visit_type_param(&mut self, type_param: &'a TypeParam) {
self.visit_node(type_param);
walk_type_param(self, type_param);
}
#[inline]
fn visit_match_case(&mut self, match_case: &'a MatchCase) {
self.visit_node(match_case);
walk_match_case(self, match_case);
}
#[inline]
fn visit_pattern(&mut self, pattern: &'a Pattern) {
self.visit_node(pattern);
walk_pattern(self, pattern);
}
#[inline]
fn visit_pattern_arguments(&mut self, pattern_arguments: &'a PatternArguments) {
self.visit_node(pattern_arguments);
walk_pattern_arguments(self, pattern_arguments);
}
#[inline]
fn visit_pattern_keyword(&mut self, pattern_keyword: &'a PatternKeyword) {
self.visit_node(pattern_keyword);
walk_pattern_keyword(self, pattern_keyword);
}
#[inline]
fn visit_elif_else_clause(&mut self, elif_else_clause: &'a ElifElseClause) {
self.visit_node(elif_else_clause);
walk_elif_else_clause(self, elif_else_clause);
}
#[inline]
fn visit_f_string(&mut self, f_string: &'a FString) {
self.visit_node(f_string);
walk_f_string(self, f_string);
}
#[inline]
fn visit_interpolated_string_element(
&mut self,
interpolated_string_element: &'a InterpolatedStringElement,
) {
self.visit_node(interpolated_string_element);
walk_interpolated_string_element(self, interpolated_string_element);
}
#[inline]
fn visit_t_string(&mut self, t_string: &'a TString) {
self.visit_node(t_string);
walk_t_string(self, t_string);
}
#[inline]
fn visit_string_literal(&mut self, string_literal: &'a StringLiteral) {
self.visit_node(string_literal);
walk_string_literal(self, string_literal);
}
#[inline]
fn visit_bytes_literal(&mut self, bytes_literal: &'a BytesLiteral) {
self.visit_node(bytes_literal);
walk_bytes_literal(self, bytes_literal);
}
#[inline]
fn visit_identifier(&mut self, identifier: &'a Identifier) {
self.visit_node(identifier);
walk_identifier(self, identifier);
}
}
}
#[cfg(test)]
mod tests {
use crate::Db;
use crate::files::{system_path_to_file, vendored_path_to_file};
use crate::parsed::parsed_module;
use crate::system::{
DbWithTestSystem, DbWithWritableSystem as _, SystemPath, SystemVirtualPath,
};
use crate::tests::TestDb;
use crate::vendored::{VendoredFileSystemBuilder, VendoredPath};
use zip::CompressionMethod;
#[test]
fn python_file() -> crate::system::Result<()> {
let mut db = TestDb::new();
let path = "test.py";
db.write_file(path, "x = 10")?;
let file = system_path_to_file(&db, path).unwrap();
let parsed = parsed_module(&db, file).load(&db);
assert!(parsed.has_valid_syntax());
Ok(())
}
#[test]
fn python_ipynb_file() -> crate::system::Result<()> {
let mut db = TestDb::new();
let path = SystemPath::new("test.ipynb");
db.write_file(path, "%timeit a = b")?;
let file = system_path_to_file(&db, path).unwrap();
let parsed = parsed_module(&db, file).load(&db);
assert!(parsed.has_valid_syntax());
Ok(())
}
#[test]
fn virtual_python_file() -> crate::system::Result<()> {
let mut db = TestDb::new();
let path = SystemVirtualPath::new("untitled:Untitled-1");
db.write_virtual_file(path, "x = 10");
let virtual_file = db.files().virtual_file(&db, path);
let parsed = parsed_module(&db, virtual_file.file()).load(&db);
assert!(parsed.has_valid_syntax());
Ok(())
}
#[test]
fn virtual_ipynb_file() -> crate::system::Result<()> {
let mut db = TestDb::new();
let path = SystemVirtualPath::new("untitled:Untitled-1.ipynb");
db.write_virtual_file(path, "%timeit a = b");
let virtual_file = db.files().virtual_file(&db, path);
let parsed = parsed_module(&db, virtual_file.file()).load(&db);
assert!(parsed.has_valid_syntax());
Ok(())
}
#[test]
fn vendored_file() {
let mut db = TestDb::new();
let mut vendored_builder = VendoredFileSystemBuilder::new(CompressionMethod::Stored);
vendored_builder
.add_file(
"path.pyi",
r#"
import sys
if sys.platform == "win32":
from ntpath import *
from ntpath import __all__ as __all__
else:
from posixpath import *
from posixpath import __all__ as __all__"#,
)
.unwrap();
let vendored = vendored_builder.finish().unwrap();
db.with_vendored(vendored);
let file = vendored_path_to_file(&db, VendoredPath::new("path.pyi")).unwrap();
let parsed = parsed_module(&db, file).load(&db);
assert!(parsed.has_valid_syntax());
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/system.rs | crates/ruff_db/src/system.rs | pub use glob::PatternError;
pub use memory_fs::MemoryFileSystem;
#[cfg(all(feature = "testing", feature = "os"))]
pub use os::testing::UserConfigDirectoryOverrideGuard;
#[cfg(feature = "os")]
pub use os::OsSystem;
use filetime::FileTime;
use ruff_notebook::{Notebook, NotebookError};
use ruff_python_ast::PySourceType;
use std::error::Error;
use std::fmt::{Debug, Formatter};
use std::path::{Path, PathBuf};
use std::{fmt, io};
pub use test::{DbWithTestSystem, DbWithWritableSystem, InMemorySystem, TestSystem};
use walk_directory::WalkDirectoryBuilder;
pub use self::path::{
DeduplicatedNestedPathsIter, SystemPath, SystemPathBuf, SystemVirtualPath,
SystemVirtualPathBuf, deduplicate_nested_paths,
};
use crate::file_revision::FileRevision;
mod memory_fs;
#[cfg(feature = "os")]
mod os;
mod path;
mod test;
pub mod walk_directory;
pub type Result<T> = std::io::Result<T>;
/// The system on which Ruff runs.
///
/// Ruff supports running on the CLI, in a language server, and in a browser (WASM). Each of these
/// host-systems differ in what system operations they support and how they interact with the file system:
/// * Language server:
/// * Reading a file's content should take into account that it might have unsaved changes because it's open in the editor.
/// * Use structured representations for notebooks, making deserializing a notebook from a string unnecessary.
/// * Use their own file watching infrastructure.
/// * WASM (Browser):
/// * There are ways to emulate a file system in WASM but a native memory-filesystem is more efficient.
/// * Doesn't support a current working directory
/// * File watching isn't supported.
///
/// Abstracting the system also enables tests to use a more efficient in-memory file system.
pub trait System: Debug + Sync + Send {
/// Reads the metadata of the file or directory at `path`.
///
/// This function will traverse symbolic links to query information about the destination file.
fn path_metadata(&self, path: &SystemPath) -> Result<Metadata>;
/// Returns the canonical, absolute form of a path with all intermediate components normalized
/// and symbolic links resolved.
///
/// # Errors
/// This function will return an error in the following situations, but is not limited to just these cases:
/// * `path` does not exist.
/// * A non-final component in `path` is not a directory.
/// * the symlink target path is not valid Unicode.
///
/// ## Windows long-paths
/// Unlike `std::fs::canonicalize`, this function does remove UNC prefixes if possible.
/// See [dunce::canonicalize] for more information.
fn canonicalize_path(&self, path: &SystemPath) -> Result<SystemPathBuf>;
/// Returns the source type for `path` if known or `None`.
///
/// The default is to always return `None`, assuming the system
/// has no additional information and that the caller should
/// rely on the file extension instead.
///
/// This is primarily used for the LSP integration to respect
/// the chosen language (or the fact that it is a notebook) in
/// the editor.
fn source_type(&self, path: &SystemPath) -> Option<PySourceType> {
let _ = path;
None
}
/// Returns the source type for `path` if known or `None`.
///
/// The default is to always return `None`, assuming the system
/// has no additional information and that the caller should
/// rely on the file extension instead.
///
/// This is primarily used for the LSP integration to respect
/// the chosen language (or the fact that it is a notebook) in
/// the editor.
fn virtual_path_source_type(&self, path: &SystemVirtualPath) -> Option<PySourceType> {
let _ = path;
None
}
/// Reads the content of the file at `path` into a [`String`].
fn read_to_string(&self, path: &SystemPath) -> Result<String>;
/// Reads the content of the file at `path` as a Notebook.
///
/// This method optimizes for the case where the system holds a structured representation of a [`Notebook`],
/// allowing to skip the notebook deserialization. Systems that don't use a structured
/// representation fall-back to deserializing the notebook from a string.
fn read_to_notebook(&self, path: &SystemPath) -> std::result::Result<Notebook, NotebookError>;
/// Reads the content of the virtual file at `path` into a [`String`].
fn read_virtual_path_to_string(&self, path: &SystemVirtualPath) -> Result<String>;
/// Reads the content of the virtual file at `path` as a [`Notebook`].
fn read_virtual_path_to_notebook(
&self,
path: &SystemVirtualPath,
) -> std::result::Result<Notebook, NotebookError>;
/// Returns `true` if `path` exists.
fn path_exists(&self, path: &SystemPath) -> bool {
self.path_metadata(path).is_ok()
}
/// Returns `true` if `path` exists on disk using the exact casing as specified in `path` for the parts after `prefix`.
///
/// This is the same as [`Self::path_exists`] on case-sensitive systems.
///
/// ## The use of prefix
///
/// Prefix is only intended as an optimization for systems that can't efficiently check
/// if an entire path exists with the exact casing as specified in `path`. However,
/// implementations are allowed to check the casing of the entire path if they can do so efficiently.
fn path_exists_case_sensitive(&self, path: &SystemPath, prefix: &SystemPath) -> bool;
/// Returns the [`CaseSensitivity`] of the system's file system.
fn case_sensitivity(&self) -> CaseSensitivity;
/// Returns `true` if `path` exists and is a directory.
fn is_directory(&self, path: &SystemPath) -> bool {
self.path_metadata(path)
.is_ok_and(|metadata| metadata.file_type.is_directory())
}
/// Returns `true` if `path` exists and is a file.
fn is_file(&self, path: &SystemPath) -> bool {
self.path_metadata(path)
.is_ok_and(|metadata| metadata.file_type.is_file())
}
/// Returns the current working directory
fn current_directory(&self) -> &SystemPath;
/// Returns the directory path where user configurations are stored.
///
/// Returns `None` if no such convention exists for the system.
fn user_config_directory(&self) -> Option<SystemPathBuf>;
/// Returns the directory path where cached files are stored.
///
/// Returns `None` if no such convention exists for the system.
fn cache_dir(&self) -> Option<SystemPathBuf>;
/// Iterate over the contents of the directory at `path`.
///
/// The returned iterator must have the following properties:
/// - It only iterates over the top level of the directory,
/// i.e., it does not recurse into subdirectories.
/// - It skips the current and parent directories (`.` and `..`
/// respectively).
/// - The iterator yields `std::io::Result<DirEntry>` instances.
/// For each instance, an `Err` variant may signify that the path
/// of the entry was not valid UTF8, in which case it should be an
/// [`std::io::Error`] with the ErrorKind set to
/// [`std::io::ErrorKind::InvalidData`] and the payload set to a
/// [`camino::FromPathBufError`]. It may also indicate that
/// "some sort of intermittent IO error occurred during iteration"
/// (language taken from the [`std::fs::read_dir`] documentation).
///
/// # Errors
/// Returns an error:
/// - if `path` does not exist in the system,
/// - if `path` does not point to a directory,
/// - if the process does not have sufficient permissions to
/// view the contents of the directory at `path`
/// - May also return an error in some other situations as well.
fn read_directory<'a>(
&'a self,
path: &SystemPath,
) -> Result<Box<dyn Iterator<Item = Result<DirectoryEntry>> + 'a>>;
/// Recursively walks the content of `path`.
///
/// It is allowed to pass a `path` that points to a file. In this case, the walker
/// yields a single entry for that file.
fn walk_directory(&self, path: &SystemPath) -> WalkDirectoryBuilder;
/// Return an iterator that produces all the `Path`s that match the given
/// pattern using default match options, which may be absolute or relative to
/// the current working directory.
///
/// This may return an error if the pattern is invalid.
fn glob(
&self,
pattern: &str,
) -> std::result::Result<
Box<dyn Iterator<Item = std::result::Result<SystemPathBuf, GlobError>> + '_>,
PatternError,
>;
/// Fetches the environment variable `key` from the current process.
///
/// # Errors
///
/// Returns [`std::env::VarError::NotPresent`] if:
/// - The variable is not set.
/// - The variable's name contains an equal sign or NUL (`'='` or `'\0'`).
///
/// Returns [`std::env::VarError::NotUnicode`] if the variable's value is not valid
/// Unicode.
fn env_var(&self, name: &str) -> std::result::Result<String, std::env::VarError> {
let _ = name;
Err(std::env::VarError::NotPresent)
}
/// Returns a handle to a [`WritableSystem`] if this system is writeable.
fn as_writable(&self) -> Option<&dyn WritableSystem>;
fn as_any(&self) -> &dyn std::any::Any;
fn as_any_mut(&mut self) -> &mut dyn std::any::Any;
fn dyn_clone(&self) -> Box<dyn System>;
}
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
pub enum CaseSensitivity {
/// The case sensitivity of the file system is unknown.
///
/// The file system is either case-sensitive or case-insensitive. A caller
/// should not assume either case.
#[default]
Unknown,
/// The file system is case-sensitive.
CaseSensitive,
/// The file system is case-insensitive.
CaseInsensitive,
}
impl CaseSensitivity {
/// Returns `true` if the file system is known to be case-sensitive.
pub const fn is_case_sensitive(self) -> bool {
matches!(self, Self::CaseSensitive)
}
}
impl fmt::Display for CaseSensitivity {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
CaseSensitivity::Unknown => f.write_str("unknown"),
CaseSensitivity::CaseSensitive => f.write_str("case-sensitive"),
CaseSensitivity::CaseInsensitive => f.write_str("case-insensitive"),
}
}
}
/// System trait for non-readonly systems.
pub trait WritableSystem: System {
/// Creates a file at the given path.
///
/// Returns an error if the file already exists.
fn create_new_file(&self, path: &SystemPath) -> Result<()>;
/// Writes the given content to the file at the given path.
fn write_file(&self, path: &SystemPath, content: &str) -> Result<()>;
/// Creates a directory at `path` as well as any intermediate directories.
fn create_directory_all(&self, path: &SystemPath) -> Result<()>;
/// Reads the provided file from the system cache, or creates the file if necessary.
///
/// Returns `Ok(None)` if the system does not expose a suitable cache directory.
fn get_or_cache(
&self,
path: &SystemPath,
read_contents: &dyn Fn() -> Result<String>,
) -> Result<Option<SystemPathBuf>> {
let Some(cache_dir) = self.cache_dir() else {
return Ok(None);
};
let cache_path = cache_dir.join(path);
// The file has already been cached.
if self.is_file(&cache_path) {
return Ok(Some(cache_path));
}
// Read the file contents.
let contents = read_contents()?;
// Create the parent directory.
self.create_directory_all(cache_path.parent().unwrap())?;
// Create and write to the file on the system.
//
// Note that `create_new_file` will fail if the file has already been created. This
// ensures that only one thread/process ever attempts to write to it to avoid corrupting
// the cache.
self.create_new_file(&cache_path)?;
self.write_file(&cache_path, &contents)?;
Ok(Some(cache_path))
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Metadata {
revision: FileRevision,
permissions: Option<u32>,
file_type: FileType,
}
impl Metadata {
pub fn new(revision: FileRevision, permissions: Option<u32>, file_type: FileType) -> Self {
Self {
revision,
permissions,
file_type,
}
}
pub fn revision(&self) -> FileRevision {
self.revision
}
pub fn permissions(&self) -> Option<u32> {
self.permissions
}
pub fn file_type(&self) -> FileType {
self.file_type
}
}
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
pub enum FileType {
File,
Directory,
Symlink,
}
impl FileType {
pub const fn is_file(self) -> bool {
matches!(self, FileType::File)
}
pub const fn is_directory(self) -> bool {
matches!(self, FileType::Directory)
}
pub const fn is_symlink(self) -> bool {
matches!(self, FileType::Symlink)
}
}
#[derive(Debug, PartialEq, Eq)]
pub struct DirectoryEntry {
path: SystemPathBuf,
file_type: FileType,
}
impl DirectoryEntry {
pub fn new(path: SystemPathBuf, file_type: FileType) -> Self {
Self { path, file_type }
}
pub fn into_path(self) -> SystemPathBuf {
self.path
}
pub fn path(&self) -> &SystemPath {
&self.path
}
pub fn file_type(&self) -> FileType {
self.file_type
}
}
/// A glob iteration error.
///
/// This is typically returned when a particular path cannot be read
/// to determine if its contents match the glob pattern. This is possible
/// if the program lacks the appropriate permissions, for example.
#[derive(Debug)]
pub struct GlobError {
path: PathBuf,
error: GlobErrorKind,
}
impl GlobError {
/// The Path that the error corresponds to.
pub fn path(&self) -> &Path {
&self.path
}
pub fn kind(&self) -> &GlobErrorKind {
&self.error
}
}
impl Error for GlobError {}
impl fmt::Display for GlobError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self.error {
GlobErrorKind::IOError(error) => {
write!(
f,
"attempting to read `{}` resulted in an error: {error}",
self.path.display(),
)
}
GlobErrorKind::NonUtf8Path => {
write!(f, "`{}` is not a valid UTF-8 path", self.path.display(),)
}
}
}
}
impl From<glob::GlobError> for GlobError {
fn from(value: glob::GlobError) -> Self {
Self {
path: value.path().to_path_buf(),
error: GlobErrorKind::IOError(value.into_error()),
}
}
}
#[derive(Debug)]
pub enum GlobErrorKind {
IOError(io::Error),
NonUtf8Path,
}
#[cfg(not(target_arch = "wasm32"))]
pub fn file_time_now() -> FileTime {
FileTime::now()
}
#[cfg(target_arch = "wasm32")]
pub fn file_time_now() -> FileTime {
// Copied from FileTime::from_system_time()
let time = web_time::SystemTime::now();
time.duration_since(web_time::UNIX_EPOCH)
.map(|d| FileTime::from_unix_time(d.as_secs() as i64, d.subsec_nanos()))
.unwrap_or_else(|e| {
let until_epoch = e.duration();
let (sec_offset, nanos) = if until_epoch.subsec_nanos() == 0 {
(0, 0)
} else {
(-1, 1_000_000_000 - until_epoch.subsec_nanos())
};
FileTime::from_unix_time(-(until_epoch.as_secs() as i64) + sec_offset, nanos)
})
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/testing.rs | crates/ruff_db/src/testing.rs | //! Test helpers for working with Salsa databases
use tracing_subscriber::EnvFilter;
use tracing_subscriber::layer::SubscriberExt;
pub fn assert_function_query_was_not_run<Db, Q, QDb, I, R>(
db: &Db,
query: Q,
input: I,
events: &[salsa::Event],
) where
Db: salsa::Database,
Q: Fn(QDb, I) -> R,
I: salsa::plumbing::AsId + std::fmt::Debug + Copy,
{
let id = input.as_id();
let (query_name, will_execute_event) = find_will_execute_event(db, query, input, events);
db.attach(|_| {
if let Some(will_execute_event) = will_execute_event {
panic!("Expected query {query_name}({id:?}) not to have run but it did: {will_execute_event:?}\n\n{events:#?}");
}
});
}
pub fn assert_const_function_query_was_not_run<Db, Q, QDb, R>(
db: &Db,
query: Q,
events: &[salsa::Event],
) where
Db: salsa::Database,
Q: Fn(QDb) -> R,
{
// Salsa now interns singleton ingredients. But we know that it is a singleton, so we can just search for
// any event of that ingredient.
let query_name = query_name(&query);
let event = events.iter().find(|event| {
if let salsa::EventKind::WillExecute { database_key } = event.kind {
db.ingredient_debug_name(database_key.ingredient_index()) == query_name
} else {
false
}
});
db.attach(|_| {
if let Some(will_execute_event) = event {
panic!(
"Expected query {query_name}() not to have run but it did: {will_execute_event:?}\n\n{events:#?}"
);
}
});
}
/// Assert that the Salsa query described by the generic parameter `C`
/// was executed at least once with the input `input`
/// in the history span represented by `events`.
pub fn assert_function_query_was_run<Db, Q, QDb, I, R>(
db: &Db,
query: Q,
input: I,
events: &[salsa::Event],
) where
Db: salsa::Database,
Q: Fn(QDb, I) -> R,
I: salsa::plumbing::AsId + std::fmt::Debug + Copy,
{
let id = input.as_id();
let (query_name, will_execute_event) = find_will_execute_event(db, query, input, events);
db.attach(|_| {
assert!(
will_execute_event.is_some(),
"Expected query {query_name}({id:?}) to have run but it did not:\n{events:#?}"
);
});
}
pub fn find_will_execute_event<'a, Q, I>(
db: &dyn salsa::Database,
query: Q,
input: I,
events: &'a [salsa::Event],
) -> (&'static str, Option<&'a salsa::Event>)
where
I: salsa::plumbing::AsId,
{
let query_name = query_name(&query);
let event = events.iter().find(|event| {
if let salsa::EventKind::WillExecute { database_key } = event.kind {
db.ingredient_debug_name(database_key.ingredient_index()) == query_name
&& database_key.key_index() == input.as_id()
} else {
false
}
});
(query_name, event)
}
fn query_name<Q>(_query: &Q) -> &'static str {
let full_qualified_query_name = std::any::type_name::<Q>();
full_qualified_query_name
.rsplit_once("::")
.map(|(_, name)| name)
.unwrap_or(full_qualified_query_name)
}
/// Sets up logging for the current thread. It captures all `ty` and `ruff` events.
///
/// Useful for capturing the tracing output in a failing test.
///
/// # Examples
/// ```
/// use ruff_db::testing::setup_logging;
/// let _logging = setup_logging();
///
/// tracing::info!("This message will be printed to stderr");
/// ```
pub fn setup_logging() -> LoggingGuard {
LoggingBuilder::new().build()
}
/// Sets up logging for the current thread and uses the passed filter to filter the shown events.
/// Useful for capturing the tracing output in a failing test.
///
/// # Examples
/// ```
/// use ruff_db::testing::setup_logging_with_filter;
/// let _logging = setup_logging_with_filter("ty_module_resolver::resolver");
/// ```
///
/// # Filter
/// See [`tracing_subscriber::EnvFilter`] for the `filter`'s syntax.
///
pub fn setup_logging_with_filter(filter: &str) -> Option<LoggingGuard> {
LoggingBuilder::with_filter(filter).map(LoggingBuilder::build)
}
#[derive(Debug)]
pub struct LoggingBuilder {
filter: EnvFilter,
}
impl LoggingBuilder {
pub fn new() -> Self {
Self {
filter: EnvFilter::default()
.add_directive("ty=trace".parse().expect("Hardcoded directive to be valid"))
.add_directive(
"ruff=trace"
.parse()
.expect("Hardcoded directive to be valid"),
),
}
}
pub fn with_filter(filter: &str) -> Option<Self> {
let filter = EnvFilter::builder().parse(filter).ok()?;
Some(Self { filter })
}
pub fn build(self) -> LoggingGuard {
let registry = tracing_subscriber::registry().with(self.filter);
let subscriber = registry.with(
tracing_subscriber::fmt::layer()
.compact()
.with_writer(std::io::stderr)
.with_timer(tracing_subscriber::fmt::time()),
);
let guard = tracing::subscriber::set_default(subscriber);
LoggingGuard { _guard: guard }
}
}
impl Default for LoggingBuilder {
fn default() -> Self {
Self::new()
}
}
#[must_use = "Dropping the guard unregisters the tracing subscriber."]
pub struct LoggingGuard {
_guard: tracing::subscriber::DefaultGuard,
}
#[test]
fn query_was_not_run() {
use crate::tests::TestDb;
use salsa::prelude::*;
#[salsa::input(debug)]
struct Input {
text: String,
}
#[salsa::tracked]
fn len(db: &dyn salsa::Database, input: Input) -> usize {
input.text(db).len()
}
let mut db = TestDb::new();
let hello = Input::new(&db, "Hello, world!".to_string());
let goodbye = Input::new(&db, "Goodbye!".to_string());
assert_eq!(len(&db, hello), 13);
assert_eq!(len(&db, goodbye), 8);
// Change the input of one query
goodbye.set_text(&mut db).to("Bye".to_string());
db.clear_salsa_events();
assert_eq!(len(&db, goodbye), 3);
let events = db.take_salsa_events();
assert_function_query_was_run(&db, len, goodbye, &events);
assert_function_query_was_not_run(&db, len, hello, &events);
}
#[test]
#[should_panic(expected = "Expected query len(Id(0)) not to have run but it did:")]
fn query_was_not_run_fails_if_query_was_run() {
use crate::tests::TestDb;
use salsa::prelude::*;
#[salsa::input(debug)]
struct Input {
text: String,
}
#[salsa::tracked]
fn len(db: &dyn salsa::Database, input: Input) -> usize {
input.text(db).len()
}
let mut db = TestDb::new();
let hello = Input::new(&db, "Hello, world!".to_string());
assert_eq!(len(&db, hello), 13);
// Change the input
hello.set_text(&mut db).to("Hy".to_string());
db.clear_salsa_events();
assert_eq!(len(&db, hello), 2);
let events = db.take_salsa_events();
assert_function_query_was_not_run(&db, len, hello, &events);
}
#[test]
#[should_panic(expected = "Expected query len() not to have run but it did:")]
fn const_query_was_not_run_fails_if_query_was_run() {
use crate::tests::TestDb;
use salsa::prelude::*;
#[salsa::input]
struct Input {
text: String,
}
#[salsa::tracked]
fn len(db: &dyn salsa::Database) -> usize {
db.report_untracked_read();
5
}
let mut db = TestDb::new();
let hello = Input::new(&db, "Hello, world!".to_string());
assert_eq!(len(&db), 5);
// Create a new revision
db.clear_salsa_events();
hello.set_text(&mut db).to("Hy".to_string());
assert_eq!(len(&db), 5);
let events = db.take_salsa_events();
assert_const_function_query_was_not_run(&db, len, &events);
}
#[test]
#[should_panic(expected = "Expected query len(Id(0)) to have run but it did not:")]
fn query_was_run_fails_if_query_was_not_run() {
use crate::tests::TestDb;
use salsa::prelude::*;
#[salsa::input(debug)]
struct Input {
text: String,
}
#[salsa::tracked]
fn len(db: &dyn salsa::Database, input: Input) -> usize {
input.text(db).len()
}
let mut db = TestDb::new();
let hello = Input::new(&db, "Hello, world!".to_string());
let goodbye = Input::new(&db, "Goodbye!".to_string());
assert_eq!(len(&db, hello), 13);
assert_eq!(len(&db, goodbye), 8);
// Change the input of one query
goodbye.set_text(&mut db).to("Bye".to_string());
db.clear_salsa_events();
assert_eq!(len(&db, goodbye), 3);
let events = db.take_salsa_events();
assert_function_query_was_run(&db, len, hello, &events);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/files.rs | crates/ruff_db/src/files.rs | use std::fmt;
use std::sync::Arc;
use dashmap::mapref::entry::Entry;
pub use file_root::{FileRoot, FileRootKind};
pub use path::FilePath;
use ruff_notebook::{Notebook, NotebookError};
use ruff_python_ast::PySourceType;
use ruff_text_size::{Ranged, TextRange};
use salsa::plumbing::AsId;
use salsa::{Durability, Setter};
use crate::diagnostic::{Span, UnifiedFile};
use crate::file_revision::FileRevision;
use crate::files::file_root::FileRoots;
use crate::files::private::FileStatus;
use crate::system::{SystemPath, SystemPathBuf, SystemVirtualPath, SystemVirtualPathBuf};
use crate::vendored::{VendoredPath, VendoredPathBuf};
use crate::{Db, FxDashMap, vendored};
mod file_root;
mod path;
/// Interns a file system path and returns a salsa `File` ingredient.
///
/// Returns `Err` if the path doesn't exist, isn't accessible, or if the path points to a directory.
#[inline]
pub fn system_path_to_file(db: &dyn Db, path: impl AsRef<SystemPath>) -> Result<File, FileError> {
let file = db.files().system(db, path.as_ref());
// It's important that `vfs.file_system` creates a `VfsFile` even for files that don't exist or don't
// exist anymore so that Salsa can track that the caller of this function depends on the existence of
// that file. This function filters out files that don't exist, but Salsa will know that it must
// re-run the calling query whenever the `file`'s status changes (because of the `.status` call here).
match file.status(db) {
FileStatus::Exists => Ok(file),
FileStatus::IsADirectory => Err(FileError::IsADirectory),
FileStatus::NotFound => Err(FileError::NotFound),
}
}
/// Interns a vendored file path. Returns `Some` if the vendored file for `path` exists and `None` otherwise.
#[inline]
pub fn vendored_path_to_file(
db: &dyn Db,
path: impl AsRef<VendoredPath>,
) -> Result<File, FileError> {
db.files().vendored(db, path.as_ref())
}
/// Lookup table that maps [file paths](`FilePath`) to salsa interned [`File`] instances.
#[derive(Default, Clone)]
pub struct Files {
inner: Arc<FilesInner>,
}
#[derive(Default)]
struct FilesInner {
    /// Lookup table that maps [`SystemPathBuf`]s to salsa interned [`File`] instances.
    ///
    /// The map also stores entries for files that don't exist on the file system. This is necessary
    /// so that queries that depend on the existence of a file are re-executed when the file is created.
    system_by_path: FxDashMap<SystemPathBuf, File>,
    /// Lookup table that maps [`SystemVirtualPathBuf`]s to [`VirtualFile`] instances.
    system_virtual_by_path: FxDashMap<SystemVirtualPathBuf, VirtualFile>,
    /// Lookup table that maps vendored files to the salsa [`File`] ingredients.
    vendored_by_path: FxDashMap<VendoredPathBuf, File>,
    /// Lookup table that maps file paths to their [`FileRoot`].
    ///
    /// Guarded by an `RwLock` (rather than a dash map) because root lookup walks the
    /// whole collection.
    roots: std::sync::RwLock<FileRoots>,
}
impl Files {
    /// Looks up a file by its `path`.
    ///
    /// For a non-existing file, creates a new salsa [`File`] ingredient and stores it for future lookups.
    ///
    /// The operation always succeeds even if the path doesn't exist on disk, isn't accessible or if the path points to a directory.
    /// In these cases, a file with status [`FileStatus::NotFound`] is returned.
    fn system(&self, db: &dyn Db, path: &SystemPath) -> File {
        let absolute = SystemPath::absolute(path, db.system().current_directory());
        *self
            .inner
            .system_by_path
            .entry(absolute.clone())
            .or_insert_with(|| {
                // NOTE(review): metadata is queried with the original `path` while the map is
                // keyed by `absolute` — presumably equivalent because `absolute` was resolved
                // against the system's current directory; confirm if they can diverge.
                let metadata = db.system().path_metadata(path);
                tracing::trace!("Adding file '{absolute}'");
                // Inherit the durability of the enclosing file root (if any).
                let durability = self
                    .root(db, &absolute)
                    .map_or(Durability::default(), |root| root.durability(db));
                let builder = File::builder(FilePath::System(absolute))
                    .durability(durability)
                    .path_durability(Durability::HIGH);
                let builder = match metadata {
                    Ok(metadata) if metadata.file_type().is_file() => builder
                        .permissions(metadata.permissions())
                        .revision(metadata.revision()),
                    Ok(metadata) if metadata.file_type().is_directory() => {
                        builder.status(FileStatus::IsADirectory)
                    }
                    _ => builder
                        .status(FileStatus::NotFound)
                        .status_durability(Durability::MEDIUM.max(durability)),
                };
                builder.new(db)
            })
    }
    /// Tries to look up the file for the given system path, returns `None` if no such file exists yet
    pub fn try_system(&self, db: &dyn Db, path: &SystemPath) -> Option<File> {
        let absolute = SystemPath::absolute(path, db.system().current_directory());
        self.inner
            .system_by_path
            .get(&absolute)
            .map(|entry| *entry.value())
    }
    /// Looks up a vendored file by its path.
    ///
    /// Returns `Ok` if a vendored file for the given path exists,
    /// `Err(FileError::NotFound)` if it doesn't, and `Err(FileError::IsADirectory)`
    /// if the path points to a directory in the vendored file system.
    fn vendored(&self, db: &dyn Db, path: &VendoredPath) -> Result<File, FileError> {
        let file = match self.inner.vendored_by_path.entry(path.to_path_buf()) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let metadata = match db.vendored().metadata(path) {
                    Ok(metadata) => match metadata.kind() {
                        vendored::FileType::File => metadata,
                        vendored::FileType::Directory => return Err(FileError::IsADirectory),
                    },
                    Err(_) => return Err(FileError::NotFound),
                };
                tracing::trace!("Adding vendored file `{}`", path);
                // Vendored files are read-only (0o444) and never change after creation.
                let file = File::builder(FilePath::Vendored(path.to_path_buf()))
                    .permissions(Some(0o444))
                    .revision(metadata.revision())
                    .durability(Durability::HIGH)
                    .new(db);
                entry.insert(file);
                file
            }
        };
        Ok(file)
    }
    /// Create a new virtual file at the given path and store it for future lookups.
    ///
    /// This will always create a new file, overwriting any existing file at `path` in the internal
    /// storage.
    pub fn virtual_file(&self, db: &dyn Db, path: &SystemVirtualPath) -> VirtualFile {
        tracing::trace!("Adding virtual file {}", path);
        let virtual_file = VirtualFile(
            File::builder(FilePath::SystemVirtual(path.to_path_buf()))
                .path_durability(Durability::HIGH)
                .status(FileStatus::Exists)
                .revision(FileRevision::zero())
                .permissions(None)
                .permissions_durability(Durability::HIGH)
                .new(db),
        );
        self.inner
            .system_virtual_by_path
            .insert(path.to_path_buf(), virtual_file);
        virtual_file
    }
    /// Tries to look up a virtual file by its path. Returns `None` if no such file exists yet.
    pub fn try_virtual_file(&self, path: &SystemVirtualPath) -> Option<VirtualFile> {
        self.inner
            .system_virtual_by_path
            .get(&path.to_path_buf())
            .map(|entry| *entry.value())
    }
    /// Looks up the closest root for `path`. Returns `None` if `path` isn't enclosed by any source root.
    ///
    /// Roots can be nested, in which case the closest root is returned.
    pub fn root(&self, db: &dyn Db, path: &SystemPath) -> Option<FileRoot> {
        let roots = self.inner.roots.read().unwrap();
        let absolute = SystemPath::absolute(path, db.system().current_directory());
        roots.at(&absolute)
    }
    /// The same as [`Self::root`] but panics if no root is found.
    #[track_caller]
    pub fn expect_root(&self, db: &dyn Db, path: &SystemPath) -> FileRoot {
        if let Some(root) = self.root(db, path) {
            return root;
        }
        let roots = self.inner.roots.read().unwrap();
        panic!("No root found for path '{path}'. Known roots: {roots:#?}");
    }
    /// Adds a new root for `path` and returns the root.
    ///
    /// The root isn't added nor is the file root's kind updated if a root for `path` already exists.
    pub fn try_add_root(&self, db: &dyn Db, path: &SystemPath, kind: FileRootKind) -> FileRoot {
        let mut roots = self.inner.roots.write().unwrap();
        let absolute = SystemPath::absolute(path, db.system().current_directory());
        roots.try_add(db, absolute, kind)
    }
    /// Updates the revision of the root for `path`.
    pub fn touch_root(db: &mut dyn Db, path: &SystemPath) {
        if let Some(root) = db.files().root(db, path) {
            root.set_revision(db).to(FileRevision::now());
        }
    }
    /// Refreshes the state of all known files under `path` recursively.
    ///
    /// The most common use case is to update the [`Files`] state after removing or moving a directory.
    ///
    /// # Performance
    /// Refreshing the state of every file under `path` is expensive. It requires iterating over all known files
    /// and making system calls to get the latest status of each file in `path`.
    /// That's why [`File::sync_path`] or [`File::sync`] is preferred if it is known that the path is a file.
    pub fn sync_recursively(db: &mut dyn Db, path: &SystemPath) {
        let path = SystemPath::absolute(path, db.system().current_directory());
        tracing::debug!("Syncing all files in '{path}'");
        let inner = Arc::clone(&db.files().inner);
        for entry in inner.system_by_path.iter_mut() {
            if entry.key().starts_with(&path) {
                File::sync_system_path(db, entry.key(), Some(*entry.value()));
            }
        }
        // Also bump the revision of every root enclosing `path`.
        let roots = inner.roots.read().unwrap();
        for root in roots.all() {
            if path.starts_with(root.path(db)) {
                root.set_revision(db).to(FileRevision::now());
            }
        }
    }
    /// Refreshes the state of all known files.
    ///
    /// This is a last-resort method that should only be used when more granular updates aren't possible
    /// (for example, because the file watcher failed to observe some changes). Use responsibly!
    ///
    /// # Performance
    /// Refreshing the state of every file is expensive. It requires iterating over all known files and
    /// issuing a system call to get the latest status of each file.
    pub fn sync_all(db: &mut dyn Db) {
        tracing::debug!("Syncing all files");
        let inner = Arc::clone(&db.files().inner);
        for entry in inner.system_by_path.iter_mut() {
            File::sync_system_path(db, entry.key(), Some(*entry.value()));
        }
        let roots = inner.roots.read().unwrap();
        for root in roots.all() {
            root.set_revision(db).to(FileRevision::now());
        }
    }
}
impl fmt::Debug for Files {
    /// Compact form prints only the table sizes; the alternate form (`{:#?}`)
    /// dumps every known system path and its `File`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if !f.alternate() {
            return f
                .debug_struct("Files")
                .field("system_by_path", &self.inner.system_by_path.len())
                .field(
                    "system_virtual_by_path",
                    &self.inner.system_virtual_by_path.len(),
                )
                .field("vendored_by_path", &self.inner.vendored_by_path.len())
                .finish();
        }

        let mut entries = f.debug_map();
        for entry in self.inner.system_by_path.iter() {
            entries.entry(entry.key(), entry.value());
        }
        entries.finish()
    }
}
// Manual unwind-safety assertion: `Files` only holds an `Arc` over interior-mutable maps
// and a lock. NOTE(review): this is a promise, not a proof — confirm the tables can't be
// observed in a torn state after a panic.
impl std::panic::RefUnwindSafe for Files {}
/// A file that's either stored on the host system's file system or in the vendored file system.
///
/// # Ordering
/// Ordering is based on the file's salsa-assigned id and not on its values.
/// The id may change between runs.
#[salsa::input(heap_size=ruff_memory_usage::heap_size)]
#[derive(PartialOrd, Ord)]
pub struct File {
    /// The path of the file (immutable).
    // Returned by reference to avoid cloning the `FilePath` on every access.
    #[returns(ref)]
    pub path: FilePath,
    /// The unix permissions of the file. Only supported on unix systems. Always `None` on Windows
    /// or when the file has been deleted.
    #[default]
    pub permissions: Option<u32>,
    /// The file revision. A file has changed if the revisions don't compare equal.
    #[default]
    pub revision: FileRevision,
    /// The status of the file.
    ///
    /// Salsa doesn't support deleting inputs. The only way to signal dependent queries that
    /// the file has been deleted is to change the status to [`FileStatus::NotFound`].
    #[default]
    status: FileStatus,
}
// The Salsa heap is tracked separately.
impl get_size2::GetSize for File {}
impl File {
    /// Reads the content of the file into a [`String`].
    ///
    /// Reading the same file multiple times isn't guaranteed to return the same content. It's possible
    /// that the file has been modified in between the reads.
    pub fn read_to_string(&self, db: &dyn Db) -> crate::system::Result<String> {
        let path = self.path(db);
        match path {
            FilePath::System(system) => {
                // Add a dependency on the revision to ensure the operation gets re-executed when the file changes.
                let _ = self.revision(db);
                db.system().read_to_string(system)
            }
            FilePath::Vendored(vendored) => db.vendored().read_to_string(vendored),
            FilePath::SystemVirtual(system_virtual) => {
                // Add a dependency on the revision to ensure the operation gets re-executed when the file changes.
                let _ = self.revision(db);
                db.system().read_virtual_path_to_string(system_virtual)
            }
        }
    }
    /// Reads the content of the file into a [`Notebook`].
    ///
    /// Reading the same file multiple times isn't guaranteed to return the same content. It's possible
    /// that the file has been modified in between the reads.
    pub fn read_to_notebook(&self, db: &dyn Db) -> Result<Notebook, NotebookError> {
        let path = self.path(db);
        match path {
            FilePath::System(system) => {
                // Add a dependency on the revision to ensure the operation gets re-executed when the file changes.
                let _ = self.revision(db);
                db.system().read_to_notebook(system)
            }
            FilePath::Vendored(_) => Err(NotebookError::Io(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                "Reading a notebook from the vendored file system is not supported.",
            ))),
            FilePath::SystemVirtual(system_virtual) => {
                // Add a dependency on the revision to ensure the operation gets re-executed when the file changes.
                let _ = self.revision(db);
                db.system().read_virtual_path_to_notebook(system_virtual)
            }
        }
    }
    /// Refreshes the file metadata by querying the file system if needed.
    ///
    /// This also "touches" the file root associated with the given path.
    /// This means that any Salsa queries that depend on the corresponding
    /// root's revision will become invalidated.
    pub fn sync_path(db: &mut dyn Db, path: &SystemPath) {
        let absolute = SystemPath::absolute(path, db.system().current_directory());
        Files::touch_root(db, &absolute);
        Self::sync_system_path(db, &absolute, None);
    }
    /// Refreshes *only* the file metadata by querying the file system if needed.
    ///
    /// This specifically does not touch any file root associated with the
    /// given file path.
    pub fn sync_path_only(db: &mut dyn Db, path: &SystemPath) {
        let absolute = SystemPath::absolute(path, db.system().current_directory());
        Self::sync_system_path(db, &absolute, None);
    }
    /// Increments the revision for the virtual file at `path`.
    pub fn sync_virtual_path(db: &mut dyn Db, path: &SystemVirtualPath) {
        if let Some(virtual_file) = db.files().try_virtual_file(path) {
            virtual_file.sync(db);
        }
    }
    /// Syncs the [`File`]'s state with the state of the file on the system.
    pub fn sync(self, db: &mut dyn Db) {
        let path = self.path(db).clone();
        match path {
            FilePath::System(system) => {
                Files::touch_root(db, &system);
                Self::sync_system_path(db, &system, Some(self));
            }
            FilePath::Vendored(_) => {
                // Readonly, can never be out of date.
            }
            FilePath::SystemVirtual(_) => {
                VirtualFile(self).sync(db);
            }
        }
    }
    /// Private method providing the implementation for [`Self::sync_path`] and [`Self::sync`] for
    /// system paths.
    fn sync_system_path(db: &mut dyn Db, path: &SystemPath, file: Option<File>) {
        let Some(file) = file.or_else(|| db.files().try_system(db, path)) else {
            return;
        };
        let (status, revision, permission) = match db.system().path_metadata(path) {
            Ok(metadata) if metadata.file_type().is_file() => (
                FileStatus::Exists,
                metadata.revision(),
                metadata.permissions(),
            ),
            Ok(metadata) if metadata.file_type().is_directory() => {
                (FileStatus::IsADirectory, FileRevision::zero(), None)
            }
            _ => (FileStatus::NotFound, FileRevision::zero(), None),
        };
        // Only write back fields that actually changed to avoid needless invalidation.
        if file.status(db) != status {
            tracing::debug!("Updating the status of `{}`", file.path(db));
            file.set_status(db).to(status);
        }
        if file.revision(db) != revision {
            tracing::debug!("Updating the revision of `{}`", file.path(db));
            file.set_revision(db).to(revision);
        }
        if file.permissions(db) != permission {
            tracing::debug!("Updating the permissions of `{}`", file.path(db));
            file.set_permissions(db).to(permission);
        }
    }
    /// Returns `true` if the file exists.
    pub fn exists(self, db: &dyn Db) -> bool {
        self.status(db) == FileStatus::Exists
    }
    /// Returns `true` if the file should be analyzed as a type stub.
    pub fn is_stub(self, db: &dyn Db) -> bool {
        self.source_type(db).is_stub()
    }
    /// Returns `true` if the file is an `__init__.pyi`.
    pub fn is_package_stub(self, db: &dyn Db) -> bool {
        self.path(db).as_str().ends_with("__init__.pyi")
    }
    /// Returns `true` if the file is an `__init__.py` or `__init__.pyi`.
    pub fn is_package(self, db: &dyn Db) -> bool {
        let path = self.path(db).as_str();
        path.ends_with("__init__.pyi") || path.ends_with("__init__.py")
    }
    /// Returns the [`PySourceType`] inferred from the path's extension.
    ///
    /// Vendored files are always treated as stubs; paths without an extension
    /// default to regular Python source.
    pub fn source_type(self, db: &dyn Db) -> PySourceType {
        match self.path(db) {
            FilePath::System(path) => path
                .extension()
                .map_or(PySourceType::Python, PySourceType::from_extension),
            FilePath::Vendored(_) => PySourceType::Stub,
            FilePath::SystemVirtual(path) => path
                .extension()
                .map_or(PySourceType::Python, PySourceType::from_extension),
        }
    }
}
impl fmt::Debug for File {
    /// Field values can only be resolved while a salsa database is attached;
    /// without one, fall back to printing the raw salsa id.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match salsa::with_attached_database(|db| {
            if !f.alternate() {
                return f.debug_tuple("File").field(&self.path(db)).finish();
            }
            f.debug_struct("File")
                .field("path", &self.path(db))
                .field("status", &self.status(db))
                .field("permissions", &self.permissions(db))
                .field("revision", &self.revision(db))
                .finish()
        }) {
            Some(result) => result,
            None => f.debug_tuple("file").field(&self.as_id()).finish(),
        }
    }
}
/// A virtual file that doesn't exist on the file system.
///
/// This is a wrapper around a [`File`] that provides additional methods to interact with a virtual
/// file. Created through [`Files::virtual_file`].
#[derive(Copy, Clone, Debug)]
pub struct VirtualFile(File);
impl VirtualFile {
    /// Returns the underlying [`File`].
    pub fn file(&self) -> File {
        self.0
    }

    /// Increments the revision of the underlying [`File`].
    ///
    /// Virtual files have no file-system metadata, so "syncing" simply bumps
    /// the revision to invalidate dependent queries.
    fn sync(&self, db: &mut dyn Db) {
        tracing::debug!("Updating the revision of `{}`", self.0.path(db));
        let next_revision = self.0.revision(db).as_u128() + 1;
        self.0.set_revision(db).to(FileRevision::new(next_revision));
    }

    /// Closes the virtual file by marking it as no longer found.
    pub fn close(&self, db: &mut dyn Db) {
        tracing::debug!("Closing virtual file `{}`", self.0.path(db));
        self.0.set_status(db).to(FileStatus::NotFound);
    }
}
// The types in here need to be public because they're salsa ingredients but we
// don't want them to be publicly accessible. That's why we put them into a private module.
mod private {
    /// Tracks whether a [`File`](super::File) exists and, if not, why.
    #[derive(Copy, Clone, Debug, Eq, PartialEq, Default, get_size2::GetSize)]
    pub enum FileStatus {
        /// The file exists.
        #[default]
        Exists,
        /// The path isn't a file and instead points to a directory.
        IsADirectory,
        /// The path doesn't exist, isn't accessible, or no longer exists.
        NotFound,
    }
}
/// Error returned when looking up a path as a file fails.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum FileError {
    /// The path points to a directory rather than a file.
    IsADirectory,
    /// The path doesn't exist, isn't accessible, or no longer exists.
    NotFound,
}

impl fmt::Display for FileError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
        let message = match self {
            FileError::IsADirectory => "Is a directory",
            FileError::NotFound => "Not found",
        };
        f.write_str(message)
    }
}

impl std::error::Error for FileError {}
/// Range with its corresponding file.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct FileRange {
    file: File,
    range: TextRange,
}
impl FileRange {
    /// Creates a new range for `range` inside `file`.
    pub const fn new(file: File, range: TextRange) -> Self {
        Self { file, range }
    }
    /// Returns the file this range belongs to.
    pub const fn file(&self) -> File {
        self.file
    }
}
impl Ranged for FileRange {
    /// Returns the text range within the file.
    #[inline]
    fn range(&self) -> TextRange {
        self.range
    }
}
impl TryFrom<&Span> for FileRange {
type Error = ();
fn try_from(value: &Span) -> Result<Self, Self::Error> {
let UnifiedFile::Ty(file) = value.file() else {
return Err(());
};
Ok(Self {
file: *file,
range: value.range().ok_or(())?,
})
}
}
impl TryFrom<Span> for FileRange {
type Error = ();
fn try_from(value: Span) -> Result<Self, Self::Error> {
Self::try_from(&value)
}
}
#[cfg(test)]
mod tests {
    use crate::file_revision::FileRevision;
    use crate::files::{FileError, system_path_to_file, vendored_path_to_file};
    use crate::system::DbWithWritableSystem as _;
    use crate::tests::TestDb;
    use crate::vendored::VendoredFileSystemBuilder;
    use zip::CompressionMethod;
    /// A written system file is interned with permissions, a non-zero revision, and its content.
    #[test]
    fn system_existing_file() -> crate::system::Result<()> {
        let mut db = TestDb::new();
        db.write_file("test.py", "print('Hello world')")?;
        let test = system_path_to_file(&db, "test.py").expect("File to exist.");
        assert_eq!(test.permissions(&db), Some(0o755));
        assert_ne!(test.revision(&db), FileRevision::zero());
        assert_eq!(&test.read_to_string(&db)?, "print('Hello world')");
        Ok(())
    }
    /// Looking up a path that was never written yields `NotFound`.
    #[test]
    fn system_non_existing_file() {
        let db = TestDb::new();
        let test = system_path_to_file(&db, "test.py");
        assert_eq!(test, Err(FileError::NotFound));
    }
    /// Relative paths and paths containing `.`/`..` intern to the same `File` as their
    /// normalized absolute form.
    #[test]
    fn system_normalize_paths() {
        let db = TestDb::new();
        assert_eq!(
            system_path_to_file(&db, "test.py"),
            system_path_to_file(&db, "/test.py")
        );
        assert_eq!(
            system_path_to_file(&db, "/root/.././test.py"),
            system_path_to_file(&db, "/root/test.py")
        );
    }
    /// Vendored files are read-only (0o444) and readable after interning.
    #[test]
    fn stubbed_vendored_file() -> crate::system::Result<()> {
        let mut db = TestDb::new();
        let mut vendored_builder = VendoredFileSystemBuilder::new(CompressionMethod::Stored);
        vendored_builder
            .add_file("test.pyi", "def foo() -> str")
            .unwrap();
        let vendored = vendored_builder.finish().unwrap();
        db.with_vendored(vendored);
        let test = vendored_path_to_file(&db, "test.pyi").expect("Vendored file to exist.");
        assert_eq!(test.permissions(&db), Some(0o444));
        assert_ne!(test.revision(&db), FileRevision::zero());
        assert_eq!(&test.read_to_string(&db)?, "def foo() -> str");
        Ok(())
    }
    /// A vendored lookup for a path not in the archive yields `NotFound`.
    #[test]
    fn stubbed_vendored_file_non_existing() {
        let db = TestDb::new();
        assert_eq!(
            vendored_path_to_file(&db, "test.py"),
            Err(FileError::NotFound)
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/system/test.rs | crates/ruff_db/src/system/test.rs | use glob::PatternError;
use ruff_notebook::{Notebook, NotebookError};
use rustc_hash::FxHashMap;
use std::panic::RefUnwindSafe;
use std::sync::{Arc, Mutex};
use crate::Db;
use crate::files::File;
use crate::system::{
CaseSensitivity, DirectoryEntry, GlobError, MemoryFileSystem, Metadata, Result, System,
SystemPath, SystemPathBuf, SystemVirtualPath,
};
use super::WritableSystem;
use super::walk_directory::WalkDirectoryBuilder;
/// System implementation intended for testing.
///
/// It uses a memory-file system by default, but can be switched to the real file system for tests
/// verifying more advanced file system features.
///
/// ## Warning
/// Don't use this system for production code. It's intended for testing only.
#[derive(Debug)]
pub struct TestSystem {
    /// The system all calls are forwarded to.
    inner: Arc<dyn WritableSystem + RefUnwindSafe + Send + Sync>,
    /// Environment variable overrides. If a key is present here, it takes precedence
    /// over the inner system's environment variables.
    env_overrides: Arc<Mutex<FxHashMap<String, Option<String>>>>,
}
impl Clone for TestSystem {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
env_overrides: self.env_overrides.clone(),
}
}
}
impl TestSystem {
    /// Creates a new test system that delegates to `inner`.
    pub fn new(inner: impl WritableSystem + RefUnwindSafe + Send + Sync + 'static) -> Self {
        Self {
            inner: Arc::new(inner),
            env_overrides: Arc::new(Mutex::new(FxHashMap::default())),
        }
    }
    /// Sets an environment variable override. This takes precedence over the inner system.
    pub fn set_env_var(&self, name: impl Into<String>, value: impl Into<String>) {
        self.env_overrides
            .lock()
            .unwrap()
            .insert(name.into(), Some(value.into()));
    }
    /// Removes an environment variable override, making it appear as not set.
    pub fn remove_env_var(&self, name: impl Into<String>) {
        // A `None` entry means "explicitly unset": `env_var` reports `NotPresent`
        // instead of falling back to the inner system.
        self.env_overrides.lock().unwrap().insert(name.into(), None);
    }
    /// Returns the [`InMemorySystem`].
    ///
    /// ## Panics
    /// If the underlying test system isn't the [`InMemorySystem`].
    pub fn in_memory(&self) -> &InMemorySystem {
        self.as_in_memory()
            .expect("The test db is not using a memory file system")
    }
    /// Returns the `InMemorySystem` or `None` if the underlying test system isn't the [`InMemorySystem`].
    pub fn as_in_memory(&self) -> Option<&InMemorySystem> {
        self.system().as_any().downcast_ref::<InMemorySystem>()
    }
    /// Returns the memory file system.
    ///
    /// ## Panics
    /// If the underlying test system isn't the [`InMemorySystem`].
    pub fn memory_file_system(&self) -> &MemoryFileSystem {
        self.in_memory().fs()
    }
    /// Replaces the wrapped system with `system`.
    fn use_system<S>(&mut self, system: S)
    where
        S: WritableSystem + Send + Sync + RefUnwindSafe + 'static,
    {
        self.inner = Arc::new(system);
    }
    /// Returns a reference to the wrapped system.
    pub fn system(&self) -> &dyn WritableSystem {
        &*self.inner
    }
}
impl System for TestSystem {
    // Except for `env_var` (which consults the overrides first), every method
    // simply delegates to the wrapped system.
    fn path_metadata(&self, path: &SystemPath) -> Result<Metadata> {
        self.system().path_metadata(path)
    }
    fn canonicalize_path(&self, path: &SystemPath) -> Result<SystemPathBuf> {
        self.system().canonicalize_path(path)
    }
    fn read_to_string(&self, path: &SystemPath) -> Result<String> {
        self.system().read_to_string(path)
    }
    fn read_to_notebook(&self, path: &SystemPath) -> std::result::Result<Notebook, NotebookError> {
        self.system().read_to_notebook(path)
    }
    fn read_virtual_path_to_string(&self, path: &SystemVirtualPath) -> Result<String> {
        self.system().read_virtual_path_to_string(path)
    }
    fn read_virtual_path_to_notebook(
        &self,
        path: &SystemVirtualPath,
    ) -> std::result::Result<Notebook, NotebookError> {
        self.system().read_virtual_path_to_notebook(path)
    }
    fn current_directory(&self) -> &SystemPath {
        self.system().current_directory()
    }
    fn user_config_directory(&self) -> Option<SystemPathBuf> {
        self.system().user_config_directory()
    }
    fn cache_dir(&self) -> Option<SystemPathBuf> {
        self.system().cache_dir()
    }
    fn read_directory<'a>(
        &'a self,
        path: &SystemPath,
    ) -> Result<Box<dyn Iterator<Item = Result<DirectoryEntry>> + 'a>> {
        self.system().read_directory(path)
    }
    fn walk_directory(&self, path: &SystemPath) -> WalkDirectoryBuilder {
        self.system().walk_directory(path)
    }
    fn glob(
        &self,
        pattern: &str,
    ) -> std::result::Result<
        Box<dyn Iterator<Item = std::result::Result<SystemPathBuf, GlobError>> + '_>,
        PatternError,
    > {
        self.system().glob(pattern)
    }
    fn as_writable(&self) -> Option<&dyn WritableSystem> {
        Some(self)
    }
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }
    fn path_exists_case_sensitive(&self, path: &SystemPath, prefix: &SystemPath) -> bool {
        self.system().path_exists_case_sensitive(path, prefix)
    }
    fn case_sensitivity(&self) -> CaseSensitivity {
        self.system().case_sensitivity()
    }
    fn env_var(&self, name: &str) -> std::result::Result<String, std::env::VarError> {
        // Check overrides first
        if let Some(override_value) = self.env_overrides.lock().unwrap().get(name) {
            return match override_value {
                Some(value) => Ok(value.clone()),
                // An explicit `None` entry means the variable was removed via `remove_env_var`.
                None => Err(std::env::VarError::NotPresent),
            };
        }
        // Fall back to inner system
        self.system().env_var(name)
    }
    fn dyn_clone(&self) -> Box<dyn System> {
        Box::new(self.clone())
    }
}
impl Default for TestSystem {
    /// Defaults to a fresh [`InMemorySystem`] with no environment overrides.
    fn default() -> Self {
        Self::new(InMemorySystem::default())
    }
}
impl WritableSystem for TestSystem {
    // All write operations are forwarded to the wrapped system unchanged.
    fn create_new_file(&self, path: &SystemPath) -> Result<()> {
        self.system().create_new_file(path)
    }
    fn write_file(&self, path: &SystemPath, content: &str) -> Result<()> {
        self.system().write_file(path, content)
    }
    fn create_directory_all(&self, path: &SystemPath) -> Result<()> {
        self.system().create_directory_all(path)
    }
}
/// Extension trait for databases that use a [`WritableSystem`].
///
/// Provides various helper functions that ease testing.
pub trait DbWithWritableSystem: Db + Sized {
    type System: WritableSystem;
    /// Returns the writable system used by this database.
    fn writable_system(&self) -> &Self::System;
    /// Writes the content of the given file and notifies the Db about the change.
    fn write_file(&mut self, path: impl AsRef<SystemPath>, content: impl AsRef<str>) -> Result<()> {
        let path = path.as_ref();
        match self.writable_system().write_file(path, content.as_ref()) {
            Ok(()) => {
                File::sync_path(self, path);
                Ok(())
            }
            Err(error) if error.kind() == std::io::ErrorKind::NotFound => {
                // The parent directory may not exist yet: create it, notify the Db about
                // every (possibly newly created) ancestor, and retry the write once.
                if let Some(parent) = path.parent() {
                    self.writable_system().create_directory_all(parent)?;
                    for ancestor in parent.ancestors() {
                        File::sync_path(self, ancestor);
                    }
                    self.writable_system().write_file(path, content.as_ref())?;
                    File::sync_path(self, path);
                    Ok(())
                } else {
                    Err(error)
                }
            }
            // Any other error is passed through unchanged.
            err => err,
        }
    }
    /// Writes auto-dedented text to a file.
    fn write_dedented(&mut self, path: &str, content: &str) -> Result<()> {
        self.write_file(path, ruff_python_trivia::textwrap::dedent(content))?;
        Ok(())
    }
    /// Writes the content of the given files and notifies the Db about the change.
    fn write_files<P, C, I>(&mut self, files: I) -> Result<()>
    where
        I: IntoIterator<Item = (P, C)>,
        P: AsRef<SystemPath>,
        C: AsRef<str>,
    {
        for (path, content) in files {
            self.write_file(path, content)?;
        }
        Ok(())
    }
}
/// Extension trait for databases that use [`TestSystem`].
///
/// Provides various helper functions that ease testing.
pub trait DbWithTestSystem: Db + Sized {
    /// Returns the test system used by this database.
    fn test_system(&self) -> &TestSystem;
    /// Returns a mutable reference to the test system.
    fn test_system_mut(&mut self) -> &mut TestSystem;
    /// Writes the content of the given virtual file.
    ///
    /// ## Panics
    /// If the db isn't using the [`InMemorySystem`].
    fn write_virtual_file(&mut self, path: impl AsRef<SystemVirtualPath>, content: impl ToString) {
        let path = path.as_ref();
        self.test_system()
            .memory_file_system()
            .write_virtual_file(path, content);
    }
    /// Uses the given system instead of the testing system.
    ///
    /// This is useful for testing advanced file system features like permissions, symlinks, etc.
    ///
    /// Note that any files written to the memory file system won't be copied over.
    fn use_system<S>(&mut self, os: S)
    where
        S: WritableSystem + Send + Sync + RefUnwindSafe + 'static,
    {
        self.test_system_mut().use_system(os);
    }
    /// Returns the memory file system.
    ///
    /// ## Panics
    /// If the underlying test system isn't the [`InMemorySystem`].
    fn memory_file_system(&self) -> &MemoryFileSystem {
        self.test_system().memory_file_system()
    }
}
/// Every database that uses the [`TestSystem`] automatically gets the
/// [`DbWithWritableSystem`] helpers, with [`TestSystem`] as its writable system.
impl<T> DbWithWritableSystem for T
where
    T: DbWithTestSystem,
{
    type System = TestSystem;
    fn writable_system(&self) -> &Self::System {
        self.test_system()
    }
}
/// [`System`] implementation backed entirely by an in-memory file system.
#[derive(Default, Debug)]
pub struct InMemorySystem {
    /// Directory reported as the user's configuration directory, if set by a test.
    user_config_directory: Mutex<Option<SystemPathBuf>>,
    memory_fs: MemoryFileSystem,
}
impl InMemorySystem {
    /// Creates a new in-memory system with `cwd` as its current directory.
    pub fn new(cwd: SystemPathBuf) -> Self {
        Self {
            user_config_directory: Mutex::new(None),
            memory_fs: MemoryFileSystem::with_current_directory(cwd),
        }
    }
    /// Creates a system that wraps an existing memory file system.
    pub fn from_memory_fs(memory_fs: MemoryFileSystem) -> Self {
        Self {
            user_config_directory: Mutex::new(None),
            memory_fs,
        }
    }
    /// Returns the underlying memory file system.
    pub fn fs(&self) -> &MemoryFileSystem {
        &self.memory_fs
    }
    /// Sets (or clears) the directory reported by [`System::user_config_directory`].
    pub fn set_user_configuration_directory(&self, directory: Option<SystemPathBuf>) {
        let mut user_directory = self.user_config_directory.lock().unwrap();
        *user_directory = directory;
    }
}
impl System for InMemorySystem {
    fn path_metadata(&self, path: &SystemPath) -> Result<Metadata> {
        self.memory_fs.metadata(path)
    }
    fn canonicalize_path(&self, path: &SystemPath) -> Result<SystemPathBuf> {
        self.memory_fs.canonicalize(path)
    }
    fn read_to_string(&self, path: &SystemPath) -> Result<String> {
        self.memory_fs.read_to_string(path)
    }
    fn read_to_notebook(&self, path: &SystemPath) -> std::result::Result<Notebook, NotebookError> {
        // Notebooks are parsed from their raw source representation.
        let content = self.read_to_string(path)?;
        Notebook::from_source_code(&content)
    }
    fn read_virtual_path_to_string(&self, path: &SystemVirtualPath) -> Result<String> {
        self.memory_fs.read_virtual_path_to_string(path)
    }
    fn read_virtual_path_to_notebook(
        &self,
        path: &SystemVirtualPath,
    ) -> std::result::Result<Notebook, NotebookError> {
        let content = self.read_virtual_path_to_string(path)?;
        Notebook::from_source_code(&content)
    }
    fn current_directory(&self) -> &SystemPath {
        self.memory_fs.current_directory()
    }
    fn user_config_directory(&self) -> Option<SystemPathBuf> {
        self.user_config_directory.lock().unwrap().clone()
    }
    fn cache_dir(&self) -> Option<SystemPathBuf> {
        // The in-memory system doesn't provide a cache directory.
        None
    }
    fn read_directory<'a>(
        &'a self,
        path: &SystemPath,
    ) -> Result<Box<dyn Iterator<Item = Result<DirectoryEntry>> + 'a>> {
        Ok(Box::new(self.memory_fs.read_directory(path)?))
    }
    fn walk_directory(&self, path: &SystemPath) -> WalkDirectoryBuilder {
        self.memory_fs.walk_directory(path)
    }
    fn glob(
        &self,
        pattern: &str,
    ) -> std::result::Result<
        Box<dyn Iterator<Item = std::result::Result<SystemPathBuf, GlobError>> + '_>,
        PatternError,
    > {
        let iterator = self.memory_fs.glob(pattern)?;
        Ok(Box::new(iterator))
    }
    fn as_writable(&self) -> Option<&dyn WritableSystem> {
        Some(self)
    }
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }
    #[inline]
    fn path_exists_case_sensitive(&self, path: &SystemPath, _prefix: &SystemPath) -> bool {
        // The memory file system is case-sensitive.
        self.path_exists(path)
    }
    fn case_sensitivity(&self) -> CaseSensitivity {
        CaseSensitivity::CaseSensitive
    }
    fn dyn_clone(&self) -> Box<dyn System> {
        // `Mutex` isn't `Clone`: copy the contents into a fresh lock.
        Box::new(Self {
            user_config_directory: Mutex::new(self.user_config_directory.lock().unwrap().clone()),
            memory_fs: self.memory_fs.clone(),
        })
    }
}
impl WritableSystem for InMemorySystem {
    // All writes go straight to the memory file system.
    fn create_new_file(&self, path: &SystemPath) -> Result<()> {
        self.memory_fs.create_new_file(path)
    }
    fn write_file(&self, path: &SystemPath, content: &str) -> Result<()> {
        self.memory_fs.write_file(path, content)
    }
    fn create_directory_all(&self, path: &SystemPath) -> Result<()> {
        self.memory_fs.create_directory_all(path)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/system/path.rs | crates/ruff_db/src/system/path.rs | use camino::{Utf8Path, Utf8PathBuf};
use std::borrow::Borrow;
use std::fmt::Formatter;
use std::ops::Deref;
use std::path::{Path, PathBuf, StripPrefixError};
/// A slice of a path on [`System`](super::System) (akin to [`str`]).
///
/// The path is guaranteed to be valid UTF-8.
// `repr(transparent)` is load-bearing: `SystemPath::new` pointer-casts a `&Utf8Path`
// into a `&SystemPath`, which is only sound for a transparent wrapper.
#[repr(transparent)]
#[derive(Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct SystemPath(Utf8Path);
impl SystemPath {
    /// Wraps a UTF-8 path reference as a [`SystemPath`].
    ///
    /// This is a zero-cost cast; no allocation or validation happens.
    pub fn new(path: &(impl AsRef<Utf8Path> + ?Sized)) -> &Self {
        let path = path.as_ref();
        // SAFETY: `SystemPath` is marked as #[repr(transparent)] so the conversion from a
        // *const Utf8Path to a *const SystemPath is valid.
        unsafe { &*(path as *const Utf8Path as *const SystemPath) }
    }
    /// Takes any path, and when possible, converts Windows UNC paths to regular paths.
    /// If the path can't be converted, it's returned unmodified.
    ///
    /// On non-Windows this is no-op.
    ///
    /// `\\?\C:\Windows` will be converted to `C:\Windows`,
    /// but `\\?\C:\COM` will be left as-is (due to a reserved filename).
    ///
    /// Use this to pass arbitrary paths to programs that may not be UNC-aware.
    ///
    /// It's generally safe to pass UNC paths to legacy programs, because
    /// these paths contain a reserved prefix, so will gracefully fail
    /// if used with legacy APIs that don't support UNC.
    ///
    /// This function does not perform any I/O.
    ///
    /// Currently paths with unpaired surrogates aren't converted even if they
    /// could be, due to limitations of Rust's `OsStr` API.
    ///
    /// To check if a path remained as UNC, use `path.as_os_str().as_encoded_bytes().starts_with(b"\\\\")`.
    #[inline]
    pub fn simplified(&self) -> &SystemPath {
        // `dunce::simplified` only ever strips a path prefix, so the result is still
        // valid UTF-8 and `from_std_path` can't fail.
        SystemPath::from_std_path(dunce::simplified(self.as_std_path())).unwrap()
    }
/// Returns `true` if the `SystemPath` is absolute, i.e., if it is independent of
/// the current directory.
///
/// * On Unix, a path is absolute if it starts with the root, so
/// `is_absolute` and [`has_root`] are equivalent.
///
/// * On Windows, a path is absolute if it has a prefix and starts with the
/// root: `c:\windows` is absolute, while `c:temp` and `\temp` are not.
///
/// # Examples
///
/// ```
/// use ruff_db::system::SystemPath;
///
/// assert!(!SystemPath::new("foo.txt").is_absolute());
/// ```
///
/// [`has_root`]: Utf8Path::has_root
#[inline]
#[must_use]
pub fn is_absolute(&self) -> bool {
self.0.is_absolute()
}
/// Extracts the file extension, if possible.
///
/// The extension is:
///
/// * [`None`], if there is no file name;
/// * [`None`], if there is no embedded `.`;
/// * [`None`], if the file name begins with `.` and has no other `.`s within;
/// * Otherwise, the portion of the file name after the final `.`
///
/// # Examples
///
/// ```
/// use ruff_db::system::SystemPath;
///
/// assert_eq!("rs", SystemPath::new("foo.rs").extension().unwrap());
/// assert_eq!("gz", SystemPath::new("foo.tar.gz").extension().unwrap());
/// ```
///
/// See [`Path::extension`] for more details.
#[inline]
#[must_use]
pub fn extension(&self) -> Option<&str> {
self.0.extension()
}
/// Determines whether `base` is a prefix of `self`.
///
/// Only considers whole path components to match.
///
/// # Examples
///
/// ```
/// use ruff_db::system::SystemPath;
///
/// let path = SystemPath::new("/etc/passwd");
///
/// assert!(path.starts_with("/etc"));
/// assert!(path.starts_with("/etc/"));
/// assert!(path.starts_with("/etc/passwd"));
/// assert!(path.starts_with("/etc/passwd/")); // extra slash is okay
/// assert!(path.starts_with("/etc/passwd///")); // multiple extra slashes are okay
///
/// assert!(!path.starts_with("/e"));
/// assert!(!path.starts_with("/etc/passwd.txt"));
///
/// assert!(!SystemPath::new("/etc/foo.rs").starts_with("/etc/foo"));
/// ```
#[inline]
#[must_use]
pub fn starts_with(&self, base: impl AsRef<SystemPath>) -> bool {
self.0.starts_with(base.as_ref())
}
/// Determines whether `child` is a suffix of `self`.
///
/// Only considers whole path components to match.
///
/// # Examples
///
/// ```
/// use ruff_db::system::SystemPath;
///
/// let path = SystemPath::new("/etc/resolv.conf");
///
/// assert!(path.ends_with("resolv.conf"));
/// assert!(path.ends_with("etc/resolv.conf"));
/// assert!(path.ends_with("/etc/resolv.conf"));
///
/// assert!(!path.ends_with("/resolv.conf"));
/// assert!(!path.ends_with("conf")); // use .extension() instead
/// ```
#[inline]
#[must_use]
pub fn ends_with(&self, child: impl AsRef<SystemPath>) -> bool {
self.0.ends_with(child.as_ref())
}
/// Returns the `FileSystemPath` without its final component, if there is one.
///
/// Returns [`None`] if the path terminates in a root or prefix.
///
/// # Examples
///
/// ```
/// use ruff_db::system::SystemPath;
///
/// let path = SystemPath::new("/foo/bar");
/// let parent = path.parent().unwrap();
/// assert_eq!(parent, SystemPath::new("/foo"));
///
/// let grand_parent = parent.parent().unwrap();
/// assert_eq!(grand_parent, SystemPath::new("/"));
/// assert_eq!(grand_parent.parent(), None);
/// ```
#[inline]
#[must_use]
pub fn parent(&self) -> Option<&SystemPath> {
self.0.parent().map(SystemPath::new)
}
/// Produces an iterator over `SystemPath` and its ancestors.
///
/// The iterator will yield the `SystemPath` that is returned if the [`parent`] method is used zero
/// or more times. That means, the iterator will yield `&self`, `&self.parent().unwrap()`,
/// `&self.parent().unwrap().parent().unwrap()` and so on. If the [`parent`] method returns
/// [`None`], the iterator will do likewise. The iterator will always yield at least one value,
/// namely `&self`.
///
/// # Examples
///
/// ```
/// use ruff_db::system::SystemPath;
///
/// let mut ancestors = SystemPath::new("/foo/bar").ancestors();
/// assert_eq!(ancestors.next(), Some(SystemPath::new("/foo/bar")));
/// assert_eq!(ancestors.next(), Some(SystemPath::new("/foo")));
/// assert_eq!(ancestors.next(), Some(SystemPath::new("/")));
/// assert_eq!(ancestors.next(), None);
///
/// let mut ancestors = SystemPath::new("../foo/bar").ancestors();
/// assert_eq!(ancestors.next(), Some(SystemPath::new("../foo/bar")));
/// assert_eq!(ancestors.next(), Some(SystemPath::new("../foo")));
/// assert_eq!(ancestors.next(), Some(SystemPath::new("..")));
/// assert_eq!(ancestors.next(), Some(SystemPath::new("")));
/// assert_eq!(ancestors.next(), None);
/// ```
///
/// [`parent`]: SystemPath::parent
#[inline]
pub fn ancestors(&self) -> impl Iterator<Item = &SystemPath> {
self.0.ancestors().map(SystemPath::new)
}
/// Produces an iterator over the [`camino::Utf8Component`]s of the path.
///
/// When parsing the path, there is a small amount of normalization:
///
/// * Repeated separators are ignored, so `a/b` and `a//b` both have
/// `a` and `b` as components.
///
/// * Occurrences of `.` are normalized away, except if they are at the
/// beginning of the path. For example, `a/./b`, `a/b/`, `a/b/.` and
/// `a/b` all have `a` and `b` as components, but `./a/b` starts with
/// an additional [`CurDir`] component.
///
/// * A trailing slash is normalized away, `/a/b` and `/a/b/` are equivalent.
///
/// Note that no other normalization takes place; in particular, `a/c`
/// and `a/b/../c` are distinct, to account for the possibility that `b`
/// is a symbolic link (so its parent isn't `a`).
///
/// # Examples
///
/// ```
/// use camino::{Utf8Component};
/// use ruff_db::system::SystemPath;
///
/// let mut components = SystemPath::new("/tmp/foo.txt").components();
///
/// assert_eq!(components.next(), Some(Utf8Component::RootDir));
/// assert_eq!(components.next(), Some(Utf8Component::Normal("tmp")));
/// assert_eq!(components.next(), Some(Utf8Component::Normal("foo.txt")));
/// assert_eq!(components.next(), None)
/// ```
///
/// [`CurDir`]: camino::Utf8Component::CurDir
#[inline]
pub fn components(&self) -> camino::Utf8Components<'_> {
self.0.components()
}
/// Returns the final component of the `FileSystemPath`, if there is one.
///
/// If the path is a normal file, this is the file name. If it's the path of a directory, this
/// is the directory name.
///
/// Returns [`None`] if the path terminates in `..`.
///
/// # Examples
///
/// ```
/// use camino::Utf8Path;
/// use ruff_db::system::SystemPath;
///
/// assert_eq!(Some("bin"), SystemPath::new("/usr/bin/").file_name());
/// assert_eq!(Some("foo.txt"), SystemPath::new("tmp/foo.txt").file_name());
/// assert_eq!(Some("foo.txt"), SystemPath::new("foo.txt/.").file_name());
/// assert_eq!(Some("foo.txt"), SystemPath::new("foo.txt/.//").file_name());
/// assert_eq!(None, SystemPath::new("foo.txt/..").file_name());
/// assert_eq!(None, SystemPath::new("/").file_name());
/// ```
#[inline]
#[must_use]
pub fn file_name(&self) -> Option<&str> {
self.0.file_name()
}
/// Extracts the stem (non-extension) portion of [`self.file_name`].
///
/// [`self.file_name`]: SystemPath::file_name
///
/// The stem is:
///
/// * [`None`], if there is no file name;
/// * The entire file name if there is no embedded `.`;
/// * The entire file name if the file name begins with `.` and has no other `.`s within;
/// * Otherwise, the portion of the file name before the final `.`
///
/// # Examples
///
/// ```
/// use ruff_db::system::SystemPath;
///
/// assert_eq!("foo", SystemPath::new("foo.rs").file_stem().unwrap());
/// assert_eq!("foo.tar", SystemPath::new("foo.tar.gz").file_stem().unwrap());
/// ```
#[inline]
#[must_use]
pub fn file_stem(&self) -> Option<&str> {
self.0.file_stem()
}
/// Returns a path that, when joined onto `base`, yields `self`.
///
/// # Errors
///
/// If `base` is not a prefix of `self` (i.e., [`starts_with`]
/// returns `false`), returns [`Err`].
///
/// [`starts_with`]: SystemPath::starts_with
///
/// # Examples
///
/// ```
/// use ruff_db::system::{SystemPath, SystemPathBuf};
///
/// let path = SystemPath::new("/test/haha/foo.txt");
///
/// assert_eq!(path.strip_prefix("/"), Ok(SystemPath::new("test/haha/foo.txt")));
/// assert_eq!(path.strip_prefix("/test"), Ok(SystemPath::new("haha/foo.txt")));
/// assert_eq!(path.strip_prefix("/test/"), Ok(SystemPath::new("haha/foo.txt")));
/// assert_eq!(path.strip_prefix("/test/haha/foo.txt"), Ok(SystemPath::new("")));
/// assert_eq!(path.strip_prefix("/test/haha/foo.txt/"), Ok(SystemPath::new("")));
///
/// assert!(path.strip_prefix("test").is_err());
/// assert!(path.strip_prefix("/haha").is_err());
///
/// let prefix = SystemPathBuf::from("/test/");
/// assert_eq!(path.strip_prefix(prefix), Ok(SystemPath::new("haha/foo.txt")));
/// ```
#[inline]
pub fn strip_prefix(
&self,
base: impl AsRef<SystemPath>,
) -> std::result::Result<&SystemPath, StripPrefixError> {
self.0.strip_prefix(base.as_ref()).map(SystemPath::new)
}
/// Creates an owned [`SystemPathBuf`] with `path` adjoined to `self`.
///
/// See [`std::path::PathBuf::push`] for more details on what it means to adjoin a path.
///
/// # Examples
///
/// ```
/// use ruff_db::system::{SystemPath, SystemPathBuf};
///
/// assert_eq!(SystemPath::new("/etc").join("passwd"), SystemPathBuf::from("/etc/passwd"));
/// ```
#[inline]
#[must_use]
pub fn join(&self, path: impl AsRef<SystemPath>) -> SystemPathBuf {
SystemPathBuf::from_utf8_path_buf(self.0.join(&path.as_ref().0))
}
/// Creates an owned [`SystemPathBuf`] like `self` but with the given extension.
///
/// See [`std::path::PathBuf::set_extension`] for more details.
///
/// # Examples
///
/// ```
/// use ruff_db::system::{SystemPath, SystemPathBuf};
///
/// let path = SystemPath::new("foo.rs");
/// assert_eq!(path.with_extension("txt"), SystemPathBuf::from("foo.txt"));
///
/// let path = SystemPath::new("foo.tar.gz");
/// assert_eq!(path.with_extension(""), SystemPathBuf::from("foo.tar"));
/// assert_eq!(path.with_extension("xz"), SystemPathBuf::from("foo.tar.xz"));
/// assert_eq!(path.with_extension("").with_extension("txt"), SystemPathBuf::from("foo.txt"));
/// ```
#[inline]
pub fn with_extension(&self, extension: &str) -> SystemPathBuf {
SystemPathBuf::from_utf8_path_buf(self.0.with_extension(extension))
}
/// Converts the path to an owned [`SystemPathBuf`].
pub fn to_path_buf(&self) -> SystemPathBuf {
SystemPathBuf(self.0.to_path_buf())
}
/// Returns the path as a string slice.
#[inline]
pub fn as_str(&self) -> &str {
self.0.as_str()
}
/// Returns the std path for the file.
#[inline]
pub fn as_std_path(&self) -> &Path {
self.0.as_std_path()
}
/// Returns the [`Utf8Path`] for the file.
#[inline]
pub fn as_utf8_path(&self) -> &Utf8Path {
&self.0
}
pub fn from_std_path(path: &Path) -> Option<&SystemPath> {
Some(SystemPath::new(Utf8Path::from_path(path)?))
}
/// Makes a path absolute and normalizes it without accessing the file system.
///
/// Adapted from [cargo](https://github.com/rust-lang/cargo/blob/fede83ccf973457de319ba6fa0e36ead454d2e20/src/cargo/util/paths.rs#L61)
///
/// # Examples
///
/// ## Posix paths
///
/// ```
/// # #[cfg(unix)]
/// # fn main() {
/// use ruff_db::system::{SystemPath, SystemPathBuf};
///
/// // Relative to absolute
/// let absolute = SystemPath::absolute("foo/./bar", "/tmp");
/// assert_eq!(absolute, SystemPathBuf::from("/tmp/foo/bar"));
///
/// // Path's going past the root are normalized to the root
/// let absolute = SystemPath::absolute("../../../", "/tmp");
/// assert_eq!(absolute, SystemPathBuf::from("/"));
///
/// // Absolute to absolute
/// let absolute = SystemPath::absolute("/foo//test/.././bar.rs", "/tmp");
/// assert_eq!(absolute, SystemPathBuf::from("/foo/bar.rs"));
/// # }
/// # #[cfg(not(unix))]
/// # fn main() {}
/// ```
///
/// ## Windows paths
///
/// ```
/// # #[cfg(windows)]
/// # fn main() {
/// use ruff_db::system::{SystemPath, SystemPathBuf};
///
/// // Relative to absolute
/// let absolute = SystemPath::absolute(r"foo\.\bar", r"C:\tmp");
/// assert_eq!(absolute, SystemPathBuf::from(r"C:\tmp\foo\bar"));
///
/// // Path's going past the root are normalized to the root
/// let absolute = SystemPath::absolute(r"..\..\..\", r"C:\tmp");
/// assert_eq!(absolute, SystemPathBuf::from(r"C:\"));
///
/// // Absolute to absolute
/// let absolute = SystemPath::absolute(r"C:\foo//test\..\./bar.rs", r"C:\tmp");
/// assert_eq!(absolute, SystemPathBuf::from(r"C:\foo\bar.rs"));
/// # }
/// # #[cfg(not(windows))]
/// # fn main() {}
/// ```
    pub fn absolute(path: impl AsRef<SystemPath>, cwd: impl AsRef<SystemPath>) -> SystemPathBuf {
        // Non-generic inner function keeps the monomorphized shim above small.
        fn absolute(path: &SystemPath, cwd: &SystemPath) -> SystemPathBuf {
            let path = &path.0;
            let mut components = path.components().peekable();
            // If `path` starts with a prefix (`C:`) and/or root (`/`), start from that;
            // otherwise the path is relative and is anchored at `cwd`.
            let mut ret = if let Some(
                c @ (camino::Utf8Component::Prefix(..) | camino::Utf8Component::RootDir),
            ) = components.peek().cloned()
            {
                components.next();
                Utf8PathBuf::from(c.as_str())
            } else {
                cwd.0.to_path_buf()
            };
            for component in components {
                match component {
                    // A prefix can only occur as the first component, and that one
                    // was consumed above.
                    camino::Utf8Component::Prefix(..) => unreachable!(),
                    // On Windows, a root dir can follow a prefix (`C:` then `\`).
                    camino::Utf8Component::RootDir => {
                        ret.push(component);
                    }
                    // `.` is a no-op.
                    camino::Utf8Component::CurDir => {}
                    // `..` removes the last component; popping at the root is a no-op,
                    // which is what clamps paths going past the root to the root
                    // (see the doctest above).
                    camino::Utf8Component::ParentDir => {
                        ret.pop();
                    }
                    camino::Utf8Component::Normal(c) => {
                        ret.push(c);
                    }
                }
            }
            SystemPathBuf::from_utf8_path_buf(ret)
        }
        absolute(path.as_ref(), cwd.as_ref())
    }
}
impl ToOwned for SystemPath {
type Owned = SystemPathBuf;
fn to_owned(&self) -> Self::Owned {
self.to_path_buf()
}
}
/// An owned, mutable path on [`System`](`super::System`) (akin to [`String`]).
///
/// The path is guaranteed to be valid UTF-8.
#[repr(transparent)]
#[derive(Eq, PartialEq, Clone, Hash, PartialOrd, Ord)]
#[cfg_attr(
feature = "serde",
derive(serde::Serialize, serde::Deserialize),
serde(transparent)
)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SystemPathBuf(#[cfg_attr(feature = "schemars", schemars(with = "String"))] Utf8PathBuf);
impl get_size2::GetSize for SystemPathBuf {
    fn get_heap_size_with_tracker<T: get_size2::GetSizeTracker>(&self, tracker: T) -> (usize, T) {
        // The only heap allocation is the path's string buffer; report its
        // capacity (not its length), since that's what's actually allocated.
        // No shared pointers are involved, so the tracker is passed through unchanged.
        (self.0.capacity(), tracker)
    }
}
impl SystemPathBuf {
    /// Creates a new, empty path.
    pub fn new() -> Self {
        Self(Utf8PathBuf::new())
    }
    /// Wraps an owned [`Utf8PathBuf`] without copying.
    pub fn from_utf8_path_buf(path: Utf8PathBuf) -> Self {
        Self(path)
    }
    /// Converts a [`std::path::PathBuf`] into a `SystemPathBuf`.
    ///
    /// # Errors
    ///
    /// Returns the original `PathBuf` unchanged if it is not valid UTF-8.
    pub fn from_path_buf(
        path: std::path::PathBuf,
    ) -> std::result::Result<Self, std::path::PathBuf> {
        Utf8PathBuf::from_path_buf(path).map(Self)
    }
    /// Extends `self` with `path`.
    ///
    /// If `path` is absolute, it replaces the current path.
    ///
    /// On Windows:
    ///
    /// * if `path` has a root but no prefix (e.g., `\windows`), it
    ///   replaces everything except for the prefix (if any) of `self`.
    /// * if `path` has a prefix but no root, it replaces `self`.
    ///
    /// # Examples
    ///
    /// Pushing a relative path extends the existing path:
    ///
    /// ```
    /// use ruff_db::system::SystemPathBuf;
    ///
    /// let mut path = SystemPathBuf::from("/tmp");
    /// path.push("file.bk");
    /// assert_eq!(path, SystemPathBuf::from("/tmp/file.bk"));
    /// ```
    ///
    /// Pushing an absolute path replaces the existing path:
    ///
    /// ```
    ///
    /// use ruff_db::system::SystemPathBuf;
    ///
    /// let mut path = SystemPathBuf::from("/tmp");
    /// path.push("/etc");
    /// assert_eq!(path, SystemPathBuf::from("/etc"));
    /// ```
    pub fn push(&mut self, path: impl AsRef<SystemPath>) {
        self.0.push(&path.as_ref().0);
    }
    /// Unwraps the inner [`Utf8PathBuf`] without copying.
    pub fn into_utf8_path_buf(self) -> Utf8PathBuf {
        self.0
    }
    /// Converts into a [`std::path::PathBuf`] (infallible; every UTF-8 path is a valid path).
    pub fn into_std_path_buf(self) -> PathBuf {
        self.0.into_std_path_buf()
    }
    /// Consumes the path and returns the underlying [`String`].
    pub fn into_string(self) -> String {
        self.0.into_string()
    }
    /// Coerces to a borrowed [`SystemPath`] slice.
    #[inline]
    pub fn as_path(&self) -> &SystemPath {
        SystemPath::new(&self.0)
    }
}
impl Borrow<SystemPath> for SystemPathBuf {
fn borrow(&self) -> &SystemPath {
self.as_path()
}
}
impl From<&str> for SystemPathBuf {
fn from(value: &str) -> Self {
SystemPathBuf::from_utf8_path_buf(Utf8PathBuf::from(value))
}
}
impl From<String> for SystemPathBuf {
fn from(value: String) -> Self {
SystemPathBuf::from_utf8_path_buf(Utf8PathBuf::from(value))
}
}
impl Default for SystemPathBuf {
fn default() -> Self {
Self::new()
}
}
impl AsRef<SystemPath> for SystemPathBuf {
#[inline]
fn as_ref(&self) -> &SystemPath {
self.as_path()
}
}
impl AsRef<SystemPath> for SystemPath {
#[inline]
fn as_ref(&self) -> &SystemPath {
self
}
}
impl AsRef<SystemPath> for Utf8Path {
#[inline]
fn as_ref(&self) -> &SystemPath {
SystemPath::new(self)
}
}
impl AsRef<SystemPath> for Utf8PathBuf {
#[inline]
fn as_ref(&self) -> &SystemPath {
SystemPath::new(self.as_path())
}
}
impl AsRef<SystemPath> for camino::Utf8Component<'_> {
#[inline]
fn as_ref(&self) -> &SystemPath {
SystemPath::new(self.as_str())
}
}
impl AsRef<SystemPath> for str {
#[inline]
fn as_ref(&self) -> &SystemPath {
SystemPath::new(self)
}
}
impl AsRef<SystemPath> for String {
#[inline]
fn as_ref(&self) -> &SystemPath {
SystemPath::new(self)
}
}
impl AsRef<Path> for SystemPath {
#[inline]
fn as_ref(&self) -> &Path {
self.0.as_std_path()
}
}
impl Deref for SystemPathBuf {
type Target = SystemPath;
#[inline]
fn deref(&self) -> &Self::Target {
self.as_path()
}
}
impl AsRef<Path> for SystemPathBuf {
#[inline]
fn as_ref(&self) -> &Path {
self.0.as_std_path()
}
}
impl<P: AsRef<SystemPath>> FromIterator<P> for SystemPathBuf {
    /// Collects path segments into a single path by pushing them in order.
    fn from_iter<I: IntoIterator<Item = P>>(iter: I) -> Self {
        iter.into_iter().fold(SystemPathBuf::new(), |mut buf, path| {
            buf.push(path);
            buf
        })
    }
}
impl<P: AsRef<SystemPath>> Extend<P> for SystemPathBuf {
    /// Pushes each yielded segment onto the path (absolute segments replace it,
    /// per [`SystemPathBuf::push`]).
    fn extend<I: IntoIterator<Item = P>>(&mut self, iter: I) {
        iter.into_iter().for_each(|path| self.push(path));
    }
}
impl std::fmt::Debug for SystemPath {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
impl std::fmt::Display for SystemPath {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
impl std::fmt::Debug for SystemPathBuf {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
impl std::fmt::Display for SystemPathBuf {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
#[cfg(feature = "cache")]
impl ruff_cache::CacheKey for SystemPath {
fn cache_key(&self, hasher: &mut ruff_cache::CacheKeyHasher) {
self.0.as_str().cache_key(hasher);
}
}
#[cfg(feature = "cache")]
impl ruff_cache::CacheKey for SystemPathBuf {
fn cache_key(&self, hasher: &mut ruff_cache::CacheKeyHasher) {
self.as_path().cache_key(hasher);
}
}
/// A slice of a virtual path on [`System`](super::System) (akin to [`str`]).
#[repr(transparent)]
#[derive(Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct SystemVirtualPath(str);
impl SystemVirtualPath {
pub const fn new(path: &str) -> &SystemVirtualPath {
// SAFETY: SystemVirtualPath is marked as #[repr(transparent)] so the conversion from a
// *const str to a *const SystemVirtualPath is valid.
unsafe { &*(path as *const str as *const SystemVirtualPath) }
}
/// Converts the path to an owned [`SystemVirtualPathBuf`].
pub fn to_path_buf(&self) -> SystemVirtualPathBuf {
SystemVirtualPathBuf(self.0.to_string())
}
/// Extracts the file extension, if possible.
///
/// # Examples
///
/// ```
/// use ruff_db::system::SystemVirtualPath;
///
/// assert_eq!(None, SystemVirtualPath::new("untitled:Untitled-1").extension());
/// assert_eq!("ipynb", SystemVirtualPath::new("untitled:Untitled-1.ipynb").extension().unwrap());
/// assert_eq!("ipynb", SystemVirtualPath::new("vscode-notebook-cell:Untitled-1.ipynb").extension().unwrap());
/// ```
///
/// See [`Path::extension`] for more details.
pub fn extension(&self) -> Option<&str> {
Path::new(&self.0).extension().and_then(|ext| ext.to_str())
}
/// Returns the path as a string slice.
#[inline]
pub fn as_str(&self) -> &str {
&self.0
}
}
/// An owned, virtual path on [`System`](`super::System`) (akin to [`String`]).
#[derive(Eq, PartialEq, Clone, Hash, PartialOrd, Ord, get_size2::GetSize)]
pub struct SystemVirtualPathBuf(String);
impl SystemVirtualPathBuf {
#[inline]
pub const fn as_path(&self) -> &SystemVirtualPath {
SystemVirtualPath::new(self.0.as_str())
}
}
impl From<String> for SystemVirtualPathBuf {
fn from(value: String) -> Self {
SystemVirtualPathBuf(value)
}
}
impl AsRef<SystemVirtualPath> for SystemVirtualPathBuf {
#[inline]
fn as_ref(&self) -> &SystemVirtualPath {
self.as_path()
}
}
impl AsRef<SystemVirtualPath> for SystemVirtualPath {
#[inline]
fn as_ref(&self) -> &SystemVirtualPath {
self
}
}
impl AsRef<SystemVirtualPath> for str {
#[inline]
fn as_ref(&self) -> &SystemVirtualPath {
SystemVirtualPath::new(self)
}
}
impl AsRef<SystemVirtualPath> for String {
#[inline]
fn as_ref(&self) -> &SystemVirtualPath {
SystemVirtualPath::new(self)
}
}
impl Deref for SystemVirtualPathBuf {
type Target = SystemVirtualPath;
fn deref(&self) -> &Self::Target {
self.as_path()
}
}
impl std::fmt::Debug for SystemVirtualPath {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
impl std::fmt::Display for SystemVirtualPath {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
impl std::fmt::Debug for SystemVirtualPathBuf {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
impl std::fmt::Display for SystemVirtualPathBuf {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
#[cfg(feature = "cache")]
impl ruff_cache::CacheKey for SystemVirtualPath {
fn cache_key(&self, hasher: &mut ruff_cache::CacheKeyHasher) {
self.as_str().cache_key(hasher);
}
}
#[cfg(feature = "cache")]
impl ruff_cache::CacheKey for SystemVirtualPathBuf {
fn cache_key(&self, hasher: &mut ruff_cache::CacheKeyHasher) {
self.as_path().cache_key(hasher);
}
}
impl Borrow<SystemVirtualPath> for SystemVirtualPathBuf {
fn borrow(&self) -> &SystemVirtualPath {
self.as_path()
}
}
/// Deduplicates identical paths and removes nested paths.
///
/// # Examples
/// ```rust
/// use ruff_db::system::{SystemPath, deduplicate_nested_paths};
///
/// let paths = vec![SystemPath::new("/a/b/c"), SystemPath::new("/a/b"), SystemPath::new("/a/beta"), SystemPath::new("/a/b/c")];
/// assert_eq!(deduplicate_nested_paths(paths).collect::<Vec<_>>(), &[SystemPath::new("/a/b"), SystemPath::new("/a/beta")]);
/// ```
pub fn deduplicate_nested_paths<P, I>(paths: I) -> DeduplicatedNestedPathsIter<P>
where
    I: IntoIterator<Item = P>,
    P: AsRef<SystemPath>,
{
    DeduplicatedNestedPathsIter::new(paths)
}
/// Iterator yielding deduplicated, non-nested paths.
///
/// Created by [`deduplicate_nested_paths`].
pub struct DeduplicatedNestedPathsIter<P> {
    inner: std::vec::IntoIter<P>,
    /// The next path to yield, pre-fetched so [`Iterator::next`] can look
    /// ahead while skipping paths nested under the current one.
    next: Option<P>,
}
impl<P> DeduplicatedNestedPathsIter<P>
where
    P: AsRef<SystemPath>,
{
    fn new<I>(paths: I) -> Self
    where
        I: IntoIterator<Item = P>,
    {
        let mut paths = paths.into_iter().collect::<Vec<_>>();
        // Sort the path to ensure that e.g. `/a/b/c`, comes right after `/a/b`.
        paths.sort_unstable_by(|left, right| left.as_ref().cmp(right.as_ref()));
        let mut iter = paths.into_iter();
        Self {
            // Pre-fetch the first (lexicographically smallest) path; `next`
            // uses it as the reference when skipping duplicates and nested paths.
            next: iter.next(),
            inner: iter,
        }
    }
}
impl<P> Iterator for DeduplicatedNestedPathsIter<P>
where
    P: AsRef<SystemPath>,
{
    type Item = P;

    fn next(&mut self) -> Option<Self::Item> {
        let current = self.next.take()?;
        // Because the paths were sorted up front, every duplicate of `current`
        // and every path nested under it follows it immediately. Drop those and
        // remember the first unrelated path as the next item to yield.
        self.next = self
            .inner
            .by_ref()
            .find(|candidate| !candidate.as_ref().starts_with(current.as_ref()));
        Some(current)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/system/walk_directory.rs | crates/ruff_db/src/system/walk_directory.rs | use crate::system::SystemPathBuf;
use std::fmt::{Display, Formatter};
use std::path::PathBuf;
use super::{FileType, SystemPath};
/// A builder for constructing a directory recursive traversal.
pub struct WalkDirectoryBuilder {
/// The implementation that does the directory walking.
walker: Box<dyn DirectoryWalker>,
/// The paths that should be walked.
paths: Vec<SystemPathBuf>,
ignore_hidden: bool,
standard_filters: bool,
}
impl WalkDirectoryBuilder {
pub fn new<W>(path: impl AsRef<SystemPath>, walker: W) -> Self
where
W: DirectoryWalker + 'static,
{
Self {
walker: Box::new(walker),
paths: vec![path.as_ref().to_path_buf()],
ignore_hidden: true,
standard_filters: true,
}
}
/// Adds a path that should be traversed recursively.
///
/// Each additional path is traversed recursively.
/// This should be preferred over building multiple
/// walkers since it enables reusing resources.
#[expect(clippy::should_implement_trait)]
pub fn add(mut self, path: impl AsRef<SystemPath>) -> Self {
self.paths.push(path.as_ref().to_path_buf());
self
}
    /// Whether hidden files should be ignored.
    ///
    /// The definition of what a hidden file is depends on the [`System`](super::System) and can be platform-dependent.
    ///
    /// This is enabled by default.
    pub fn ignore_hidden(mut self, hidden: bool) -> Self {
        self.ignore_hidden = hidden;
        self
    }
/// Enables all the standard ignore filters.
///
/// This toggles, as a group, all the filters that are enabled by default:
/// * [`hidden`](Self::ignore_hidden)
/// * Any [`System`](super::System) specific filters according (e.g., respecting `.ignore`, `.gitignore`, files).
///
/// Defaults to `true`.
pub fn standard_filters(mut self, standard_filters: bool) -> Self {
self.standard_filters = standard_filters;
self.ignore_hidden = standard_filters;
self
}
/// Runs the directory traversal and calls the passed `builder` to create visitors
/// that do the visiting. The walker may run multiple threads to visit the directories.
pub fn run<'s, F>(self, builder: F)
where
F: FnMut() -> FnVisitor<'s>,
{
self.visit(&mut FnBuilder { builder });
}
/// Runs the directory traversal and calls the passed `builder` to create visitors
/// that do the visiting. The walker may run multiple threads to visit the directories.
pub fn visit(self, builder: &mut dyn WalkDirectoryVisitorBuilder) {
let configuration = WalkDirectoryConfiguration {
paths: self.paths,
ignore_hidden: self.ignore_hidden,
standard_filters: self.standard_filters,
};
self.walker.walk(builder, configuration);
}
}
/// Concrete walker that performs the directory walking.
pub trait DirectoryWalker {
fn walk(
&self,
builder: &mut dyn WalkDirectoryVisitorBuilder,
configuration: WalkDirectoryConfiguration,
);
}
/// Creates a visitor for each thread that does the visiting.
pub trait WalkDirectoryVisitorBuilder<'s> {
fn build(&mut self) -> Box<dyn WalkDirectoryVisitor + 's>;
}
/// Visitor handling the individual directory entries.
pub trait WalkDirectoryVisitor: Send {
fn visit(&mut self, entry: std::result::Result<DirectoryEntry, Error>) -> WalkState;
}
/// Adapter that turns a closure producing [`FnVisitor`]s into a
/// [`WalkDirectoryVisitorBuilder`], backing [`WalkDirectoryBuilder::run`].
struct FnBuilder<F> {
    builder: F,
}
impl<'s, F> WalkDirectoryVisitorBuilder<'s> for FnBuilder<F>
where
    F: FnMut() -> FnVisitor<'s>,
{
    fn build(&mut self) -> Box<dyn WalkDirectoryVisitor + 's> {
        // Invoke the user's factory once per requested visitor
        // (the walker may build one visitor per thread).
        let visitor = (self.builder)();
        Box::new(FnVisitorImpl(visitor))
    }
}
/// A boxed callback that handles a single directory entry (or error) and
/// steers the traversal via the returned [`WalkState`].
type FnVisitor<'s> =
    Box<dyn FnMut(std::result::Result<DirectoryEntry, Error>) -> WalkState + Send + 's>;
/// Newtype wrapper so a bare closure can implement [`WalkDirectoryVisitor`].
struct FnVisitorImpl<'s>(FnVisitor<'s>);
impl WalkDirectoryVisitor for FnVisitorImpl<'_> {
    fn visit(&mut self, entry: std::result::Result<DirectoryEntry, Error>) -> WalkState {
        (self.0)(entry)
    }
}
pub struct WalkDirectoryConfiguration {
pub paths: Vec<SystemPathBuf>,
pub ignore_hidden: bool,
pub standard_filters: bool,
}
/// An entry in a directory.
#[derive(Debug, Clone)]
pub struct DirectoryEntry {
pub(super) path: SystemPathBuf,
pub(super) file_type: FileType,
pub(super) depth: usize,
}
impl DirectoryEntry {
/// The full path that this entry represents.
pub fn path(&self) -> &SystemPath {
&self.path
}
/// The full path that this entry represents.
/// Analogous to [`DirectoryEntry::path`], but moves ownership of the path.
pub fn into_path(self) -> SystemPathBuf {
self.path
}
/// Return the file type for the file that this entry points to.
pub fn file_type(&self) -> FileType {
self.file_type
}
/// Returns the depth at which this entry was created relative to the root.
pub fn depth(&self) -> usize {
self.depth
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum WalkState {
/// Continue walking as normal
Continue,
/// If the entry given is a directory, don't descend into it.
/// In all other cases, this has no effect.
Skip,
/// Quit the entire iterator as soon as possible.
///
/// Note: This is an inherently asynchronous action. It's possible
/// for more entries to be yielded even after instructing the iterator to quit.
Quit,
}
pub struct Error {
pub(super) depth: Option<usize>,
pub(super) kind: ErrorKind,
}
impl Error {
pub fn depth(&self) -> Option<usize> {
self.depth
}
pub fn kind(&self) -> &ErrorKind {
&self.kind
}
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match &self.kind {
ErrorKind::Loop { ancestor, child } => {
write!(
f,
"File system loop found: {child} points to an ancestor {ancestor}",
)
}
ErrorKind::Io {
path: Some(path),
err,
} => {
write!(f, "IO error for operation on {path}: {err}")
}
ErrorKind::Io { path: None, err } => err.fmt(f),
ErrorKind::NonUtf8Path { path } => {
write!(f, "Non-UTF8 path: {}", path.display())
}
}
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::error::Error for Error {}
#[derive(Debug)]
pub enum ErrorKind {
/// An error that occurs when a file loop is detected when traversing
/// symbolic links.
Loop {
ancestor: SystemPathBuf,
child: SystemPathBuf,
},
/// An error that occurs when doing I/O
Io {
path: Option<SystemPathBuf>,
err: std::io::Error,
},
/// A path is not a valid UTF-8 path.
NonUtf8Path { path: PathBuf },
}
#[cfg(test)]
pub(super) mod tests {
use crate::system::walk_directory::{DirectoryEntry, Error};
use crate::system::{FileType, SystemPathBuf};
use std::collections::BTreeMap;
/// Test helper that creates a visual representation of the visited directory entries.
pub(crate) struct DirectoryEntryToString {
root_path: SystemPathBuf,
inner: std::sync::Mutex<DirectoryEntryToStringInner>,
}
impl DirectoryEntryToString {
pub(crate) fn new(root_path: SystemPathBuf) -> Self {
Self {
root_path,
inner: std::sync::Mutex::new(DirectoryEntryToStringInner::default()),
}
}
pub(crate) fn write_entry(&self, entry: Result<DirectoryEntry, Error>) {
let mut inner = self.inner.lock().unwrap();
let DirectoryEntryToStringInner { errors, visited } = &mut *inner;
match entry {
Ok(entry) => {
let relative_path = entry
.path()
.strip_prefix(&self.root_path)
.unwrap_or(entry.path());
let unix_path = relative_path
.components()
.map(|component| component.as_str())
.collect::<Vec<_>>()
.join("/");
visited.insert(unix_path, (entry.file_type, entry.depth));
}
Err(error) => {
errors.push_str(&error.to_string());
errors.push('\n');
}
}
}
}
impl std::fmt::Display for DirectoryEntryToString {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let inner = self.inner.lock().unwrap();
write!(f, "{paths:#?}", paths = inner.visited)?;
if !inner.errors.is_empty() {
writeln!(f, "\n\n{errors}", errors = inner.errors).unwrap();
}
Ok(())
}
}
#[derive(Default)]
struct DirectoryEntryToStringInner {
errors: String,
/// Stores the visited path. The key is the relative path to the root, using `/` as path separator.
visited: BTreeMap<String, (FileType, usize)>,
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/system/os.rs | crates/ruff_db/src/system/os.rs | #![allow(clippy::disallowed_methods)]
use super::walk_directory::{
self, DirectoryWalker, WalkDirectoryBuilder, WalkDirectoryConfiguration,
WalkDirectoryVisitorBuilder, WalkState,
};
use crate::max_parallelism;
use crate::system::{
CaseSensitivity, DirectoryEntry, FileType, GlobError, GlobErrorKind, Metadata, Result, System,
SystemPath, SystemPathBuf, SystemVirtualPath, WritableSystem,
};
use filetime::FileTime;
use ruff_notebook::{Notebook, NotebookError};
use rustc_hash::FxHashSet;
use std::num::NonZeroUsize;
use std::panic::RefUnwindSafe;
use std::sync::Arc;
use std::{any::Any, path::PathBuf};
/// A system implementation that uses the OS file system.
#[derive(Debug, Clone)]
pub struct OsSystem {
    // Shared behind an `Arc` so clones are cheap and all clones observe the
    // same caches and configuration.
    inner: Arc<OsSystemInner>,
}
#[derive(Default, Debug)]
struct OsSystemInner {
    /// The current working directory (an absolute path, asserted in `OsSystem::new`).
    cwd: SystemPathBuf,

    /// Per-directory listings of exact on-disk file names, used for
    /// case-sensitive existence checks on case-insensitive file systems.
    real_case_cache: CaseSensitivePathsCache,

    /// Case sensitivity of the file system, detected once at construction.
    case_sensitivity: CaseSensitivity,

    /// Overrides the user's configuration directory for testing.
    /// This is an `Option<Option<..>>` to allow setting an override of `None`.
    #[cfg(feature = "testing")]
    user_config_directory_override: std::sync::Mutex<Option<Option<SystemPathBuf>>>,
}
impl OsSystem {
    /// Creates a new `OsSystem` with `cwd` as the current working directory.
    ///
    /// # Panics
    ///
    /// Panics if `cwd` is not an absolute path.
    pub fn new(cwd: impl AsRef<SystemPath>) -> Self {
        let cwd = cwd.as_ref();
        assert!(cwd.as_utf8_path().is_absolute());

        // Probe the file system once up-front; the result is reused for every
        // case-sensitive existence check.
        let case_sensitivity = detect_case_sensitivity(cwd);

        tracing::debug!(
            "Architecture: {}, OS: {}, case-sensitive: {case_sensitivity}",
            std::env::consts::ARCH,
            std::env::consts::OS,
        );

        Self {
            // Spreading `..Default` because it isn't possible to feature gate the initializer of a single field.
            inner: Arc::new(OsSystemInner {
                cwd: cwd.to_path_buf(),
                case_sensitivity,
                ..Default::default()
            }),
        }
    }

    /// Returns the Unix permission bits (`st_mode`) of `metadata`.
    #[cfg(unix)]
    fn permissions(metadata: &std::fs::Metadata) -> Option<u32> {
        use std::os::unix::fs::PermissionsExt;

        Some(metadata.permissions().mode())
    }

    /// Permissions aren't exposed on non-Unix platforms.
    #[cfg(not(unix))]
    fn permissions(_metadata: &std::fs::Metadata) -> Option<u32> {
        None
    }
}
impl System for OsSystem {
    fn path_metadata(&self, path: &SystemPath) -> Result<Metadata> {
        let metadata = path.as_std_path().metadata()?;
        // The file's revision is derived from its last-modification time.
        let last_modified = FileTime::from_last_modification_time(&metadata);

        Ok(Metadata {
            revision: last_modified.into(),
            permissions: Self::permissions(&metadata),
            file_type: metadata.file_type().into(),
        })
    }

    fn canonicalize_path(&self, path: &SystemPath) -> Result<SystemPathBuf> {
        path.as_utf8_path().canonicalize_utf8().map(|path| {
            // `simplified` strips Windows verbatim (`\\?\`) prefixes.
            SystemPathBuf::from_utf8_path_buf(path)
                .simplified()
                .to_path_buf()
        })
    }

    fn read_to_string(&self, path: &SystemPath) -> Result<String> {
        std::fs::read_to_string(path.as_std_path())
    }

    fn read_to_notebook(&self, path: &SystemPath) -> std::result::Result<Notebook, NotebookError> {
        Notebook::from_path(path.as_std_path())
    }

    // Virtual paths don't exist on the real OS file system: reading one is
    // always "not found".
    fn read_virtual_path_to_string(&self, _path: &SystemVirtualPath) -> Result<String> {
        Err(not_found())
    }

    fn read_virtual_path_to_notebook(
        &self,
        _path: &SystemVirtualPath,
    ) -> std::result::Result<Notebook, NotebookError> {
        Err(NotebookError::from(not_found()))
    }

    fn path_exists(&self, path: &SystemPath) -> bool {
        path.as_std_path().exists()
    }

    fn path_exists_case_sensitive(&self, path: &SystemPath, prefix: &SystemPath) -> bool {
        if self.case_sensitivity().is_case_sensitive() {
            // On a case-sensitive file system, a plain existence check already
            // is case-sensitive.
            self.path_exists(path)
        } else {
            // Try the cheap canonicalize-based check first; fall back to the
            // per-component directory-listing check when it's inconclusive.
            self.path_exists_case_sensitive_fast(path)
                .unwrap_or_else(|| self.path_exists_case_sensitive_slow(path, prefix))
        }
    }

    fn case_sensitivity(&self) -> CaseSensitivity {
        self.inner.case_sensitivity
    }

    fn current_directory(&self) -> &SystemPath {
        &self.inner.cwd
    }

    #[cfg(not(target_arch = "wasm32"))]
    fn user_config_directory(&self) -> Option<SystemPathBuf> {
        // In testing, we allow overriding the user configuration directory by using a
        // thread local because overriding the environment variables breaks test isolation
        // (tests run concurrently) and mutating environment variable in a multithreaded
        // application is inherently unsafe.
        #[cfg(feature = "testing")]
        if let Ok(directory_override) = self.try_get_user_config_directory_override() {
            return directory_override;
        }

        use etcetera::BaseStrategy as _;
        let strategy = etcetera::base_strategy::choose_base_strategy().ok()?;
        SystemPathBuf::from_path_buf(strategy.config_dir()).ok()
    }

    // TODO: Remove this feature gating once `ruff_wasm` no longer indirectly depends on `ruff_db` with the
    // `os` feature enabled (via `ruff_workspace` -> `ruff_graph` -> `ruff_db`).
    #[cfg(target_arch = "wasm32")]
    fn user_config_directory(&self) -> Option<SystemPathBuf> {
        #[cfg(feature = "testing")]
        if let Ok(directory_override) = self.try_get_user_config_directory_override() {
            return directory_override;
        }

        None
    }

    /// Returns an absolute cache directory on the system.
    ///
    /// On Linux and macOS, uses `$XDG_CACHE_HOME/ty` or `.cache/ty`.
    /// On Windows, uses `C:\Users\User\AppData\Local\ty\cache`.
    #[cfg(not(target_arch = "wasm32"))]
    fn cache_dir(&self) -> Option<SystemPathBuf> {
        use etcetera::BaseStrategy as _;

        let cache_dir = etcetera::base_strategy::choose_base_strategy()
            .ok()
            .map(|dirs| dirs.cache_dir().join("ty"))
            .map(|cache_dir| {
                if cfg!(windows) {
                    // On Windows, we append `cache` to the LocalAppData directory, i.e., prefer
                    // `C:\Users\User\AppData\Local\ty\cache` over `C:\Users\User\AppData\Local\ty`.
                    cache_dir.join("cache")
                } else {
                    cache_dir
                }
            })
            .and_then(|path| SystemPathBuf::from_path_buf(path).ok())
            // Last resort: a relative cache directory in the working directory.
            .unwrap_or_else(|| SystemPathBuf::from(".ty_cache"));

        Some(cache_dir)
    }

    // TODO: Remove this feature gating once `ruff_wasm` no longer indirectly depends on `ruff_db` with the
    // `os` feature enabled (via `ruff_workspace` -> `ruff_graph` -> `ruff_db`).
    #[cfg(target_arch = "wasm32")]
    fn cache_dir(&self) -> Option<SystemPathBuf> {
        None
    }

    /// Creates a builder to recursively walk `path`.
    ///
    /// The walker ignores files according to [`ignore::WalkBuilder::standard_filters`]
    /// when setting [`WalkDirectoryBuilder::standard_filters`] to true.
    fn walk_directory(&self, path: &SystemPath) -> WalkDirectoryBuilder {
        WalkDirectoryBuilder::new(
            path,
            OsDirectoryWalker {
                cwd: self.current_directory().to_path_buf(),
            },
        )
    }

    fn glob(
        &self,
        pattern: &str,
    ) -> std::result::Result<
        Box<dyn Iterator<Item = std::result::Result<SystemPathBuf, GlobError>>>,
        glob::PatternError,
    > {
        glob::glob(pattern).map(|inner| {
            // Matches are produced lazily; each is converted to a UTF-8
            // `SystemPathBuf` or reported as a `GlobError`.
            let iterator = inner.map(|result| {
                let path = result?;
                let system_path = SystemPathBuf::from_path_buf(path).map_err(|path| GlobError {
                    path,
                    error: GlobErrorKind::NonUtf8Path,
                })?;
                Ok(system_path)
            });

            let boxed: Box<dyn Iterator<Item = _>> = Box::new(iterator);
            boxed
        })
    }

    fn as_writable(&self) -> Option<&dyn WritableSystem> {
        Some(self)
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }

    fn read_directory(
        &self,
        path: &SystemPath,
    ) -> Result<Box<dyn Iterator<Item = Result<DirectoryEntry>>>> {
        Ok(Box::new(path.as_utf8_path().read_dir_utf8()?.map(|res| {
            let res = res?;
            let file_type = res.file_type()?;
            Ok(DirectoryEntry {
                path: SystemPathBuf::from_utf8_path_buf(res.into_path()),
                file_type: file_type.into(),
            })
        })))
    }

    fn env_var(&self, name: &str) -> std::result::Result<String, std::env::VarError> {
        std::env::var(name)
    }

    fn dyn_clone(&self) -> Box<dyn System> {
        Box::new(self.clone())
    }
}
impl OsSystem {
    /// Case-sensitive existence test that canonicalizes `path` and compares the result with `path`.
    ///
    /// This is faster than the slow path, because it requires a single system call for each path
    /// instead of at least one system call for each component between `path` and `prefix`.
    ///
    /// However, using `canonicalize` to resolve the path's casing doesn't work in two cases:
    /// * if `path` is a symlink, `canonicalize` returns the symlink's target and not the symlink's source path.
    /// * on Windows: If `path` is a mapped network drive, `canonicalize` returns the UNC path
    ///   (e.g. `Z:\` is mapped to `\\server\share` and `canonicalize` returns `\\?\UNC\server\share`).
    ///
    /// Symlinks and mapped network drives should be rare enough that this fast path is worth trying first,
    /// even if it comes at a cost for those rare use cases.
    ///
    /// Returns `None` when the result is inconclusive and the caller should use the slow path.
    fn path_exists_case_sensitive_fast(&self, path: &SystemPath) -> Option<bool> {
        // This is a more forgiving version of `dunce::simplified` that removes all `\\?\` prefixes on Windows.
        // We use this more forgiving version because we don't intend on using either path for anything other than comparison
        // and the prefix is only relevant when passing the path to other programs and it's longer than 200 something
        // characters.
        fn simplify_ignore_verbatim(path: &SystemPath) -> &SystemPath {
            if cfg!(windows) {
                if path.as_utf8_path().as_str().starts_with(r"\\?\") {
                    SystemPath::new(&path.as_utf8_path().as_str()[r"\\?\".len()..])
                } else {
                    path
                }
            } else {
                path
            }
        }

        let Ok(canonicalized) = path.as_std_path().canonicalize() else {
            // The path doesn't exist or can't be accessed; either way, report it as non-existing.
            return Some(false);
        };

        let Ok(canonicalized) = SystemPathBuf::from_path_buf(canonicalized) else {
            // The original path is valid UTF8 but the canonicalized path isn't. This definitely suggests
            // that a symlink is involved. Fall back to the slow path.
            tracing::debug!(
                "Falling back to the slow case-sensitive path existence check because the canonicalized path of `{path}` is not valid UTF-8"
            );
            return None;
        };

        let simplified_canonicalized = simplify_ignore_verbatim(&canonicalized);
        let simplified = simplify_ignore_verbatim(path);

        // Test if the paths differ by anything other than casing. If so, that suggests that
        // `path` pointed to a symlink (or some other none reversible path normalization happened).
        // In this case, fall back to the slow path.
        if simplified_canonicalized.as_str().to_lowercase() != simplified.as_str().to_lowercase() {
            tracing::debug!(
                "Falling back to the slow case-sensitive path existence check for `{simplified}` because the canonicalized path `{simplified_canonicalized}` differs not only by casing"
            );
            return None;
        }

        // If there are no symlinks involved, then `path` exists only if it is the same as the canonicalized path.
        Some(simplified_canonicalized == simplified)
    }

    /// Case-sensitive existence test that checks every path component between `path` and
    /// `prefix` against the cached on-disk directory listings.
    fn path_exists_case_sensitive_slow(&self, path: &SystemPath, prefix: &SystemPath) -> bool {
        // Iterate over the sub-paths up to prefix and check if they match the casing as on disk.
        for ancestor in path.ancestors() {
            if ancestor == prefix {
                break;
            }

            match self.inner.real_case_cache.has_name_case(ancestor) {
                Ok(true) => {
                    // Component has correct casing, continue with next component
                }
                Ok(false) => {
                    // Component has incorrect casing
                    return false;
                }
                Err(_) => {
                    // Directory doesn't exist or can't be accessed. We can assume that the file with
                    // the given casing doesn't exist.
                    return false;
                }
            }
        }

        true
    }
}
impl WritableSystem for OsSystem {
    /// Creates the file at `path`, failing if it already exists.
    fn create_new_file(&self, path: &SystemPath) -> Result<()> {
        // Use `as_std_path()` for consistency with the other operations in this impl.
        std::fs::File::create_new(path.as_std_path()).map(drop)
    }

    /// Writes `content` to `path`, creating or truncating the file.
    fn write_file(&self, path: &SystemPath, content: &str) -> Result<()> {
        std::fs::write(path.as_std_path(), content)
    }

    /// Creates `path` and all missing parent directories.
    fn create_directory_all(&self, path: &SystemPath) -> Result<()> {
        std::fs::create_dir_all(path.as_std_path())
    }
}
impl Default for OsSystem {
    /// An `OsSystem` rooted at the process's current working directory
    /// (falling back to the empty path when it can't be determined or isn't
    /// valid UTF-8).
    fn default() -> Self {
        let cwd = std::env::current_dir().unwrap_or_default();
        Self::new(SystemPathBuf::from_path_buf(cwd).unwrap_or_default())
    }
}
/// Cache of per-directory file-name listings used to answer case-sensitive
/// existence checks on case-insensitive file systems.
#[derive(Debug, Default)]
struct CaseSensitivePathsCache {
    // Keyed by the lower-cased directory path so differently-cased spellings
    // of the same directory share one entry.
    by_lower_case: dashmap::DashMap<SystemPathBuf, ListedDirectory>,
}
impl CaseSensitivePathsCache {
    /// Test if `path`'s file name uses the exact same casing as the file on disk.
    ///
    /// Returns `false` if the file doesn't exist.
    ///
    /// Components other than the file portion are ignored.
    fn has_name_case(&self, path: &SystemPath) -> Result<bool> {
        let Some(parent) = path.parent() else {
            // The root path is always considered to exist.
            return Ok(true);
        };

        let Some(file_name) = path.file_name() else {
            // We can only get here for paths ending in `..` or the root path. Root paths are handled above.
            // Return `true` for paths ending in `..` because `..` is the same regardless of casing.
            return Ok(true);
        };

        // Cache key: the parent directory in lower case, so differently-cased
        // spellings share one cached listing.
        let lower_case_path = SystemPathBuf::from(parent.as_str().to_lowercase());

        // Used to invalidate the cached listing when the directory changes.
        let last_modification_time =
            FileTime::from_last_modification_time(&parent.as_std_path().metadata()?);

        let entry = self.by_lower_case.entry(lower_case_path);

        if let dashmap::Entry::Occupied(entry) = &entry {
            // Only do a cached lookup if the directory hasn't changed.
            if entry.get().last_modification_time == last_modification_time {
                tracing::trace!("Use cached case-sensitive entry for directory `{}`", parent);
                return Ok(entry.get().names.contains(file_name));
            }
        }

        tracing::trace!(
            "Reading directory `{}` for its case-sensitive filenames",
            parent
        );

        let start = std::time::Instant::now();

        let mut names = FxHashSet::default();

        // Collect the exact on-disk names. Entries that fail to read or aren't
        // valid UTF-8 are skipped (they can never match a `SystemPath`).
        for entry in parent.as_std_path().read_dir()? {
            let Ok(entry) = entry else {
                continue;
            };

            let Ok(name) = entry.file_name().into_string() else {
                continue;
            };

            names.insert(name.into_boxed_str());
        }

        // Insert (or replace) the cached listing for this directory.
        let directory = entry.insert(ListedDirectory {
            last_modification_time,
            names,
        });

        tracing::debug!(
            "Caching the case-sensitive paths for directory `{parent}` took {:?}",
            start.elapsed()
        );

        Ok(directory.names.contains(file_name))
    }
}
// NOTE(review): manual opt-in to `RefUnwindSafe` — presumably because the
// `dashmap` field doesn't implement it, and a cache entry left behind by an
// unwound thread is at worst re-read from disk. TODO confirm.
impl RefUnwindSafe for CaseSensitivePathsCache {}
/// The exact on-disk file names of one directory, captured at
/// `last_modification_time` (used for cache invalidation).
#[derive(Debug, Eq, PartialEq)]
struct ListedDirectory {
    last_modification_time: FileTime,
    names: FxHashSet<Box<str>>,
}
/// [`DirectoryWalker`] implementation backed by the `ignore` crate.
#[derive(Debug)]
struct OsDirectoryWalker {
    // Working directory used to resolve relative paths during the walk.
    cwd: SystemPathBuf,
}
impl DirectoryWalker for OsDirectoryWalker {
    /// Walks all `paths` in parallel using [`ignore::WalkBuilder`], feeding
    /// every entry (or error) into a per-thread visitor created by
    /// `visitor_builder`.
    fn walk(
        &self,
        visitor_builder: &mut dyn WalkDirectoryVisitorBuilder,
        configuration: WalkDirectoryConfiguration,
    ) {
        let WalkDirectoryConfiguration {
            paths,
            ignore_hidden: hidden,
            standard_filters,
        } = configuration;

        // Nothing to do without at least one root path.
        let Some((first, additional)) = paths.split_first() else {
            return;
        };

        let mut builder = ignore::WalkBuilder::new(first.as_std_path());
        builder.current_dir(self.cwd.as_std_path());
        builder.standard_filters(standard_filters);
        builder.hidden(hidden);

        for additional_path in additional {
            builder.add(additional_path.as_std_path());
        }

        // Cap the walker's parallelism at 12 threads.
        builder.threads(max_parallelism().min(NonZeroUsize::new(12).unwrap()).get());

        builder.build_parallel().run(|| {
            // One visitor per walker thread.
            let mut visitor = visitor_builder.build();

            Box::new(move |entry| {
                match entry {
                    Ok(entry) => {
                        // SAFETY: The walkdir crate supports `stdin` files and `file_type` can be `None` for these files.
                        // We don't make use of this feature, which is why unwrapping here is ok.
                        let file_type = entry.file_type().unwrap();
                        let depth = entry.depth();

                        // `walkdir` reports errors related to parsing ignore files as part of the entry.
                        // These aren't fatal for us. We should keep going even if an ignore file contains a syntax error.
                        // But we log the error here for better visibility (same as ripgrep, Ruff ignores it)
                        if let Some(error) = entry.error() {
                            tracing::warn!("{error}");
                        }

                        match SystemPathBuf::from_path_buf(entry.into_path()) {
                            Ok(path) => {
                                let directory_entry = walk_directory::DirectoryEntry {
                                    path,
                                    file_type: file_type.into(),
                                    depth,
                                };

                                visitor.visit(Ok(directory_entry)).into()
                            }
                            Err(path) => {
                                visitor.visit(Err(walk_directory::Error {
                                    depth: Some(depth),
                                    kind: walk_directory::ErrorKind::NonUtf8Path { path },
                                }));

                                // Skip the entire directory because all the paths won't be UTF-8 paths.
                                ignore::WalkState::Skip
                            }
                        }
                    }
                    Err(error) => match ignore_to_walk_directory_error(error, None, None) {
                        Ok(error) => visitor.visit(Err(error)).into(),
                        Err(error) => {
                            // This should only be reached when the error is a `.ignore` file related error
                            // (which, should not be reported here but the `ignore` crate doesn't distinguish between ignore and IO errors).
                            // Let's log the error to at least make it visible.
                            tracing::warn!("Failed to traverse directory: {error}.");
                            ignore::WalkState::Continue
                        }
                    },
                }
            })
        });
    }
}
/// Converts an [`ignore::Error`] into a [`walk_directory::Error`].
///
/// `path` and `depth` accumulate the context carried by the `WithPath` /
/// `WithDepth` / `WithLineNumber` wrapper variants while recursing into the
/// wrapped error.
///
/// Returns `Err` with the original error for ignore-file related errors
/// (glob syntax, invalid definitions, …), which the caller logs instead of
/// reporting as a traversal failure.
#[cold]
fn ignore_to_walk_directory_error(
    error: ignore::Error,
    path: Option<PathBuf>,
    depth: Option<usize>,
) -> std::result::Result<walk_directory::Error, ignore::Error> {
    use ignore::Error;

    match error {
        // Unwrap context wrappers, keeping the innermost path/depth seen.
        Error::WithPath { path, err } => ignore_to_walk_directory_error(*err, Some(path), depth),
        Error::WithDepth { err, depth } => ignore_to_walk_directory_error(*err, path, Some(depth)),
        Error::WithLineNumber { err, .. } => ignore_to_walk_directory_error(*err, path, depth),

        Error::Loop { child, ancestor } => {
            match (
                SystemPathBuf::from_path_buf(child),
                SystemPathBuf::from_path_buf(ancestor),
            ) {
                (Ok(child), Ok(ancestor)) => Ok(walk_directory::Error {
                    depth,
                    kind: walk_directory::ErrorKind::Loop { child, ancestor },
                }),
                (Err(child), _) => Ok(walk_directory::Error {
                    depth,
                    kind: walk_directory::ErrorKind::NonUtf8Path { path: child },
                }),
                // We should never reach this because we should never traverse into a non UTF8 path but handle it anyway.
                (_, Err(ancestor)) => Ok(walk_directory::Error {
                    depth,
                    kind: walk_directory::ErrorKind::NonUtf8Path { path: ancestor },
                }),
            }
        }

        Error::Io(err) => match path.map(SystemPathBuf::from_path_buf).transpose() {
            Ok(path) => Ok(walk_directory::Error {
                depth,
                kind: walk_directory::ErrorKind::Io { path, err },
            }),
            Err(path) => Ok(walk_directory::Error {
                depth,
                kind: walk_directory::ErrorKind::NonUtf8Path { path },
            }),
        },

        // Ignore related errors, we warn about them but we don't abort iteration because of them.
        error @ (Error::Glob { .. }
        | Error::UnrecognizedFileType(_)
        | Error::InvalidDefinition
        | Error::Partial(..)) => Err(error),
    }
}
impl From<std::fs::FileType> for FileType {
    /// Maps a standard-library file type onto the simplified [`FileType`]:
    /// anything that's neither a regular file nor a directory is reported as
    /// a symlink.
    fn from(file_type: std::fs::FileType) -> Self {
        if file_type.is_dir() {
            FileType::Directory
        } else if file_type.is_file() {
            FileType::File
        } else {
            FileType::Symlink
        }
    }
}
impl From<WalkState> for ignore::WalkState {
    /// One-to-one translation of the crate's [`WalkState`] into the
    /// equivalent `ignore` crate state.
    fn from(value: WalkState) -> Self {
        match value {
            WalkState::Continue => Self::Continue,
            WalkState::Skip => Self::Skip,
            WalkState::Quit => Self::Quit,
        }
    }
}
/// The error returned for paths (and virtual paths) that don't exist.
fn not_found() -> std::io::Error {
    use std::io::{Error, ErrorKind};

    Error::new(ErrorKind::NotFound, "No such file or directory")
}
#[cfg(feature = "testing")]
pub(super) mod testing {
    use crate::system::{OsSystem, SystemPathBuf};

    impl OsSystem {
        /// Overrides the user configuration directory for the current scope
        /// (for as long as the returned override is not dropped).
        pub fn with_user_config_directory(
            &self,
            directory: Option<SystemPathBuf>,
        ) -> UserConfigDirectoryOverrideGuard {
            let mut directory_override = self.inner.user_config_directory_override.lock().unwrap();
            // Remember the previous override so the guard can restore it on drop.
            let previous = directory_override.replace(directory);

            UserConfigDirectoryOverrideGuard {
                previous,
                system: self.clone(),
            }
        }

        /// Returns [`Ok`] if any override is set and [`Err`] otherwise.
        pub(super) fn try_get_user_config_directory_override(
            &self,
        ) -> Result<Option<SystemPathBuf>, ()> {
            let directory_override = self.inner.user_config_directory_override.lock().unwrap();
            match directory_override.as_ref() {
                Some(directory_override) => Ok(directory_override.clone()),
                None => Err(()),
            }
        }
    }

    /// A scoped override of the [user's configuration directory](crate::System::user_config_directory) for the [`OsSystem`].
    ///
    /// Prefer overriding the user's configuration directory for tests that require
    /// spawning a new process (e.g. CLI tests) by setting the `APPDATA` (windows)
    /// or `XDG_CONFIG_HOME` (unix and other platforms) environment variables.
    /// For example, by setting the environment variables when invoking the CLI with insta.
    ///
    /// Requires the `testing` feature.
    #[must_use]
    pub struct UserConfigDirectoryOverrideGuard {
        // The override that was active before this guard was created.
        previous: Option<Option<SystemPathBuf>>,
        system: OsSystem,
    }

    impl Drop for UserConfigDirectoryOverrideGuard {
        fn drop(&mut self) {
            // `try_lock`: silently skip restoring when the lock is poisoned or
            // held — dropping must not panic or block.
            if let Ok(mut directory_override) =
                self.system.inner.user_config_directory_override.try_lock()
            {
                *directory_override = self.previous.take();
            }
        }
    }
}
/// Case-sensitivity detection for non-Unix platforms.
#[cfg(not(unix))]
fn detect_case_sensitivity(_path: &SystemPath) -> CaseSensitivity {
    // 99% of Windows systems aren't case-sensitive; don't bother checking.
    CaseSensitivity::Unknown
}
/// Detects whether the file system containing `path` is case-sensitive by
/// comparing the inode of `path` with the inode of its all-uppercase spelling.
#[cfg(unix)]
fn detect_case_sensitivity(path: &SystemPath) -> CaseSensitivity {
    use std::os::unix::fs::MetadataExt;

    let Ok(original_case_metadata) = path.as_std_path().metadata() else {
        // `path` itself is inaccessible; nothing to compare against.
        return CaseSensitivity::Unknown;
    };

    let upper_case = SystemPathBuf::from(path.as_str().to_uppercase());
    if &*upper_case == path {
        // `path` contains no lowercase letters, so the probe is inconclusive.
        return CaseSensitivity::Unknown;
    }

    match upper_case.as_std_path().metadata() {
        Ok(uppercase_meta) => {
            // The file system is case insensitive if the upper case and mixed case paths have the same inode.
            if uppercase_meta.ino() == original_case_metadata.ino() {
                CaseSensitivity::CaseInsensitive
            } else {
                CaseSensitivity::CaseSensitive
            }
        }
        // In the error case, the file system is case sensitive if the file in all upper case doesn't exist.
        Err(error) => {
            if error.kind() == std::io::ErrorKind::NotFound {
                CaseSensitivity::CaseSensitive
            } else {
                CaseSensitivity::Unknown
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use tempfile::TempDir;

    use crate::system::DirectoryEntry;
    use crate::system::walk_directory::tests::DirectoryEntryToString;

    use super::*;

    #[test]
    fn read_directory() {
        let tempdir = TempDir::new().unwrap();
        let tempdir_path = tempdir.path();
        std::fs::create_dir_all(tempdir_path.join("a/foo")).unwrap();
        let files = &["b.ts", "a/bar.py", "d.rs", "a/foo/bar.py", "a/baz.pyi"];
        for path in files {
            std::fs::File::create(tempdir_path.join(path)).unwrap();
        }

        let tempdir_path = SystemPath::from_std_path(tempdir_path).unwrap();
        let fs = OsSystem::new(tempdir_path);

        // Sort for a deterministic comparison; `read_dir` order is unspecified.
        let mut sorted_contents: Vec<DirectoryEntry> = fs
            .read_directory(&tempdir_path.join("a"))
            .unwrap()
            .map(Result::unwrap)
            .collect();
        sorted_contents.sort_by(|a, b| a.path.cmp(&b.path));

        // Only the direct children of `a` are expected (non-recursive).
        let expected_contents = vec![
            DirectoryEntry::new(tempdir_path.join("a/bar.py"), FileType::File),
            DirectoryEntry::new(tempdir_path.join("a/baz.pyi"), FileType::File),
            DirectoryEntry::new(tempdir_path.join("a/foo"), FileType::Directory),
        ];
        assert_eq!(sorted_contents, expected_contents)
    }

    #[test]
    fn read_directory_nonexistent() {
        let tempdir = TempDir::new().unwrap();

        let fs = OsSystem::new(SystemPath::from_std_path(tempdir.path()).unwrap());
        let result = fs.read_directory(SystemPath::new("doesnt_exist"));
        assert!(result.is_err_and(|error| error.kind() == std::io::ErrorKind::NotFound));
    }

    #[test]
    fn read_directory_on_file() {
        let tempdir = TempDir::new().unwrap();
        let tempdir_path = tempdir.path();
        std::fs::File::create(tempdir_path.join("a.py")).unwrap();

        let tempdir_path = SystemPath::from_std_path(tempdir_path).unwrap();
        let fs = OsSystem::new(tempdir_path);
        let result = fs.read_directory(&tempdir_path.join("a.py"));
        let Err(error) = result else {
            panic!("Expected the read_dir() call to fail!");
        };

        // We can't assert the error kind here because it's apparently an unstable feature!
        // https://github.com/rust-lang/rust/issues/86442
        // assert_eq!(error.kind(), std::io::ErrorKind::NotADirectory);

        // We can't even assert the error message on all platforms, as it's different on Windows,
        // where the message is "The directory name is invalid" rather than "Not a directory".
        if cfg!(unix) {
            assert!(error.to_string().contains("Not a directory"));
        }
    }

    #[test]
    fn walk_directory() -> std::io::Result<()> {
        let tempdir = TempDir::new()?;

        let root = tempdir.path();
        std::fs::create_dir_all(root.join("a/b"))?;
        std::fs::write(root.join("foo.py"), "print('foo')")?;
        std::fs::write(root.join("a/bar.py"), "print('bar')")?;
        std::fs::write(root.join("a/baz.py"), "print('baz')")?;
        std::fs::write(root.join("a/b/c.py"), "print('c')")?;

        let root_sys = SystemPath::from_std_path(root).unwrap();
        let system = OsSystem::new(root_sys);
        let writer = DirectoryEntryToString::new(root_sys.to_path_buf());

        system.walk_directory(root_sys).run(|| {
            Box::new(|entry| {
                writer.write_entry(entry);

                WalkState::Continue
            })
        });

        // Expected snapshot: every entry with its `FileType` and depth.
        assert_eq!(
            writer.to_string(),
            r#"{
    "": (
        Directory,
        0,
    ),
    "a": (
        Directory,
        1,
    ),
    "a/b": (
        Directory,
        2,
    ),
    "a/b/c.py": (
        File,
        3,
    ),
    "a/bar.py": (
        File,
        2,
    ),
    "a/baz.py": (
        File,
        2,
    ),
    "foo.py": (
        File,
        1,
    ),
}"#
        );

        Ok(())
    }

    #[test]
    fn walk_directory_ignore() -> std::io::Result<()> {
        let tempdir = TempDir::new()?;

        let root = tempdir.path();
        std::fs::create_dir_all(root.join("a/b"))?;
        std::fs::write(root.join("foo.py"), "print('foo')\n")?;
        std::fs::write(root.join("a/bar.py"), "print('bar')\n")?;
        std::fs::write(root.join("a/baz.py"), "print('baz')\n")?;
        // Exclude the `b` directory.
        std::fs::write(root.join("a/.ignore"), "b/\n")?;
        std::fs::write(root.join("a/b/c.py"), "print('c')\n")?;

        let root_sys = SystemPath::from_std_path(root).unwrap();
        let system = OsSystem::new(root_sys);
        let writer = DirectoryEntryToString::new(root_sys.to_path_buf());

        system
            .walk_directory(root_sys)
            .standard_filters(true)
            .run(|| {
                Box::new(|entry| {
                    writer.write_entry(entry);

                    WalkState::Continue
                })
            });

        // `a/b` (and the hidden `.ignore` file) must not appear in the output.
        assert_eq!(
            writer.to_string(),
            r#"{
    "": (
        Directory,
        0,
    ),
    "a": (
        Directory,
        1,
    ),
    "a/bar.py": (
        File,
        2,
    ),
    "a/baz.py": (
        File,
        2,
    ),
    "foo.py": (
        File,
        1,
    ),
}"#
        );

        Ok(())
    }

    #[test]
    fn walk_directory_file() -> std::io::Result<()> {
        let tempdir = TempDir::new()?;

        let root = tempdir.path();
        std::fs::write(root.join("foo.py"), "print('foo')\n")?;

        let root_sys = SystemPath::from_std_path(root).unwrap();
        let system = OsSystem::new(root_sys);
        let writer = DirectoryEntryToString::new(root_sys.to_path_buf());

        // Walking a single file yields exactly that file at depth 0.
        system
            .walk_directory(&root_sys.join("foo.py"))
            .standard_filters(true)
            .run(|| {
                Box::new(|entry| {
                    writer.write_entry(entry);

                    WalkState::Continue
                })
            });

        assert_eq!(
            writer.to_string(),
            r#"{
    "foo.py": (
        File,
        0,
    ),
}"#
        );

        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/system/memory_fs.rs | crates/ruff_db/src/system/memory_fs.rs | use std::collections::{BTreeMap, btree_map};
use std::io;
use std::iter::FusedIterator;
use std::sync::{Arc, RwLock, RwLockWriteGuard};
use camino::{Utf8Path, Utf8PathBuf};
use filetime::FileTime;
use rustc_hash::FxHashMap;
use crate::system::{
DirectoryEntry, FileType, GlobError, GlobErrorKind, Metadata, Result, SystemPath,
SystemPathBuf, SystemVirtualPath, SystemVirtualPathBuf, file_time_now, walk_directory,
};
use super::walk_directory::{
DirectoryWalker, ErrorKind, WalkDirectoryBuilder, WalkDirectoryConfiguration,
WalkDirectoryVisitor, WalkDirectoryVisitorBuilder, WalkState,
};
/// File system that stores all content in memory.
///
/// The file system supports files and directories. Paths are case-sensitive.
///
/// The implementation doesn't aim at fully capturing the behavior of a real file system.
/// The implementation intentionally doesn't support:
/// * symlinks
/// * hardlinks
/// * permissions: All files and directories have the permission 0755.
///
/// Use a tempdir with the real file system to test these advanced file system features and behavior.
#[derive(Clone)]
pub struct MemoryFileSystem {
    // Shared behind an `Arc`: clones are cheap handles onto the same file system.
    inner: Arc<MemoryFileSystemInner>,
}
impl MemoryFileSystem {
/// Permission used by all files and directories (`0o755`, i.e. `rwxr-xr-x`).
const PERMISSION: u32 = 0o755;
/// Creates a new, empty in-memory file system with `/` as the current working directory.
pub fn new() -> Self {
    Self::with_current_directory("/")
}
/// Creates a new, empty in memory file system with the given current working directory.
///
/// # Panics
///
/// Panics if `cwd` is not an absolute path.
pub fn with_current_directory(cwd: impl AsRef<SystemPath>) -> Self {
    let cwd = cwd.as_ref().to_path_buf();

    assert!(
        cwd.starts_with("/"),
        "The current working directory must be an absolute path."
    );

    let fs = Self {
        inner: Arc::new(MemoryFileSystemInner {
            by_path: RwLock::new(BTreeMap::default()),
            virtual_files: RwLock::new(FxHashMap::default()),
            cwd: cwd.clone(),
        }),
    };

    // The working directory itself always exists.
    fs.create_directory_all(&cwd).unwrap();

    fs
}
/// Returns the current working directory of this file system.
pub fn current_directory(&self) -> &SystemPath {
    &self.inner.cwd
}
/// Returns the metadata for the entry at `path`, or a "not found" error if it
/// doesn't exist.
pub fn metadata(&self, path: impl AsRef<SystemPath>) -> Result<Metadata> {
    // Non-generic inner function so the bulk of the work is compiled once,
    // regardless of the caller's concrete path type.
    fn metadata(fs: &MemoryFileSystem, path: &SystemPath) -> Result<Metadata> {
        let by_path = fs.inner.by_path.read().unwrap();
        let normalized = fs.normalize_path(path);

        let entry = by_path.get(&normalized).ok_or_else(not_found)?;

        let metadata = match entry {
            Entry::File(file) => Metadata {
                revision: file.last_modified.into(),
                permissions: Some(MemoryFileSystem::PERMISSION),
                file_type: FileType::File,
            },
            Entry::Directory(directory) => Metadata {
                revision: directory.last_modified.into(),
                permissions: Some(MemoryFileSystem::PERMISSION),
                file_type: FileType::Directory,
            },
        };

        Ok(metadata)
    }

    metadata(self, path.as_ref())
}
/// Returns the normalized form of `path`, erroring if `path` doesn't exist.
pub fn canonicalize(&self, path: impl AsRef<SystemPath>) -> Result<SystemPathBuf> {
    let path = path.as_ref();

    // Mimic the behavior of a real FS where canonicalize errors if the `path` doesn't exist
    self.metadata(path)?;

    Ok(SystemPathBuf::from_utf8_path_buf(self.normalize_path(path)))
}
/// Returns `true` if `path` refers to an existing file (not a directory).
pub fn is_file(&self, path: impl AsRef<SystemPath>) -> bool {
    let normalized = self.normalize_path(path.as_ref());
    self.inner
        .by_path
        .read()
        .unwrap()
        .get(&normalized)
        .is_some_and(|entry| matches!(entry, Entry::File(_)))
}
/// Returns `true` if `path` refers to an existing directory.
pub fn is_directory(&self, path: impl AsRef<SystemPath>) -> bool {
    let normalized = self.normalize_path(path.as_ref());
    self.inner
        .by_path
        .read()
        .unwrap()
        .get(&normalized)
        .is_some_and(|entry| matches!(entry, Entry::Directory(_)))
}
/// Returns the content of the file at `path`.
///
/// Errors with "not found" if the path doesn't exist, or "is a directory" if
/// it refers to a directory.
pub fn read_to_string(&self, path: impl AsRef<SystemPath>) -> Result<String> {
    // Non-generic inner function: compiled once for all caller path types.
    fn read_to_string(fs: &MemoryFileSystem, path: &SystemPath) -> Result<String> {
        let by_path = fs.inner.by_path.read().unwrap();
        let normalized = fs.normalize_path(path);

        let entry = by_path.get(&normalized).ok_or_else(not_found)?;

        match entry {
            Entry::File(file) => Ok(file.content.clone()),
            Entry::Directory(_) => Err(is_a_directory()),
        }
    }

    read_to_string(self, path.as_ref())
}
pub(crate) fn read_virtual_path_to_string(
&self,
path: impl AsRef<SystemVirtualPath>,
) -> Result<String> {
let virtual_files = self.inner.virtual_files.read().unwrap();
let file = virtual_files
.get(&path.as_ref().to_path_buf())
.ok_or_else(not_found)?;
Ok(file.content.clone())
}
/// Returns `true` if an entry (file or directory) exists at `path`.
pub fn exists(&self, path: &SystemPath) -> bool {
    let normalized = self.normalize_path(path);
    self.inner.by_path.read().unwrap().contains_key(&normalized)
}
/// Returns `true` if a virtual file was registered under `path`.
pub fn virtual_path_exists(&self, path: &SystemVirtualPath) -> bool {
    self.inner
        .virtual_files
        .read()
        .unwrap()
        .contains_key(&path.to_path_buf())
}
/// Creates an empty file at `path`, failing with `AlreadyExists` when an
/// entry (file or directory) already exists there.
pub(crate) fn create_new_file(&self, path: &SystemPath) -> Result<()> {
    let normalized = self.normalize_path(path);

    let mut by_path = self.inner.by_path.write().unwrap();
    match by_path.entry(normalized) {
        btree_map::Entry::Vacant(entry) => {
            entry.insert(Entry::File(File {
                content: String::new(),
                last_modified: file_time_now(),
            }));

            Ok(())
        }
        btree_map::Entry::Occupied(_) => Err(io::Error::new(
            io::ErrorKind::AlreadyExists,
            "File already exists",
        )),
    }
}
/// Stores a new file in the file system.
///
/// The operation overrides the content for an existing file with the same normalized `path`,
/// and refreshes its last-modification time.
pub fn write_file(&self, path: impl AsRef<SystemPath>, content: impl ToString) -> Result<()> {
    let mut by_path = self.inner.by_path.write().unwrap();

    let normalized = self.normalize_path(path.as_ref());

    // Fails when a directory occupies `path` or a parent isn't a directory.
    let file = get_or_create_file(&mut by_path, &normalized)?;
    file.content = content.to_string();
    file.last_modified = file_time_now();

    Ok(())
}
/// Writes the files to the file system.
///
/// The operation overrides existing files with the same normalized path.
///
/// Enclosing directories are automatically created if they don't exist.
pub fn write_files_all<P, C>(&self, files: impl IntoIterator<Item = (P, C)>) -> Result<()>
where
    P: AsRef<SystemPath>,
    C: ToString,
{
    // Stop at the first failing write, propagating its error.
    files
        .into_iter()
        .try_for_each(|(path, content)| self.write_file_all(path.as_ref(), content.to_string()))
}
/// Stores a new file in the file system.
///
/// The operation overrides the content for an existing file with the same normalized `path`.
///
/// Enclosing directories are automatically created if they don't exist.
pub fn write_file_all(
    &self,
    path: impl AsRef<SystemPath>,
    content: impl ToString,
) -> Result<()> {
    let path = path.as_ref();

    // Create missing parents first, then delegate the actual write.
    if let Some(parent) = path.parent() {
        self.create_directory_all(parent)?;
    }

    self.write_file(path, content)
}
/// Stores a new virtual file in the file system.
///
/// The operation overrides the content for an existing virtual file with the same `path`.
///
/// NOTE(review): unlike `write_file`, overwriting an existing virtual file
/// does not refresh `last_modified` — confirm whether that's intentional.
pub fn write_virtual_file(&self, path: impl AsRef<SystemVirtualPath>, content: impl ToString) {
    let path = path.as_ref();
    let mut virtual_files = self.inner.virtual_files.write().unwrap();

    match virtual_files.entry(path.to_path_buf()) {
        std::collections::hash_map::Entry::Vacant(entry) => {
            entry.insert(File {
                content: content.to_string(),
                last_modified: file_time_now(),
            });
        }
        std::collections::hash_map::Entry::Occupied(mut entry) => {
            entry.get_mut().content = content.to_string();
        }
    }
}
/// Returns a builder for walking the directory tree of `path`.
///
/// The only files that are ignored when setting `WalkDirectoryBuilder::standard_filters`
/// are hidden files (files with a name starting with a `.`).
pub fn walk_directory(&self, path: impl AsRef<SystemPath>) -> WalkDirectoryBuilder {
    // The walker holds its own cheap clone of the file system.
    WalkDirectoryBuilder::new(path, MemoryWalker { fs: self.clone() })
}
/// Returns an iterator over all stored paths matching `pattern`.
///
/// # Errors
///
/// Returns a [`glob::PatternError`] if the normalized pattern is not a valid
/// glob expression.
pub fn glob(
    &self,
    pattern: &str,
) -> std::result::Result<
    impl Iterator<Item = std::result::Result<SystemPathBuf, GlobError>> + '_,
    glob::PatternError,
> {
    // Very naive implementation that iterates over all files and collects all that match the given pattern.
    let normalized = self.normalize_path(pattern);
    let pattern = glob::Pattern::new(normalized.as_str())?;
    // The visitor may be invoked from multiple walker contexts, so the
    // collected matches are guarded by a mutex.
    let matches = std::sync::Mutex::new(Vec::new());
    self.walk_directory("/").standard_filters(false).run(|| {
        Box::new(|entry| {
            match entry {
                Ok(entry) => {
                    if pattern.matches_path(entry.path().as_std_path()) {
                        matches.lock().unwrap().push(Ok(entry.into_path()));
                    }
                }
                Err(error) => match error.kind {
                    ErrorKind::Loop { .. } => {
                        unreachable!("Loops aren't possible in the memory file system because it doesn't support symlinks.")
                    }
                    ErrorKind::Io { err, path } => {
                        matches.lock().unwrap().push(Err(GlobError { path: path.expect("walk_directory to always set a path").into_std_path_buf(), error: GlobErrorKind::IOError(err)}));
                    }
                    ErrorKind::NonUtf8Path { path } => {
                        matches.lock().unwrap().push(Err(GlobError { path, error: GlobErrorKind::NonUtf8Path}));
                    }
                },
            }
            WalkState::Continue
        })
    });
    // The walk has finished, so the mutex can be unwrapped into its contents.
    Ok(matches.into_inner().unwrap().into_iter())
}
/// Removes the file at `path`.
///
/// # Errors
///
/// * `NotFound` if no entry exists at `path`.
/// * "Is a directory" if the entry at `path` is a directory.
pub fn remove_file(&self, path: impl AsRef<SystemPath>) -> Result<()> {
    // Non-generic inner function keeps the generic shim thin.
    fn inner(fs: &MemoryFileSystem, path: &SystemPath) -> Result<()> {
        let normalized = fs.normalize_path(path);
        let mut by_path = fs.inner.by_path.write().unwrap();
        let btree_map::Entry::Occupied(occupied) = by_path.entry(normalized) else {
            return Err(not_found());
        };
        if occupied.get().is_file() {
            occupied.remove();
            Ok(())
        } else {
            Err(is_a_directory())
        }
    }
    inner(self, path.as_ref())
}
/// Removes the virtual file at `path`.
///
/// # Errors
///
/// `NotFound` if no virtual file exists for `path`.
pub fn remove_virtual_file(&self, path: impl AsRef<SystemVirtualPath>) -> Result<()> {
    let mut virtual_files = self.inner.virtual_files.write().unwrap();
    // `remove` returns the evicted file, which doubles as the existence check.
    match virtual_files.remove(&path.as_ref().to_path_buf()) {
        Some(_) => Ok(()),
        None => Err(not_found()),
    }
}
/// Sets the last modified timestamp of the file stored at `path` to now.
///
/// Creates a new file if the file at `path` doesn't exist.
pub fn touch(&self, path: impl AsRef<SystemPath>) -> Result<()> {
    let mut by_path = self.inner.by_path.write().unwrap();
    let normalized = self.normalize_path(path.as_ref());
    // Fails (and propagates) if a parent directory is missing or `path`
    // resolves to a directory.
    get_or_create_file(&mut by_path, &normalized)?.last_modified = file_time_now();
    Ok(())
}
/// Creates a directory at `path`. All enclosing directories are created if they don't exist.
///
/// # Errors
///
/// Fails if any component of the normalized path already exists as a file.
pub fn create_directory_all(&self, path: impl AsRef<SystemPath>) -> Result<()> {
    let mut by_path = self.inner.by_path.write().unwrap();
    let normalized = self.normalize_path(path.as_ref());
    create_dir_all(&mut by_path, &normalized)
}
/// Deletes the directory at `path`.
///
/// ## Errors
/// * If the directory is not empty
/// * The `path` is not a directory
/// * The `path` does not exist
pub fn remove_directory(&self, path: impl AsRef<SystemPath>) -> Result<()> {
    // Non-generic inner function keeps the generic outer fn a thin shim.
    fn remove_directory(fs: &MemoryFileSystem, path: &SystemPath) -> Result<()> {
        let mut by_path = fs.inner.by_path.write().unwrap();
        let normalized = fs.normalize_path(path);
        // Test if the directory is empty
        // Skip the directory path itself
        for (maybe_child, _) in by_path.range(normalized.clone()..).skip(1) {
            // `Utf8Path::starts_with` compares whole components, so e.g.
            // `/foo_bar.py` is *not* considered a child of `/foo`.
            if maybe_child.starts_with(&normalized) {
                return Err(directory_not_empty());
            } else if !maybe_child.as_str().starts_with(normalized.as_str()) {
                // Keys are sorted: once the *string* prefix no longer matches,
                // no later key can be a descendant either, so stop scanning.
                break;
            }
        }
        match by_path.entry(normalized.clone()) {
            btree_map::Entry::Occupied(entry) => match entry.get() {
                Entry::Directory(_) => {
                    entry.remove();
                    Ok(())
                }
                Entry::File(_) => Err(not_a_directory()),
            },
            btree_map::Entry::Vacant(_) => Err(not_found()),
        }
    }
    remove_directory(self, path.as_ref())
}
/// Absolutizes `path` against the file system's working directory and
/// returns the result as a plain UTF-8 path (the `by_path` key type).
fn normalize_path(&self, path: impl AsRef<SystemPath>) -> Utf8PathBuf {
    SystemPath::absolute(path, &self.inner.cwd).into_utf8_path_buf()
}
/// Returns an iterator over the *direct* children of the directory at `path`.
///
/// # Errors
///
/// Fails with `NotFound` if `path` doesn't exist, or "not a directory" if it
/// points to a file.
pub fn read_directory(&self, path: impl AsRef<SystemPath>) -> Result<ReadDirectory> {
    let by_path = self.inner.by_path.read().unwrap();
    let normalized = self.normalize_path(path.as_ref());
    let entry = by_path.get(&normalized).ok_or_else(not_found)?;
    if entry.is_file() {
        return Err(not_a_directory());
    };
    // Collect the entries into a vector to avoid deadlocks when the
    // consumer calls into other file system methods while iterating over the
    // directory entries.
    let collected = by_path
        .range(normalized.clone()..)
        .skip(1)
        .take_while(|(path, _)| path.starts_with(&normalized))
        .filter_map(|(path, entry)| {
            // Only direct children: deeper descendants have a different parent.
            if path.parent()? == normalized {
                Some(Ok(DirectoryEntry {
                    path: SystemPathBuf::from_utf8_path_buf(path.to_owned()),
                    file_type: entry.file_type(),
                }))
            } else {
                None
            }
        })
        .collect();
    Ok(ReadDirectory::new(collected))
}
/// Removes all files and directories except the current working directory.
pub fn remove_all(&self) {
    self.inner.virtual_files.write().unwrap().clear();
    // Keep only the cwd entry so the file system remains usable afterwards.
    self.inner
        .by_path
        .write()
        .unwrap()
        .retain(|key, _| key == self.inner.cwd.as_utf8_path());
}
}
impl Default for MemoryFileSystem {
fn default() -> Self {
MemoryFileSystem::new()
}
}
impl std::fmt::Debug for MemoryFileSystem {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Renders the regular file tree as a map; virtual files are omitted.
        let paths = self.inner.by_path.read().unwrap();
        f.debug_map().entries(paths.iter()).finish()
    }
}
/// State shared by all clones of a `MemoryFileSystem` handle.
struct MemoryFileSystemInner {
    /// All regular files and directories, keyed by their normalized (absolute) path.
    /// A `BTreeMap` keeps keys sorted, so an entry's descendants follow it directly.
    by_path: RwLock<BTreeMap<Utf8PathBuf, Entry>>,
    /// Virtual files live in a separate, unordered namespace.
    virtual_files: RwLock<FxHashMap<SystemVirtualPathBuf, File>>,
    /// Working directory against which relative paths are absolutized.
    cwd: SystemPathBuf,
}
/// A node in the in-memory file tree: either a file or a directory.
#[derive(Debug)]
enum Entry {
    File(File),
    Directory(Directory),
}
impl Entry {
    const fn is_file(&self) -> bool {
        matches!(self, Entry::File(_))
    }
    const fn file_type(&self) -> FileType {
        match self {
            Self::File(_) => FileType::File,
            Self::Directory(_) => FileType::Directory,
        }
    }
}
/// An in-memory regular file: its full content plus a modification timestamp.
#[derive(Debug)]
struct File {
    content: String,
    last_modified: FileTime,
}
/// An in-memory directory; only its modification timestamp is tracked.
#[derive(Debug)]
struct Directory {
    last_modified: FileTime,
}
/// `ErrorKind::NotFound`: the path has no entry in the file system.
fn not_found() -> std::io::Error {
    std::io::Error::new(std::io::ErrorKind::NotFound, "No such file or directory")
}
fn is_a_directory() -> std::io::Error {
    // Note: Rust has `ErrorKind::IsADirectory` for this, but the variant was
    // nightly-only when this was written (it has since been stabilized).
    // Tests in this file assert `ErrorKind::Other`, so switching now would be
    // a behavioral change.
    std::io::Error::other("Is a directory")
}
fn not_a_directory() -> std::io::Error {
    // Note: Rust has `ErrorKind::NotADirectory` for this, but the variant was
    // nightly-only when this was written (it has since been stabilized).
    // Tests in this file assert `ErrorKind::Other`, so switching now would be
    // a behavioral change.
    std::io::Error::other("Not a directory")
}
/// `ErrorKind::Other`: refusing to remove a directory that still has children.
fn directory_not_empty() -> std::io::Error {
    std::io::Error::other("directory not empty")
}
/// Creates `normalized` and every missing ancestor directory.
///
/// Walks the path component by component, inserting a directory entry for
/// each prefix. Fails with "not a directory" if any prefix already exists
/// as a file.
fn create_dir_all(
    paths: &mut RwLockWriteGuard<BTreeMap<Utf8PathBuf, Entry>>,
    normalized: &Utf8Path,
) -> Result<()> {
    let mut path = Utf8PathBuf::new();
    for component in normalized.components() {
        path.push(component);
        // Existing entries are left untouched; only missing prefixes are created.
        let entry = paths.entry(path.clone()).or_insert_with(|| {
            Entry::Directory(Directory {
                last_modified: file_time_now(),
            })
        });
        if entry.is_file() {
            return Err(not_a_directory());
        }
    }
    Ok(())
}
/// Returns a mutable reference to the file at `normalized`, creating an empty
/// file if none exists yet.
///
/// # Errors
///
/// * `NotFound` if the parent directory doesn't exist.
/// * "Not a directory" if the parent entry is a file.
/// * "Is a directory" if `normalized` itself resolves to a directory.
fn get_or_create_file<'a>(
    paths: &'a mut RwLockWriteGuard<BTreeMap<Utf8PathBuf, Entry>>,
    normalized: &Utf8Path,
) -> Result<&'a mut File> {
    if let Some(parent) = normalized.parent() {
        let parent_entry = paths.get(parent).ok_or_else(not_found)?;
        if parent_entry.is_file() {
            return Err(not_a_directory());
        }
    }
    // Insert an empty file when the path is vacant; an existing entry is
    // returned untouched so callers decide how to update it.
    let entry = paths.entry(normalized.to_path_buf()).or_insert_with(|| {
        Entry::File(File {
            content: String::new(),
            last_modified: file_time_now(),
        })
    });
    match entry {
        Entry::File(file) => Ok(file),
        Entry::Directory(_) => Err(is_a_directory()),
    }
}
/// Eagerly collected directory listing returned by `read_directory`.
///
/// Entries are buffered up front, so iterating never holds the file-system lock.
#[derive(Debug)]
pub struct ReadDirectory {
    entries: std::vec::IntoIter<Result<DirectoryEntry>>,
}
impl ReadDirectory {
    fn new(entries: Vec<Result<DirectoryEntry>>) -> Self {
        Self {
            entries: entries.into_iter(),
        }
    }
}
impl Iterator for ReadDirectory {
    type Item = std::io::Result<DirectoryEntry>;
    fn next(&mut self) -> Option<Self::Item> {
        self.entries.next()
    }
}
// `vec::IntoIter` is fused, so this forwarding iterator is too.
impl FusedIterator for ReadDirectory {}
/// Recursively walks a directory in the memory file system.
#[derive(Debug)]
struct MemoryWalker {
    fs: MemoryFileSystem,
}
impl MemoryWalker {
    /// Reports `entry` to `visitor` and, for directories, schedules the
    /// directory's children for traversal.
    ///
    /// Hidden entries (name starting with `.`) are skipped only when they are
    /// *files*; hidden directories are still reported and traversed.
    fn visit_entry(
        &self,
        visitor: &mut dyn WalkDirectoryVisitor,
        entry: walk_directory::DirectoryEntry,
        queue: &mut Vec<WalkerState>,
        ignore_hidden: bool,
    ) -> WalkState {
        if entry.file_type().is_directory() {
            let path = entry.path.clone();
            let depth = entry.depth;
            let state = visitor.visit(Ok(entry));
            // Only descend when the visitor didn't ask to skip or quit.
            if matches!(state, WalkState::Continue) {
                queue.push(WalkerState::Nested {
                    path,
                    depth: depth + 1,
                });
            }
            state
        } else if ignore_hidden
            && entry
                .path
                .file_name()
                .is_some_and(|name| name.starts_with('.'))
        {
            WalkState::Skip
        } else {
            visitor.visit(Ok(entry))
        }
    }
}
impl DirectoryWalker for MemoryWalker {
    /// Depth-first traversal driven by an explicit stack instead of recursion.
    ///
    /// Errors (missing roots, unreadable directories) are reported to the
    /// visitor rather than aborting the walk; only `WalkState::Quit` stops it.
    fn walk(
        &self,
        builder: &mut dyn WalkDirectoryVisitorBuilder,
        configuration: WalkDirectoryConfiguration,
    ) {
        let WalkDirectoryConfiguration {
            paths,
            ignore_hidden,
            standard_filters: _,
        } = configuration;
        let mut visitor = builder.build();
        let mut queue: Vec<_> = paths
            .into_iter()
            .map(|path| WalkerState::Start { path })
            .collect();
        while let Some(state) = queue.pop() {
            let (path, depth) = match state {
                WalkerState::Start { path } => {
                    // Root paths are reported themselves (at depth 0) before
                    // any of their children are enqueued.
                    match self.fs.metadata(&path) {
                        Ok(metadata) => {
                            let entry = walk_directory::DirectoryEntry {
                                file_type: metadata.file_type,
                                depth: 0,
                                path,
                            };
                            if self.visit_entry(&mut *visitor, entry, &mut queue, ignore_hidden)
                                == WalkState::Quit
                            {
                                return;
                            }
                        }
                        Err(error) => {
                            visitor.visit(Err(walk_directory::Error {
                                depth: Some(0),
                                kind: walk_directory::ErrorKind::Io {
                                    path: Some(path),
                                    err: error,
                                },
                            }));
                        }
                    }
                    continue;
                }
                WalkerState::Nested { path, depth } => (path, depth),
            };
            // Use `read_directory` here instead of locking `by_path` to avoid deadlocks
            // when the `visitor` calls any file system operations.
            let entries = match self.fs.read_directory(&path) {
                Ok(entries) => entries,
                Err(error) => {
                    visitor.visit(Err(walk_directory::Error {
                        depth: Some(depth),
                        kind: walk_directory::ErrorKind::Io {
                            path: Some(path),
                            err: error,
                        },
                    }));
                    continue;
                }
            };
            for entry in entries {
                match entry {
                    Ok(entry) => {
                        let entry = walk_directory::DirectoryEntry {
                            file_type: entry.file_type,
                            depth,
                            path: entry.path,
                        };
                        if self.visit_entry(&mut *visitor, entry, &mut queue, ignore_hidden)
                            == WalkState::Quit
                        {
                            return;
                        }
                    }
                    Err(error) => {
                        visitor.visit(Err(walk_directory::Error {
                            depth: Some(depth),
                            kind: walk_directory::ErrorKind::Io {
                                path: Some(path.clone()),
                                err: error,
                            },
                        }));
                    }
                }
            }
        }
    }
}
/// Work item on the walker's explicit traversal stack.
#[derive(Debug)]
enum WalkerState {
    /// An entry path that was directly provided to the walker. Always has depth 0.
    Start { path: SystemPathBuf },
    /// Traverse into the directory with the given path at the given depth.
    Nested { path: SystemPathBuf, depth: usize },
}
#[cfg(test)]
mod tests {
use std::io::ErrorKind;
use std::time::Duration;
use crate::system::walk_directory::WalkState;
use crate::system::walk_directory::tests::DirectoryEntryToString;
use crate::system::{
DirectoryEntry, FileType, MemoryFileSystem, Result, SystemPath, SystemPathBuf,
SystemVirtualPath,
};
/// Creates a file system with the given files.
///
/// The content of all files will be empty.
fn with_files<P>(files: impl IntoIterator<Item = P>) -> super::MemoryFileSystem
where
P: AsRef<SystemPath>,
{
let fs = MemoryFileSystem::new();
fs.write_files_all(files.into_iter().map(|path| (path, "")))
.unwrap();
fs
}
#[test]
fn is_file() {
let path = SystemPath::new("a.py");
let fs = with_files([path]);
assert!(fs.is_file(path));
assert!(!fs.is_directory(path));
}
#[test]
fn exists() {
let fs = with_files(["a.py"]);
assert!(fs.exists(SystemPath::new("a.py")));
assert!(!fs.exists(SystemPath::new("b.py")));
}
#[test]
fn exists_directories() {
let fs = with_files(["a/b/c.py"]);
assert!(fs.exists(SystemPath::new("a")));
assert!(fs.exists(SystemPath::new("a/b")));
assert!(fs.exists(SystemPath::new("a/b/c.py")));
}
#[test]
fn path_normalization() {
let fs = with_files(["a.py"]);
assert!(fs.exists(SystemPath::new("a.py")));
assert!(fs.exists(SystemPath::new("/a.py")));
assert!(fs.exists(SystemPath::new("/b/./../a.py")));
}
#[test]
fn permissions() -> Result<()> {
let fs = with_files(["a.py"]);
// The default permissions match the default on Linux: 0755
assert_eq!(
fs.metadata(SystemPath::new("a.py"))?.permissions(),
Some(MemoryFileSystem::PERMISSION)
);
Ok(())
}
#[test]
fn touch() -> Result<()> {
let fs = MemoryFileSystem::new();
let path = SystemPath::new("a.py");
// Creates a file if it doesn't exist
fs.touch(path)?;
assert!(fs.exists(path));
let timestamp1 = fs.metadata(path)?.revision();
// Sleep to ensure that the timestamp changes
std::thread::sleep(Duration::from_millis(1));
fs.touch(path)?;
let timestamp2 = fs.metadata(path)?.revision();
assert_ne!(timestamp1, timestamp2);
Ok(())
}
#[test]
fn create_dir_all() {
let fs = MemoryFileSystem::new();
fs.create_directory_all(SystemPath::new("a/b/c")).unwrap();
assert!(fs.is_directory(SystemPath::new("a")));
assert!(fs.is_directory(SystemPath::new("a/b")));
assert!(fs.is_directory(SystemPath::new("a/b/c")));
// Should not fail if the directory already exists
fs.create_directory_all(SystemPath::new("a/b/c")).unwrap();
}
#[test]
fn create_dir_all_fails_if_a_component_is_a_file() {
let fs = with_files(["a/b.py"]);
let error = fs
.create_directory_all(SystemPath::new("a/b.py/c"))
.unwrap_err();
assert_eq!(error.kind(), ErrorKind::Other);
}
#[test]
fn write_file_fails_if_a_parent_directory_is_missing() {
let fs = with_files(["c.py"]);
let error = fs
.write_file(SystemPath::new("a/b.py"), "content".to_string())
.unwrap_err();
assert_eq!(error.kind(), ErrorKind::NotFound);
}
#[test]
fn write_file_all_fails_if_a_component_is_a_file() {
let fs = with_files(["a/b.py"]);
let error = fs
.write_file_all(SystemPath::new("a/b.py/c"), "content".to_string())
.unwrap_err();
assert_eq!(error.kind(), ErrorKind::Other);
}
#[test]
fn write_virtual_file() {
let fs = MemoryFileSystem::new();
fs.write_virtual_file("a", "content");
let error = fs.read_to_string("a").unwrap_err();
assert_eq!(error.kind(), ErrorKind::NotFound);
assert_eq!(fs.read_virtual_path_to_string("a").unwrap(), "content");
}
#[test]
fn read() -> Result<()> {
let fs = MemoryFileSystem::new();
let path = SystemPath::new("a.py");
fs.write_file_all(path, "Test content".to_string())?;
assert_eq!(fs.read_to_string(path)?, "Test content");
Ok(())
}
#[test]
fn read_fails_if_path_is_a_directory() -> Result<()> {
let fs = MemoryFileSystem::new();
fs.create_directory_all("a")?;
let error = fs.read_to_string(SystemPath::new("a")).unwrap_err();
assert_eq!(error.kind(), ErrorKind::Other);
Ok(())
}
#[test]
fn read_fails_if_path_doesnt_exist() -> Result<()> {
let fs = MemoryFileSystem::new();
let error = fs.read_to_string(SystemPath::new("a")).unwrap_err();
assert_eq!(error.kind(), ErrorKind::NotFound);
Ok(())
}
#[test]
fn write_file_fails_if_path_points_to_a_directory() -> Result<()> {
let fs = MemoryFileSystem::new();
fs.create_directory_all("a")?;
let error = fs
.write_file(SystemPath::new("a"), "content".to_string())
.unwrap_err();
assert_eq!(error.kind(), ErrorKind::Other);
Ok(())
}
#[test]
fn read_fails_if_virtual_path_doesnt_exit() {
let fs = MemoryFileSystem::new();
let error = fs.read_virtual_path_to_string("a").unwrap_err();
assert_eq!(error.kind(), ErrorKind::NotFound);
}
#[test]
fn remove_file() -> Result<()> {
let fs = with_files(["a/a.py", "b.py"]);
fs.remove_file("a/a.py")?;
assert!(!fs.exists(SystemPath::new("a/a.py")));
// It doesn't delete the enclosing directories
assert!(fs.exists(SystemPath::new("a")));
// It doesn't delete unrelated files.
assert!(fs.exists(SystemPath::new("b.py")));
Ok(())
}
#[test]
fn remove_virtual_file() {
let fs = MemoryFileSystem::new();
fs.write_virtual_file("a", "content");
fs.write_virtual_file("b", "content");
fs.remove_virtual_file("a").unwrap();
assert!(!fs.virtual_path_exists(SystemVirtualPath::new("a")));
assert!(fs.virtual_path_exists(SystemVirtualPath::new("b")));
}
#[test]
fn remove_non_existing_file() {
let fs = with_files(["b.py"]);
let error = fs.remove_file("a.py").unwrap_err();
assert_eq!(error.kind(), ErrorKind::NotFound);
}
#[test]
fn remove_file_that_is_a_directory() -> Result<()> {
let fs = MemoryFileSystem::new();
fs.create_directory_all("a")?;
let error = fs.remove_file("a").unwrap_err();
assert_eq!(error.kind(), ErrorKind::Other);
Ok(())
}
#[test]
fn remove_directory() -> Result<()> {
let fs = with_files(["b.py"]);
fs.create_directory_all("a")?;
fs.remove_directory("a")?;
assert!(!fs.exists(SystemPath::new("a")));
// It doesn't delete unrelated files.
assert!(fs.exists(SystemPath::new("b.py")));
Ok(())
}
#[test]
fn remove_non_empty_directory() {
let fs = with_files(["a/a.py"]);
let error = fs.remove_directory("a").unwrap_err();
assert_eq!(error.kind(), ErrorKind::Other);
}
#[test]
fn remove_directory_with_files_that_start_with_the_same_string() -> Result<()> {
let fs = with_files(["foo_bar.py", "foob.py"]);
fs.create_directory_all("foo")?;
fs.remove_directory("foo").unwrap();
assert!(!fs.exists(SystemPath::new("foo")));
assert!(fs.exists(SystemPath::new("foo_bar.py")));
assert!(fs.exists(SystemPath::new("foob.py")));
Ok(())
}
#[test]
fn remove_non_existing_directory() {
let fs = MemoryFileSystem::new();
let error = fs.remove_directory("a").unwrap_err();
assert_eq!(error.kind(), ErrorKind::NotFound);
}
#[test]
fn remove_directory_that_is_a_file() {
let fs = with_files(["a"]);
let error = fs.remove_directory("a").unwrap_err();
assert_eq!(error.kind(), ErrorKind::Other);
}
#[test]
fn read_directory() {
let fs = with_files(["b.ts", "a/bar.py", "d.rs", "a/foo/bar.py", "a/baz.pyi"]);
let contents: Vec<DirectoryEntry> = fs
.read_directory("a")
.unwrap()
.map(Result::unwrap)
.collect();
let expected_contents = vec![
DirectoryEntry::new(SystemPathBuf::from("/a/bar.py"), FileType::File),
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/vendored/path.rs | crates/ruff_db/src/vendored/path.rs | use std::borrow::Borrow;
use std::fmt::Formatter;
use std::ops::Deref;
use std::path;
use camino::{Utf8Components, Utf8Path, Utf8PathBuf};
/// A borrowed, always-UTF-8 path into the vendored (embedded) file system.
///
/// Thin wrapper around [`Utf8Path`]; `#[repr(transparent)]` makes the
/// reference cast in [`VendoredPath::new`] sound.
#[repr(transparent)]
#[derive(Debug, Eq, PartialEq, Hash)]
pub struct VendoredPath(Utf8Path);
impl VendoredPath {
    /// Wraps a `Utf8Path`-like reference as a `VendoredPath` without allocating.
    pub fn new(path: &(impl AsRef<Utf8Path> + ?Sized)) -> &Self {
        let path = path.as_ref();
        // SAFETY: VendoredPath is marked as #[repr(transparent)] so the conversion from a
        // *const Utf8Path to a *const VendoredPath is valid.
        unsafe { &*(path as *const Utf8Path as *const VendoredPath) }
    }
    pub fn file_name(&self) -> Option<&str> {
        self.0.file_name()
    }
    pub fn to_path_buf(&self) -> VendoredPathBuf {
        VendoredPathBuf(self.0.to_path_buf())
    }
    pub fn as_str(&self) -> &str {
        self.0.as_str()
    }
    pub fn as_utf8_path(&self) -> &camino::Utf8Path {
        &self.0
    }
    pub fn as_std_path(&self) -> &path::Path {
        self.0.as_std_path()
    }
    pub fn components(&self) -> Utf8Components<'_> {
        self.0.components()
    }
    #[must_use]
    pub fn extension(&self) -> Option<&str> {
        self.0.extension()
    }
    /// Returns a new path with the extension replaced by `pyi`.
    #[must_use]
    pub fn with_pyi_extension(&self) -> VendoredPathBuf {
        VendoredPathBuf(self.0.with_extension("pyi"))
    }
    #[must_use]
    pub fn join(&self, other: impl AsRef<VendoredPath>) -> VendoredPathBuf {
        VendoredPathBuf(self.0.join(other.as_ref()))
    }
    #[must_use]
    pub fn ends_with(&self, suffix: impl AsRef<VendoredPath>) -> bool {
        self.0.ends_with(suffix.as_ref())
    }
    #[must_use]
    pub fn parent(&self) -> Option<&Self> {
        self.0.parent().map(Self::new)
    }
    #[must_use]
    pub fn file_stem(&self) -> Option<&str> {
        self.0.file_stem()
    }
    pub fn strip_prefix(
        &self,
        prefix: impl AsRef<VendoredPath>,
    ) -> Result<&Self, path::StripPrefixError> {
        self.0.strip_prefix(prefix.as_ref()).map(Self::new)
    }
}
impl ToOwned for VendoredPath {
    type Owned = VendoredPathBuf;
    fn to_owned(&self) -> VendoredPathBuf {
        self.to_path_buf()
    }
}
/// An owned, always-UTF-8 path into the vendored file system.
#[repr(transparent)]
#[derive(Debug, Eq, PartialEq, Clone, Hash)]
pub struct VendoredPathBuf(Utf8PathBuf);
impl get_size2::GetSize for VendoredPathBuf {
    fn get_heap_size_with_tracker<T: get_size2::GetSizeTracker>(&self, tracker: T) -> (usize, T) {
        // The only heap allocation is the path's string buffer.
        (self.0.capacity(), tracker)
    }
}
impl Default for VendoredPathBuf {
    fn default() -> Self {
        Self::new()
    }
}
impl VendoredPathBuf {
    /// Creates an empty path.
    pub fn new() -> Self {
        Self(Utf8PathBuf::new())
    }
    #[inline]
    pub fn as_path(&self) -> &VendoredPath {
        VendoredPath::new(&self.0)
    }
    pub fn push(&mut self, component: impl AsRef<VendoredPath>) {
        self.0.push(component.as_ref())
    }
}
// Borrow/AsRef/Deref plumbing so `VendoredPath{,Buf}` interoperate with
// `Utf8Path`, `str`, and `std::path` APIs without allocating.
impl Borrow<VendoredPath> for VendoredPathBuf {
    fn borrow(&self) -> &VendoredPath {
        self.as_path()
    }
}
impl AsRef<VendoredPath> for VendoredPathBuf {
    fn as_ref(&self) -> &VendoredPath {
        self.as_path()
    }
}
impl AsRef<VendoredPath> for VendoredPath {
    #[inline]
    fn as_ref(&self) -> &VendoredPath {
        self
    }
}
impl AsRef<VendoredPath> for Utf8Path {
    #[inline]
    fn as_ref(&self) -> &VendoredPath {
        VendoredPath::new(self)
    }
}
impl AsRef<VendoredPath> for Utf8PathBuf {
    #[inline]
    fn as_ref(&self) -> &VendoredPath {
        VendoredPath::new(self.as_path())
    }
}
impl AsRef<VendoredPath> for str {
    #[inline]
    fn as_ref(&self) -> &VendoredPath {
        VendoredPath::new(self)
    }
}
impl AsRef<VendoredPath> for String {
    #[inline]
    fn as_ref(&self) -> &VendoredPath {
        VendoredPath::new(self)
    }
}
impl AsRef<path::Path> for VendoredPath {
    #[inline]
    fn as_ref(&self) -> &path::Path {
        self.0.as_std_path()
    }
}
impl AsRef<Utf8Path> for VendoredPath {
    #[inline]
    fn as_ref(&self) -> &Utf8Path {
        &self.0
    }
}
impl Deref for VendoredPathBuf {
    type Target = VendoredPath;
    fn deref(&self) -> &Self::Target {
        self.as_path()
    }
}
impl From<&str> for VendoredPathBuf {
    fn from(value: &str) -> Self {
        Self(Utf8PathBuf::from(value))
    }
}
// Fallible conversions from `std::path` types: they fail on non-UTF-8 paths.
impl<'a> TryFrom<&'a path::Path> for &'a VendoredPath {
    type Error = camino::FromPathError;
    fn try_from(value: &'a path::Path) -> Result<Self, Self::Error> {
        Ok(VendoredPath::new(<&camino::Utf8Path>::try_from(value)?))
    }
}
impl TryFrom<path::PathBuf> for VendoredPathBuf {
    type Error = camino::FromPathBufError;
    fn try_from(value: path::PathBuf) -> Result<Self, Self::Error> {
        Ok(VendoredPathBuf(camino::Utf8PathBuf::try_from(value)?))
    }
}
// The human-readable form carries a `vendored://` scheme prefix to
// distinguish vendored paths from regular file system paths.
impl std::fmt::Display for VendoredPath {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "vendored://{}", &self.0)
    }
}
impl std::fmt::Display for VendoredPathBuf {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(self.as_path(), f)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/files/path.rs | crates/ruff_db/src/files/path.rs | use crate::Db;
use crate::files::{File, system_path_to_file, vendored_path_to_file};
use crate::system::{SystemPath, SystemPathBuf, SystemVirtualPath, SystemVirtualPathBuf};
use crate::vendored::{VendoredPath, VendoredPathBuf};
use std::fmt::{Display, Formatter};
/// Path to a file.
///
/// The path abstracts that files in Ruff can come from different sources:
///
/// * a file stored on the [host system](crate::system::System).
/// * a virtual file stored on the [host system](crate::system::System).
/// * a vendored file stored in the [vendored file system](crate::vendored::VendoredFileSystem).
#[derive(Clone, Debug, Eq, PartialEq, Hash, get_size2::GetSize)]
pub enum FilePath {
/// Path to a file on the [host system](crate::system::System).
System(SystemPathBuf),
/// Path to a virtual file on the [host system](crate::system::System).
SystemVirtual(SystemVirtualPathBuf),
/// Path to a file vendored as part of Ruff. Stored in the [vendored file system](crate::vendored::VendoredFileSystem).
Vendored(VendoredPathBuf),
}
impl FilePath {
    /// Create a new path to a file on the file system.
    #[must_use]
    pub fn system(path: impl AsRef<SystemPath>) -> Self {
        FilePath::System(path.as_ref().to_path_buf())
    }
    /// Returns `Some` if the path is a file system path that points to a path on disk.
    #[must_use]
    #[inline]
    pub fn into_system_path_buf(self) -> Option<SystemPathBuf> {
        match self {
            FilePath::System(path) => Some(path),
            FilePath::Vendored(_) | FilePath::SystemVirtual(_) => None,
        }
    }
    /// Returns the path as a borrowed [`SystemPath`] if this is a system path.
    #[must_use]
    #[inline]
    pub fn as_system_path(&self) -> Option<&SystemPath> {
        match self {
            FilePath::System(path) => Some(path.as_path()),
            FilePath::Vendored(_) | FilePath::SystemVirtual(_) => None,
        }
    }
    /// Returns `true` if the path is a file system path that points to a path on disk.
    #[must_use]
    #[inline]
    pub const fn is_system_path(&self) -> bool {
        matches!(self, FilePath::System(_))
    }
    /// Returns `true` if the path is a file system path that is virtual i.e., it doesn't exists on
    /// disk.
    #[must_use]
    #[inline]
    pub const fn is_system_virtual_path(&self) -> bool {
        matches!(self, FilePath::SystemVirtual(_))
    }
    /// Returns `true` if the path is a vendored path.
    #[must_use]
    #[inline]
    pub const fn is_vendored_path(&self) -> bool {
        matches!(self, FilePath::Vendored(_))
    }
    /// Returns the path as a borrowed [`VendoredPath`] if this is a vendored path.
    #[must_use]
    #[inline]
    pub fn as_vendored_path(&self) -> Option<&VendoredPath> {
        match self {
            FilePath::Vendored(path) => Some(path.as_path()),
            FilePath::System(_) | FilePath::SystemVirtual(_) => None,
        }
    }
    /// Yields the underlying [`str`] slice.
    pub fn as_str(&self) -> &str {
        match self {
            FilePath::System(path) => path.as_str(),
            FilePath::Vendored(path) => path.as_str(),
            FilePath::SystemVirtual(path) => path.as_str(),
        }
    }
    /// Interns a virtual file system path and returns a salsa [`File`] ingredient.
    ///
    /// Returns `Some` if a file for `path` exists and is accessible by the user. Returns `None` otherwise.
    ///
    /// See [`system_path_to_file`] or [`vendored_path_to_file`] if you always have either a file
    /// system or vendored path.
    #[inline]
    pub fn to_file(&self, db: &dyn Db) -> Option<File> {
        match self {
            FilePath::System(path) => system_path_to_file(db, path).ok(),
            FilePath::Vendored(path) => vendored_path_to_file(db, path).ok(),
            // Virtual paths never resolve to a `File` here.
            FilePath::SystemVirtual(_) => None,
        }
    }
    /// Returns the path's file extension, if any (virtual paths have none).
    #[must_use]
    pub fn extension(&self) -> Option<&str> {
        match self {
            FilePath::System(path) => path.extension(),
            FilePath::Vendored(path) => path.extension(),
            FilePath::SystemVirtual(_) => None,
        }
    }
}
impl AsRef<str> for FilePath {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
// Conversions from the concrete path types into the matching `FilePath` variant.
impl From<SystemPathBuf> for FilePath {
    fn from(value: SystemPathBuf) -> Self {
        Self::System(value)
    }
}
impl From<&SystemPath> for FilePath {
    fn from(value: &SystemPath) -> Self {
        FilePath::System(value.to_path_buf())
    }
}
impl From<VendoredPathBuf> for FilePath {
    fn from(value: VendoredPathBuf) -> Self {
        Self::Vendored(value)
    }
}
impl From<&VendoredPath> for FilePath {
    fn from(value: &VendoredPath) -> Self {
        Self::Vendored(value.to_path_buf())
    }
}
impl From<&SystemVirtualPath> for FilePath {
    fn from(value: &SystemVirtualPath) -> Self {
        FilePath::SystemVirtual(value.to_path_buf())
    }
}
impl From<SystemVirtualPathBuf> for FilePath {
    fn from(value: SystemVirtualPathBuf) -> Self {
        FilePath::SystemVirtual(value)
    }
}
// Symmetric comparisons between `FilePath` and the raw path types.
// A `FilePath` only ever equals a path of its own variant's type.
impl PartialEq<SystemPath> for FilePath {
    #[inline]
    fn eq(&self, other: &SystemPath) -> bool {
        self.as_system_path()
            .is_some_and(|self_path| self_path == other)
    }
}
impl PartialEq<FilePath> for SystemPath {
    #[inline]
    fn eq(&self, other: &FilePath) -> bool {
        other == self
    }
}
impl PartialEq<SystemPathBuf> for FilePath {
    #[inline]
    fn eq(&self, other: &SystemPathBuf) -> bool {
        self == other.as_path()
    }
}
impl PartialEq<FilePath> for SystemPathBuf {
    fn eq(&self, other: &FilePath) -> bool {
        other == self
    }
}
impl PartialEq<VendoredPath> for FilePath {
    #[inline]
    fn eq(&self, other: &VendoredPath) -> bool {
        self.as_vendored_path()
            .is_some_and(|self_path| self_path == other)
    }
}
impl PartialEq<FilePath> for VendoredPath {
    #[inline]
    fn eq(&self, other: &FilePath) -> bool {
        other == self
    }
}
impl PartialEq<VendoredPathBuf> for FilePath {
    #[inline]
    fn eq(&self, other: &VendoredPathBuf) -> bool {
        other.as_path() == self
    }
}
impl PartialEq<FilePath> for VendoredPathBuf {
    #[inline]
    fn eq(&self, other: &FilePath) -> bool {
        other == self
    }
}
// Delegates to the wrapped path's `Display` implementation.
impl Display for FilePath {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            FilePath::System(path) => std::fmt::Display::fmt(path, f),
            FilePath::SystemVirtual(path) => std::fmt::Display::fmt(path, f),
            FilePath::Vendored(path) => std::fmt::Display::fmt(path, f),
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/files/file_root.rs | crates/ruff_db/src/files/file_root.rs | use std::fmt::Formatter;
use path_slash::PathExt;
use salsa::Durability;
use crate::Db;
use crate::file_revision::FileRevision;
use crate::system::{SystemPath, SystemPathBuf};
/// A root path for files tracked by the database.
///
/// We currently create roots for:
/// * static module resolution paths
/// * the project root
///
/// The main usage of file roots is to determine a file's durability. But it can also be used
/// to make a salsa query dependent on whether a file in a root has changed without writing any
/// manual invalidation logic.
#[salsa::input(debug, heap_size=ruff_memory_usage::heap_size)]
pub struct FileRoot {
/// The path of a root is guaranteed to never change.
#[returns(deref)]
pub path: SystemPathBuf,
/// The kind of the root at the time of its creation.
pub kind_at_time_of_creation: FileRootKind,
/// A revision that changes when the contents of the source root change.
///
/// The revision changes when a new file was added, removed, or changed inside this source root.
pub revision: FileRevision,
}
impl FileRoot {
    /// The salsa durability derived from this root's kind at creation time.
    pub fn durability(self, db: &dyn Db) -> salsa::Durability {
        self.kind_at_time_of_creation(db).durability()
    }
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, get_size2::GetSize)]
pub enum FileRootKind {
    /// The root of a project.
    Project,
    /// A non-project module resolution search path.
    LibrarySearchPath,
}
impl FileRootKind {
    const fn durability(self) -> Durability {
        match self {
            // Project files are expected to change often → low durability.
            FileRootKind::Project => Durability::LOW,
            // Library search paths rarely change → high durability.
            FileRootKind::LibrarySearchPath => Durability::HIGH,
        }
    }
}
/// Registry of [`FileRoot`]s with longest-prefix lookup by path.
#[derive(Default)]
pub(super) struct FileRoots {
    /// Router mapping a root path (and everything beneath it) to its root.
    by_path: matchit::Router<FileRoot>,
    /// All registered roots, in registration order.
    roots: Vec<FileRoot>,
}
impl FileRoots {
    /// Tries to add a new root for `path` and returns the root.
    ///
    /// The root isn't added nor is the file root's kind updated if a root for `path` already exists.
    pub(super) fn try_add(
        &mut self,
        db: &dyn Db,
        path: SystemPathBuf,
        kind: FileRootKind,
    ) -> FileRoot {
        // SAFETY: Guaranteed to succeed because `path` is a UTF-8 that only contains Unicode characters.
        let normalized_path = path.as_std_path().to_slash().unwrap();
        if let Ok(existing) = self.by_path.at(&normalized_path) {
            // Only if it is an exact match
            // (`at` also matches when `path` lies *inside* an existing root).
            if existing.value.path(db) == &*path {
                return *existing.value;
            }
        }
        tracing::debug!("Adding new file root '{path}' of kind {kind:?}");
        // normalize the path to use `/` separators and escape the '{' and '}' characters,
        // which matchit uses for routing parameters
        let mut route = normalized_path.replace('{', "{{").replace('}', "}}");
        // Insert a new source root
        let root = FileRoot::builder(path, kind, FileRevision::now())
            .durability(Durability::HIGH)
            .revision_durability(kind.durability())
            .new(db);
        // Insert a path that matches the root itself
        self.by_path.insert(route.clone(), root).unwrap();
        // Insert a path that matches all subdirectories and files
        if !route.ends_with("/") {
            route.push('/');
        }
        route.push_str("{*filepath}");
        self.by_path.insert(route, root).unwrap();
        self.roots.push(root);
        root
    }
    /// Returns the closest root for `path` or `None` if no root contains `path`.
    pub(super) fn at(&self, path: &SystemPath) -> Option<FileRoot> {
        // SAFETY: Guaranteed to succeed because `path` is a UTF-8 that only contains Unicode characters.
        let normalized_path = path.as_std_path().to_slash().unwrap();
        let entry = self.by_path.at(&normalized_path).ok()?;
        Some(*entry.value)
    }
    /// Returns all registered roots, in registration order.
    pub(super) fn all(&self) -> impl Iterator<Item = FileRoot> + '_ {
        self.roots.iter().copied()
    }
}
impl std::fmt::Debug for FileRoots {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Only the root list is shown; the router is an implementation detail.
        let mut tuple = f.debug_tuple("FileRoots");
        tuple.field(&self.roots);
        tuple.finish()
    }
}
impl PartialEq for FileRoots {
    /// Two `FileRoots` are equal when they contain the same roots in the same
    /// order; the router is derived state and not compared.
    fn eq(&self, other: &Self) -> bool {
        self.roots == other.roots
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/diagnostic/stylesheet.rs | crates/ruff_db/src/diagnostic/stylesheet.rs | use anstyle::{AnsiColor, Effects, Style};
use std::fmt::Formatter;
pub(super) const fn fmt_styled<'a, T>(
    content: T,
    style: anstyle::Style,
) -> impl std::fmt::Display + 'a
where
    T: std::fmt::Display + 'a,
{
    /// Display adapter that brackets `content` with the ANSI render/reset
    /// sequences of `style`.
    struct Styled<T> {
        content: T,
        style: anstyle::Style,
    }
    impl<T> std::fmt::Display for Styled<T>
    where
        T: std::fmt::Display,
    {
        fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
            write!(
                f,
                "{}{}{}",
                self.style.render(),
                self.content,
                self.style.render_reset()
            )
        }
    }
    Styled { content, style }
}
pub(super) fn fmt_with_hyperlink<'a, T>(
    content: T,
    url: Option<&'a str>,
    stylesheet: &DiagnosticStylesheet,
) -> impl std::fmt::Display + 'a
where
    T: std::fmt::Display + 'a,
{
    /// Display adapter that wraps `content` in an OSC 8 terminal hyperlink
    /// when a URL is present, and renders it plainly otherwise.
    struct Hyperlinked<'a, T> {
        content: T,
        url: Option<&'a str>,
    }
    impl<T> std::fmt::Display for Hyperlinked<'_, T>
    where
        T: std::fmt::Display,
    {
        fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
            match self.url {
                Some(url) => {
                    write!(f, "\x1B]8;;{url}\x1B\\")?;
                    self.content.fmt(f)?;
                    f.write_str("\x1B]8;;\x1B\\")
                }
                None => self.content.fmt(f),
            }
        }
    }
    // Suppress the link entirely when the stylesheet says the terminal
    // doesn't support hyperlinks.
    let url = if stylesheet.hyperlink { url } else { None };
    Hyperlinked { content, url }
}
/// The set of text styles used when rendering diagnostics.
///
/// Construct via [`DiagnosticStylesheet::styled`] for colored terminal output
/// or [`DiagnosticStylesheet::plain`] (also the `Default`) for unstyled output.
#[derive(Clone, Debug)]
pub struct DiagnosticStylesheet {
    pub(crate) error: Style,
    pub(crate) warning: Style,
    pub(crate) info: Style,
    pub(crate) note: Style,
    pub(crate) help: Style,
    // Style for line-number gutters.
    pub(crate) line_no: Style,
    pub(crate) emphasis: Style,
    pub(crate) none: Style,
    pub(crate) separator: Style,
    pub(crate) secondary_code: Style,
    // Styles for diff-like insertion/deletion rendering.
    pub(crate) insertion: Style,
    pub(crate) deletion: Style,
    pub(crate) insertion_line_no: Style,
    pub(crate) deletion_line_no: Style,
    // Whether OSC 8 terminal hyperlinks should be emitted.
    pub(crate) hyperlink: bool,
}
impl Default for DiagnosticStylesheet {
fn default() -> Self {
Self::plain()
}
}
impl DiagnosticStylesheet {
/// Default terminal styling
pub fn styled() -> Self {
let bright_blue = AnsiColor::BrightBlue.on_default();
let hyperlink = supports_hyperlinks::supports_hyperlinks();
Self {
error: AnsiColor::BrightRed.on_default().effects(Effects::BOLD),
warning: AnsiColor::Yellow.on_default().effects(Effects::BOLD),
info: bright_blue.effects(Effects::BOLD),
note: AnsiColor::BrightGreen.on_default().effects(Effects::BOLD),
help: AnsiColor::BrightCyan.on_default().effects(Effects::BOLD),
line_no: bright_blue.effects(Effects::BOLD),
emphasis: Style::new().effects(Effects::BOLD),
none: Style::new(),
separator: AnsiColor::Cyan.on_default(),
secondary_code: AnsiColor::Red.on_default().effects(Effects::BOLD),
insertion: AnsiColor::Green.on_default(),
deletion: AnsiColor::Red.on_default(),
insertion_line_no: AnsiColor::Green.on_default().effects(Effects::BOLD),
deletion_line_no: AnsiColor::Red.on_default().effects(Effects::BOLD),
hyperlink,
}
}
pub fn plain() -> Self {
Self {
error: Style::new(),
warning: Style::new(),
info: Style::new(),
note: Style::new(),
help: Style::new(),
line_no: Style::new(),
emphasis: Style::new(),
none: Style::new(),
separator: Style::new(),
secondary_code: Style::new(),
insertion: Style::new(),
deletion: Style::new(),
insertion_line_no: Style::new(),
deletion_line_no: Style::new(),
hyperlink: false,
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/diagnostic/render.rs | crates/ruff_db/src/diagnostic/render.rs | use std::borrow::Cow;
use std::collections::BTreeMap;
use std::path::Path;
use full::FullRenderer;
use ruff_annotate_snippets::{
Annotation as AnnotateAnnotation, Level as AnnotateLevel, Message as AnnotateMessage,
Snippet as AnnotateSnippet,
};
use ruff_notebook::{Notebook, NotebookIndex};
use ruff_source_file::{LineIndex, OneIndexed, SourceCode};
use ruff_text_size::{TextLen, TextRange, TextSize};
use crate::{
Db,
files::File,
source::{SourceText, line_index, source_text},
};
use super::{
Annotation, Diagnostic, DiagnosticFormat, DiagnosticSource, DisplayDiagnosticConfig,
SubDiagnostic, UnifiedFile,
};
use azure::AzureRenderer;
use concise::ConciseRenderer;
use github::GithubRenderer;
use pylint::PylintRenderer;
mod azure;
mod concise;
mod full;
pub mod github;
#[cfg(feature = "serde")]
mod gitlab;
#[cfg(feature = "serde")]
mod json;
#[cfg(feature = "serde")]
mod json_lines;
#[cfg(feature = "junit")]
mod junit;
mod pylint;
#[cfg(feature = "serde")]
mod rdjson;
/// A type that implements `std::fmt::Display` for diagnostic rendering.
///
/// It is created via [`Diagnostic::display`].
///
/// The lifetime parameter, `'a`, refers to the shorter of:
///
/// * The lifetime of the rendering configuration.
/// * The lifetime of the resolver used to load the contents of `Span`
///   values. When using Salsa, this most commonly corresponds to the lifetime
///   of a Salsa `Db`.
/// * The lifetime of the diagnostic being rendered.
pub struct DisplayDiagnostic<'a> {
    /// Rendering options (e.g. output format, severity visibility).
    config: &'a DisplayDiagnosticConfig,
    /// Resolves `Span`s to file paths and contents.
    resolver: &'a dyn FileResolver,
    /// The diagnostic to render.
    diag: &'a Diagnostic,
}
impl<'a> DisplayDiagnostic<'a> {
    /// Bundles a diagnostic with the resolver and configuration required to
    /// render it.
    pub(crate) fn new(
        resolver: &'a dyn FileResolver,
        config: &'a DisplayDiagnosticConfig,
        diag: &'a Diagnostic,
    ) -> DisplayDiagnostic<'a> {
        Self {
            diag,
            resolver,
            config,
        }
    }
}
impl std::fmt::Display for DisplayDiagnostic<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Rendering a single diagnostic is the singleton case of rendering a
        // collection of diagnostics.
        let single = std::slice::from_ref(self.diag);
        let display = DisplayDiagnostics::new(self.resolver, self.config, single);
        display.fmt(f)
    }
}
/// A type that implements `std::fmt::Display` for rendering a collection of diagnostics.
///
/// It is intended for collections of diagnostics that need to be serialized together, as is the
/// case for JSON, for example.
///
/// See [`DisplayDiagnostic`] for rendering individual `Diagnostic`s and details about the lifetime
/// constraints.
pub struct DisplayDiagnostics<'a> {
    /// Rendering options (e.g. output format, severity visibility).
    config: &'a DisplayDiagnosticConfig,
    /// Resolves `Span`s to file paths and contents.
    resolver: &'a dyn FileResolver,
    /// The diagnostics to render, in order.
    diagnostics: &'a [Diagnostic],
}
impl<'a> DisplayDiagnostics<'a> {
    /// Bundles a slice of diagnostics with the resolver and configuration
    /// required to render them.
    pub fn new(
        resolver: &'a dyn FileResolver,
        config: &'a DisplayDiagnosticConfig,
        diagnostics: &'a [Diagnostic],
    ) -> DisplayDiagnostics<'a> {
        Self {
            diagnostics,
            resolver,
            config,
        }
    }
}
impl std::fmt::Display for DisplayDiagnostics<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Dispatch to the renderer matching the configured output format.
        // Several formats are only compiled in behind cargo features:
        // `serde` for the JSON-based formats and `junit` for JUnit.
        match self.config.format {
            DiagnosticFormat::Concise => {
                ConciseRenderer::new(self.resolver, self.config).render(f, self.diagnostics)?;
            }
            DiagnosticFormat::Full => {
                FullRenderer::new(self.resolver, self.config).render(f, self.diagnostics)?;
            }
            DiagnosticFormat::Azure => {
                AzureRenderer::new(self.resolver).render(f, self.diagnostics)?;
            }
            #[cfg(feature = "serde")]
            DiagnosticFormat::Json => {
                json::JsonRenderer::new(self.resolver, self.config).render(f, self.diagnostics)?;
            }
            #[cfg(feature = "serde")]
            DiagnosticFormat::JsonLines => {
                json_lines::JsonLinesRenderer::new(self.resolver, self.config)
                    .render(f, self.diagnostics)?;
            }
            #[cfg(feature = "serde")]
            DiagnosticFormat::Rdjson => {
                rdjson::RdjsonRenderer::new(self.resolver).render(f, self.diagnostics)?;
            }
            DiagnosticFormat::Pylint => {
                PylintRenderer::new(self.resolver).render(f, self.diagnostics)?;
            }
            #[cfg(feature = "junit")]
            DiagnosticFormat::Junit => {
                junit::JunitRenderer::new(self.resolver).render(f, self.diagnostics)?;
            }
            #[cfg(feature = "serde")]
            DiagnosticFormat::Gitlab => {
                gitlab::GitlabRenderer::new(self.resolver).render(f, self.diagnostics)?;
            }
            DiagnosticFormat::Github => {
                GithubRenderer::new(self.resolver, "ty").render(f, self.diagnostics)?;
            }
        }
        Ok(())
    }
}
/// A sequence of resolved diagnostics.
///
/// Resolving a diagnostic refers to the process of restructuring its internal
/// data in a way that enables rendering decisions. For example, a `Span`
/// on an `Annotation` in a `Diagnostic` is intentionally very minimal, and
/// thus doesn't have information like line numbers or even the actual file
/// path. Resolution retrieves this information and puts it into a structured
/// representation specifically intended for diagnostic rendering.
///
/// The lifetime `'a` refers to the shorter of the lifetimes between the file
/// resolver and the diagnostic itself. (The resolved types borrow data from
/// both.)
#[derive(Debug)]
struct Resolved<'a> {
    /// The resolved main diagnostic, followed by its resolved sub-diagnostics.
    diagnostics: Vec<ResolvedDiagnostic<'a>>,
}
impl<'a> Resolved<'a> {
    /// Creates a new resolved set of diagnostics: the main diagnostic first,
    /// followed by each of its sub-diagnostics.
    fn new(
        resolver: &'a dyn FileResolver,
        diag: &'a Diagnostic,
        config: &DisplayDiagnosticConfig,
    ) -> Resolved<'a> {
        let main = ResolvedDiagnostic::from_diagnostic(resolver, config, diag);
        let subs = diag
            .inner
            .subs
            .iter()
            .map(|sub| ResolvedDiagnostic::from_sub_diagnostic(resolver, sub));
        Resolved {
            diagnostics: std::iter::once(main).chain(subs).collect(),
        }
    }
    /// Creates a value that is amenable to rendering directly.
    fn to_renderable(&self, context: usize) -> Renderable<'_> {
        let diagnostics = self
            .diagnostics
            .iter()
            .map(|resolved| resolved.to_renderable(context))
            .collect();
        Renderable { diagnostics }
    }
}
/// A single resolved diagnostic.
///
/// The lifetime `'a` refers to the shorter of the lifetimes between the file
/// resolver and the diagnostic itself. (The resolved types borrow data from
/// both.)
#[derive(Debug)]
struct ResolvedDiagnostic<'a> {
    /// The severity level used by the `annotate-snippets` renderer.
    level: AnnotateLevel,
    /// The diagnostic's identifier. `Some` for top-level diagnostics, `None`
    /// for sub-diagnostics.
    id: Option<String>,
    /// A URL pointing at documentation for this diagnostic, if any.
    documentation_url: Option<String>,
    /// The primary diagnostic message.
    message: String,
    /// The resolved annotations (annotations that failed to resolve are dropped).
    annotations: Vec<ResolvedAnnotation<'a>>,
    /// Whether a `[*]` fixable indicator should be rendered.
    is_fixable: bool,
    /// Offset used to align the `-->` header sigil (see `RenderableDiagnostic`).
    header_offset: usize,
}
impl<'a> ResolvedDiagnostic<'a> {
/// Resolve a single diagnostic.
fn from_diagnostic(
resolver: &'a dyn FileResolver,
config: &DisplayDiagnosticConfig,
diag: &'a Diagnostic,
) -> ResolvedDiagnostic<'a> {
let annotations: Vec<_> = diag
.inner
.annotations
.iter()
.filter_map(|ann| {
let path = ann
.span
.file
.relative_path(resolver)
.to_str()
.unwrap_or_else(|| ann.span.file.path(resolver));
let diagnostic_source = ann.span.file.diagnostic_source(resolver);
ResolvedAnnotation::new(path, &diagnostic_source, ann, resolver)
})
.collect();
let id = if config.hide_severity {
// Either the rule code alone (e.g. `F401`), or the lint id with a colon (e.g.
// `invalid-syntax:`). When Ruff gets real severities, we should put the colon back in
// `DisplaySet::format_annotation` for both cases, but this is a small hack to improve
// the formatting of syntax errors for now. This should also be kept consistent with the
// concise formatting.
diag.secondary_code().map_or_else(
|| format!("{id}:", id = diag.inner.id),
|code| code.to_string(),
)
} else {
diag.inner.id.to_string()
};
let level = if config.hide_severity {
AnnotateLevel::None
} else {
diag.inner.severity.to_annotate()
};
ResolvedDiagnostic {
level,
id: Some(id),
documentation_url: diag.documentation_url().map(ToString::to_string),
message: diag.inner.message.as_str().to_string(),
annotations,
is_fixable: config.show_fix_status && diag.has_applicable_fix(config),
header_offset: diag.inner.header_offset,
}
}
/// Resolve a single sub-diagnostic.
fn from_sub_diagnostic(
resolver: &'a dyn FileResolver,
diag: &'a SubDiagnostic,
) -> ResolvedDiagnostic<'a> {
let annotations: Vec<_> = diag
.inner
.annotations
.iter()
.filter_map(|ann| {
let path = ann
.span
.file
.relative_path(resolver)
.to_str()
.unwrap_or_else(|| ann.span.file.path(resolver));
let diagnostic_source = ann.span.file.diagnostic_source(resolver);
ResolvedAnnotation::new(path, &diagnostic_source, ann, resolver)
})
.collect();
ResolvedDiagnostic {
level: diag.inner.severity.to_annotate(),
id: None,
documentation_url: None,
message: diag.inner.message.as_str().to_string(),
annotations,
is_fixable: false,
header_offset: 0,
}
}
/// Create a diagnostic amenable for rendering.
///
/// `context` refers to the number of lines both before and after to show
/// for each snippet.
fn to_renderable<'r>(&'r self, context: usize) -> RenderableDiagnostic<'r> {
let mut ann_by_path: BTreeMap<&'a str, Vec<&ResolvedAnnotation<'a>>> = BTreeMap::new();
for ann in &self.annotations {
ann_by_path.entry(ann.path).or_default().push(ann);
}
for anns in ann_by_path.values_mut() {
anns.sort_by_key(|ann1| ann1.range.start());
}
let mut snippet_by_path: BTreeMap<&'a str, Vec<Vec<&ResolvedAnnotation<'a>>>> =
BTreeMap::new();
for (path, anns) in ann_by_path {
let mut snippet = vec![];
for ann in anns {
let Some(prev) = snippet.last() else {
snippet.push(ann);
continue;
};
let prev_context_ends = context_after(
&prev.diagnostic_source.as_source_code(),
context,
prev.line_end,
prev.notebook_index.as_ref(),
)
.get();
let this_context_begins = context_before(
&ann.diagnostic_source.as_source_code(),
context,
ann.line_start,
ann.notebook_index.as_ref(),
)
.get();
// For notebooks, check whether the end of the
// previous annotation and the start of the current
// annotation are in different cells.
let prev_cell_index = prev.notebook_index.as_ref().map(|notebook_index| {
let prev_end = prev
.diagnostic_source
.as_source_code()
.line_column(prev.range.end());
notebook_index.cell(prev_end.line).unwrap_or_default().get()
});
let this_cell_index = ann.notebook_index.as_ref().map(|notebook_index| {
let this_start = ann
.diagnostic_source
.as_source_code()
.line_column(ann.range.start());
notebook_index
.cell(this_start.line)
.unwrap_or_default()
.get()
});
let in_different_cells = prev_cell_index != this_cell_index;
// The boundary case here is when `prev_context_ends`
// is exactly one less than `this_context_begins`. In
// that case, the context windows are adjacent and we
// should fall through below to add this annotation to
// the existing snippet.
//
// For notebooks, also check that the context windows
// are in the same cell. Windows from different cells
// should never be considered adjacent.
if in_different_cells || this_context_begins.saturating_sub(prev_context_ends) > 1 {
snippet_by_path
.entry(path)
.or_default()
.push(std::mem::take(&mut snippet));
}
snippet.push(ann);
}
if !snippet.is_empty() {
snippet_by_path.entry(path).or_default().push(snippet);
}
}
let mut snippets_by_input = vec![];
for (path, snippets) in snippet_by_path {
snippets_by_input.push(RenderableSnippets::new(context, path, &snippets));
}
snippets_by_input
.sort_by(|snips1, snips2| snips1.has_primary.cmp(&snips2.has_primary).reverse());
RenderableDiagnostic {
level: self.level,
id: self.id.as_deref(),
documentation_url: self.documentation_url.as_deref(),
message: &self.message,
snippets_by_input,
is_fixable: self.is_fixable,
header_offset: self.header_offset,
}
}
}
/// A resolved annotation with information needed for rendering.
///
/// For example, this annotation has the corresponding file path, entire
/// source code and the line numbers corresponding to its range in the source
/// code. This information can be used to create renderable data and also
/// sort/organize the annotations into snippets.
#[derive(Debug)]
struct ResolvedAnnotation<'a> {
    /// The path of the file this annotation points into.
    path: &'a str,
    /// The full contents of that file.
    diagnostic_source: DiagnosticSource,
    /// The annotated range within `diagnostic_source`.
    range: TextRange,
    /// First line covered by `range`.
    line_start: OneIndexed,
    /// Last line covered by `range` (treated as an inclusive bound when the
    /// range ends right after a line terminator — see `ResolvedAnnotation::new`).
    line_end: OneIndexed,
    /// Optional label rendered next to the annotation.
    message: Option<&'a str>,
    /// Whether this is a primary (vs. secondary) annotation.
    is_primary: bool,
    /// Whether the snippet for this annotation should be hidden.
    hide_snippet: bool,
    /// `Some` when the annotated file is a Jupyter notebook.
    notebook_index: Option<NotebookIndex>,
}
impl<'a> ResolvedAnnotation<'a> {
    /// Resolve an annotation.
    ///
    /// `path` is the path of the file that this annotation points to.
    ///
    /// `diagnostic_source` is the contents of the file that this annotation
    /// points to.
    ///
    /// This currently always returns `Some`; the `Option` return type is kept
    /// for callers that resolve annotations via `filter_map`.
    fn new(
        path: &'a str,
        diagnostic_source: &DiagnosticSource,
        ann: &'a Annotation,
        resolver: &'a dyn FileResolver,
    ) -> Option<ResolvedAnnotation<'a>> {
        let source = diagnostic_source.as_source_code();
        // NOTE: This previously matched on `(ann.span.range(), ann.message.is_some())`,
        // but neither arm inspected the message — the extra component was dead.
        let (range, line_start, line_end) = match ann.span.range() {
            // An annotation with no range is probably(?) meaningless, but we
            // should try to render it anyway: pretend it points at the very
            // start of the file.
            None => (
                TextRange::empty(TextSize::new(0)),
                OneIndexed::MIN,
                OneIndexed::MIN,
            ),
            Some(range) => {
                let line_start = source.line_index(range.start());
                let mut line_end = source.line_index(range.end());
                // As a special case, if the *end* of our range comes
                // right after a line terminator, we say that the last
                // line number for this annotation is the previous
                // line and not the next line. In other words, in this
                // case, we treat our line number as an inclusive
                // upper bound.
                if source.slice(range).ends_with(['\r', '\n']) {
                    line_end = line_end.saturating_sub(1).max(line_start);
                }
                (range, line_start, line_end)
            }
        };
        Some(ResolvedAnnotation {
            path,
            diagnostic_source: diagnostic_source.clone(),
            range,
            line_start,
            line_end,
            message: ann.get_message(),
            is_primary: ann.is_primary,
            hide_snippet: ann.hide_snippet,
            notebook_index: resolver.notebook_index(&ann.span.file),
        })
    }
}
/// A single unit of rendering consisting of one or more diagnostics.
///
/// There is always exactly one "main" diagnostic that comes first, followed by
/// zero or more sub-diagnostics.
///
/// The lifetime parameter `'r` refers to the lifetime of whatever created this
/// renderable value. This is usually the lifetime of `Resolved`.
#[derive(Debug)]
struct Renderable<'r> {
    /// The main diagnostic followed by its sub-diagnostics, in render order.
    diagnostics: Vec<RenderableDiagnostic<'r>>,
}
/// A single diagnostic amenable to rendering.
///
/// All line/offset resolution has already happened; this is the direct input
/// to the `annotate-snippets` conversion in `to_annotate`.
#[derive(Debug)]
struct RenderableDiagnostic<'r> {
    /// The severity of the diagnostic.
    level: AnnotateLevel,
    /// The ID of the diagnostic. The ID can usually be used on the CLI or in a
    /// config file to change the severity of a lint.
    ///
    /// An ID is always present for top-level diagnostics and always absent for
    /// sub-diagnostics.
    id: Option<&'r str>,
    /// A URL pointing at documentation for this diagnostic, if any.
    documentation_url: Option<&'r str>,
    /// The message emitted with the diagnostic, before any snippets are
    /// rendered.
    message: &'r str,
    /// A collection of collections of snippets. Each collection of snippets
    /// should be from the same file, and none of the snippets inside of a
    /// collection should overlap with one another or be directly adjacent.
    snippets_by_input: Vec<RenderableSnippets<'r>>,
    /// Whether or not the diagnostic is fixable.
    ///
    /// This is rendered as a `[*]` indicator after the diagnostic ID.
    is_fixable: bool,
    /// Offset to align the header sigil (`-->`) with the subsequent line number separators.
    ///
    /// This is only needed for formatter diagnostics where we don't render a snippet via
    /// `annotate-snippets` and thus the alignment isn't computed automatically.
    header_offset: usize,
}
impl RenderableDiagnostic<'_> {
    /// Convert this to an "annotate" snippet.
    fn to_annotate(&self) -> AnnotateMessage<'_> {
        // Flatten every file group into one sequence of annotate snippets,
        // tagging each snippet with its group's file path.
        let all_snippets = self.snippets_by_input.iter().flat_map(|group| {
            group
                .snippets
                .iter()
                .map(move |snippet| snippet.to_annotate(group.path))
        });
        let mut message = self
            .level
            .title(self.message)
            .is_fixable(self.is_fixable)
            .lineno_offset(self.header_offset);
        // Only top-level diagnostics carry an ID (and possibly a docs URL).
        if let Some(id) = self.id {
            message = message.id_with_url(id, self.documentation_url);
        }
        message.snippets(all_snippets)
    }
}
/// A collection of renderable snippets for a single file.
#[derive(Debug)]
struct RenderableSnippets<'r> {
    /// The path to the file from which all snippets originate from.
    path: &'r str,
    /// The snippets, in the order of desired rendering.
    snippets: Vec<RenderableSnippet<'r>>,
    /// Whether this contains any snippets with any annotations marked
    /// as primary. This is useful for re-sorting snippets such that
    /// the ones with primary annotations are rendered first.
    has_primary: bool,
}
impl<'r> RenderableSnippets<'r> {
/// Creates a new collection of renderable snippets.
///
/// `context` is the number of lines to include before and after each
/// snippet.
///
/// `path` is the file path containing the given snippets. (They should all
/// come from the same file path.)
///
/// The lifetime parameter `'r` refers to the lifetime of the resolved
/// annotation given (since the renderable snippet returned borrows from
/// the resolved annotation's `Input`). This is no longer than the lifetime
/// of the resolver that produced the resolved annotation.
///
/// # Panics
///
/// When `resolved_snippets.is_empty()`.
fn new<'a>(
context: usize,
path: &'r str,
resolved_snippets: &'a [Vec<&'r ResolvedAnnotation<'r>>],
) -> RenderableSnippets<'r> {
assert!(!resolved_snippets.is_empty());
let mut has_primary = false;
let mut snippets = vec![];
for anns in resolved_snippets {
let snippet = RenderableSnippet::new(context, anns);
has_primary = has_primary || snippet.has_primary;
snippets.push(snippet);
}
snippets.sort_by(|s1, s2| s1.has_primary.cmp(&s2.has_primary).reverse());
RenderableSnippets {
path,
snippets,
has_primary,
}
}
}
/// A single snippet of code that is rendered as part of a diagnostic message.
///
/// The intent is that a snippet for one diagnostic does not overlap (or is
/// even directly adjacent to) any other snippets for that same diagnostic.
/// Callers creating a `RenderableSnippet` should enforce this guarantee by
/// grouping annotations according to the lines on which they start and stop.
///
/// Snippets from different diagnostics (including sub-diagnostics) may
/// overlap.
#[derive(Debug)]
struct RenderableSnippet<'r> {
    /// The actual snippet text.
    snippet: Cow<'r, str>,
    /// The absolute line number corresponding to where this
    /// snippet begins. (For notebooks this is remapped to a
    /// cell-relative row in `RenderableSnippet::new`.)
    line_start: OneIndexed,
    /// A non-zero number of annotations on this snippet, with ranges
    /// relative to `snippet` rather than the whole file.
    annotations: Vec<RenderableAnnotation<'r>>,
    /// Whether this snippet contains at least one primary
    /// annotation.
    has_primary: bool,
    /// The cell index in a Jupyter notebook, if this snippet refers to a notebook.
    ///
    /// This is used for rendering annotations with offsets like `cell 1:2:3` instead of simple row
    /// and column numbers.
    cell_index: Option<usize>,
}
impl<'r> RenderableSnippet<'r> {
    /// Creates a new snippet with one or more annotations that is ready to be
    /// rendered.
    ///
    /// The first line of the snippet is the smallest line number on which one
    /// of the annotations begins, minus the context window size. The last line
    /// is the largest line number on which one of the annotations ends, plus
    /// the context window size.
    ///
    /// For Jupyter notebooks, the context window may also be truncated at cell
    /// boundaries. If multiple annotations are present, and they point to
    /// different cells, these will have already been split into separate
    /// snippets by `ResolvedDiagnostic::to_renderable`.
    ///
    /// Callers should guarantee that the `input` on every `ResolvedAnnotation`
    /// given is identical.
    ///
    /// The lifetime of the snippet returned is only tied to the lifetime of
    /// the borrowed resolved annotation given (which is no longer than the
    /// lifetime of the resolver that produced the resolved annotation).
    ///
    /// # Panics
    ///
    /// When `anns.is_empty()`.
    fn new<'a>(context: usize, anns: &'a [&'r ResolvedAnnotation<'r>]) -> RenderableSnippet<'r> {
        assert!(
            !anns.is_empty(),
            "creating a renderable snippet requires a non-zero number of annotations",
        );
        // All annotations are expected to come from the same file (see doc
        // comment), so the first annotation's source and notebook index stand
        // in for all of them.
        let diagnostic_source = &anns[0].diagnostic_source;
        let notebook_index = anns[0].notebook_index.as_ref();
        let source = diagnostic_source.as_source_code();
        let has_primary = anns.iter().any(|ann| ann.is_primary);
        // Compute the snippet window: `context` lines before the earliest
        // annotation through `context` lines after the latest one.
        let content_start_index = anns.iter().map(|ann| ann.line_start).min().unwrap();
        let line_start = context_before(&source, context, content_start_index, notebook_index);
        let start = source.line_column(anns[0].range.start());
        let cell_index = notebook_index
            .map(|notebook_index| notebook_index.cell(start.line).unwrap_or_default().get());
        let content_end_index = anns.iter().map(|ann| ann.line_end).max().unwrap();
        let line_end = context_after(&source, context, content_end_index, notebook_index);
        let snippet_start = source.line_start(line_start);
        let snippet_end = source.line_end(line_end);
        let snippet = diagnostic_source
            .as_source_code()
            .slice(TextRange::new(snippet_start, snippet_end));
        // Strip the BOM from the beginning of the snippet, if present. Doing this here saves us the
        // trouble of updating the annotation ranges in `replace_unprintable`, and also allows us to
        // check that the BOM is at the very beginning of the file, not just the beginning of the
        // snippet.
        const BOM: char = '\u{feff}';
        let bom_len = BOM.text_len();
        let (snippet, snippet_start) =
            if snippet_start == TextSize::ZERO && snippet.starts_with(BOM) {
                (
                    &snippet[bom_len.to_usize()..],
                    // Advancing `snippet_start` past the BOM keeps the
                    // annotation ranges (computed below) aligned with the
                    // stripped snippet text.
                    snippet_start + TextSize::new(bom_len.to_u32()),
                )
            } else {
                (snippet, snippet_start)
            };
        // Re-base each annotation's range from file offsets to snippet offsets.
        let annotations = anns
            .iter()
            .map(|ann| RenderableAnnotation::new(snippet_start, ann))
            .collect();
        let EscapedSourceCode {
            text: snippet,
            annotations,
        } = replace_unprintable(snippet, annotations).fix_up_empty_spans_after_line_terminator();
        // For notebooks, translate the absolute starting row into a
        // cell-relative row.
        let line_start = notebook_index.map_or(line_start, |notebook_index| {
            notebook_index
                .cell_row(line_start)
                .unwrap_or(OneIndexed::MIN)
        });
        RenderableSnippet {
            snippet,
            line_start,
            annotations,
            has_primary,
            cell_index,
        }
    }
    /// Convert this to an "annotate" snippet.
    fn to_annotate<'a>(&'a self, path: &'a str) -> AnnotateSnippet<'a> {
        AnnotateSnippet::source(&self.snippet)
            .origin(path)
            .line_start(self.line_start.get())
            .annotations(
                self.annotations
                    .iter()
                    .map(RenderableAnnotation::to_annotate),
            )
            .cell_index(self.cell_index)
    }
}
/// A single annotation represented in a way that is amenable to rendering.
#[derive(Debug)]
struct RenderableAnnotation<'r> {
    /// The range of the annotation relative to the snippet
    /// it points to. This is *not* the absolute range in the
    /// corresponding file.
    range: TextRange,
    /// An optional message or label associated with this annotation.
    message: Option<&'r str>,
    /// Whether this annotation is considered "primary" or not.
    /// Primary and secondary annotations render with different
    /// markers and colors (see `to_annotate`).
    is_primary: bool,
    /// Whether the snippet for this annotation should be hidden instead of rendered.
    hide_snippet: bool,
}
impl<'r> RenderableAnnotation<'r> {
/// Create a new renderable annotation.
///
/// `snippet_start` should be the absolute offset at which the snippet
/// pointing to by the given annotation begins.
///
/// The lifetime of the resolved annotation does not matter. The `'r`
/// lifetime parameter here refers to the lifetime of the resolver that
/// created the given `ResolvedAnnotation`.
fn new(snippet_start: TextSize, ann: &'_ ResolvedAnnotation<'r>) -> RenderableAnnotation<'r> {
// This should only ever saturate if a BOM is present _and_ the annotation range points
// before the BOM (i.e. at offset 0). In Ruff this typically results from the use of
// `TextRange::default()` for a diagnostic range instead of a range relative to file
// contents.
let range = ann.range.checked_sub(snippet_start).unwrap_or(ann.range);
RenderableAnnotation {
range,
message: ann.message,
is_primary: ann.is_primary,
hide_snippet: ann.hide_snippet,
}
}
/// Convert this to an "annotate" annotation.
fn to_annotate(&self) -> AnnotateAnnotation<'_> {
// This is not really semantically meaningful, but
// it does currently result in roughly the message
// we want to convey.
//
// TODO: While this means primary annotations use `^` and
// secondary annotations use `-` (which is fine), this does
// result in coloring for primary annotations that looks like
// an error (red) and coloring for secondary annotations that
// looks like a warning (yellow). This is perhaps not quite in
// line with what we want, but fixing this probably requires
// changes to `ruff_annotate_snippets`, so we punt for now.
let level = if self.is_primary {
AnnotateLevel::Error
} else {
AnnotateLevel::Warning
};
let mut ann = level.span(self.range.into());
if let Some(message) = self.message {
ann = ann.label(message);
}
ann.hide_snippet(self.hide_snippet)
}
}
/// A trait that facilitates the retrieval of source code from a `Span`.
///
/// At present, this is tightly coupled with a Salsa database. In the future,
/// it is intended for this resolver to become an abstraction providing a
/// similar API. We define things this way for now to keep the Salsa coupling
/// at "arm's" length, and to make it easier to do the actual de-coupling in
/// the future.
///
/// For example, at time of writing (2025-03-07), the plan is (roughly) for
/// Ruff to grow its own interner of file paths so that a `Span` can store an
/// interned ID instead of a (roughly) `Arc<Path>`. This interner is planned
/// to be entirely separate from the Salsa interner used by ty, and so,
/// callers will need to pass in a different "resolver" for turning `Span`s
/// into actual file paths/contents. The infrastructure for this isn't fully in
/// place, but this type serves to demarcate the intended abstraction boundary.
pub trait FileResolver {
    /// Returns the path associated with the file given.
    fn path(&self, file: File) -> &str;
    /// Returns the input contents associated with the file given.
    fn input(&self, file: File) -> Input;
    /// Returns the [`NotebookIndex`] associated with the file given, if it's a Jupyter notebook.
    fn notebook_index(&self, file: &UnifiedFile) -> Option<NotebookIndex>;
    /// Returns whether the file given is a Jupyter notebook.
    fn is_notebook(&self, file: &UnifiedFile) -> bool;
    /// Returns the current working directory.
    fn current_directory(&self) -> &Path;
}
impl<T> FileResolver for T
where
    T: Db,
{
    fn path(&self, file: File) -> &str {
        file.path(self).as_str()
    }
    fn input(&self, file: File) -> Input {
        let text = source_text(self, file);
        let index = line_index(self, file);
        Input {
            text,
            line_index: index,
        }
    }
    fn notebook_index(&self, file: &UnifiedFile) -> Option<NotebookIndex> {
        match file {
            UnifiedFile::Ty(file) => {
                let input = self.input(*file);
                input
                    .text
                    .as_notebook()
                    .map(|notebook| notebook.index().clone())
            }
            UnifiedFile::Ruff(_) => unimplemented!("Expected an interned ty file"),
        }
    }
    fn is_notebook(&self, file: &UnifiedFile) -> bool {
        match file {
            UnifiedFile::Ty(file) => self.input(*file).text.as_notebook().is_some(),
            UnifiedFile::Ruff(_) => unimplemented!("Expected an interned ty file"),
        }
    }
    fn current_directory(&self) -> &Path {
        self.system().current_directory().as_std_path()
    }
}
impl FileResolver for &dyn Db {
fn path(&self, file: File) -> &str {
file.path(*self).as_str()
}
fn input(&self, file: File) -> Input {
Input {
text: source_text(*self, file),
line_index: line_index(*self, file),
}
}
fn notebook_index(&self, file: &UnifiedFile) -> Option<NotebookIndex> {
match file {
UnifiedFile::Ty(file) => self
.input(*file)
.text
.as_notebook()
.map(Notebook::index)
.cloned(),
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/diagnostic/mod.rs | crates/ruff_db/src/diagnostic/mod.rs | use std::{borrow::Cow, fmt::Formatter, path::Path, sync::Arc};
use ruff_diagnostics::{Applicability, Fix};
use ruff_source_file::{LineColumn, SourceCode, SourceFile};
use ruff_annotate_snippets::Level as AnnotateLevel;
use ruff_text_size::{Ranged, TextRange, TextSize};
pub use self::render::{
DisplayDiagnostic, DisplayDiagnostics, DummyFileResolver, FileResolver, Input,
ceil_char_boundary,
github::{DisplayGithubDiagnostics, GithubRenderer},
};
use crate::cancellation::CancellationToken;
use crate::{Db, files::File};
mod render;
mod stylesheet;
/// A collection of information that can be rendered into a diagnostic.
///
/// A diagnostic is a collection of information gathered by a tool intended
/// for presentation to an end user, and which describes a group of related
/// characteristics in the inputs given to the tool. Typically, but not always,
/// a characteristic is a deficiency. An example of a characteristic that is
/// _not_ a deficiency is the `reveal_type` diagnostic for our type checker.
#[derive(Debug, Clone, Eq, PartialEq, Hash, get_size2::GetSize)]
pub struct Diagnostic {
    /// The actual diagnostic.
    ///
    /// We put the diagnostic behind an `Arc` since it is somewhat big, which
    /// also makes cloning a `Diagnostic` cheap.
    inner: Arc<DiagnosticInner>,
}
impl Diagnostic {
    /// Create a new diagnostic with the given identifier, severity and
    /// message.
    ///
    /// The identifier should be something that uniquely identifies the _type_
    /// of diagnostic being reported. It should be usable as a reference point
    /// for humans communicating about diagnostic categories. It will also
    /// appear in the output when this diagnostic is rendered.
    ///
    /// The severity should describe the assumed level of importance to an end
    /// user.
    ///
    /// The message is meant to be read by end users. The primary message
    /// is meant to be a single terse description (usually a short phrase)
    /// describing the group of related characteristics that the diagnostic
    /// describes. Stated differently, if only one thing from a diagnostic can
    /// be shown to an end user in a particular context, it is the primary
    /// message.
    ///
    /// # Types implementing `IntoDiagnosticMessage`
    ///
    /// Callers can pass anything that implements `std::fmt::Display`
    /// directly. If callers want or need to avoid cloning the diagnostic
    /// message, then they can also pass a `DiagnosticMessage` directly.
    pub fn new<'a>(
        id: DiagnosticId,
        severity: Severity,
        message: impl IntoDiagnosticMessage + 'a,
    ) -> Diagnostic {
        let inner = Arc::new(DiagnosticInner {
            id,
            severity,
            message: message.into_diagnostic_message(),
            custom_concise_message: None,
            documentation_url: None,
            annotations: vec![],
            subs: vec![],
            fix: None,
            parent: None,
            noqa_offset: None,
            secondary_code: None,
            header_offset: 0,
        });
        Diagnostic { inner }
    }

    /// Creates a `Diagnostic` for a syntax error.
    ///
    /// Unlike the more general [`Diagnostic::new`], this requires a [`Span`] and a [`TextRange`]
    /// attached to it.
    ///
    /// This should _probably_ be a method on the syntax errors, but
    /// at time of writing, `ruff_db` depends on `ruff_python_parser` instead of
    /// the other way around. And since we want to do this conversion in a couple
    /// places, it makes sense to centralize it _somewhere_. So it's here for now.
    pub fn invalid_syntax(
        span: impl Into<Span>,
        message: impl IntoDiagnosticMessage,
        range: impl Ranged,
    ) -> Diagnostic {
        let mut diag = Diagnostic::new(DiagnosticId::InvalidSyntax, Severity::Error, message);
        let span = span.into().with_range(range.range());
        diag.annotate(Annotation::primary(span));
        diag
    }

    /// Add an annotation to this diagnostic.
    ///
    /// Annotations for a diagnostic are optional, but if any are added,
    /// callers should strive to make at least one of them primary. That is, it
    /// should be constructed via [`Annotation::primary`]. A diagnostic with no
    /// primary annotations is allowed, but its rendering may be sub-optimal.
    pub fn annotate(&mut self, ann: Annotation) {
        Arc::make_mut(&mut self.inner).annotations.push(ann);
    }

    /// Adds an "info" sub-diagnostic with the given message.
    ///
    /// If callers want to add an "info" sub-diagnostic with annotations, then
    /// create a [`SubDiagnostic`] manually and use [`Diagnostic::sub`] to
    /// attach it to a parent diagnostic.
    ///
    /// An "info" diagnostic is useful when contextualizing or otherwise
    /// helpful information can be added to help end users understand the
    /// main diagnostic message better. For example, if the main diagnostic
    /// message is about a function call being invalid, a useful "info"
    /// sub-diagnostic could show the function definition (or only the relevant
    /// parts of it).
    ///
    /// # Types implementing `IntoDiagnosticMessage`
    ///
    /// Callers can pass anything that implements `std::fmt::Display`
    /// directly. If callers want or need to avoid cloning the diagnostic
    /// message, then they can also pass a `DiagnosticMessage` directly.
    pub fn info<'a>(&mut self, message: impl IntoDiagnosticMessage + 'a) {
        self.sub(SubDiagnostic::new(SubDiagnosticSeverity::Info, message));
    }

    /// Adds a "help" sub-diagnostic with the given message.
    ///
    /// See the closely related [`Diagnostic::info`] method for more details.
    pub fn help<'a>(&mut self, message: impl IntoDiagnosticMessage + 'a) {
        self.sub(SubDiagnostic::new(SubDiagnosticSeverity::Help, message));
    }

    /// Adds a "sub" diagnostic to this diagnostic.
    ///
    /// This is useful when a sub diagnostic has its own annotations attached
    /// to it. For the simpler case of a sub-diagnostic with only a message,
    /// using a method like [`Diagnostic::info`] may be more convenient.
    pub fn sub(&mut self, sub: SubDiagnostic) {
        Arc::make_mut(&mut self.inner).subs.push(sub);
    }

    /// Return a `std::fmt::Display` implementation that renders this
    /// diagnostic into a human readable format.
    ///
    /// Note that this `Display` impl includes a trailing line terminator, so
    /// callers should prefer using this with `write!` instead of `writeln!`.
    pub fn display<'a>(
        &'a self,
        resolver: &'a dyn FileResolver,
        config: &'a DisplayDiagnosticConfig,
    ) -> DisplayDiagnostic<'a> {
        DisplayDiagnostic::new(resolver, config, self)
    }

    /// Returns the identifier for this diagnostic.
    pub fn id(&self) -> DiagnosticId {
        self.inner.id
    }

    /// Returns the primary message for this diagnostic.
    ///
    /// A diagnostic always has a message, but it may be empty.
    pub fn primary_message(&self) -> &str {
        self.inner.message.as_str()
    }

    /// Introspects this diagnostic and returns what kind of "primary" message
    /// it contains for concise formatting.
    ///
    /// When we concisely format diagnostics, we likely want to not only
    /// include the primary diagnostic message but also the message attached
    /// to the primary annotation. In particular, the primary annotation often
    /// contains *essential* information or context for understanding the
    /// diagnostic.
    ///
    /// The type returned implements the `std::fmt::Display` trait. In most
    /// cases, just converting it to a string (or printing it) will do what
    /// you want.
    pub fn concise_message(&self) -> ConciseMessage<'_> {
        // A custom concise message, if one was set, takes precedence over the
        // derived main-message + primary-annotation combination.
        if let Some(custom_message) = &self.inner.custom_concise_message {
            return ConciseMessage::Custom(custom_message.as_str());
        }
        let main = self.inner.message.as_str();
        let annotation = self
            .primary_annotation()
            .and_then(|ann| ann.get_message())
            .unwrap_or_default();
        if annotation.is_empty() {
            ConciseMessage::MainDiagnostic(main)
        } else {
            ConciseMessage::Both { main, annotation }
        }
    }

    /// Set a custom message for the concise formatting of this diagnostic.
    ///
    /// This overrides the default behavior of generating a concise message
    /// from the main diagnostic message and the primary annotation.
    pub fn set_concise_message(&mut self, message: impl IntoDiagnosticMessage) {
        Arc::make_mut(&mut self.inner).custom_concise_message =
            Some(message.into_diagnostic_message());
    }

    /// Returns the severity of this diagnostic.
    ///
    /// Note that this may be different than the severity of sub-diagnostics.
    pub fn severity(&self) -> Severity {
        self.inner.severity
    }

    /// Returns a shared borrow of the "primary" annotation of this diagnostic
    /// if one exists.
    ///
    /// When there are multiple primary annotations, then the first one that
    /// was added to this diagnostic is returned.
    pub fn primary_annotation(&self) -> Option<&Annotation> {
        self.inner.annotations.iter().find(|ann| ann.is_primary)
    }

    /// Returns a mutable borrow of the "primary" annotation of this diagnostic
    /// if one exists.
    ///
    /// When there are multiple primary annotations, then the first one that
    /// was added to this diagnostic is returned.
    pub fn primary_annotation_mut(&mut self) -> Option<&mut Annotation> {
        Arc::make_mut(&mut self.inner)
            .annotations
            .iter_mut()
            .find(|ann| ann.is_primary)
    }

    /// Returns a mutable borrow of all annotations of this diagnostic.
    pub fn annotations_mut(&mut self) -> impl Iterator<Item = &mut Annotation> {
        Arc::make_mut(&mut self.inner).annotations.iter_mut()
    }

    /// Returns the "primary" span of this diagnostic if one exists.
    ///
    /// When there are multiple primary spans, then the first one that was
    /// added to this diagnostic is returned.
    pub fn primary_span(&self) -> Option<Span> {
        self.primary_annotation().map(|ann| ann.span.clone())
    }

    /// Returns a reference to the primary span of this diagnostic.
    pub fn primary_span_ref(&self) -> Option<&Span> {
        self.primary_annotation().map(|ann| &ann.span)
    }

    /// Returns the tags from the primary annotation of this diagnostic if it exists.
    pub fn primary_tags(&self) -> Option<&[DiagnosticTag]> {
        self.primary_annotation().map(|ann| ann.tags.as_slice())
    }

    /// Returns the "primary" span of this diagnostic, panicking if it does not exist.
    ///
    /// This should typically only be used when working with diagnostics in Ruff, where diagnostics
    /// are currently required to have a primary span.
    ///
    /// See [`Diagnostic::primary_span`] for more details.
    pub fn expect_primary_span(&self) -> Span {
        self.primary_span().expect("Expected a primary span")
    }

    /// Returns a key that can be used to sort two diagnostics into the canonical order
    /// in which they should appear when rendered.
    pub fn rendering_sort_key<'a>(&'a self, db: &'a dyn Db) -> impl Ord + 'a {
        RenderingSortKey {
            db,
            diagnostic: self,
        }
    }

    /// Returns all annotations, skipping the first primary annotation.
    pub fn secondary_annotations(&self) -> impl Iterator<Item = &Annotation> {
        // Only the *first* primary annotation is skipped; any subsequent
        // primary annotations are yielded along with the secondary ones.
        let mut seen_primary = false;
        self.inner.annotations.iter().filter(move |ann| {
            if seen_primary {
                true
            } else if ann.is_primary {
                seen_primary = true;
                false
            } else {
                true
            }
        })
    }

    /// Returns the sub-diagnostics attached to this diagnostic, in the order
    /// they were added.
    pub fn sub_diagnostics(&self) -> &[SubDiagnostic] {
        &self.inner.subs
    }

    /// Returns a mutable borrow of the sub-diagnostics of this diagnostic.
    pub fn sub_diagnostics_mut(&mut self) -> impl Iterator<Item = &mut SubDiagnostic> {
        Arc::make_mut(&mut self.inner).subs.iter_mut()
    }

    /// Returns the fix for this diagnostic if it exists.
    pub fn fix(&self) -> Option<&Fix> {
        self.inner.fix.as_ref()
    }

    /// Returns a mutable borrow of this diagnostic's fix, if one is set.
    /// Test-only helper.
    #[cfg(test)]
    pub(crate) fn fix_mut(&mut self) -> Option<&mut Fix> {
        Arc::make_mut(&mut self.inner).fix.as_mut()
    }

    /// Set the fix for this diagnostic.
    pub fn set_fix(&mut self, fix: Fix) {
        debug_assert!(
            self.primary_span().is_some(),
            "Expected a source file for a diagnostic with a fix"
        );
        Arc::make_mut(&mut self.inner).fix = Some(fix);
    }

    /// If `fix` is `Some`, set the fix for this diagnostic.
    pub fn set_optional_fix(&mut self, fix: Option<Fix>) {
        if let Some(fix) = fix {
            self.set_fix(fix);
        }
    }

    /// Remove the fix for this diagnostic.
    pub fn remove_fix(&mut self) {
        Arc::make_mut(&mut self.inner).fix = None;
    }

    /// Returns `true` if the diagnostic contains a [`Fix`].
    pub fn fixable(&self) -> bool {
        self.fix().is_some()
    }

    /// Returns `true` if the diagnostic is [`fixable`](Diagnostic::fixable) and applies at the
    /// configured applicability level.
    pub fn has_applicable_fix(&self, config: &DisplayDiagnosticConfig) -> bool {
        self.fix()
            .is_some_and(|fix| fix.applies(config.fix_applicability))
    }

    /// Returns the documentation URL for this diagnostic, if one was set.
    pub fn documentation_url(&self) -> Option<&str> {
        self.inner.documentation_url.as_deref()
    }

    /// Set (or clear, by passing `None`) the documentation URL for this diagnostic.
    pub fn set_documentation_url(&mut self, url: Option<String>) {
        Arc::make_mut(&mut self.inner).documentation_url = url;
    }

    /// Returns the offset of the parent statement for this diagnostic if it exists.
    ///
    /// This is primarily used for checking noqa/secondary code suppressions.
    pub fn parent(&self) -> Option<TextSize> {
        self.inner.parent
    }

    /// Set the offset of the diagnostic's parent statement.
    pub fn set_parent(&mut self, parent: TextSize) {
        Arc::make_mut(&mut self.inner).parent = Some(parent);
    }

    /// Returns the remapped offset for a suppression comment if it exists.
    ///
    /// Like [`Diagnostic::parent`], this is used for noqa code suppression comments in Ruff.
    pub fn noqa_offset(&self) -> Option<TextSize> {
        self.inner.noqa_offset
    }

    /// Set the remapped offset for a suppression comment.
    pub fn set_noqa_offset(&mut self, noqa_offset: TextSize) {
        Arc::make_mut(&mut self.inner).noqa_offset = Some(noqa_offset);
    }

    /// Returns the secondary code for the diagnostic if it exists.
    ///
    /// The "primary" code for the diagnostic is its lint name. Diagnostics in ty don't have
    /// secondary codes (yet), but in Ruff the noqa code is used.
    pub fn secondary_code(&self) -> Option<&SecondaryCode> {
        self.inner.secondary_code.as_ref()
    }

    /// Returns the secondary code for the diagnostic if it exists, or the lint name otherwise.
    ///
    /// This is a common pattern for Ruff diagnostics, which want to use the noqa code in general,
    /// but fall back on the `invalid-syntax` identifier for syntax errors, which don't have
    /// secondary codes.
    pub fn secondary_code_or_id(&self) -> &str {
        self.secondary_code()
            .map_or_else(|| self.inner.id.as_str(), SecondaryCode::as_str)
    }

    /// Set the secondary code for this diagnostic.
    pub fn set_secondary_code(&mut self, code: SecondaryCode) {
        Arc::make_mut(&mut self.inner).secondary_code = Some(code);
    }

    /// Returns the name used to represent the diagnostic.
    pub fn name(&self) -> &'static str {
        self.id().as_str()
    }

    /// Returns `true` if `self` is a syntax error message.
    pub fn is_invalid_syntax(&self) -> bool {
        self.id().is_invalid_syntax()
    }

    /// Returns the message of the first sub-diagnostic with a `Help` severity.
    ///
    /// Note that this is used as the fix title/suggestion for some of Ruff's output formats, but in
    /// general this is not the guaranteed meaning of such a message.
    pub fn first_help_text(&self) -> Option<&str> {
        self.sub_diagnostics()
            .iter()
            .find(|sub| matches!(sub.inner.severity, SubDiagnosticSeverity::Help))
            .map(|sub| sub.inner.message.as_str())
    }

    /// Returns the filename for the message.
    ///
    /// Panics if the diagnostic has no primary span, or if its file is not a `SourceFile`.
    pub fn expect_ruff_filename(&self) -> String {
        self.expect_primary_span()
            .expect_ruff_file()
            .name()
            .to_string()
    }

    /// Computes the start source location for the message.
    ///
    /// Returns None if the diagnostic has no primary span, if its file is not a `SourceFile`,
    /// or if the span has no range.
    pub fn ruff_start_location(&self) -> Option<LineColumn> {
        Some(
            self.ruff_source_file()?
                .to_source_code()
                .line_column(self.range()?.start()),
        )
    }

    /// Computes the end source location for the message.
    ///
    /// Returns None if the diagnostic has no primary span, if its file is not a `SourceFile`,
    /// or if the span has no range.
    pub fn ruff_end_location(&self) -> Option<LineColumn> {
        Some(
            self.ruff_source_file()?
                .to_source_code()
                .line_column(self.range()?.end()),
        )
    }

    /// Returns the [`SourceFile`] which the message belongs to.
    pub fn ruff_source_file(&self) -> Option<&SourceFile> {
        self.primary_span_ref()?.as_ruff_file()
    }

    /// Returns the [`SourceFile`] which the message belongs to.
    ///
    /// Panics if the diagnostic has no primary span, or if its file is not a `SourceFile`.
    pub fn expect_ruff_source_file(&self) -> &SourceFile {
        self.ruff_source_file()
            .expect("Expected a ruff source file")
    }

    /// Returns the [`TextRange`] for the diagnostic.
    pub fn range(&self) -> Option<TextRange> {
        self.primary_span()?.range()
    }

    /// Returns the ordering of diagnostics based on the start of their ranges, if they have any.
    ///
    /// Panics if either diagnostic has no primary span, or if its file is not a `SourceFile`.
    pub fn ruff_start_ordering(&self, other: &Self) -> std::cmp::Ordering {
        // Lexicographic tuple comparison: fatal-ness first, then source file,
        // then the (optional) start offset of the primary range.
        let a = (
            self.severity().is_fatal(),
            self.expect_ruff_source_file(),
            self.range().map(|r| r.start()),
        );
        let b = (
            other.severity().is_fatal(),
            other.expect_ruff_source_file(),
            other.range().map(|r| r.start()),
        );
        a.cmp(&b)
    }

    /// Add an offset for aligning the header sigil with the line number separators in a diff.
    pub fn set_header_offset(&mut self, offset: usize) {
        Arc::make_mut(&mut self.inner).header_offset = offset;
    }
}
/// The data shared by all handles to a single [`Diagnostic`].
///
/// Kept behind an `Arc` in `Diagnostic`; mutators go through `Arc::make_mut`,
/// which clones on write when the value is shared.
#[derive(Debug, Clone, Eq, PartialEq, Hash, get_size2::GetSize)]
struct DiagnosticInner {
    id: DiagnosticId,
    /// Optional URL pointing at documentation for this diagnostic.
    documentation_url: Option<String>,
    severity: Severity,
    /// The primary, user-facing message.
    message: DiagnosticMessage,
    /// When set, overrides the message derived by `concise_message`.
    custom_concise_message: Option<DiagnosticMessage>,
    annotations: Vec<Annotation>,
    /// Sub-diagnostics, rendered after the parent in insertion order.
    subs: Vec<SubDiagnostic>,
    /// An optional fix for the problem this diagnostic describes.
    fix: Option<Fix>,
    /// Offset of the parent statement, used for suppression checks.
    parent: Option<TextSize>,
    /// Remapped offset for a `noqa` suppression comment.
    noqa_offset: Option<TextSize>,
    /// Secondary code (e.g. Ruff's noqa code), if any.
    secondary_code: Option<SecondaryCode>,
    /// Offset for aligning the header sigil with diff line-number separators.
    header_offset: usize,
}
/// A sort key pairing a diagnostic with the database needed to resolve its
/// file path. See [`Diagnostic::rendering_sort_key`].
struct RenderingSortKey<'a> {
    db: &'a dyn Db,
    diagnostic: &'a Diagnostic,
}
impl Ord for RenderingSortKey<'_> {
    // We sort diagnostics in a way that keeps them in source order
    // and grouped by file. After that, we fall back to severity
    // (with fatal messages sorting before info messages) and then
    // finally the diagnostic ID.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        // Establish the positional part of the key first: file path, then the
        // start offset of the primary range (when both sides have one).
        let mut ordering = std::cmp::Ordering::Equal;
        if let (Some(span1), Some(span2)) = (
            self.diagnostic.primary_span(),
            other.diagnostic.primary_span(),
        ) {
            ordering = span1.file().path(&self.db).cmp(span2.file().path(&self.db));
            if ordering.is_eq() {
                if let (Some(range1), Some(range2)) = (span1.range(), span2.range()) {
                    ordering = range1.start().cmp(&range2.start());
                }
            }
        }
        // Tie-break by severity (reversed so that, e.g., Fatal sorts before
        // Info) and finally by diagnostic ID. `then_with` only evaluates the
        // closures when the preceding comparison was `Equal`.
        ordering
            .then_with(|| {
                self.diagnostic
                    .severity()
                    .cmp(&other.diagnostic.severity())
                    .reverse()
            })
            .then_with(|| self.diagnostic.id().cmp(&other.diagnostic.id()))
    }
}
impl PartialOrd for RenderingSortKey<'_> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        // Delegate to the total order defined by `Ord`, as required for
        // consistency between the two traits.
        Some(self.cmp(other))
    }
}
// Equality is defined in terms of the ordering so that it can never disagree
// with `Ord` (a requirement of the `Eq`/`Ord` contract).
impl PartialEq for RenderingSortKey<'_> {
    fn eq(&self, other: &Self) -> bool {
        self.cmp(other).is_eq()
    }
}
impl Eq for RenderingSortKey<'_> {}
/// A collection of information subservient to a diagnostic.
///
/// A sub-diagnostic is always rendered after the parent diagnostic it is
/// attached to. A parent diagnostic may have many sub-diagnostics, and it is
/// guaranteed that they will not interleave with one another in rendering.
///
/// Currently, the order in which sub-diagnostics are rendered relative to one
/// another (for a single parent diagnostic) is the order in which they were
/// attached to the diagnostic.
#[derive(Debug, Clone, Eq, PartialEq, Hash, get_size2::GetSize)]
pub struct SubDiagnostic {
    /// Like with `Diagnostic`, we box the `SubDiagnostic` to make it
    /// pointer-sized.
    ///
    /// Note: unlike `Diagnostic`'s `Arc`, mutation goes straight through the
    /// `Box` (no copy-on-write) — see `SubDiagnostic::annotate`.
    inner: Box<SubDiagnosticInner>,
}
impl SubDiagnostic {
    /// Create a new sub-diagnostic with the given severity and message.
    ///
    /// The severity should describe the assumed level of importance to an end
    /// user.
    ///
    /// The message is meant to be read by end users. The primary message
    /// is meant to be a single terse description (usually a short phrase)
    /// describing the group of related characteristics that the sub-diagnostic
    /// describes. Stated differently, if only one thing from a diagnostic can
    /// be shown to an end user in a particular context, it is the primary
    /// message.
    ///
    /// # Types implementing `IntoDiagnosticMessage`
    ///
    /// Callers can pass anything that implements `std::fmt::Display`
    /// directly. If callers want or need to avoid cloning the diagnostic
    /// message, then they can also pass a `DiagnosticMessage` directly.
    pub fn new<'a>(
        severity: SubDiagnosticSeverity,
        message: impl IntoDiagnosticMessage + 'a,
    ) -> SubDiagnostic {
        let inner = Box::new(SubDiagnosticInner {
            severity,
            message: message.into_diagnostic_message(),
            annotations: vec![],
        });
        SubDiagnostic { inner }
    }

    /// Add an annotation to this sub-diagnostic.
    ///
    /// Annotations for a sub-diagnostic, like for a diagnostic, are optional.
    /// If any are added, callers should strive to make at least one of them
    /// primary. That is, it should be constructed via [`Annotation::primary`].
    /// A diagnostic with no primary annotations is allowed, but its rendering
    /// may be sub-optimal.
    ///
    /// Note that it is expected to be somewhat more common for sub-diagnostics
    /// to have no annotations (e.g., a simple note) than for a diagnostic to
    /// have no annotations.
    pub fn annotate(&mut self, ann: Annotation) {
        self.inner.annotations.push(ann);
    }

    /// Returns the annotations attached to this sub-diagnostic, in the order
    /// they were added.
    pub fn annotations(&self) -> &[Annotation] {
        &self.inner.annotations
    }

    /// Returns a mutable borrow of the annotations of this sub-diagnostic.
    pub fn annotations_mut(&mut self) -> impl Iterator<Item = &mut Annotation> {
        self.inner.annotations.iter_mut()
    }

    /// Returns a shared borrow of the "primary" annotation of this diagnostic
    /// if one exists.
    ///
    /// When there are multiple primary annotations, then the first one that
    /// was added to this diagnostic is returned.
    pub fn primary_annotation(&self) -> Option<&Annotation> {
        self.inner.annotations.iter().find(|ann| ann.is_primary)
    }

    /// Introspects this diagnostic and returns what kind of "primary" message
    /// it contains for concise formatting.
    ///
    /// When we concisely format diagnostics, we likely want to not only
    /// include the primary diagnostic message but also the message attached
    /// to the primary annotation. In particular, the primary annotation often
    /// contains *essential* information or context for understanding the
    /// diagnostic.
    ///
    /// The type returned implements the `std::fmt::Display` trait. In most
    /// cases, just converting it to a string (or printing it) will do what
    /// you want.
    pub fn concise_message(&self) -> ConciseMessage<'_> {
        let main = self.inner.message.as_str();
        let annotation = self
            .primary_annotation()
            .and_then(|ann| ann.get_message())
            .unwrap_or_default();
        if annotation.is_empty() {
            ConciseMessage::MainDiagnostic(main)
        } else {
            ConciseMessage::Both { main, annotation }
        }
    }
}
/// The data backing a [`SubDiagnostic`]: a severity, a message, and any
/// annotations attached to it.
#[derive(Debug, Clone, Eq, PartialEq, Hash, get_size2::GetSize)]
struct SubDiagnosticInner {
    severity: SubDiagnosticSeverity,
    message: DiagnosticMessage,
    annotations: Vec<Annotation>,
}
/// A pointer to a subsequence in the end user's input.
///
/// Also known as an annotation, the pointer can optionally contain a short
/// message, typically describing in general terms what is being pointed to.
///
/// An annotation is either primary or secondary, depending on whether it was
/// constructed via [`Annotation::primary`] or [`Annotation::secondary`].
/// Semantically, a primary annotation is meant to point to the "locus" of a
/// diagnostic. Visually, the difference between a primary and a secondary
/// annotation is usually just a different form of highlighting on the
/// corresponding span.
///
/// # Advice
///
/// The span on an annotation should be as _specific_ as possible. For example,
/// if there is a problem with a function call because one of its arguments has
/// an invalid type, then the span should point to the specific argument and
/// not to the entire function call.
///
/// Messages attached to annotations should also be as brief and specific as
/// possible. Long messages could negatively impact the quality of rendering.
#[derive(Debug, Clone, Eq, PartialEq, Hash, get_size2::GetSize)]
pub struct Annotation {
    /// The span of this annotation, corresponding to some subsequence of the
    /// user's input that we want to highlight.
    span: Span,
    /// An optional message associated with this annotation's span.
    ///
    /// When present, rendering will include this message in the output and
    /// draw a line between the highlighted span and the message.
    message: Option<DiagnosticMessage>,
    /// Whether this annotation is "primary" or not. When it isn't primary, an
    /// annotation is said to be "secondary."
    is_primary: bool,
    /// The diagnostic tags associated with this annotation.
    tags: Vec<DiagnosticTag>,
    /// Whether the snippet for this annotation should be hidden.
    ///
    /// When set, rendering will only include the file's name and (optional) range. Everything else
    /// is omitted, including any file snippet or message.
    hide_snippet: bool,
}
impl Annotation {
    /// Create a "primary" annotation.
    ///
    /// A primary annotation is meant to highlight the "locus" of a diagnostic.
    /// That is, it should point to something in the end user's input that is
    /// the subject or "point" of a diagnostic.
    ///
    /// A diagnostic may have many primary annotations. A diagnostic may not
    /// have any annotations, but if it does, at least one _ought_ to be
    /// primary.
    pub fn primary(span: Span) -> Annotation {
        Annotation {
            span,
            message: None,
            is_primary: true,
            tags: Vec::new(),
            hide_snippet: false,
        }
    }

    /// Create a "secondary" annotation.
    ///
    /// A secondary annotation is meant to highlight relevant context for a
    /// diagnostic, but not to point to the "locus" of the diagnostic.
    ///
    /// A diagnostic with only secondary annotations is usually not sensible,
    /// but it is allowed and will produce a reasonable rendering.
    pub fn secondary(span: Span) -> Annotation {
        Annotation {
            span,
            message: None,
            is_primary: false,
            tags: Vec::new(),
            hide_snippet: false,
        }
    }

    /// Attach a message to this annotation.
    ///
    /// An annotation without a message will still have a presence in
    /// rendering. In particular, it will highlight the span associated with
    /// this annotation in some way.
    ///
    /// When a message is attached to an annotation, then it will be associated
    /// with the highlighted span in some way during rendering.
    ///
    /// # Types implementing `IntoDiagnosticMessage`
    ///
    /// Callers can pass anything that implements `std::fmt::Display`
    /// directly. If callers want or need to avoid cloning the diagnostic
    /// message, then they can also pass a `DiagnosticMessage` directly.
    pub fn message<'a>(self, message: impl IntoDiagnosticMessage + 'a) -> Annotation {
        let message = Some(message.into_diagnostic_message());
        Annotation { message, ..self }
    }

    /// Sets the message on this annotation.
    ///
    /// If one was already set, then this overwrites it.
    ///
    /// This is useful if one needs to set the message on an annotation,
    /// and all one has is a `&mut Annotation`. For example, via
    /// `Diagnostic::primary_annotation_mut`.
    pub fn set_message<'a>(&mut self, message: impl IntoDiagnosticMessage + 'a) {
        self.message = Some(message.into_diagnostic_message());
    }

    /// Returns the message attached to this annotation, if one exists.
    pub fn get_message(&self) -> Option<&str> {
        self.message.as_ref().map(|m| m.as_str())
    }

    /// Returns the `Span` associated with this annotation.
    pub fn get_span(&self) -> &Span {
        &self.span
    }

    /// Sets the span on this annotation.
    pub fn set_span(&mut self, span: Span) {
        self.span = span;
    }

    /// Returns the tags associated with this annotation.
    pub fn get_tags(&self) -> &[DiagnosticTag] {
        &self.tags
    }

    /// Attaches this tag to this annotation.
    ///
    /// It will not replace any existing tags.
    pub fn tag(mut self, tag: DiagnosticTag) -> Annotation {
        self.tags.push(tag);
        self
    }

    /// Attaches an additional tag to this annotation.
    pub fn push_tag(&mut self, tag: DiagnosticTag) {
        self.tags.push(tag);
    }

    /// Set whether or not the snippet on this annotation should be suppressed when rendering.
    ///
    /// Such annotations are only rendered with their file name and range, if available. This is
    /// intended for backwards compatibility with Ruff diagnostics, which historically used
    /// `TextRange::default` to indicate a file-level diagnostic. In the new diagnostic model, a
    /// [`Span`] with a range of `None` should be used instead, as mentioned in the `Span`
    /// documentation.
    ///
    /// TODO(brent) update this usage in Ruff and remove `is_file_level` entirely. See
    /// <https://github.com/astral-sh/ruff/issues/19688>, especially my first comment, for more
    /// details. As of 2025-09-26 we also use this to suppress snippet rendering for formatter
    /// diagnostics, which also need to have a range, so we probably can't eliminate this entirely.
    pub fn hide_snippet(&mut self, yes: bool) {
        self.hide_snippet = yes;
    }

    /// Returns whether this annotation was created via [`Annotation::primary`]
    /// (as opposed to [`Annotation::secondary`]).
    pub fn is_primary(&self) -> bool {
        self.is_primary
    }
}
/// Tags that can be associated with an annotation.
///
/// These tags are used to provide additional information about the annotation
/// and are passed through to the language server protocol.
#[derive(Debug, Clone, Eq, PartialEq, Hash, get_size2::GetSize)]
pub enum DiagnosticTag {
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/diagnostic/render/junit.rs | crates/ruff_db/src/diagnostic/render/junit.rs | use std::{collections::BTreeMap, ops::Deref, path::Path};
use quick_junit::{NonSuccessKind, Report, TestCase, TestCaseStatus, TestSuite, XmlString};
use ruff_source_file::LineColumn;
use crate::diagnostic::{Diagnostic, SecondaryCode, render::FileResolver};
/// A renderer for diagnostics in the [JUnit] format.
///
/// See [`junit.xsd`] for the specification in the JUnit repository and an annotated [version]
/// linked from the [`quick_junit`] docs.
///
/// [JUnit]: https://junit.org/
/// [`junit.xsd`]: https://github.com/junit-team/junit-framework/blob/2870b7d8fd5bf7c1efe489d3991d3ed3900e82bb/platform-tests/src/test/resources/jenkins-junit.xsd
/// [version]: https://llg.cubic.org/docs/junit/
/// [`quick_junit`]: https://docs.rs/quick-junit/latest/quick_junit/
pub struct JunitRenderer<'a> {
    /// Used to resolve file paths and source text for diagnostics.
    resolver: &'a dyn FileResolver,
}
impl<'a> JunitRenderer<'a> {
    /// Creates a renderer that resolves paths and sources through `resolver`.
    pub fn new(resolver: &'a dyn FileResolver) -> Self {
        Self { resolver }
    }

    /// Renders `diagnostics` to `f` as a JUnit XML report.
    ///
    /// Diagnostics are grouped into one `<testsuite>` per file name. When
    /// there are no diagnostics, a single synthetic "No errors found" success
    /// case is emitted so the report always contains at least one test case.
    pub(super) fn render(
        &self,
        f: &mut std::fmt::Formatter,
        diagnostics: &[Diagnostic],
    ) -> std::fmt::Result {
        let mut report = Report::new("ruff");
        if diagnostics.is_empty() {
            let mut test_suite = TestSuite::new("ruff");
            test_suite
                .extra
                .insert(XmlString::new("package"), XmlString::new("org.ruff"));
            let mut case = TestCase::new("No errors found", TestCaseStatus::success());
            case.set_classname("ruff");
            test_suite.add_test_case(case);
            report.add_test_suite(test_suite);
        } else {
            for (filename, diagnostics) in group_diagnostics_by_filename(diagnostics, self.resolver)
            {
                let mut test_suite = TestSuite::new(filename);
                test_suite
                    .extra
                    .insert(XmlString::new("package"), XmlString::new("org.ruff"));
                // The classname is the file path with its extension stripped.
                let classname = Path::new(filename).with_extension("");
                for diagnostic in diagnostics {
                    let DiagnosticWithLocation {
                        diagnostic,
                        start_location: location,
                    } = diagnostic;
                    let mut status = TestCaseStatus::non_success(NonSuccessKind::Failure);
                    status.set_message(diagnostic.concise_message().to_str());
                    if let Some(location) = location {
                        status.set_description(format!(
                            "line {row}, col {col}, {body}",
                            row = location.line,
                            col = location.column,
                            body = diagnostic.concise_message()
                        ));
                    } else {
                        // No location (no primary span/range, or a notebook);
                        // fall back to the message alone.
                        status.set_description(diagnostic.concise_message().to_str());
                    }
                    let code = diagnostic
                        .secondary_code()
                        .map_or_else(|| diagnostic.name(), SecondaryCode::as_str);
                    let mut case = TestCase::new(format!("org.ruff.{code}"), status);
                    // `classname` was built from a `&str`, so it is valid
                    // UTF-8 and `to_str` cannot fail here.
                    case.set_classname(classname.to_str().unwrap());
                    if let Some(location) = location {
                        case.extra.insert(
                            XmlString::new("line"),
                            XmlString::new(location.line.to_string()),
                        );
                        case.extra.insert(
                            XmlString::new("column"),
                            XmlString::new(location.column.to_string()),
                        );
                    }
                    test_suite.add_test_case(case);
                }
                report.add_test_suite(test_suite);
            }
        }
        // `quick_junit` serializes to an `io::Write`, so adapt the formatter.
        let adapter = FmtAdapter { fmt: f };
        report.serialize(adapter).map_err(|_| std::fmt::Error)
    }
}
// TODO(brent) this and `group_diagnostics_by_filename` are also used by the `grouped` output
// format. I think they'd make more sense in that file, but I started here first. I'll move them to
// that module when adding the `grouped` output format.
/// A diagnostic paired with the precomputed start location of its primary span.
struct DiagnosticWithLocation<'a> {
    diagnostic: &'a Diagnostic,
    /// `None` when the diagnostic has no primary span/range, or when the file
    /// is a notebook (see `group_diagnostics_by_filename`).
    start_location: Option<LineColumn>,
}
// Deref to the diagnostic so the wrapper can be used wherever a
// `&Diagnostic` method is needed.
impl Deref for DiagnosticWithLocation<'_> {
    type Target = Diagnostic;
    fn deref(&self) -> &Self::Target {
        self.diagnostic
    }
}
/// Groups `diagnostics` by the file name of their primary span.
///
/// Diagnostics without a primary span are grouped under the empty file name
/// (`<&str>::default()`). The start location is computed from the primary
/// range, except for notebooks, where it is omitted because positions refer
/// to the concatenated notebook source rather than a cell.
fn group_diagnostics_by_filename<'a>(
    diagnostics: &'a [Diagnostic],
    resolver: &'a dyn FileResolver,
) -> BTreeMap<&'a str, Vec<DiagnosticWithLocation<'a>>> {
    let mut grouped_diagnostics = BTreeMap::default();
    for diagnostic in diagnostics {
        let (filename, start_location) = diagnostic
            .primary_span_ref()
            .map(|span| {
                let file = span.file();
                let start_location =
                    span.range()
                        .filter(|_| !resolver.is_notebook(file))
                        .map(|range| {
                            file.diagnostic_source(resolver)
                                .as_source_code()
                                .line_column(range.start())
                        });
                // Reuse the bound `file` instead of re-fetching it from the span.
                (file.path(resolver), start_location)
            })
            .unwrap_or_default();
        grouped_diagnostics
            .entry(filename)
            // `or_default` replaces the former `or_insert_with(Vec::new)`.
            .or_default()
            .push(DiagnosticWithLocation {
                diagnostic,
                start_location,
            });
    }
    grouped_diagnostics
}
/// Adapts a `std::fmt::Write` sink so it can be used where a
/// `std::io::Write` is required (e.g. by `quick_junit`'s serializer).
struct FmtAdapter<'a> {
    fmt: &'a mut dyn std::fmt::Write,
}

impl std::io::Write for FmtAdapter<'_> {
    /// Writes `buf` to the underlying formatter.
    ///
    /// Fails with `InvalidData` when `buf` is not valid UTF-8, since a
    /// `fmt::Write` sink can only accept strings.
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let Ok(text) = std::str::from_utf8(buf) else {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                "Invalid UTF-8 in JUnit report",
            ));
        };
        match self.fmt.write_str(text) {
            // Everything handed to the formatter was written.
            Ok(()) => Ok(buf.len()),
            Err(err) => Err(std::io::Error::other(err)),
        }
    }

    fn flush(&mut self) -> std::io::Result<()> {
        // A `fmt::Write` sink has no buffer of its own to flush.
        Ok(())
    }

    /// Forwards formatted writes directly, avoiding an intermediate `String`.
    fn write_fmt(&mut self, args: std::fmt::Arguments<'_>) -> std::io::Result<()> {
        match self.fmt.write_fmt(args) {
            Ok(()) => Ok(()),
            Err(err) => Err(std::io::Error::other(err)),
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::diagnostic::{
        DiagnosticFormat,
        render::tests::{create_diagnostics, create_syntax_error_diagnostics},
    };
    // Snapshot test: regular lint diagnostics rendered as JUnit XML.
    #[test]
    fn output() {
        let (env, diagnostics) = create_diagnostics(DiagnosticFormat::Junit);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
    // Snapshot test: syntax-error diagnostics rendered as JUnit XML.
    #[test]
    fn syntax_errors() {
        let (env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::Junit);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/diagnostic/render/azure.rs | crates/ruff_db/src/diagnostic/render/azure.rs | use ruff_source_file::LineColumn;
use crate::diagnostic::{Diagnostic, Severity};
use super::FileResolver;
/// Renders diagnostics as Azure Pipelines `##vso[task.logissue ...]`
/// logging commands.
pub(super) struct AzureRenderer<'a> {
    resolver: &'a dyn FileResolver,
}
impl<'a> AzureRenderer<'a> {
    /// Create a renderer that resolves file paths and sources via `resolver`.
    pub(super) fn new(resolver: &'a dyn FileResolver) -> Self {
        Self { resolver }
    }
}
impl AzureRenderer<'_> {
    /// Write one `##vso[task.logissue ...]` command per diagnostic.
    ///
    /// `Info`/`Warning` map to Azure's `warning` type; `Error`/`Fatal` to
    /// `error`. Source path and line/column are emitted only when the
    /// diagnostic has a primary span (and a range, for the location).
    pub(super) fn render(
        &self,
        f: &mut std::fmt::Formatter,
        diagnostics: &[Diagnostic],
    ) -> std::fmt::Result {
        for diag in diagnostics {
            let severity = match diag.severity() {
                Severity::Info | Severity::Warning => "warning",
                Severity::Error | Severity::Fatal => "error",
            };
            write!(f, "##vso[task.logissue type={severity};")?;
            if let Some(span) = diag.primary_span() {
                let filename = span.file().path(self.resolver);
                // Fix: the path was hard-coded as `(unknown)`, leaving the
                // `filename` binding unused. Emit the resolved path instead.
                write!(f, "sourcepath={filename};")?;
                if let Some(range) = span.range() {
                    let location = if self.resolver.notebook_index(span.file()).is_some() {
                        // We can't give a reasonable location for the structured formats,
                        // so we show one that's clearly a fallback
                        LineColumn::default()
                    } else {
                        span.file()
                            .diagnostic_source(self.resolver)
                            .as_source_code()
                            .line_column(range.start())
                    };
                    write!(
                        f,
                        "linenumber={line};columnnumber={col};",
                        line = location.line,
                        col = location.column,
                    )?;
                }
            }
            writeln!(
                f,
                "code={code};]{body}",
                code = diag.secondary_code_or_id(),
                body = diag.concise_message(),
            )?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use crate::diagnostic::{
        DiagnosticFormat,
        render::tests::{create_diagnostics, create_syntax_error_diagnostics},
    };
    // Snapshot test: regular lint diagnostics in Azure logging-command format.
    #[test]
    fn output() {
        let (env, diagnostics) = create_diagnostics(DiagnosticFormat::Azure);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
    // Snapshot test: syntax-error diagnostics in Azure logging-command format.
    #[test]
    fn syntax_errors() {
        let (env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::Azure);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/diagnostic/render/gitlab.rs | crates/ruff_db/src/diagnostic/render/gitlab.rs | use std::{
collections::HashSet,
hash::{DefaultHasher, Hash, Hasher},
path::Path,
};
use ruff_source_file::LineColumn;
use serde::{Serialize, Serializer, ser::SerializeSeq};
use crate::diagnostic::{Diagnostic, Severity};
use super::FileResolver;
/// Renders diagnostics as a GitLab Code Quality (CodeClimate-style) JSON
/// report.
pub(super) struct GitlabRenderer<'a> {
    resolver: &'a dyn FileResolver,
}
impl<'a> GitlabRenderer<'a> {
    /// Create a renderer that resolves file paths and sources via `resolver`.
    pub(super) fn new(resolver: &'a dyn FileResolver) -> Self {
        Self { resolver }
    }
}
impl GitlabRenderer<'_> {
    /// Serialize all diagnostics into a single pretty-printed JSON array.
    ///
    /// Paths are relativized against `CI_PROJECT_DIR` when that environment
    /// variable is set (the GitLab CI checkout root).
    pub(super) fn render(
        &self,
        f: &mut std::fmt::Formatter,
        diagnostics: &[Diagnostic],
    ) -> std::fmt::Result {
        write!(
            f,
            "{}",
            serde_json::to_string_pretty(&SerializedMessages {
                diagnostics,
                resolver: self.resolver,
                #[expect(
                    clippy::disallowed_methods,
                    reason = "We don't have access to a `System` here, \
                              and this is only intended for use by GitLab CI, \
                              which runs on a real `System`."
                )]
                project_dir: std::env::var("CI_PROJECT_DIR").ok().as_deref(),
            })
            // Serialization of these plain data types is infallible in practice.
            .unwrap()
        )
    }
}
/// Serialization wrapper that turns a slice of diagnostics into the GitLab
/// Code Quality JSON array.
struct SerializedMessages<'a> {
    diagnostics: &'a [Diagnostic],
    resolver: &'a dyn FileResolver,
    // Root to relativize paths against (from `CI_PROJECT_DIR`), if set.
    project_dir: Option<&'a str>,
}
impl Serialize for SerializedMessages<'_> {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut s = serializer.serialize_seq(Some(self.diagnostics.len()))?;
        // GitLab requires fingerprints to be unique within a report; track
        // the ones already emitted so collisions can be re-salted below.
        let mut fingerprints = HashSet::<u64>::with_capacity(self.diagnostics.len());
        for diagnostic in self.diagnostics {
            let location = diagnostic
                .primary_span()
                .map(|span| {
                    let file = span.file();
                    let positions = if self.resolver.is_notebook(file) {
                        // We can't give a reasonable location for the structured formats,
                        // so we show one that's clearly a fallback
                        Default::default()
                    } else {
                        let diagnostic_source = file.diagnostic_source(self.resolver);
                        let source_code = diagnostic_source.as_source_code();
                        span.range()
                            .map(|range| Positions {
                                begin: source_code.line_column(range.start()),
                                end: source_code.line_column(range.end()),
                            })
                            .unwrap_or_default()
                    };
                    let path = self.project_dir.as_ref().map_or_else(
                        || file.relative_path(self.resolver).display().to_string(),
                        |project_dir| relativize_path_to(file.path(self.resolver), project_dir),
                    );
                    Location { path, positions }
                })
                .unwrap_or_default();
            let mut message_fingerprint = fingerprint(diagnostic, &location.path, 0);
            // Make sure that we do not get a fingerprint that is already in use
            // by adding in the previously generated one.
            while fingerprints.contains(&message_fingerprint) {
                message_fingerprint = fingerprint(diagnostic, &location.path, message_fingerprint);
            }
            fingerprints.insert(message_fingerprint);
            let description = diagnostic.concise_message();
            let check_name = diagnostic.secondary_code_or_id();
            // Map our severities onto the CodeClimate severity scale.
            let severity = match diagnostic.severity() {
                Severity::Info => "info",
                Severity::Warning => "minor",
                Severity::Error => "major",
                // Another option here is `blocker`
                Severity::Fatal => "critical",
            };
            let value = Message {
                check_name,
                // GitLab doesn't display the separate `check_name` field in a Code Quality report,
                // so prepend it to the description too.
                description: format!("{check_name}: {description}"),
                severity,
                fingerprint: format!("{:x}", message_fingerprint),
                location,
            };
            s.serialize_element(&value)?;
        }
        s.end()
    }
}
/// A single issue in the GitLab Code Quality report.
#[derive(Serialize)]
struct Message<'a> {
    check_name: &'a str,
    description: String,
    severity: &'static str,
    // Hex-encoded hash uniquely identifying this violation in the report.
    fingerprint: String,
    location: Location,
}
/// The place in the source code where the issue was discovered.
///
/// According to the CodeClimate report format [specification] linked from the GitLab [docs], this
/// field is required, so we fall back on a default `path` and position if the diagnostic doesn't
/// have a primary span.
///
/// [specification]: https://github.com/codeclimate/platform/blob/master/spec/analyzers/SPEC.md#data-types
/// [docs]: https://docs.gitlab.com/ci/testing/code_quality/#code-quality-report-format
#[derive(Default, Serialize)]
struct Location {
    path: String,
    positions: Positions,
}
/// Begin/end line-column pair for a [`Location`].
#[derive(Default, Serialize)]
struct Positions {
    begin: LineColumn,
    end: LineColumn,
}
/// Generate a unique fingerprint to identify a violation.
///
/// The hash covers the rule name and the report-relative path; `salt` lets
/// the caller re-derive a fresh value on collision.
fn fingerprint(diagnostic: &Diagnostic, project_path: &str, salt: u64) -> u64 {
    let mut hasher = DefaultHasher::new();
    // Hashing the tuple feeds the same fields in the same order as hashing
    // each one individually: salt, then rule name, then path.
    (salt, diagnostic.name(), project_path).hash(&mut hasher);
    hasher.finish()
}
/// Convert an absolute path to be relative to the specified project root.
///
/// # Panics
///
/// Panics if `pathdiff` cannot compute a relative path between the two
/// inputs.
fn relativize_path_to<P: AsRef<Path>, R: AsRef<Path>>(path: P, project_root: R) -> String {
    // Idiom: `Display::to_string` instead of `format!("{}", ...)`.
    pathdiff::diff_paths(&path, project_root)
        .expect("Could not diff paths")
        .display()
        .to_string()
}
#[cfg(test)]
mod tests {
    use crate::diagnostic::{
        DiagnosticFormat,
        render::tests::{create_diagnostics, create_syntax_error_diagnostics},
    };
    // Fingerprints are hashes and not stable across platforms/runs of the
    // test fixtures, so redact them before snapshotting.
    const FINGERPRINT_FILTERS: [(&str, &str); 1] = [(
        r#""fingerprint": "[a-z0-9]+","#,
        r#""fingerprint": "<redacted>","#,
    )];
    // Snapshot test: regular lint diagnostics in GitLab Code Quality format.
    #[test]
    fn output() {
        let (env, diagnostics) = create_diagnostics(DiagnosticFormat::Gitlab);
        insta::with_settings!({filters => FINGERPRINT_FILTERS}, {
            insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
        });
    }
    // Snapshot test: syntax-error diagnostics in GitLab Code Quality format.
    #[test]
    fn syntax_errors() {
        let (env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::Gitlab);
        insta::with_settings!({filters => FINGERPRINT_FILTERS}, {
            insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
        });
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/diagnostic/render/json.rs | crates/ruff_db/src/diagnostic/render/json.rs | use serde::{Serialize, Serializer, ser::SerializeSeq};
use serde_json::{Value, json};
use ruff_diagnostics::{Applicability, Edit};
use ruff_notebook::NotebookIndex;
use ruff_source_file::{LineColumn, OneIndexed};
use ruff_text_size::Ranged;
use crate::diagnostic::{ConciseMessage, Diagnostic, DiagnosticSource, DisplayDiagnosticConfig};
use super::FileResolver;
/// Renders diagnostics as a single pretty-printed JSON array.
pub(super) struct JsonRenderer<'a> {
    resolver: &'a dyn FileResolver,
    config: &'a DisplayDiagnosticConfig,
}
impl<'a> JsonRenderer<'a> {
    /// Create a renderer using `resolver` for paths/sources and `config` for
    /// output options (e.g. preview mode).
    pub(super) fn new(resolver: &'a dyn FileResolver, config: &'a DisplayDiagnosticConfig) -> Self {
        Self { resolver, config }
    }
}
impl JsonRenderer<'_> {
    /// Write all diagnostics as one pretty-printed (`{:#}`) JSON array.
    pub(super) fn render(
        &self,
        f: &mut std::fmt::Formatter,
        diagnostics: &[Diagnostic],
    ) -> std::fmt::Result {
        write!(
            f,
            "{:#}",
            diagnostics_to_json_value(diagnostics, self.resolver, self.config)
        )
    }
}
/// Convert each diagnostic via [`diagnostic_to_json`] and wrap the results
/// in a single `serde_json::Value` array.
fn diagnostics_to_json_value<'a>(
    diagnostics: impl IntoIterator<Item = &'a Diagnostic>,
    resolver: &dyn FileResolver,
    config: &DisplayDiagnosticConfig,
) -> Value {
    let mut values = Vec::new();
    for diagnostic in diagnostics {
        values.push(diagnostic_to_json(diagnostic, resolver, config));
    }
    json!(values)
}
/// Build the serializable [`JsonDiagnostic`] for one diagnostic.
///
/// Locations are computed from the primary span (when it has a range) and
/// translated into notebook cell coordinates when the file is a notebook.
/// In preview mode missing locations/filename serialize as `null`; in stable
/// mode they fall back to defaults for backwards compatibility.
pub(super) fn diagnostic_to_json<'a>(
    diagnostic: &'a Diagnostic,
    resolver: &'a dyn FileResolver,
    config: &'a DisplayDiagnosticConfig,
) -> JsonDiagnostic<'a> {
    let span = diagnostic.primary_span_ref();
    let filename = span.map(|span| span.file().path(resolver));
    let range = span.and_then(|span| span.range());
    let diagnostic_source = span.map(|span| span.file().diagnostic_source(resolver));
    let source_code = diagnostic_source
        .as_ref()
        .map(|diagnostic_source| diagnostic_source.as_source_code());
    let notebook_index = span.and_then(|span| resolver.notebook_index(span.file()));
    let mut start_location = None;
    let mut end_location = None;
    let mut noqa_location = None;
    let mut notebook_cell_index = None;
    if let Some(source_code) = source_code {
        noqa_location = diagnostic
            .noqa_offset()
            .map(|offset| source_code.line_column(offset));
        if let Some(range) = range {
            let mut start = source_code.line_column(range.start());
            let mut end = source_code.line_column(range.end());
            // For notebooks, report cell-relative positions plus the cell index.
            if let Some(notebook_index) = &notebook_index {
                notebook_cell_index =
                    Some(notebook_index.cell(start.line).unwrap_or(OneIndexed::MIN));
                start = notebook_index.translate_line_column(&start);
                end = notebook_index.translate_line_column(&end);
                noqa_location =
                    noqa_location.map(|location| notebook_index.translate_line_column(&location));
            }
            start_location = Some(start);
            end_location = Some(end);
        }
    }
    let fix = diagnostic.fix().map(|fix| JsonFix {
        applicability: fix.applicability(),
        message: diagnostic.first_help_text(),
        edits: ExpandedEdits {
            edits: fix.edits(),
            notebook_index,
            config,
            diagnostic_source,
        },
    });
    // In preview, the locations and filename can be optional.
    if config.preview {
        JsonDiagnostic {
            code: diagnostic.secondary_code_or_id(),
            url: diagnostic.documentation_url(),
            message: diagnostic.concise_message(),
            fix,
            cell: notebook_cell_index,
            location: start_location.map(JsonLocation::from),
            end_location: end_location.map(JsonLocation::from),
            filename,
            noqa_row: noqa_location.map(|location| location.line),
        }
    } else {
        JsonDiagnostic {
            code: diagnostic.secondary_code_or_id(),
            url: diagnostic.documentation_url(),
            message: diagnostic.concise_message(),
            fix,
            cell: notebook_cell_index,
            location: Some(start_location.unwrap_or_default().into()),
            end_location: Some(end_location.unwrap_or_default().into()),
            filename: Some(filename.unwrap_or_default()),
            noqa_row: noqa_location.map(|location| location.line),
        }
    }
}
/// Serialization wrapper for a fix's edits, carrying the context needed to
/// compute (and, for notebooks, translate) edit locations.
struct ExpandedEdits<'a> {
    edits: &'a [Edit],
    notebook_index: Option<NotebookIndex>,
    config: &'a DisplayDiagnosticConfig,
    diagnostic_source: Option<DiagnosticSource>,
}
impl Serialize for ExpandedEdits<'_> {
    /// Serialize each edit with its start/end locations.
    ///
    /// Locations are only available when we have the diagnostic's source;
    /// notebook locations are translated to cell-relative coordinates. In
    /// preview mode missing locations serialize as `null`; in stable mode
    /// they fall back to defaults.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut s = serializer.serialize_seq(Some(self.edits.len()))?;
        for edit in self.edits {
            let (location, end_location) = if let Some(diagnostic_source) = &self.diagnostic_source
            {
                let source_code = diagnostic_source.as_source_code();
                let mut location = source_code.line_column(edit.start());
                let mut end_location = source_code.line_column(edit.end());
                if let Some(notebook_index) = &self.notebook_index {
                    // There exists a newline between each cell's source code in the
                    // concatenated source code in Ruff. This newline doesn't actually
                    // exists in the JSON source field.
                    //
                    // Now, certain edits may try to remove this newline, which means
                    // the edit will spill over to the first character of the next cell.
                    // If it does, we need to translate the end location to the last
                    // character of the previous cell.
                    match (
                        notebook_index.cell(location.line),
                        notebook_index.cell(end_location.line),
                    ) {
                        // The edit spills into a different cell (or past the last
                        // cell): clamp the end to the last character of the previous
                        // row. This single guarded arm replaces two arms —
                        // `(Some(start), Some(end)) if start != end` and
                        // `(Some(_), None)` — that had identical, duplicated bodies.
                        (Some(start_cell), end_cell) if end_cell != Some(start_cell) => {
                            debug_assert_eq!(end_location.column.get(), 1);
                            let prev_row = end_location.line.saturating_sub(1);
                            end_location = LineColumn {
                                line: notebook_index.cell_row(prev_row).unwrap_or(OneIndexed::MIN),
                                column: source_code
                                    .line_column(source_code.line_end_exclusive(prev_row))
                                    .column,
                            };
                        }
                        _ => {
                            end_location = notebook_index.translate_line_column(&end_location);
                        }
                    }
                    location = notebook_index.translate_line_column(&location);
                }
                (Some(location), Some(end_location))
            } else {
                (None, None)
            };
            // In preview, the locations can be optional.
            let value = if self.config.preview {
                JsonEdit {
                    content: edit.content().unwrap_or_default(),
                    location: location.map(JsonLocation::from),
                    end_location: end_location.map(JsonLocation::from),
                }
            } else {
                JsonEdit {
                    content: edit.content().unwrap_or_default(),
                    location: Some(location.unwrap_or_default().into()),
                    end_location: Some(end_location.unwrap_or_default().into()),
                }
            };
            s.serialize_element(&value)?;
        }
        s.end()
    }
}
/// A serializable version of `Diagnostic`.
///
/// The `Old` variant only exists to preserve backwards compatibility. Both this and `JsonEdit`
/// should become structs with the `New` definitions in a future Ruff release.
#[derive(Serialize)]
pub(crate) struct JsonDiagnostic<'a> {
    // Notebook cell index (1-based), if the diagnostic is in a notebook.
    cell: Option<OneIndexed>,
    code: &'a str,
    end_location: Option<JsonLocation>,
    filename: Option<&'a str>,
    fix: Option<JsonFix<'a>>,
    location: Option<JsonLocation>,
    message: ConciseMessage<'a>,
    // Line where a `noqa` suppression comment would apply.
    noqa_row: Option<OneIndexed>,
    url: Option<&'a str>,
}
/// A serializable fix: its applicability, edits, and optional help message.
#[derive(Serialize)]
struct JsonFix<'a> {
    applicability: Applicability,
    edits: ExpandedEdits<'a>,
    message: Option<&'a str>,
}
/// One-indexed row/column pair as emitted in the JSON output.
#[derive(Serialize)]
struct JsonLocation {
    column: OneIndexed,
    row: OneIndexed,
}
impl From<LineColumn> for JsonLocation {
fn from(location: LineColumn) -> Self {
JsonLocation {
row: location.line,
column: location.column,
}
}
}
/// A serializable edit: replacement text plus its start/end locations.
#[derive(Serialize)]
struct JsonEdit<'a> {
    content: &'a str,
    end_location: Option<JsonLocation>,
    location: Option<JsonLocation>,
}
#[cfg(test)]
mod tests {
    use crate::diagnostic::{
        DiagnosticFormat,
        render::tests::{
            TestEnvironment, create_diagnostics, create_notebook_diagnostics,
            create_syntax_error_diagnostics,
        },
    };
    // Snapshot test: regular lint diagnostics as JSON.
    #[test]
    fn output() {
        let (env, diagnostics) = create_diagnostics(DiagnosticFormat::Json);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
    // Snapshot test: syntax-error diagnostics as JSON.
    #[test]
    fn syntax_errors() {
        let (env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::Json);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
    // Snapshot test: notebook diagnostics get cell-relative locations.
    #[test]
    fn notebook_output() {
        let (env, diagnostics) = create_notebook_diagnostics(DiagnosticFormat::Json);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
    // Stable mode: a diagnostic without a file still emits default (non-null)
    // location and filename fields for backwards compatibility.
    #[test]
    fn missing_file_stable() {
        let mut env = TestEnvironment::new();
        env.format(DiagnosticFormat::Json);
        env.preview(false);
        let diag = env
            .err()
            .documentation_url("https://docs.astral.sh/ruff/rules/test-diagnostic")
            .build();
        insta::assert_snapshot!(
            env.render(&diag),
            @r#"
        [
          {
            "cell": null,
            "code": "test-diagnostic",
            "end_location": {
              "column": 1,
              "row": 1
            },
            "filename": "",
            "fix": null,
            "location": {
              "column": 1,
              "row": 1
            },
            "message": "main diagnostic message",
            "noqa_row": null,
            "url": "https://docs.astral.sh/ruff/rules/test-diagnostic"
          }
        ]
        "#,
        );
    }
    // Preview mode: missing locations and filename serialize as `null`.
    #[test]
    fn missing_file_preview() {
        let mut env = TestEnvironment::new();
        env.format(DiagnosticFormat::Json);
        env.preview(true);
        let diag = env
            .err()
            .documentation_url("https://docs.astral.sh/ruff/rules/test-diagnostic")
            .build();
        insta::assert_snapshot!(
            env.render(&diag),
            @r#"
        [
          {
            "cell": null,
            "code": "test-diagnostic",
            "end_location": null,
            "filename": null,
            "fix": null,
            "location": null,
            "message": "main diagnostic message",
            "noqa_row": null,
            "url": "https://docs.astral.sh/ruff/rules/test-diagnostic"
          }
        ]
        "#,
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/diagnostic/render/pylint.rs | crates/ruff_db/src/diagnostic/render/pylint.rs | use crate::diagnostic::{Diagnostic, SecondaryCode, render::FileResolver};
/// Generate violations in Pylint format.
///
/// The format is given by this string:
///
/// ```python
/// "%(path)s:%(row)d: [%(code)s] %(text)s"
/// ```
///
/// See: [Flake8 documentation](https://flake8.pycqa.org/en/latest/internal/formatters.html#pylint-formatter)
pub(super) struct PylintRenderer<'a> {
    resolver: &'a dyn FileResolver,
}
impl<'a> PylintRenderer<'a> {
    /// Create a renderer that resolves file paths and sources via `resolver`.
    pub(super) fn new(resolver: &'a dyn FileResolver) -> Self {
        Self { resolver }
    }
}
impl PylintRenderer<'_> {
    /// Write one `path:row: [code] message` line per diagnostic.
    ///
    /// Diagnostics without a primary span fall back to an empty path, and
    /// a missing row falls back to its default; notebook files never get a
    /// row because a line in the concatenated source would be misleading.
    pub(super) fn render(
        &self,
        f: &mut std::fmt::Formatter,
        diagnostics: &[Diagnostic],
    ) -> std::fmt::Result {
        for diagnostic in diagnostics {
            let (filename, row) = diagnostic
                .primary_span_ref()
                .map(|span| {
                    let file = span.file();
                    let row = span
                        .range()
                        .filter(|_| !self.resolver.is_notebook(file))
                        .map(|range| {
                            file.diagnostic_source(self.resolver)
                                .as_source_code()
                                .line_column(range.start())
                                .line
                        });
                    (file.relative_path(self.resolver).to_string_lossy(), row)
                })
                .unwrap_or_default();
            // Fall back to the rule name when there's no secondary code.
            let code = diagnostic
                .secondary_code()
                .map_or_else(|| diagnostic.name(), SecondaryCode::as_str);
            let row = row.unwrap_or_default();
            writeln!(
                f,
                "{path}:{row}: [{code}] {body}",
                path = filename,
                body = diagnostic.concise_message()
            )?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use crate::diagnostic::{
        DiagnosticFormat,
        render::tests::{TestEnvironment, create_diagnostics, create_syntax_error_diagnostics},
    };
    // Snapshot test: regular lint diagnostics in Pylint format.
    #[test]
    fn output() {
        let (env, diagnostics) = create_diagnostics(DiagnosticFormat::Pylint);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
    // Snapshot test: syntax-error diagnostics in Pylint format.
    #[test]
    fn syntax_errors() {
        let (env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::Pylint);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
    // A diagnostic without a file still renders, with an empty path and the
    // default row.
    #[test]
    fn missing_file() {
        let mut env = TestEnvironment::new();
        env.format(DiagnosticFormat::Pylint);
        let diag = env.err().build();
        insta::assert_snapshot!(
            env.render(&diag),
            @":1: [test-diagnostic] main diagnostic message",
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/diagnostic/render/github.rs | crates/ruff_db/src/diagnostic/render/github.rs | use crate::diagnostic::{Diagnostic, FileResolver, Severity};
/// Renders diagnostics as GitHub Actions workflow commands
/// (`::error ...::`/`::warning ...::`/`::notice ...::` annotations).
pub struct GithubRenderer<'a> {
    resolver: &'a dyn FileResolver,
    // Tool name shown in the annotation title, e.g. "Ruff" or "ty".
    program: &'a str,
}
impl<'a> GithubRenderer<'a> {
    /// Create a renderer for `program`, resolving paths via `resolver`.
    pub fn new(resolver: &'a dyn FileResolver, program: &'a str) -> Self {
        Self { resolver, program }
    }
    /// Write one workflow-command annotation per diagnostic.
    pub(super) fn render(
        &self,
        f: &mut std::fmt::Formatter,
        diagnostics: &[Diagnostic],
    ) -> std::fmt::Result {
        for diagnostic in diagnostics {
            // Map our severities onto GitHub's three annotation levels.
            let severity = match diagnostic.severity() {
                Severity::Info => "notice",
                Severity::Warning => "warning",
                Severity::Error | Severity::Fatal => "error",
            };
            write!(
                f,
                "::{severity} title={program} ({code})",
                program = self.program,
                code = diagnostic.secondary_code_or_id()
            )?;
            if let Some(span) = diagnostic.primary_span() {
                let file = span.file();
                write!(f, ",file={file}", file = file.path(self.resolver))?;
                let (start_location, end_location) = if self.resolver.is_notebook(file) {
                    // We can't give a reasonable location for the structured formats,
                    // so we show one that's clearly a fallback
                    None
                } else {
                    let diagnostic_source = file.diagnostic_source(self.resolver);
                    let source_code = diagnostic_source.as_source_code();
                    span.range().map(|range| {
                        (
                            source_code.line_column(range.start()),
                            source_code.line_column(range.end()),
                        )
                    })
                }
                .unwrap_or_default();
                // GitHub Actions workflow commands have constraints on error annotations:
                // - `col` and `endColumn` cannot be set if `line` and `endLine` are different
                // See: https://github.com/astral-sh/ruff/issues/22074
                if start_location.line == end_location.line {
                    write!(
                        f,
                        ",line={row},col={column},endLine={end_row},endColumn={end_column}::",
                        row = start_location.line,
                        column = start_location.column,
                        end_row = end_location.line,
                        end_column = end_location.column,
                    )?;
                } else {
                    write!(
                        f,
                        ",line={row},endLine={end_row}::",
                        row = start_location.line,
                        end_row = end_location.line,
                    )?;
                }
                // Repeat the location in the message body so it's readable in
                // the plain log output as well.
                write!(
                    f,
                    "{path}:{row}:{column}: ",
                    path = file.relative_path(self.resolver).display(),
                    row = start_location.line,
                    column = start_location.column,
                )?;
            } else {
                write!(f, "::")?;
            }
            if let Some(code) = diagnostic.secondary_code() {
                write!(f, "{code}")?;
            } else {
                write!(f, "{id}:", id = diagnostic.id())?;
            }
            writeln!(f, " {}", diagnostic.concise_message())?;
        }
        Ok(())
    }
}
/// A `Display` adapter pairing a [`GithubRenderer`] with the diagnostics to
/// render, so callers can use it with `write!`/`to_string`.
pub struct DisplayGithubDiagnostics<'a> {
    renderer: &'a GithubRenderer<'a>,
    diagnostics: &'a [Diagnostic],
}
impl<'a> DisplayGithubDiagnostics<'a> {
    /// Bundle `renderer` and `diagnostics` for deferred rendering.
    pub fn new(renderer: &'a GithubRenderer<'a>, diagnostics: &'a [Diagnostic]) -> Self {
        Self {
            renderer,
            diagnostics,
        }
    }
}
impl std::fmt::Display for DisplayGithubDiagnostics<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.renderer.render(f, self.diagnostics)
    }
}
#[cfg(test)]
mod tests {
    use crate::diagnostic::{
        DiagnosticFormat,
        render::tests::{TestEnvironment, create_diagnostics, create_syntax_error_diagnostics},
    };
    // Snapshot test: regular lint diagnostics as GitHub annotations.
    #[test]
    fn output() {
        let (env, diagnostics) = create_diagnostics(DiagnosticFormat::Github);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
    // Snapshot test: syntax-error diagnostics as GitHub annotations.
    #[test]
    fn syntax_errors() {
        let (env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::Github);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
    // A diagnostic without a file still renders, omitting the file/location
    // properties entirely.
    #[test]
    fn missing_file() {
        let mut env = TestEnvironment::new();
        env.format(DiagnosticFormat::Github);
        let diag = env.err().build();
        insta::assert_snapshot!(
            env.render(&diag),
            @"::error title=ty (test-diagnostic)::test-diagnostic: main diagnostic message",
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/diagnostic/render/rdjson.rs | crates/ruff_db/src/diagnostic/render/rdjson.rs | use serde::ser::SerializeSeq;
use serde::{Serialize, Serializer};
use ruff_diagnostics::{Edit, Fix};
use ruff_source_file::{LineColumn, SourceCode};
use ruff_text_size::Ranged;
use crate::diagnostic::{ConciseMessage, Diagnostic};
use super::FileResolver;
/// Renders diagnostics in the Reviewdog Diagnostic JSON (rdjson) format.
pub struct RdjsonRenderer<'a> {
    resolver: &'a dyn FileResolver,
}
impl<'a> RdjsonRenderer<'a> {
    /// Create a renderer that resolves file paths and sources via `resolver`.
    pub(super) fn new(resolver: &'a dyn FileResolver) -> Self {
        Self { resolver }
    }
    /// Write all diagnostics as one pretty-printed rdjson document.
    pub(super) fn render(
        &self,
        f: &mut std::fmt::Formatter,
        diagnostics: &[Diagnostic],
    ) -> std::fmt::Result {
        write!(
            f,
            "{:#}",
            serde_json::json!(RdjsonDiagnostics::new(diagnostics, self.resolver))
        )
    }
}
/// Serialization wrapper converting each diagnostic to its rdjson form on
/// the fly.
struct ExpandedDiagnostics<'a> {
    resolver: &'a dyn FileResolver,
    diagnostics: &'a [Diagnostic],
}
impl Serialize for ExpandedDiagnostics<'_> {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut s = serializer.serialize_seq(Some(self.diagnostics.len()))?;
        for diagnostic in self.diagnostics {
            let value = diagnostic_to_rdjson(diagnostic, self.resolver);
            s.serialize_element(&value)?;
        }
        s.end()
    }
}
/// Build the serializable [`RdjsonDiagnostic`] for one diagnostic.
///
/// The location (and the line/column range within it) is only emitted when
/// the diagnostic has a primary span; fix edits become rdjson suggestions.
fn diagnostic_to_rdjson<'a>(
    diagnostic: &'a Diagnostic,
    resolver: &'a dyn FileResolver,
) -> RdjsonDiagnostic<'a> {
    let span = diagnostic.primary_span_ref();
    let source_file = span.map(|span| {
        let file = span.file();
        (file.path(resolver), file.diagnostic_source(resolver))
    });
    let location = source_file.as_ref().map(|(path, source)| {
        let range = diagnostic.range().map(|range| {
            let source_code = source.as_source_code();
            let start = source_code.line_column(range.start());
            let end = source_code.line_column(range.end());
            RdjsonRange::new(start, end)
        });
        RdjsonLocation { path, range }
    });
    let edits = diagnostic.fix().map(Fix::edits).unwrap_or_default();
    RdjsonDiagnostic {
        message: diagnostic.concise_message(),
        location,
        code: RdjsonCode {
            // Fall back to the rule name when there's no secondary code.
            value: diagnostic
                .secondary_code()
                .map_or_else(|| diagnostic.name(), |code| code.as_str()),
            url: diagnostic.documentation_url(),
        },
        suggestions: rdjson_suggestions(
            edits,
            source_file
                .as_ref()
                .map(|(_, source)| source.as_source_code()),
        ),
    }
}
/// Convert fix edits into rdjson suggestions with line/column ranges.
///
/// Returns an empty list when there are no edits, or (with a debug
/// assertion) when no source is available to locate them in.
fn rdjson_suggestions<'a>(
    edits: &'a [Edit],
    source_code: Option<SourceCode>,
) -> Vec<RdjsonSuggestion<'a>> {
    if edits.is_empty() {
        return Vec::new();
    }
    let Some(source_code) = source_code else {
        // An edit without a source file would be a bug upstream; degrade
        // gracefully in release builds.
        debug_assert!(false, "Expected a source file for a diagnostic with a fix");
        return Vec::new();
    };
    edits
        .iter()
        .map(|edit| {
            let start = source_code.line_column(edit.start());
            let end = source_code.line_column(edit.end());
            let range = RdjsonRange::new(start, end);
            RdjsonSuggestion {
                range,
                text: edit.content().unwrap_or_default(),
            }
        })
        .collect()
}
/// Top-level rdjson document: the diagnostic list plus tool metadata.
#[derive(Serialize)]
struct RdjsonDiagnostics<'a> {
    diagnostics: ExpandedDiagnostics<'a>,
    severity: &'static str,
    source: RdjsonSource,
}
impl<'a> RdjsonDiagnostics<'a> {
    /// Assemble the document with fixed `ruff` source metadata and a
    /// document-level `WARNING` severity.
    fn new(diagnostics: &'a [Diagnostic], resolver: &'a dyn FileResolver) -> Self {
        Self {
            source: RdjsonSource {
                name: "ruff",
                url: env!("CARGO_PKG_HOMEPAGE"),
            },
            severity: "WARNING",
            diagnostics: ExpandedDiagnostics {
                diagnostics,
                resolver,
            },
        }
    }
}
/// Tool identification embedded in the rdjson document.
#[derive(Serialize)]
struct RdjsonSource {
    name: &'static str,
    url: &'static str,
}
/// A single rdjson diagnostic entry.
#[derive(Serialize)]
struct RdjsonDiagnostic<'a> {
    code: RdjsonCode<'a>,
    #[serde(skip_serializing_if = "Option::is_none")]
    location: Option<RdjsonLocation<'a>>,
    message: ConciseMessage<'a>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    suggestions: Vec<RdjsonSuggestion<'a>>,
}
/// File path plus optional line/column range for a diagnostic.
#[derive(Serialize)]
struct RdjsonLocation<'a> {
    path: &'a str,
    #[serde(skip_serializing_if = "Option::is_none")]
    range: Option<RdjsonRange>,
}
/// Start/end line-column pair for a location or suggestion.
#[derive(Default, Serialize)]
struct RdjsonRange {
    end: LineColumn,
    start: LineColumn,
}
impl RdjsonRange {
    /// Construct a range from its `start` and `end` positions.
    fn new(start: LineColumn, end: LineColumn) -> Self {
        Self { start, end }
    }
}
/// Rule code plus optional documentation URL.
#[derive(Serialize)]
struct RdjsonCode<'a> {
    #[serde(skip_serializing_if = "Option::is_none")]
    url: Option<&'a str>,
    value: &'a str,
}
/// A suggested replacement: the range to replace and the new text.
#[derive(Serialize)]
struct RdjsonSuggestion<'a> {
    range: RdjsonRange,
    text: &'a str,
}
#[cfg(test)]
mod tests {
    use crate::diagnostic::{
        DiagnosticFormat,
        render::tests::{TestEnvironment, create_diagnostics, create_syntax_error_diagnostics},
    };
    // Snapshot test: regular lint diagnostics as rdjson.
    #[test]
    fn output() {
        let (env, diagnostics) = create_diagnostics(DiagnosticFormat::Rdjson);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
    // Snapshot test: syntax-error diagnostics as rdjson.
    #[test]
    fn syntax_errors() {
        let (env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::Rdjson);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
    // A diagnostic without a file in stable mode.
    #[test]
    fn missing_file_stable() {
        let mut env = TestEnvironment::new();
        env.format(DiagnosticFormat::Rdjson);
        env.preview(false);
        let diag = env
            .err()
            .documentation_url("https://docs.astral.sh/ruff/rules/test-diagnostic")
            .build();
        insta::assert_snapshot!(env.render(&diag));
    }
    // A diagnostic without a file in preview mode.
    #[test]
    fn missing_file_preview() {
        let mut env = TestEnvironment::new();
        env.format(DiagnosticFormat::Rdjson);
        env.preview(true);
        let diag = env
            .err()
            .documentation_url("https://docs.astral.sh/ruff/rules/test-diagnostic")
            .build();
        insta::assert_snapshot!(env.render(&diag));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/diagnostic/render/json_lines.rs | crates/ruff_db/src/diagnostic/render/json_lines.rs | use crate::diagnostic::{Diagnostic, DisplayDiagnosticConfig, render::json::diagnostic_to_json};
use super::FileResolver;
/// Renders diagnostics as JSON Lines: one compact JSON object per line.
pub(super) struct JsonLinesRenderer<'a> {
    resolver: &'a dyn FileResolver,
    config: &'a DisplayDiagnosticConfig,
}
impl<'a> JsonLinesRenderer<'a> {
    /// Create a renderer using `resolver` for paths/sources and `config` for
    /// output options (e.g. preview mode).
    pub(super) fn new(resolver: &'a dyn FileResolver, config: &'a DisplayDiagnosticConfig) -> Self {
        Self { resolver, config }
    }
}
impl JsonLinesRenderer<'_> {
    /// Write each diagnostic on its own line, reusing the JSON renderer's
    /// per-diagnostic conversion.
    pub(super) fn render(
        &self,
        f: &mut std::fmt::Formatter,
        diagnostics: &[Diagnostic],
    ) -> std::fmt::Result {
        for diag in diagnostics {
            writeln!(
                f,
                "{}",
                serde_json::json!(diagnostic_to_json(diag, self.resolver, self.config))
            )?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use crate::diagnostic::{
        DiagnosticFormat,
        render::tests::{
            create_diagnostics, create_notebook_diagnostics, create_syntax_error_diagnostics,
        },
    };
    // Snapshot test: regular lint diagnostics as JSON Lines.
    #[test]
    fn output() {
        let (env, diagnostics) = create_diagnostics(DiagnosticFormat::JsonLines);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
    // Snapshot test: syntax-error diagnostics as JSON Lines.
    #[test]
    fn syntax_errors() {
        let (env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::JsonLines);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
    // Snapshot test: notebook diagnostics get cell-relative locations.
    #[test]
    fn notebook_output() {
        let (env, diagnostics) = create_notebook_diagnostics(DiagnosticFormat::JsonLines);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/diagnostic/render/concise.rs | crates/ruff_db/src/diagnostic/render/concise.rs | use crate::diagnostic::{
Diagnostic, DisplayDiagnosticConfig, Severity,
stylesheet::{DiagnosticStylesheet, fmt_styled, fmt_with_hyperlink},
};
use super::FileResolver;
/// Renders diagnostics in the one-line "concise" format, optionally styled
/// with colors and terminal hyperlinks.
pub(super) struct ConciseRenderer<'a> {
    resolver: &'a dyn FileResolver,
    config: &'a DisplayDiagnosticConfig,
}
impl<'a> ConciseRenderer<'a> {
    /// Create a renderer using `resolver` for paths/sources and `config` for
    /// color, severity, and fix-status display options.
    pub(super) fn new(resolver: &'a dyn FileResolver, config: &'a DisplayDiagnosticConfig) -> Self {
        Self { resolver, config }
    }
    /// Write one `path:line:col: <code or severity> message` line per
    /// diagnostic, stopping early if rendering has been canceled.
    pub(super) fn render(
        &self,
        f: &mut std::fmt::Formatter,
        diagnostics: &[Diagnostic],
    ) -> std::fmt::Result {
        // Colors are only applied when the config enables them.
        let stylesheet = if self.config.color {
            DiagnosticStylesheet::styled()
        } else {
            DiagnosticStylesheet::plain()
        };
        let sep = fmt_styled(":", stylesheet.separator);
        for diag in diagnostics {
            if self.config.is_canceled() {
                return Ok(());
            }
            if let Some(span) = diag.primary_span() {
                write!(
                    f,
                    "{path}",
                    path = fmt_styled(
                        span.file().relative_path(self.resolver).to_string_lossy(),
                        stylesheet.emphasis
                    )
                )?;
                if let Some(range) = span.range() {
                    let diagnostic_source = span.file().diagnostic_source(self.resolver);
                    let start = diagnostic_source
                        .as_source_code()
                        .line_column(range.start());
                    // Notebooks report cell-relative positions: `cell N:line:col`.
                    if let Some(notebook_index) = self.resolver.notebook_index(span.file()) {
                        write!(
                            f,
                            "{sep}cell {cell}{sep}{line}{sep}{col}",
                            cell = notebook_index.cell(start.line).unwrap_or_default(),
                            line = notebook_index.cell_row(start.line).unwrap_or_default(),
                            col = start.column,
                        )?;
                    } else {
                        write!(
                            f,
                            "{sep}{line}{sep}{col}",
                            line = start.line,
                            col = start.column,
                        )?;
                    }
                }
                write!(f, "{sep} ")?;
            }
            if self.config.hide_severity {
                // Severity hidden: show only the (hyperlinked) code or rule id.
                if let Some(code) = diag.secondary_code() {
                    write!(
                        f,
                        "{code} ",
                        code = fmt_styled(
                            fmt_with_hyperlink(&code, diag.documentation_url(), &stylesheet),
                            stylesheet.secondary_code
                        )
                    )?;
                } else {
                    write!(
                        f,
                        "{id}: ",
                        id = fmt_styled(
                            fmt_with_hyperlink(
                                &diag.inner.id,
                                diag.documentation_url(),
                                &stylesheet
                            ),
                            stylesheet.secondary_code
                        )
                    )?;
                }
                if self.config.show_fix_status {
                    // Do not display an indicator for inapplicable fixes
                    if diag.has_applicable_fix(self.config) {
                        write!(f, "[{fix}] ", fix = fmt_styled("*", stylesheet.separator))?;
                    }
                }
            } else {
                // Severity shown: `severity[id]` with severity-specific styling.
                let (severity, severity_style) = match diag.severity() {
                    Severity::Info => ("info", stylesheet.info),
                    Severity::Warning => ("warning", stylesheet.warning),
                    Severity::Error => ("error", stylesheet.error),
                    Severity::Fatal => ("fatal", stylesheet.error),
                };
                write!(
                    f,
                    "{severity}[{id}] ",
                    severity = fmt_styled(severity, severity_style),
                    id = fmt_styled(
                        fmt_with_hyperlink(&diag.id(), diag.documentation_url(), &stylesheet),
                        stylesheet.emphasis
                    )
                )?;
            }
            writeln!(f, "{message}", message = diag.concise_message())?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use ruff_diagnostics::Applicability;

    use crate::diagnostic::{
        DiagnosticFormat,
        render::tests::{
            TestEnvironment, create_diagnostics, create_notebook_diagnostics,
            create_syntax_error_diagnostics,
        },
    };

    // Baseline concise output: `path:line:col: severity[id] message`.
    #[test]
    fn output() {
        let (env, diagnostics) = create_diagnostics(DiagnosticFormat::Concise);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
fib.py:1:8: error[unused-import] `os` imported but unused
fib.py:6:5: error[unused-variable] Local variable `x` is assigned to but never used
undef.py:1:4: error[undefined-name] Undefined name `a`
");
    }

    // With severity hidden, the secondary code (e.g. `F401`) is shown and
    // applicable fixes are flagged with `[*]`.
    #[test]
    fn show_fixes() {
        let (mut env, diagnostics) = create_diagnostics(DiagnosticFormat::Concise);
        env.hide_severity(true);
        env.show_fix_status(true);
        env.fix_applicability(Applicability::DisplayOnly);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
fib.py:1:8: F401 [*] `os` imported but unused
fib.py:6:5: F841 [*] Local variable `x` is assigned to but never used
undef.py:1:4: F821 Undefined name `a`
");
    }

    // Preview mode shouldn't change the concise fix-status rendering.
    #[test]
    fn show_fixes_preview() {
        let (mut env, diagnostics) = create_diagnostics(DiagnosticFormat::Concise);
        env.hide_severity(true);
        env.show_fix_status(true);
        env.fix_applicability(Applicability::DisplayOnly);
        env.preview(true);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
fib.py:1:8: F401 [*] `os` imported but unused
fib.py:6:5: F841 [*] Local variable `x` is assigned to but never used
undef.py:1:4: F821 Undefined name `a`
");
    }

    // Syntax errors have no secondary code, so the diagnostic id is used instead.
    #[test]
    fn show_fixes_syntax_errors() {
        let (mut env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::Concise);
        env.hide_severity(true);
        env.show_fix_status(true);
        env.fix_applicability(Applicability::DisplayOnly);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
syntax_errors.py:1:15: invalid-syntax: Expected one or more symbol names after import
syntax_errors.py:3:12: invalid-syntax: Expected ')', found newline
");
    }

    #[test]
    fn syntax_errors() {
        let (env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::Concise);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
syntax_errors.py:1:15: error[invalid-syntax] Expected one or more symbol names after import
syntax_errors.py:3:12: error[invalid-syntax] Expected ')', found newline
");
    }

    // Notebook positions are remapped to `cell N:row:col`.
    #[test]
    fn notebook_output() {
        let (env, diagnostics) = create_notebook_diagnostics(DiagnosticFormat::Concise);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
notebook.ipynb:cell 1:2:8: error[unused-import] `os` imported but unused
notebook.ipynb:cell 2:2:8: error[unused-import] `math` imported but unused
notebook.ipynb:cell 3:4:5: error[unused-variable] Local variable `x` is assigned to but never used
");
    }

    // A diagnostic without a primary span renders the message alone, with no path.
    #[test]
    fn missing_file() {
        let mut env = TestEnvironment::new();
        env.format(DiagnosticFormat::Concise);
        let diag = env.err().build();
        insta::assert_snapshot!(
            env.render(&diag),
            @"error[test-diagnostic] main diagnostic message",
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_db/src/diagnostic/render/full.rs | crates/ruff_db/src/diagnostic/render/full.rs | use std::borrow::Cow;
use std::num::NonZeroUsize;
use similar::{ChangeTag, TextDiff};
use ruff_annotate_snippets::Renderer as AnnotateRenderer;
use ruff_diagnostics::{Applicability, Fix};
use ruff_notebook::NotebookIndex;
use ruff_source_file::OneIndexed;
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::diagnostic::render::{FileResolver, Resolved};
use crate::diagnostic::stylesheet::{DiagnosticStylesheet, fmt_styled};
use crate::diagnostic::{Diagnostic, DiagnosticSource, DisplayDiagnosticConfig};
/// Renders diagnostics in the default "full" format: annotated source snippets
/// (rustc-style), optionally followed by a diff of each diagnostic's fix.
pub(super) struct FullRenderer<'a> {
    // Resolves file paths, sources, and notebook indices for spans.
    resolver: &'a dyn FileResolver,
    // Display options (color, context lines, fix diff/status, cancellation).
    config: &'a DisplayDiagnosticConfig,
}
impl<'a> FullRenderer<'a> {
    /// Creates a renderer that borrows the given file resolver and display configuration.
    pub(super) fn new(resolver: &'a dyn FileResolver, config: &'a DisplayDiagnosticConfig) -> Self {
        Self { resolver, config }
    }

    /// Renders each diagnostic with annotated source snippets, optionally followed
    /// by a diff of its fix, separated from the next diagnostic by a blank line.
    pub(super) fn render(
        &self,
        f: &mut std::fmt::Formatter,
        diagnostics: &[Diagnostic],
    ) -> std::fmt::Result {
        let stylesheet = if self.config.color {
            DiagnosticStylesheet::styled()
        } else {
            DiagnosticStylesheet::plain()
        };

        // Configure the annotate-snippets renderer; `…` marks where long lines are cut.
        let mut renderer = if self.config.color {
            AnnotateRenderer::styled()
        } else {
            AnnotateRenderer::plain()
        }
        .cut_indicator("…");

        // Map our stylesheet onto the renderer's style slots.
        renderer = renderer
            .error(stylesheet.error)
            .warning(stylesheet.warning)
            .info(stylesheet.info)
            .note(stylesheet.note)
            .help(stylesheet.help)
            .line_no(stylesheet.line_no)
            .emphasis(stylesheet.emphasis)
            .none(stylesheet.none)
            .hyperlink(stylesheet.hyperlink);

        for diag in diagnostics {
            // Rendering can be canceled cooperatively (e.g. from an editor); stop early.
            if self.config.is_canceled() {
                return Ok(());
            }

            // Resolve spans against the file system, then convert to renderable snippets
            // with the configured number of context lines.
            let resolved = Resolved::new(self.resolver, diag, self.config);
            let renderable = resolved.to_renderable(self.config.context);
            for diag in renderable.diagnostics.iter() {
                writeln!(f, "{}", renderer.render(diag.to_annotate()))?;
            }

            // Append a fix diff only when requested, when the fix's applicability passes
            // the configured threshold, and when a diff can actually be built.
            if self.config.show_fix_diff
                && diag.has_applicable_fix(self.config)
                && let Some(diff) = Diff::from_diagnostic(diag, &stylesheet, self.resolver)
            {
                write!(f, "{diff}")?;
            }

            // Blank separator line between diagnostics.
            writeln!(f)?;
        }
        Ok(())
    }
}
/// Renders a diff that shows the code fixes.
///
/// The implementation isn't fully fledged out and only used by tests. Before using in production, try
/// * Improve layout
/// * Replace tabs with spaces for a consistent experience across terminals
/// * Replace zero-width whitespaces
/// * Print a simpler diff if only a single line has changed
/// * Compute the diff from the `Edit` because diff calculation is expensive.
struct Diff<'a> {
    // The fix whose edits are diffed against the original source.
    fix: &'a Fix,
    // Source of the file the fix applies to.
    diagnostic_source: DiagnosticSource,
    // Present for notebooks; used to remap rows to per-cell line numbers.
    notebook_index: Option<NotebookIndex>,
    stylesheet: &'a DiagnosticStylesheet,
}
impl<'a> Diff<'a> {
    /// Builds a renderable diff for `diagnostic`'s fix.
    ///
    /// Returns `None` if the diagnostic has no primary span or no attached fix.
    fn from_diagnostic(
        diagnostic: &'a Diagnostic,
        stylesheet: &'a DiagnosticStylesheet,
        resolver: &'a dyn FileResolver,
    ) -> Option<Diff<'a>> {
        let file = &diagnostic.primary_span_ref()?.file;
        let fix = diagnostic.fix()?;
        let diagnostic_source = file.diagnostic_source(resolver);
        let notebook_index = resolver.notebook_index(file);
        Some(Diff {
            fix,
            diagnostic_source,
            notebook_index,
            stylesheet,
        })
    }
}
impl std::fmt::Display for Diff<'_> {
    // Renders a unified diff between the original source and the source with the
    // fix's edits applied, one hunk group per notebook cell (scripts are treated
    // as a single cell), followed by an applicability note for unsafe or
    // display-only fixes.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let source_code = self.diagnostic_source.as_source_code();
        let source_text = source_code.text();

        // Partition the source code into end offsets for each cell. If `self.notebook_index` is
        // `None`, indicating a regular script file, all the lines will be in one "cell" under the
        // `None` key.
        let cells = if let Some(notebook_index) = &self.notebook_index {
            let mut last_cell_index = OneIndexed::MIN;
            let mut cells: Vec<(Option<OneIndexed>, TextSize)> = Vec::new();
            for cell in notebook_index.iter() {
                // A change in cell index marks a new cell; record the end offset of
                // the previous one.
                if cell.cell_index() != last_cell_index {
                    let offset = source_code.line_start(cell.start_row());
                    cells.push((Some(last_cell_index), offset));
                    last_cell_index = cell.cell_index();
                }
            }
            // The final cell extends to the end of the concatenated source.
            cells.push((Some(last_cell_index), source_text.text_len()));
            cells
        } else {
            vec![(None, source_text.text_len())]
        };

        let mut last_end = TextSize::ZERO;
        for (cell, offset) in cells {
            let range = TextRange::new(last_end, offset);
            last_end = offset;
            let input = source_code.slice(range);

            // Apply, in order, every edit that falls entirely within this cell to
            // build the "after" text for the diff.
            let mut output = String::with_capacity(input.len());
            // Deliberately shadows the outer `last_end`: tracks the end of the last
            // applied edit within this cell.
            let mut last_end = range.start();
            let mut applied = 0;
            for edit in self.fix.edits() {
                if range.contains_range(edit.range()) {
                    output.push_str(source_code.slice(TextRange::new(last_end, edit.start())));
                    output.push_str(edit.content().unwrap_or_default());
                    last_end = edit.end();
                    applied += 1;
                }
            }

            // No edits were applied, so there's no need to diff.
            if applied == 0 {
                continue;
            }

            // Copy the unmodified tail of the cell.
            output.push_str(&source_text[usize::from(last_end)..usize::from(range.end())]);

            let diff = TextDiff::from_lines(input, &output);
            let grouped_ops = diff.grouped_ops(3);

            // Find the new line number with the largest number of digits to align all of the line
            // number separators.
            let last_op = grouped_ops.last().and_then(|group| group.last());
            let largest_new = last_op.map(|op| op.new_range().end).unwrap_or_default();
            // NOTE(review): `digit_with` looks like a typo for `digit_width`.
            let digit_with = OneIndexed::new(largest_new).unwrap_or_default().digits();

            if let Some(cell) = cell {
                // Room for 1 digit, 1 space, 1 `|`, and 1 more following space. This centers the
                // three colons on the pipe.
                writeln!(f, "{:>1$} cell {cell}", ":::", digit_with.get() + 3)?;
            }

            for (idx, group) in grouped_ops.iter().enumerate() {
                // Separate non-adjacent hunk groups with a horizontal rule.
                if idx > 0 {
                    writeln!(f, "{:-^1$}", "-", 80)?;
                }
                for op in group {
                    for change in diff.iter_inline_changes(op) {
                        // Deleted lines get no line number (they don't exist in the new
                        // text); inserted and unchanged lines use the new index.
                        let (sign, style, line_no_style, index) = match change.tag() {
                            ChangeTag::Delete => (
                                "-",
                                self.stylesheet.deletion,
                                self.stylesheet.deletion_line_no,
                                None,
                            ),
                            ChangeTag::Insert => (
                                "+",
                                self.stylesheet.insertion,
                                self.stylesheet.insertion_line_no,
                                change.new_index(),
                            ),
                            ChangeTag::Equal => (
                                "|",
                                self.stylesheet.none,
                                self.stylesheet.line_no,
                                change.new_index(),
                            ),
                        };

                        let line = Line {
                            index: index.map(OneIndexed::from_zero_indexed),
                            width: digit_with,
                        };

                        write!(
                            f,
                            "{line} {sign} ",
                            line = fmt_styled(line, self.stylesheet.line_no),
                            sign = fmt_styled(sign, line_no_style),
                        )?;

                        // Emphasize the intra-line segments that actually changed, and
                        // make control characters visible.
                        for (emphasized, value) in change.iter_strings_lossy() {
                            let value = show_nonprinting(&value);
                            let styled = fmt_styled(value, style);
                            if emphasized {
                                write!(f, "{}", fmt_styled(styled, self.stylesheet.emphasis))?;
                            } else {
                                write!(f, "{styled}")?;
                            }
                        }
                        if change.missing_newline() {
                            writeln!(f)?;
                        }
                    }
                }
            }
        }

        // Warn about fixes that aren't safe to apply automatically.
        match self.fix.applicability() {
            Applicability::Safe => {}
            Applicability::Unsafe => {
                writeln!(
                    f,
                    "{note}: {msg}",
                    note = fmt_styled("note", self.stylesheet.warning),
                    msg = fmt_styled(
                        "This is an unsafe fix and may change runtime behavior",
                        self.stylesheet.emphasis
                    )
                )?;
            }
            Applicability::DisplayOnly => {
                // Note that this is still only used in tests. There's no `--display-only-fixes`
                // analog to `--unsafe-fixes` for users to activate this or see the styling.
                writeln!(
                    f,
                    "{note}: {msg}",
                    note = fmt_styled("note", self.stylesheet.error),
                    msg = fmt_styled(
                        "This is a display-only fix and is likely to be incorrect",
                        self.stylesheet.emphasis
                    )
                )?;
            }
        }
        Ok(())
    }
}
/// A gutter line number for one diff line: either a left-aligned index padded
/// to `width`, or `width` spaces of blank padding (for deleted lines, which
/// have no number in the new text).
struct Line {
    index: Option<OneIndexed>,
    width: NonZeroUsize,
}

impl std::fmt::Display for Line {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        if let Some(index) = self.index {
            // Left-align the number within the gutter width.
            write!(f, "{index:<width$}", width = self.width.get())
        } else {
            // Pad an empty string to the gutter width: emits exactly `width` spaces.
            write!(f, "{:width$}", "", width = self.width.get())
        }
    }
}
/// Replaces non-printing control characters (BEL, BS, ESC, DEL) with their
/// visible Unicode "control picture" symbols (␇, ␈, ␛, ␡) so they show up in
/// rendered diffs.
///
/// Returns the input unchanged (borrowed, no allocation) when it contains none
/// of those characters. Previously this built up to four intermediate `String`s
/// via chained `replace` calls; the substitution is now done in a single pass.
fn show_nonprinting(s: &str) -> Cow<'_, str> {
    if s.contains(['\x07', '\x08', '\x1b', '\x7f']) {
        Cow::Owned(
            s.chars()
                .map(|c| match c {
                    '\x07' => '␇', // BEL
                    '\x08' => '␈', // BS
                    '\x1b' => '␛', // ESC
                    '\x7f' => '␡', // DEL
                    c => c,
                })
                .collect(),
        )
    } else {
        Cow::Borrowed(s)
    }
}
#[cfg(test)]
mod tests {
    use ruff_diagnostics::{Applicability, Edit, Fix};
    use ruff_text_size::{TextLen, TextRange, TextSize};

    use crate::diagnostic::{
        Annotation, DiagnosticFormat, Severity,
        render::tests::{
            NOTEBOOK, TestEnvironment, create_diagnostics, create_notebook_diagnostics,
            create_syntax_error_diagnostics,
        },
    };

    // Baseline "full" output: header, `-->` location line, and annotated snippet.
    #[test]
    fn output() {
        let (env, diagnostics) = create_diagnostics(DiagnosticFormat::Full);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r#"
error[unused-import]: `os` imported but unused
--> fib.py:1:8
|
1 | import os
| ^^
|
help: Remove unused import: `os`
error[unused-variable]: Local variable `x` is assigned to but never used
--> fib.py:6:5
|
4 | def fibonacci(n):
5 | """Compute the nth number in the Fibonacci sequence."""
6 | x = 1
| ^
7 | if n == 0:
8 | return 0
|
help: Remove assignment to unused variable `x`
error[undefined-name]: Undefined name `a`
--> undef.py:1:4
|
1 | if a == 1: pass
| ^
|
"#);
    }

    #[test]
    fn syntax_errors() {
        let (env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::Full);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
error[invalid-syntax]: Expected one or more symbol names after import
--> syntax_errors.py:1:15
|
1 | from os import
| ^
2 |
3 | if call(foo
|
error[invalid-syntax]: Expected ')', found newline
--> syntax_errors.py:3:12
|
1 | from os import
2 |
3 | if call(foo
| ^
4 | def bar():
5 | pass
|
");
    }

    // With severity hidden, the header shows the secondary code (e.g. `F401`)
    // plus a `[*]` fix indicator for applicable fixes.
    #[test]
    fn hide_severity_output() {
        let (mut env, diagnostics) = create_diagnostics(DiagnosticFormat::Full);
        env.hide_severity(true);
        env.show_fix_status(true);
        env.fix_applicability(Applicability::DisplayOnly);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r#"
F401 [*] `os` imported but unused
--> fib.py:1:8
|
1 | import os
| ^^
|
help: Remove unused import: `os`
F841 [*] Local variable `x` is assigned to but never used
--> fib.py:6:5
|
4 | def fibonacci(n):
5 | """Compute the nth number in the Fibonacci sequence."""
6 | x = 1
| ^
7 | if n == 0:
8 | return 0
|
help: Remove assignment to unused variable `x`
F821 Undefined name `a`
--> undef.py:1:4
|
1 | if a == 1: pass
| ^
|
"#);
    }

    // Syntax errors have no secondary code, so the id is used in the header.
    #[test]
    fn hide_severity_syntax_errors() {
        let (mut env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::Full);
        env.hide_severity(true);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
invalid-syntax: Expected one or more symbol names after import
--> syntax_errors.py:1:15
|
1 | from os import
| ^
2 |
3 | if call(foo
|
invalid-syntax: Expected ')', found newline
--> syntax_errors.py:3:12
|
1 | from os import
2 |
3 | if call(foo
| ^
4 | def bar():
5 | pass
|
");
    }

    /// Check that the new `full` rendering code in `ruff_db` handles cases fixed by commit c9b99e4.
    ///
    /// For example, without the fix, we get diagnostics like this:
    ///
    /// ```
    /// error[no-indented-block]: Expected an indented block
    /// --> example.py:3:1
    /// |
    /// 2 | if False:
    /// | ^
    /// 3 | print()
    /// |
    /// ```
    ///
    /// where the caret points to the end of the previous line instead of the start of the next.
    #[test]
    fn empty_span_after_line_terminator() {
        let mut env = TestEnvironment::new();
        env.add(
            "example.py",
            r#"
if False:
print()
"#,
        );
        env.format(DiagnosticFormat::Full);
        let diagnostic = env
            .builder(
                "no-indented-block",
                Severity::Error,
                "Expected an indented block",
            )
            .primary("example.py", "3:0", "3:0", "")
            .build();
        insta::assert_snapshot!(env.render(&diagnostic), @r"
error[no-indented-block]: Expected an indented block
--> example.py:3:1
|
2 | if False:
3 | print()
| ^
|
");
    }

    /// Check that the new `full` rendering code in `ruff_db` handles cases fixed by commit 2922490.
    ///
    /// For example, without the fix, we get diagnostics like this:
    ///
    /// ```
    /// error[invalid-character-sub]: Invalid unescaped character SUB, use "\x1a" instead
    /// --> example.py:1:25
    /// |
    /// 1 | nested_fstrings = f'␈{f'{f'␛'}'}'
    /// | ^
    /// |
    /// ```
    ///
    /// where the caret points to the `f` in the f-string instead of the start of the invalid
    /// character (`^Z`).
    #[test]
    fn unprintable_characters() {
        let mut env = TestEnvironment::new();
        env.add("example.py", "nested_fstrings = f'{f'{f''}'}'");
        env.format(DiagnosticFormat::Full);
        let diagnostic = env
            .builder(
                "invalid-character-sub",
                Severity::Error,
                r#"Invalid unescaped character SUB, use "\x1a" instead"#,
            )
            .primary("example.py", "1:24", "1:24", "")
            .build();
        insta::assert_snapshot!(env.render(&diagnostic), @r#"
error[invalid-character-sub]: Invalid unescaped character SUB, use "\x1a" instead
--> example.py:1:25
|
1 | nested_fstrings = f'␈{f'{f'␛'}'}'
| ^
|
"#);
    }

    // Multiple adjacent control characters should each be replaced by their
    // visible symbol without shifting the caret position.
    #[test]
    fn multiple_unprintable_characters() -> std::io::Result<()> {
        let mut env = TestEnvironment::new();
        env.add("example.py", "");
        env.format(DiagnosticFormat::Full);
        let diagnostic = env
            .builder(
                "invalid-character-sub",
                Severity::Error,
                r#"Invalid unescaped character SUB, use "\x1a" instead"#,
            )
            .primary("example.py", "1:1", "1:1", "")
            .build();
        insta::assert_snapshot!(env.render(&diagnostic), @r#"
error[invalid-character-sub]: Invalid unescaped character SUB, use "\x1a" instead
--> example.py:1:2
|
1 | ␈␛
| ^
|
"#);
        Ok(())
    }

    /// Ensure that the header column matches the column in the user's input, even if we've replaced
    /// tabs with spaces for rendering purposes.
    #[test]
    fn tab_replacement() {
        let mut env = TestEnvironment::new();
        env.add("example.py", "def foo():\n\treturn 1");
        env.format(DiagnosticFormat::Full);
        let diagnostic = env.err().primary("example.py", "2:1", "2:9", "").build();
        insta::assert_snapshot!(env.render(&diagnostic), @r"
error[test-diagnostic]: main diagnostic message
--> example.py:2:2
|
1 | def foo():
2 | return 1
| ^^^^^^^^
|
");
    }

    /// For file-level diagnostics, we expect to see the header line with the diagnostic information
    /// and the `-->` line with the file information but no lines of source code.
    #[test]
    fn file_level() {
        let mut env = TestEnvironment::new();
        env.add("example.py", "");
        env.format(DiagnosticFormat::Full);
        let mut diagnostic = env.err().build();
        let span = env.path("example.py").with_range(TextRange::default());
        let mut annotation = Annotation::primary(span);
        annotation.hide_snippet(true);
        diagnostic.annotate(annotation);
        insta::assert_snapshot!(env.render(&diagnostic), @r"
error[test-diagnostic]: main diagnostic message
--> example.py:1:1
");
    }

    /// Check that ranges in notebooks are remapped relative to the cells.
    #[test]
    fn notebook_output() {
        let (mut env, diagnostics) = create_notebook_diagnostics(DiagnosticFormat::Full);
        env.show_fix_status(true);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
error[unused-import][*]: `os` imported but unused
--> notebook.ipynb:cell 1:2:8
|
1 | # cell 1
2 | import os
| ^^
|
help: Remove unused import: `os`
error[unused-import][*]: `math` imported but unused
--> notebook.ipynb:cell 2:2:8
|
1 | # cell 2
2 | import math
| ^^^^
3 |
4 | print('hello world')
|
help: Remove unused import: `math`
error[unused-variable]: Local variable `x` is assigned to but never used
--> notebook.ipynb:cell 3:4:5
|
2 | def foo():
3 | print()
4 | x = 1
| ^
|
help: Remove assignment to unused variable `x`
");
    }

    /// Check notebook handling for multiple annotations in a single diagnostic that span cells.
    #[test]
    fn notebook_output_multiple_annotations() {
        let mut env = TestEnvironment::new();
        env.add("notebook.ipynb", NOTEBOOK);
        let diagnostics = vec![
            // adjacent context windows
            env.builder("unused-import", Severity::Error, "`os` imported but unused")
                .primary("notebook.ipynb", "2:7", "2:9", "")
                .secondary("notebook.ipynb", "4:7", "4:11", "second cell")
                .help("Remove unused import: `os`")
                .build(),
            // non-adjacent context windows
            env.builder("unused-import", Severity::Error, "`os` imported but unused")
                .primary("notebook.ipynb", "2:7", "2:9", "")
                .secondary("notebook.ipynb", "10:4", "10:5", "second cell")
                .help("Remove unused import: `os`")
                .build(),
            // adjacent context windows in the same cell
            env.err()
                .primary("notebook.ipynb", "4:7", "4:11", "second cell")
                .secondary("notebook.ipynb", "6:0", "6:5", "print statement")
                .help("Remove `print` statement")
                .build(),
        ];
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
error[unused-import]: `os` imported but unused
--> notebook.ipynb:cell 1:2:8
|
1 | # cell 1
2 | import os
| ^^
|
::: notebook.ipynb:cell 2:2:8
|
1 | # cell 2
2 | import math
| ---- second cell
3 |
4 | print('hello world')
|
help: Remove unused import: `os`
error[unused-import]: `os` imported but unused
--> notebook.ipynb:cell 1:2:8
|
1 | # cell 1
2 | import os
| ^^
|
::: notebook.ipynb:cell 3:4:5
|
2 | def foo():
3 | print()
4 | x = 1
| - second cell
|
help: Remove unused import: `os`
error[test-diagnostic]: main diagnostic message
--> notebook.ipynb:cell 2:2:8
|
1 | # cell 2
2 | import math
| ^^^^ second cell
3 |
4 | print('hello world')
| ----- print statement
|
help: Remove `print` statement
");
    }

    /// Test that we remap notebook cell line numbers in the diff as well as the main diagnostic.
    #[test]
    fn notebook_output_with_diff() {
        let (mut env, diagnostics) = create_notebook_diagnostics(DiagnosticFormat::Full);
        env.show_fix_diff(true);
        env.show_fix_status(true);
        env.fix_applicability(Applicability::DisplayOnly);
        insta::assert_snapshot!(env.render_diagnostics(&diagnostics));
    }

    // A single diagnostic whose fix edits fall in different notebook cells
    // should render one diff section per affected cell.
    #[test]
    fn notebook_output_with_diff_spanning_cells() {
        let (mut env, mut diagnostics) = create_notebook_diagnostics(DiagnosticFormat::Full);
        env.show_fix_diff(true);
        env.show_fix_status(true);
        env.fix_applicability(Applicability::DisplayOnly);
        // Move all of the edits from the later diagnostics to the first diagnostic to simulate a
        // single diagnostic with edits in different cells.
        let mut diagnostic = diagnostics.swap_remove(0);
        let fix = diagnostic.fix_mut().unwrap();
        let mut edits = fix.edits().to_vec();
        for diag in diagnostics {
            edits.extend_from_slice(diag.fix().unwrap().edits());
        }
        *fix = Fix::unsafe_edits(edits.remove(0), edits);
        insta::assert_snapshot!(env.render(&diagnostic));
    }

    /// Carriage return (`\r`) is a valid line-ending in Python, so we should normalize this to a
    /// line feed (`\n`) for rendering. Otherwise we report a single long line for this case.
    #[test]
    fn normalize_carriage_return() {
        let mut env = TestEnvironment::new();
        env.add(
            "example.py",
            "# Keep parenthesis around preserved CR\rint(-\r 1)\rint(+\r 1)",
        );
        env.format(DiagnosticFormat::Full);
        let mut diagnostic = env.err().build();
        let span = env
            .path("example.py")
            .with_range(TextRange::at(TextSize::new(39), TextSize::new(0)));
        let annotation = Annotation::primary(span);
        diagnostic.annotate(annotation);
        insta::assert_snapshot!(env.render(&diagnostic), @r"
error[test-diagnostic]: main diagnostic message
--> example.py:2:1
|
1 | # Keep parenthesis around preserved CR
2 | int(-
| ^
3 | 1)
4 | int(+
|
");
    }

    /// Without stripping the BOM, we report an error in column 2, unlike Ruff.
    #[test]
    fn strip_bom() {
        let mut env = TestEnvironment::new();
        env.add("example.py", "\u{feff}import foo");
        env.format(DiagnosticFormat::Full);
        let mut diagnostic = env.err().build();
        let span = env
            .path("example.py")
            .with_range(TextRange::at(TextSize::new(3), TextSize::new(0)));
        let annotation = Annotation::primary(span);
        diagnostic.annotate(annotation);
        insta::assert_snapshot!(env.render(&diagnostic), @r"
error[test-diagnostic]: main diagnostic message
--> example.py:1:1
|
1 | import foo
| ^
|
");
    }

    // A default (empty) range on a BOM-prefixed file should also resolve to 1:1.
    #[test]
    fn bom_with_default_range() {
        let mut env = TestEnvironment::new();
        env.add("example.py", "\u{feff}import foo");
        env.format(DiagnosticFormat::Full);
        let mut diagnostic = env.err().build();
        let span = env.path("example.py").with_range(TextRange::default());
        let annotation = Annotation::primary(span);
        diagnostic.annotate(annotation);
        insta::assert_snapshot!(env.render(&diagnostic), @r"
error[test-diagnostic]: main diagnostic message
--> example.py:1:1
|
1 | import foo
| ^
|
");
    }

    /// We previously rendered this correctly, but the header was falling back to 1:1 for ranges
    /// pointing to the final newline in a file. Like Ruff, we now use the offset of the first
    /// character in the nonexistent final line in the header.
    #[test]
    fn end_of_file() {
        let mut env = TestEnvironment::new();
        let contents = "unexpected eof\n";
        env.add("example.py", contents);
        env.format(DiagnosticFormat::Full);
        let mut diagnostic = env.err().build();
        let span = env
            .path("example.py")
            .with_range(TextRange::at(contents.text_len(), TextSize::new(0)));
        let annotation = Annotation::primary(span);
        diagnostic.annotate(annotation);
        insta::assert_snapshot!(env.render(&diagnostic), @r"
error[test-diagnostic]: main diagnostic message
--> example.py:2:1
|
1 | unexpected eof
| ^
|
");
    }

    /// Test that we handle the width calculation for the line number correctly even for context
    /// lines at the end of a diff. For example, we want it to render like this:
    ///
    /// ```
    /// 8 |
    /// 9 |
    /// 10 |
    /// ```
    ///
    /// and not like this:
    ///
    /// ```
    /// 8 |
    /// 9 |
    /// 10 |
    /// ```
    #[test]
    fn longer_line_number_end_of_context() {
        let mut env = TestEnvironment::new();
        let contents = "\
line 1
line 2
line 3
line 4
line 5
line 6
line 7
line 8
line 9
line 10
";
        env.add("example.py", contents);
        env.format(DiagnosticFormat::Full);
        env.show_fix_diff(true);
        env.show_fix_status(true);
        env.fix_applicability(Applicability::DisplayOnly);
        let mut diagnostic = env.err().primary("example.py", "3", "3", "label").build();
        diagnostic.help("Start of diff:");
        let target = "line 7";
        let line9 = contents.find(target).unwrap();
        let range = TextRange::at(TextSize::try_from(line9).unwrap(), target.text_len());
        diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
            format!("fixed {target}"),
            range,
        )));
        insta::assert_snapshot!(env.render(&diagnostic), @r"
error[test-diagnostic][*]: main diagnostic message
--> example.py:3:1
|
1 | line 1
2 | line 2
3 | line 3
| ^^^^^^ label
4 | line 4
5 | line 5
|
help: Start of diff:
4 | line 4
5 | line 5
6 | line 6
- line 7
7 + fixed line 7
8 | line 8
9 | line 9
10 | line 10
note: This is an unsafe fix and may change runtime behavior
");
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_vendored/build.rs | crates/ty_vendored/build.rs | //! Build script to package our vendored typeshed files
//! into a zip archive that can be included in the Ruff binary.
//!
//! This script should be automatically run at build time
//! whenever the script itself changes, or whenever any files
//! in `crates/ty_vendored/vendor/typeshed` change.
use std::fs::File;
use std::io::Write;
use std::path::Path;
use path_slash::PathExt;
use zip::CompressionMethod;
use zip::result::ZipResult;
use zip::write::{FileOptions, ZipWriter};
/// Directory (relative to the crate root) containing the vendored typeshed stubs.
const TYPESHED_SOURCE_DIR: &str = "vendor/typeshed";
/// Stub file for the `ty_extensions` module that gets patched into the archive.
const TY_EXTENSIONS_STUBS: &str = "ty_extensions/ty_extensions.pyi";
/// Path of the generated archive, appended to `OUT_DIR`.
const TYPESHED_ZIP_LOCATION: &str = "/zipped_typeshed.zip";
/// Recursively zip the contents of the entire typeshed directory and patch typeshed
/// on the fly to include the `ty_extensions` module.
///
/// This routine is adapted from a recipe at
/// <https://github.com/zip-rs/zip-old/blob/5d0f198124946b7be4e5969719a7f29f363118cd/examples/write_dir.rs>
fn write_zipped_typeshed_to(writer: File) -> ZipResult<File> {
    let mut zip = ZipWriter::new(writer);

    // Select the compression method via Cargo features rather than `#[cfg(target_arch)]`:
    // in a build script, `cfg!(target_arch = ...)` reflects the architecture of the host
    // running the script, not the build target. Zstd compresses best, but compiling
    // `zstd-sys` requires clang ([source](https://github.com/gyscos/zstd-rs/wiki/Compile-for-WASM)),
    // which complicates the build by a lot, so WASM builds enable the `deflate` feature
    // instead. Deflated compression is slower but it shouldn't matter much for the WASM
    // use case (WASM itself is already slower than a native build for a specific platform).
    let method = if cfg!(feature = "zstd") {
        CompressionMethod::Zstd
    } else if cfg!(feature = "deflate") {
        CompressionMethod::Deflated
    } else {
        CompressionMethod::Stored
    };
    let options = FileOptions::default()
        .compression_method(method)
        .unix_permissions(0o644);
    for entry in walkdir::WalkDir::new(TYPESHED_SOURCE_DIR) {
        let dir_entry = entry.unwrap();
        let absolute_path = dir_entry.path();
        // Archive entries use forward slashes regardless of host platform.
        let normalized_relative_path = absolute_path
            .strip_prefix(Path::new(TYPESHED_SOURCE_DIR))
            .unwrap()
            .to_slash()
            .expect("Unexpected non-utf8 typeshed path!");
        // Write file or directory explicitly
        // Some unzip tools unzip files with directory paths correctly, some do not!
        if absolute_path.is_file() {
            println!("adding file {absolute_path:?} as {normalized_relative_path:?} ...");
            zip.start_file(&*normalized_relative_path, options)?;
            let mut f = File::open(absolute_path)?;
            std::io::copy(&mut f, &mut zip).unwrap();

            // Patch the VERSIONS file to make `ty_extensions` available
            if normalized_relative_path == "stdlib/VERSIONS" {
                writeln!(&mut zip, "ty_extensions: 3.0-")?;
            }
        } else if !normalized_relative_path.is_empty() {
            // Only if not root! Avoids path spec / warning
            // and mapname conversion failed error on unzip
            println!("adding dir {absolute_path:?} as {normalized_relative_path:?} ...");
            zip.add_directory(normalized_relative_path, options)?;
        }
    }

    // Patch typeshed and add the stubs for the `ty_extensions` module
    println!("adding file {TY_EXTENSIONS_STUBS} as stdlib/ty_extensions.pyi ...");
    zip.start_file("stdlib/ty_extensions.pyi", options)?;
    let mut f = File::open(TY_EXTENSIONS_STUBS)?;
    std::io::copy(&mut f, &mut zip).unwrap();
    zip.finish()
}
fn main() {
    // Fail fast with a clear message if the typeshed directory is missing
    // (e.g. a submodule that wasn't checked out).
    assert!(
        Path::new(TYPESHED_SOURCE_DIR).is_dir(),
        "Where is typeshed?"
    );
    let out_dir = std::env::var("OUT_DIR").unwrap();

    // N.B. Deliberately using `format!()` instead of `Path::join()` here,
    // so that we use `/` as a path separator on all platforms.
    // That enables us to load the typeshed zip at compile time in `module.rs`
    // (otherwise we'd have to dynamically determine the exact path to the typeshed zip
    // based on the default path separator for the specific platform we're on,
    // which can't be done at compile time.)
    let zipped_typeshed_location = format!("{out_dir}{TYPESHED_ZIP_LOCATION}");
    let zipped_typeshed_file = File::create(zipped_typeshed_location).unwrap();
    write_zipped_typeshed_to(zipped_typeshed_file).unwrap();
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_vendored/src/lib.rs | crates/ty_vendored/src/lib.rs | #![warn(
clippy::disallowed_methods,
reason = "Prefer System trait methods over std methods in ty crates"
)]
use ruff_db::vendored::VendoredFileSystem;
use std::sync::LazyLock;
/// The source commit of the vendored typeshed.
pub const SOURCE_COMMIT: &str =
    include_str!("../../../crates/ty_vendored/vendor/typeshed/source_commit.txt").trim_ascii_end();

// A full git commit SHA is exactly 40 hex characters.
static_assertions::const_assert_eq!(SOURCE_COMMIT.len(), 40);

// The file path here is hardcoded in this crate's `build.rs` script.
// Luckily this crate will fail to build if this file isn't available at build time.
static TYPESHED_ZIP_BYTES: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/zipped_typeshed.zip"));
/// Returns the vendored typeshed stubs as a process-wide, lazily initialized
/// in-memory file system backed by the embedded zip archive.
pub fn file_system() -> &'static VendoredFileSystem {
    // Building the `VendoredFileSystem` from the zip bytes happens at most once;
    // subsequent calls return the cached instance.
    static VENDORED_TYPESHED_STUBS: LazyLock<VendoredFileSystem> =
        LazyLock::new(|| VendoredFileSystem::new_static(TYPESHED_ZIP_BYTES).unwrap());
    &VENDORED_TYPESHED_STUBS
}
#[cfg(test)]
mod tests {
    use std::io::{self, Read};
    use std::path::Path;

    use ruff_db::vendored::VendoredPath;

    use super::*;

    // Spot-check the embedded archive: a known stdlib stub must exist and
    // contain an expected definition.
    #[test]
    fn typeshed_zip_created_at_build_time() {
        let mut typeshed_zip_archive =
            zip::ZipArchive::new(io::Cursor::new(TYPESHED_ZIP_BYTES)).unwrap();

        let mut functools_module_stub = typeshed_zip_archive
            .by_name("stdlib/functools.pyi")
            .unwrap();
        assert!(functools_module_stub.is_file());

        let mut functools_module_stub_source = String::new();
        functools_module_stub
            .read_to_string(&mut functools_module_stub_source)
            .unwrap();

        assert!(functools_module_stub_source.contains("def update_wrapper("));
    }

    // Walk the on-disk `vendor/typeshed` directory and check that every entry
    // exists in the `VendoredFileSystem` with a matching file/directory kind.
    #[test]
    fn typeshed_vfs_consistent_with_vendored_stubs() {
        let vendored_typeshed_dir = Path::new("vendor/typeshed").canonicalize().unwrap();
        let vendored_typeshed_stubs = file_system();

        // Guards against the walk silently yielding nothing (e.g. a missing checkout).
        let mut empty_iterator = true;
        for entry in walkdir::WalkDir::new(&vendored_typeshed_dir).min_depth(1) {
            empty_iterator = false;
            let entry = entry.unwrap();
            let absolute_path = entry.path();
            let file_type = entry.file_type();

            let relative_path = absolute_path
                .strip_prefix(&vendored_typeshed_dir)
                .unwrap_or_else(|_| {
                    panic!("Expected {absolute_path:?} to be a child of {vendored_typeshed_dir:?}")
                });

            let vendored_path = <&VendoredPath>::try_from(relative_path)
                .unwrap_or_else(|_| panic!("Expected {relative_path:?} to be valid UTF-8"));

            assert!(
                vendored_typeshed_stubs.exists(vendored_path),
                "Expected {vendored_path:?} to exist in the `VendoredFileSystem`!
Vendored file system:
{vendored_typeshed_stubs:#?}
"
            );

            let vendored_path_kind = vendored_typeshed_stubs
                .metadata(vendored_path)
                .unwrap_or_else(|_| {
                    panic!(
                        "Expected metadata for {vendored_path:?} to be retrievable from the `VendoredFileSystem!
Vendored file system:
{vendored_typeshed_stubs:#?}
"
                    )
                })
                .kind();

            assert_eq!(
                vendored_path_kind.is_directory(),
                file_type.is_dir(),
                "{vendored_path:?} had type {vendored_path_kind:?}, inconsistent with fs path {relative_path:?}: {file_type:?}"
            );
        }

        assert!(
            !empty_iterator,
            "Expected there to be at least one file or directory in the vendored typeshed stubs!"
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/db.rs | crates/ruff_python_formatter/src/db.rs | use ruff_db::{Db as SourceDb, files::File};
use crate::PyFormatOptions;
#[salsa::db]
pub trait Db: SourceDb {
/// Returns the formatting options
fn format_options(&self, file: File) -> PyFormatOptions;
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/shared_traits.rs | crates/ruff_python_formatter/src/shared_traits.rs | // These traits are copied verbatim from ../../ruff_formatter/shared_traits.rs.
// They should stay in sync. Originally, they were included via
// `include!("...")`, but this seems to break rust-analyzer as it treats the
// included file as unlinked. Since there isn't much to copy, we just do that.
/// Used to get an object that knows how to format this object.
pub trait AsFormat<Context> {
type Format<'a>: ruff_formatter::Format<Context>
where
Self: 'a;
/// Returns an object that is able to format this object.
fn format(&self) -> Self::Format<'_>;
}
/// Implement [`AsFormat`] for references to types that implement [`AsFormat`].
impl<T, C> AsFormat<C> for &T
where
T: AsFormat<C>,
{
type Format<'a>
= T::Format<'a>
where
Self: 'a;
fn format(&self) -> Self::Format<'_> {
AsFormat::format(&**self)
}
}
/// Used to convert this object into an object that can be formatted.
///
/// The difference to [`AsFormat`] is that this trait takes ownership of `self`.
pub trait IntoFormat<Context> {
type Format: ruff_formatter::Format<Context>;
fn into_format(self) -> Self::Format;
}
/// Implement [`IntoFormat`] for [`Option`] when `T` implements [`IntoFormat`]
///
/// Allows to call format on optional AST fields without having to unwrap the
/// field first.
impl<T, Context> IntoFormat<Context> for Option<T>
where
T: IntoFormat<Context>,
{
type Format = Option<T::Format>;
fn into_format(self) -> Self::Format {
self.map(IntoFormat::into_format)
}
}
/// Implement [`IntoFormat`] for references to types that implement [`AsFormat`].
impl<'a, T, C> IntoFormat<C> for &'a T
where
T: AsFormat<C>,
{
type Format = T::Format<'a>;
fn into_format(self) -> Self::Format {
AsFormat::format(self)
}
}
/// Formatting specific [`Iterator`] extensions
pub trait FormattedIterExt {
/// Converts every item to an object that knows how to format it.
fn formatted<Context>(self) -> FormattedIter<Self, Self::Item, Context>
where
Self: Iterator + Sized,
Self::Item: IntoFormat<Context>,
{
FormattedIter {
inner: self,
options: std::marker::PhantomData,
}
}
}
impl<I> FormattedIterExt for I where I: std::iter::Iterator {}
pub struct FormattedIter<Iter, Item, Context>
where
Iter: Iterator<Item = Item>,
{
inner: Iter,
options: std::marker::PhantomData<Context>,
}
impl<Iter, Item, Context> std::iter::Iterator for FormattedIter<Iter, Item, Context>
where
Iter: Iterator<Item = Item>,
Item: IntoFormat<Context>,
{
type Item = Item::Format;
fn next(&mut self) -> Option<Self::Item> {
Some(self.inner.next()?.into_format())
}
}
impl<Iter, Item, Context> std::iter::FusedIterator for FormattedIter<Iter, Item, Context>
where
Iter: std::iter::FusedIterator<Item = Item>,
Item: IntoFormat<Context>,
{
}
impl<Iter, Item, Context> std::iter::ExactSizeIterator for FormattedIter<Iter, Item, Context>
where
Iter: Iterator<Item = Item> + std::iter::ExactSizeIterator,
Item: IntoFormat<Context>,
{
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/prelude.rs | crates/ruff_python_formatter/src/prelude.rs | pub(crate) use crate::{
AsFormat, FormatNodeRule, FormattedIterExt as _, IntoFormat, PyFormatContext, PyFormatter,
builders::PyFormatterExtensions,
};
pub(crate) use ruff_formatter::prelude::*;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/lib.rs | crates/ruff_python_formatter/src/lib.rs | use ruff_db::diagnostic::{Diagnostic, DiagnosticId, Severity};
use ruff_db::files::File;
use ruff_db::parsed::parsed_module;
use ruff_db::source::source_text;
use thiserror::Error;
use tracing::Level;
pub use range::format_range;
use ruff_formatter::prelude::*;
use ruff_formatter::{FormatError, Formatted, PrintError, Printed, SourceCode, format, write};
use ruff_python_ast::{AnyNodeRef, Mod};
use ruff_python_parser::{ParseError, ParseOptions, Parsed, parse};
use ruff_python_trivia::CommentRanges;
use ruff_text_size::{Ranged, TextRange};
use crate::comments::{
Comments, SourceComment, has_skip_comment, leading_comments, trailing_comments,
};
pub use crate::context::PyFormatContext;
pub use crate::db::Db;
pub use crate::options::{
DocstringCode, DocstringCodeLineWidth, MagicTrailingComma, PreviewMode, PyFormatOptions,
QuoteStyle,
};
use crate::range::is_logical_line;
pub use crate::shared_traits::{AsFormat, FormattedIter, FormattedIterExt, IntoFormat};
use crate::verbatim::suppressed_node;
pub(crate) mod builders;
pub mod cli;
mod comments;
pub(crate) mod context;
mod db;
pub(crate) mod expression;
mod generated;
pub(crate) mod module;
mod options;
pub(crate) mod other;
pub(crate) mod pattern;
mod prelude;
mod preview;
mod range;
mod shared_traits;
pub(crate) mod statement;
pub(crate) mod string;
pub(crate) mod type_param;
mod verbatim;
/// 'ast is the lifetime of the source code (input), 'buf is the lifetime of the buffer (output)
pub(crate) type PyFormatter<'ast, 'buf> = Formatter<'buf, PyFormatContext<'ast>>;
/// Rule for formatting a Python AST node.
pub(crate) trait FormatNodeRule<N>
where
N: Ranged,
for<'a> AnyNodeRef<'a>: From<&'a N>,
{
fn fmt(&self, node: &N, f: &mut PyFormatter) -> FormatResult<()> {
let comments = f.context().comments().clone();
let node_ref = AnyNodeRef::from(node);
let node_comments = comments.leading_dangling_trailing(node_ref);
if self.is_suppressed(node_comments.trailing, f.context()) {
suppressed_node(node_ref).fmt(f)
} else {
leading_comments(node_comments.leading).fmt(f)?;
// Emit source map information for nodes that are valid "narrowing" targets
// in range formatting. Never emit source map information if they're disabled
// for performance reasons.
let emit_source_position = (is_logical_line(node_ref) || node_ref.is_mod_module())
&& f.options().source_map_generation().is_enabled();
emit_source_position
.then_some(source_position(node.start()))
.fmt(f)?;
self.fmt_fields(node, f)?;
debug_assert!(
node_comments
.dangling
.iter()
.all(SourceComment::is_formatted),
"The node has dangling comments that need to be formatted manually. Add the special dangling comments handling to `fmt_fields`."
);
write!(
f,
[
emit_source_position.then_some(source_position(node.end())),
trailing_comments(node_comments.trailing)
]
)
}
}
/// Formats the node's fields.
fn fmt_fields(&self, item: &N, f: &mut PyFormatter) -> FormatResult<()>;
fn is_suppressed(
&self,
_trailing_comments: &[SourceComment],
_context: &PyFormatContext,
) -> bool {
false
}
}
#[derive(Error, Debug, salsa::Update, PartialEq, Eq)]
pub enum FormatModuleError {
#[error(transparent)]
ParseError(#[from] ParseError),
#[error(transparent)]
FormatError(#[from] FormatError),
#[error(transparent)]
PrintError(#[from] PrintError),
}
impl FormatModuleError {
pub fn range(&self) -> Option<TextRange> {
match self {
FormatModuleError::ParseError(parse_error) => Some(parse_error.range()),
FormatModuleError::FormatError(_) | FormatModuleError::PrintError(_) => None,
}
}
}
impl From<&FormatModuleError> for Diagnostic {
fn from(error: &FormatModuleError) -> Self {
match error {
FormatModuleError::ParseError(parse_error) => Diagnostic::new(
DiagnosticId::InternalError,
Severity::Error,
&parse_error.error,
),
FormatModuleError::FormatError(format_error) => {
Diagnostic::new(DiagnosticId::InternalError, Severity::Error, format_error)
}
FormatModuleError::PrintError(print_error) => {
Diagnostic::new(DiagnosticId::InternalError, Severity::Error, print_error)
}
}
}
}
#[tracing::instrument(name = "format", level = Level::TRACE, skip_all)]
pub fn format_module_source(
source: &str,
options: PyFormatOptions,
) -> Result<Printed, FormatModuleError> {
let source_type = options.source_type();
let parsed = parse(source, ParseOptions::from(source_type))?;
let comment_ranges = CommentRanges::from(parsed.tokens());
let formatted = format_module_ast(&parsed, &comment_ranges, source, options)?;
Ok(formatted.print()?)
}
pub fn format_module_ast<'a>(
parsed: &'a Parsed<Mod>,
comment_ranges: &'a CommentRanges,
source: &'a str,
options: PyFormatOptions,
) -> FormatResult<Formatted<PyFormatContext<'a>>> {
format_node(parsed, comment_ranges, source, options)
}
fn format_node<'a, N>(
parsed: &'a Parsed<N>,
comment_ranges: &'a CommentRanges,
source: &'a str,
options: PyFormatOptions,
) -> FormatResult<Formatted<PyFormatContext<'a>>>
where
N: AsFormat<PyFormatContext<'a>>,
&'a N: Into<AnyNodeRef<'a>>,
{
let source_code = SourceCode::new(source);
let comments = Comments::from_ast(parsed.syntax(), source_code, comment_ranges);
let formatted = format!(
PyFormatContext::new(options, source, comments, parsed.tokens()),
[parsed.syntax().format()]
)?;
formatted
.context()
.comments()
.assert_all_formatted(source_code);
Ok(formatted)
}
pub fn formatted_file(db: &dyn Db, file: File) -> Result<Option<String>, FormatModuleError> {
let options = db.format_options(file);
let parsed = parsed_module(db, file).load(db);
if let Some(first) = parsed.errors().first() {
return Err(FormatModuleError::ParseError(first.clone()));
}
let comment_ranges = CommentRanges::from(parsed.tokens());
let source = source_text(db, file);
let formatted = format_node(&parsed, &comment_ranges, &source, options)?;
let printed = formatted.print()?;
if printed.as_code() == &*source {
Ok(None)
} else {
Ok(Some(printed.into_code()))
}
}
/// Public function for generating a printable string of the debug comments.
pub fn pretty_comments(module: &Mod, comment_ranges: &CommentRanges, source: &str) -> String {
let source_code = SourceCode::new(source);
let comments = Comments::from_ast(module, source_code, comment_ranges);
std::format!("{comments:#?}", comments = comments.debug(source_code))
}
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use insta::assert_snapshot;
use ruff_python_ast::PySourceType;
use ruff_python_parser::{ParseOptions, parse};
use ruff_python_trivia::CommentRanges;
use ruff_text_size::{TextRange, TextSize};
use crate::{PyFormatOptions, format_module_ast, format_module_source, format_range};
/// Very basic test intentionally kept very similar to the CLI
#[test]
fn basic() -> Result<()> {
let input = r"
# preceding
if True:
pass
# trailing
";
let expected = r"# preceding
if True:
pass
# trailing
";
let actual = format_module_source(input, PyFormatOptions::default())?
.as_code()
.to_string();
assert_eq!(expected, actual);
Ok(())
}
/// Use this test to debug the formatting of some snipped
#[ignore]
#[test]
fn quick_test() {
let source = r#"
def hello(): ...
@lambda _, /: _
class A: ...
"#;
let source_type = PySourceType::Python;
// Parse the AST.
let source_path = "code_inline.py";
let parsed = parse(source, ParseOptions::from(source_type)).unwrap();
let comment_ranges = CommentRanges::from(parsed.tokens());
let options = PyFormatOptions::from_extension(Path::new(source_path));
let formatted = format_module_ast(&parsed, &comment_ranges, source, options).unwrap();
// Uncomment the `dbg` to print the IR.
// Use `dbg_write!(f, []) instead of `write!(f, [])` in your formatting code to print some IR
// inside of a `Format` implementation
// use ruff_formatter::FormatContext;
// dbg!(formatted
// .document()
// .display(formatted.context().source_code()));
//
// dbg!(formatted
// .context()
// .comments()
// .debug(formatted.context().source_code()));
let printed = formatted.print().unwrap();
assert_eq!(
printed.as_code(),
r"for converter in connection.ops.get_db_converters(
expression
) + expression.get_db_converters(connection):
...
"
);
}
/// Use this test to quickly debug some formatting issue.
#[ignore]
#[test]
fn range_formatting_quick_test() {
let source = r#"def convert_str(value: str) -> str: # Trailing comment
"""Return a string as-is."""
<RANGE_START>
return value # Trailing comment
<RANGE_END>"#;
let mut source = source.to_string();
let start = TextSize::try_from(
source
.find("<RANGE_START>")
.expect("Start marker not found"),
)
.unwrap();
source.replace_range(
start.to_usize()..start.to_usize() + "<RANGE_START>".len(),
"",
);
let end =
TextSize::try_from(source.find("<RANGE_END>").expect("End marker not found")).unwrap();
source.replace_range(end.to_usize()..end.to_usize() + "<RANGE_END>".len(), "");
let source_type = PySourceType::Python;
let options = PyFormatOptions::from_source_type(source_type);
let printed = format_range(&source, TextRange::new(start, end), options).unwrap();
let mut formatted = source.clone();
formatted.replace_range(
std::ops::Range::<usize>::from(printed.source_range()),
printed.as_code(),
);
assert_eq!(
formatted,
r#"print ( "format me" )
print("format me")
print("format me")
print ( "format me" )
print ( "format me" )"#
);
}
#[test]
fn string_processing() {
use crate::prelude::*;
use ruff_formatter::{format, format_args, write};
struct FormatString<'a>(&'a str);
impl Format<SimpleFormatContext> for FormatString<'_> {
fn fmt(&self, f: &mut Formatter<SimpleFormatContext>) -> FormatResult<()> {
let format_str = format_with(|f| {
write!(f, [token("\"")])?;
let mut words = self.0.split_whitespace().peekable();
let mut fill = f.fill();
let separator = format_with(|f| {
group(&format_args![
if_group_breaks(&token("\"")),
soft_line_break_or_space(),
if_group_breaks(&token("\" "))
])
.fmt(f)
});
while let Some(word) = words.next() {
let is_last = words.peek().is_none();
let format_word = format_with(|f| {
write!(f, [text(word)])?;
if is_last {
write!(f, [token("\"")])?;
}
Ok(())
});
fill.entry(&separator, &format_word);
}
fill.finish()
});
write!(
f,
[group(&format_args![
if_group_breaks(&token("(")),
soft_block_indent(&format_str),
if_group_breaks(&token(")"))
])]
)
}
}
// 77 after g group (leading quote)
let fits =
r"aaaaaaaaaa bbbbbbbbbb cccccccccc dddddddddd eeeeeeeeee ffffffffff gggggggggg h";
let breaks =
r"aaaaaaaaaa bbbbbbbbbb cccccccccc dddddddddd eeeeeeeeee ffffffffff gggggggggg hh";
let output = format!(
SimpleFormatContext::default(),
[FormatString(fits), hard_line_break(), FormatString(breaks)]
)
.expect("Formatting to succeed");
assert_snapshot!(output.print().expect("Printing to succeed").as_code());
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/range.rs | crates/ruff_python_formatter/src/range.rs | use tracing::Level;
use ruff_formatter::printer::SourceMapGeneration;
use ruff_formatter::{
FormatContext, FormatError, FormatOptions, IndentStyle, PrintedRange, SourceCode, format,
};
use ruff_python_ast::visitor::source_order::{SourceOrderVisitor, TraversalSignal, walk_body};
use ruff_python_ast::{AnyNodeRef, Stmt, StmtMatch, StmtTry};
use ruff_python_parser::{ParseOptions, parse};
use ruff_python_trivia::{
BackwardsTokenizer, CommentRanges, SimpleToken, SimpleTokenKind, indentation_at_offset,
};
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::comments::Comments;
use crate::context::{IndentLevel, NodeLevel};
use crate::prelude::*;
use crate::statement::suite::DocstringStmt;
use crate::verbatim::{ends_suppression, starts_suppression};
use crate::{FormatModuleError, PyFormatOptions, format_module_source};
/// Formats the given `range` in source rather than the entire file.
///
/// The returned formatted range guarantees to cover at least `range` (excluding whitespace), but the range might be larger.
/// Some cases in which the returned range is larger than `range` are:
/// * The logical lines in `range` use a indentation different from the configured [`IndentStyle`]
/// and [`IndentWidth`](ruff_formatter::IndentWidth).
/// * `range` is smaller than a logical lines and the formatter needs to format the entire logical line.
/// * `range` falls on a single line body.
///
/// The formatting of logical lines using range formatting should produce the same result as when formatting the entire document (for the same lines and options).
///
/// ## Implementation
///
/// This is an optimisation problem. The goal is to find the minimal range that fully covers `range`, is still formattable,
/// and produces the same result as when formatting the entire document.
///
/// The implementation performs the following steps:
/// 1. Find the deepest node that fully encloses `range`. The node with the minimum covering range.
/// 2. Try to narrow the range found in step one by searching its children and find node and comment start and end offsets that are closer to `range`'s start and end.
/// 3. Format the node from step 1 and use the source map information generated by the formatter to map the narrowed range in the source document to the range in the formatted output.
/// 4. Take the formatted code and return it.
///
/// # Error
/// Returns a range error if `range` lies outside of the source file.
///
/// # Panics
/// If `range` doesn't point to a valid char boundaries.
///
/// [`IndentWidth`]: `ruff_formatter::IndentWidth`
#[tracing::instrument(name = "format_range", level = Level::TRACE, skip_all)]
pub fn format_range(
source: &str,
range: TextRange,
options: PyFormatOptions,
) -> Result<PrintedRange, FormatModuleError> {
// Error if the specified range lies outside of the source file.
if source.text_len() < range.end() {
return Err(FormatModuleError::FormatError(FormatError::RangeError {
input: range,
tree: TextRange::up_to(source.text_len()),
}));
}
// Formatting an empty string always yields an empty string. Return directly.
if range.is_empty() {
return Ok(PrintedRange::empty());
}
if range == TextRange::up_to(source.text_len()) {
let formatted = format_module_source(source, options)?;
return Ok(PrintedRange::new(formatted.into_code(), range));
}
assert_valid_char_boundaries(range, source);
let parsed = parse(source, ParseOptions::from(options.source_type()))?;
let source_code = SourceCode::new(source);
let comment_ranges = CommentRanges::from(parsed.tokens());
let comments = Comments::from_ast(parsed.syntax(), source_code, &comment_ranges);
let mut context = PyFormatContext::new(
options.with_source_map_generation(SourceMapGeneration::Enabled),
source,
comments,
parsed.tokens(),
);
let (enclosing_node, base_indent) =
match find_enclosing_node(range, AnyNodeRef::from(parsed.syntax()), &context) {
EnclosingNode::Node { node, indent_level } => (node, indent_level),
EnclosingNode::Suppressed => {
// The entire range falls into a suppressed range. There's nothing to format.
return Ok(PrintedRange::empty());
}
};
let narrowed_range = narrow_range(range, enclosing_node, &context);
assert_valid_char_boundaries(narrowed_range, source);
// Correctly initialize the node level for the blank line rules.
if !enclosing_node.is_mod_module() {
context.set_node_level(NodeLevel::CompoundStatement);
context.set_indent_level(
// Plus 1 because `IndentLevel=0` equals the module level.
IndentLevel::new(base_indent.saturating_add(1)),
);
}
let formatted = format!(
context,
[FormatEnclosingNode {
root: enclosing_node
}]
)?;
let printed = formatted.print_with_indent(base_indent)?;
Ok(printed.slice_range(narrowed_range, source))
}
/// Finds the node with the minimum covering range of `range`.
///
/// It traverses the tree and returns the deepest node that fully encloses `range`.
///
/// ## Eligible nodes
/// The search is restricted to nodes that mark the start of a logical line to ensure
/// formatting a range results in the same formatting for that logical line as when formatting the entire document.
/// This property can't be guaranteed when supporting sub-expression formatting because
/// a) Adding parentheses around enclosing expressions can toggle an expression from non-splittable to splittable,
/// b) formatting a sub-expression has fewer split points than formatting the entire expressions.
///
/// ### Possible docstrings
/// Strings that are suspected to be docstrings are excluded from the search to format the enclosing
/// suite instead so that the formatter's docstring detection in
/// [`FormatSuite`](crate::statement::suite::FormatSuite) correctly detects and formats the
/// docstrings.
///
/// ### Compound statements with a simple statement body
/// Don't include simple-statement bodies of compound statements `if True: pass` because the formatter
/// must run `FormatClauseBody` to determine if the body should be collapsed or not.
///
/// ### Incorrectly indented code
/// Code that uses indentations that don't match the configured [`IndentStyle`] and
/// [`IndentWidth`](ruff_formatter::IndentWidth) are excluded from the search, because formatting
/// such nodes on their own can lead to indentation mismatch with its sibling nodes.
///
/// ## Suppression comments
/// The search ends when `range` falls into a suppressed range because there's nothing to format. It also avoids that the
/// formatter formats the statement because it doesn't see the suppression comment of the enclosing node.
///
/// The implementation doesn't handle `fmt: ignore` suppression comments because the statement's formatting logic
/// correctly detects the suppression comment and returns the statement text as is.
fn find_enclosing_node<'ast>(
range: TextRange,
root: AnyNodeRef<'ast>,
context: &PyFormatContext<'ast>,
) -> EnclosingNode<'ast> {
let mut visitor = FindEnclosingNode::new(range, context);
if visitor.enter_node(root).is_traverse() {
root.visit_source_order(&mut visitor);
}
visitor.leave_node(root);
visitor.closest
}
struct FindEnclosingNode<'a, 'ast> {
range: TextRange,
context: &'a PyFormatContext<'ast>,
/// The, to this point, deepest node that fully encloses `range`.
closest: EnclosingNode<'ast>,
/// Tracks if the current statement is suppressed
suppressed: Suppressed,
}
impl<'a, 'ast> FindEnclosingNode<'a, 'ast> {
fn new(range: TextRange, context: &'a PyFormatContext<'ast>) -> Self {
Self {
range,
context,
suppressed: Suppressed::No,
closest: EnclosingNode::Suppressed,
}
}
}
impl<'ast> SourceOrderVisitor<'ast> for FindEnclosingNode<'_, 'ast> {
fn enter_node(&mut self, node: AnyNodeRef<'ast>) -> TraversalSignal {
if !(is_logical_line(node) || node.is_mod_module()) {
return TraversalSignal::Skip;
}
// Handle `fmt: off` suppression comments for statements.
if node.is_statement() {
let leading_comments = self.context.comments().leading(node);
self.suppressed = Suppressed::from(match self.suppressed {
Suppressed::No => starts_suppression(leading_comments, self.context.source()),
Suppressed::Yes => !ends_suppression(leading_comments, self.context.source()),
});
}
if !node.range().contains_range(self.range) {
return TraversalSignal::Skip;
}
if self.suppressed.is_yes() && node.is_statement() {
self.closest = EnclosingNode::Suppressed;
return TraversalSignal::Skip;
}
// Don't pick potential docstrings as the closest enclosing node because `suite.rs` than fails to identify them as
// docstrings and docstring formatting won't kick in.
// Format the enclosing node instead and slice the formatted docstring from the result.
let is_maybe_docstring = node
.as_stmt_expr()
.is_some_and(|stmt| DocstringStmt::is_docstring_statement(stmt, self.context));
if is_maybe_docstring {
return TraversalSignal::Skip;
}
// Only computing the count here is sufficient because each enclosing node ensures that it has the necessary indent
// or we don't traverse otherwise.
let Some(indent_level) =
indent_level(node.start(), self.context.source(), self.context.options())
else {
// Non standard indent or a simple-statement body of a compound statement, format the enclosing node
return TraversalSignal::Skip;
};
self.closest = EnclosingNode::Node { node, indent_level };
TraversalSignal::Traverse
}
fn leave_node(&mut self, node: AnyNodeRef<'ast>) {
if node.is_statement() {
let trailing_comments = self.context.comments().trailing(node);
// Update the suppressed state for the next statement.
self.suppressed = Suppressed::from(match self.suppressed {
Suppressed::No => starts_suppression(trailing_comments, self.context.source()),
Suppressed::Yes => !ends_suppression(trailing_comments, self.context.source()),
});
}
}
fn visit_body(&mut self, body: &'ast [Stmt]) {
// We only visit statements that aren't suppressed that's why we don't need to track the suppression
// state in a stack. Assert that this assumption is safe.
debug_assert!(self.suppressed.is_no());
walk_body(self, body);
self.suppressed = Suppressed::No;
}
}
#[derive(Debug, Copy, Clone)]
enum EnclosingNode<'a> {
/// The entire range falls into a suppressed `fmt: off` range.
Suppressed,
/// The node outside of a suppression range that fully encloses the searched range.
Node {
node: AnyNodeRef<'a>,
indent_level: u16,
},
}
/// Narrows the formatting `range` to a smaller sub-range than the enclosing node's range.
///
/// The range is narrowed by searching the enclosing node's children and:
/// * Find the closest node or comment start or end offset to `range.start`
/// * Find the closest node or comment start or end offset, or the clause header's `:` end offset to `range.end`
///
/// The search is restricted to positions where the formatter emits source map entries because it guarantees
/// that we know the exact range in the formatted range and not just an approximation that could include other tokens.
///
/// ## Clause Headers
/// For clause headers like `if`, `while`, `match`, `case` etc. consider the `:` end position for narrowing `range.end`
/// to support formatting the clause header without its body.
///
/// ## Compound statements with simple statement bodies
/// Similar to [`find_enclosing_node`], exclude the compound statement's body if it is a simple statement (not a suite) from the search to format the entire clause header
/// with the body. This ensures that the formatter runs `FormatClauseBody` that determines if the body should be indented.
///
/// ## Non-standard indentation
/// Nodes that use an indentation that doesn't match the configured [`IndentStyle`] and
/// [`IndentWidth`](ruff_formatter::IndentWidth) are excluded from the search. This is because the
/// formatter always uses the configured [`IndentStyle`] and
/// [`IndentWidth`](ruff_formatter::IndentWidth), resulting in the formatted nodes using a different
/// indentation than the unformatted sibling nodes. This would be tolerable in non whitespace
/// sensitive languages like JavaScript but results in lexical errors in Python.
///
/// ## Implementation
/// It would probably be possible to merge this visitor with [`FindEnclosingNode`] but they are separate because
/// it avoids some unnecessary work for nodes that aren't the `enclosing_node` and I found reasoning
/// and debugging the visiting logic easier when they are separate.
///
/// [`IndentStyle`]: ruff_formatter::IndentStyle
/// [`IndentWidth`]: ruff_formatter::IndentWidth
fn narrow_range(
range: TextRange,
enclosing_node: AnyNodeRef,
context: &PyFormatContext,
) -> TextRange {
let enclosing_indent = indentation_at_offset(enclosing_node.start(), context.source())
.expect("Expected enclosing to never be a same line body statement.");
let mut visitor = NarrowRange {
context,
range,
narrowed_start: enclosing_node.start(),
narrowed_end: enclosing_node.end(),
enclosing_indent,
level: usize::from(!enclosing_node.is_mod_module()),
};
if visitor.enter_node(enclosing_node).is_traverse() {
enclosing_node.visit_source_order(&mut visitor);
}
visitor.leave_node(enclosing_node);
TextRange::new(visitor.narrowed_start, visitor.narrowed_end)
}
struct NarrowRange<'a> {
context: &'a PyFormatContext<'a>,
// The range to format
range: TextRange,
// The narrowed range
narrowed_start: TextSize,
narrowed_end: TextSize,
// Stated tracked by the visitor
enclosing_indent: &'a str,
level: usize,
}
impl SourceOrderVisitor<'_> for NarrowRange<'_> {
    fn enter_node(&mut self, node: AnyNodeRef<'_>) -> TraversalSignal {
        // Only logical lines and the module root can contribute narrowing offsets.
        if !(is_logical_line(node) || node.is_mod_module()) {
            return TraversalSignal::Skip;
        }
        // Find the start offset of the node that starts the closest to (and before) the start offset of the formatting range.
        // We do this by iterating over known positions that emit source map entries and pick the start point that ends closest
        // to the searched range's start.
        let leading_comments = self.context.comments().leading(node);
        self.narrow(leading_comments);
        self.narrow([node]);
        // Avoid traversing when it's known to not be able to narrow the range further to avoid traversing the entire tree (entire file in the worst case).
        // If the node's range is entirely before the searched range, don't traverse because none of its children
        // can be closer to `narrow_start` than the node itself (which we already narrowed).
        //
        // Don't traverse if the current node is past the narrowed range (it's impossible to refine it further).
        if node.end() < self.range.start()
            || (self.narrowed_start > node.start() && self.narrowed_end <= node.end())
        {
            return TraversalSignal::Skip;
        }
        // Handle nodes that have indented child-nodes that aren't a `Body` (which is handled by `visit_body`).
        // Ideally, this would be handled as part of `visit_stmt` but `visit_stmt` doesn't get called for the `enclosing_node`
        // because it's not possible to convert `AnyNodeRef` to `&Stmt` :(
        match node {
            AnyNodeRef::StmtMatch(StmtMatch {
                subject: _,
                cases,
                range: _,
                node_index: _,
            }) => {
                // `case` clauses sit one indent level below the `match` statement.
                if let Some(saved_state) = self.enter_level(cases.first().map(AnyNodeRef::from)) {
                    for match_case in cases {
                        self.visit_match_case(match_case);
                    }
                    self.leave_level(saved_state);
                }
                // Already traversed as part of `enter_node`.
                TraversalSignal::Skip
            }
            AnyNodeRef::StmtTry(StmtTry {
                body,
                handlers,
                orelse,
                finalbody,
                is_star: _,
                range: _,
                node_index: _,
            }) => {
                self.visit_body(body);
                // Except handlers aren't a `Body`, so they need explicit level handling here.
                if let Some(except_handler_saved) =
                    self.enter_level(handlers.first().map(AnyNodeRef::from))
                {
                    for except_handler in handlers {
                        self.visit_except_handler(except_handler);
                    }
                    self.leave_level(except_handler_saved);
                }
                self.visit_body(orelse);
                self.visit_body(finalbody);
                // Already traversed as part of `enter_node`.
                TraversalSignal::Skip
            }
            _ => TraversalSignal::Traverse,
        }
    }
    fn leave_node(&mut self, node: AnyNodeRef<'_>) {
        if !(is_logical_line(node) || node.is_mod_module()) {
            return;
        }
        // Find the end offset of the closest node to the end offset of the formatting range.
        // We do this by iterating over end positions that we know generate source map entries and pick the end
        // that ends closest or after the searched range's end.
        self.narrow(
            self.context
                .comments()
                .trailing(node)
                .iter()
                // Only own-line trailing comments are used for narrowing.
                .filter(|comment| comment.line_position().is_own_line()),
        );
    }
    fn visit_body(&mut self, body: &'_ [Stmt]) {
        // Bodies introduce one indent level; `enter_level` returns `None` (skip) when the
        // body's indentation doesn't conform, in which case the range must not be narrowed.
        if let Some(saved_state) = self.enter_level(body.first().map(AnyNodeRef::from)) {
            walk_body(self, body);
            self.leave_level(saved_state);
        }
    }
}
impl NarrowRange<'_> {
    /// Narrows the range using both the start and end offsets of every item.
    fn narrow<I, T>(&mut self, items: I)
    where
        I: IntoIterator<Item = T>,
        T: Ranged,
    {
        for ranged in items {
            self.narrow_offset(ranged.start());
            self.narrow_offset(ranged.end());
        }
    }
    /// Tries to use `offset` as a tighter start and end bound.
    fn narrow_offset(&mut self, offset: TextSize) {
        self.narrow_start(offset);
        self.narrow_end(offset);
    }
    /// Moves `narrowed_start` forward to `offset` if `offset` is still at or before
    /// the requested range's start (i.e. it's a tighter lower bound).
    fn narrow_start(&mut self, offset: TextSize) {
        if offset <= self.range.start() {
            self.narrowed_start = self.narrowed_start.max(offset);
        }
    }
    /// Moves `narrowed_end` backward to `offset` if `offset` is still at or after
    /// the requested range's end (i.e. it's a tighter upper bound).
    fn narrow_end(&mut self, offset: TextSize) {
        if offset >= self.range.end() {
            self.narrowed_end = self.narrowed_end.min(offset);
        }
    }
    /// Enters one indentation level for a body or clause whose first child is `first_child`.
    ///
    /// Returns `None` — meaning the children must not be traversed and the range must not
    /// be narrowed into them — when the child indentation doesn't conform to the configured
    /// indent style/width, or when the body is a simple statement on the clause-header line.
    fn enter_level(&mut self, first_child: Option<AnyNodeRef>) -> Option<SavedLevel> {
        if let Some(first_child) = first_child {
            // If this is a clause header and the `range` ends within the clause header, then avoid formatting the body.
            // This prevents that we format an entire function definition when the selected range is fully enclosed by the parameters.
            // ```python
            // 1| def foo(<RANGE_START>a, b, c<RANGE_END>):
            // 2|     pass
            // ```
            // We don't want to format the body of the function.
            if let Some(SimpleToken {
                kind: SimpleTokenKind::Colon,
                range: colon_range,
            }) = BackwardsTokenizer::up_to(
                first_child.start(),
                self.context.source(),
                self.context.comments().ranges(),
            )
            .skip_trivia()
            .next()
            {
                self.narrow_offset(colon_range.end());
            }
            // It is necessary to format all statements if the statement or any of its parents don't use the configured indentation.
            // ```python
            // 0| def foo():
            // 1|   if True:
            // 2|        print("Hello")
            // 3|        print("More")
            // 4|   a = 10
            // ```
            // Here, the `if` statement uses the correct 4 spaces indentation, but the two `print` statements use a 2 spaces indentation.
            // The formatter output uses 8 space indentation for the `print` statement which doesn't match the indentation of the statement on line 4 when
            // replacing the source with the formatted code. That's why we expand the range in this case to cover the entire if-body range.
            //
            // I explored the alternative of using `indent(dedent(formatted))` to retain the correct indentation. It works pretty well except that it can change the
            // content of multiline strings:
            // ```python
            // def test ():
            //     pass
            // <RANGE_START>1 + 2
            // """A Multiline string
            //     that uses the same indentation as the formatted code will. This should not be dedented."""
            //
            // print("Done")<RANGE_END>
            // ```
            // The challenge here is that the second line of the multiline string uses a 4 space indentation. Using `dedent` would
            // dedent the second line to 0 spaces and the `indent` then adds a 2 space indentation to match the indentation in the source.
            // This is incorrect because the leading whitespace is the content of the string and not indentation, resulting in changed string content.
            if let Some(indentation) =
                indentation_at_offset(first_child.start(), self.context.source())
            {
                // NOTE(review): assumes the child's indentation always starts with
                // `enclosing_indent`; the `unwrap` panics otherwise — confirm upstream invariant.
                let relative_indent = indentation.strip_prefix(self.enclosing_indent).unwrap();
                let expected_indents = self.level;
                // Each level must always add one level of indent. That's why an empty relative indent to the parent node tells us that the enclosing node is the Module.
                let has_expected_indentation = match self.context.options().indent_style() {
                    IndentStyle::Tab => {
                        relative_indent.len() == expected_indents
                            && relative_indent.chars().all(|c| c == '\t')
                    }
                    IndentStyle::Space => {
                        relative_indent.len()
                            == expected_indents
                                * self.context.options().indent_width().value() as usize
                            && relative_indent.chars().all(|c| c == ' ')
                    }
                };
                if !has_expected_indentation {
                    return None;
                }
            } else {
                // Simple-statement body of a compound statement (not a suite body).
                // Don't narrow the range because the formatter must run `FormatClauseBody` to determine if the body should be collapsed or not.
                return None;
            }
        }
        let saved_level = self.level;
        self.level += 1;
        Some(SavedLevel { level: saved_level })
    }
    /// Restores the indentation level saved by [`Self::enter_level`].
    #[expect(clippy::needless_pass_by_value)]
    fn leave_level(&mut self, saved_state: SavedLevel) {
        self.level = saved_state.level;
    }
}
/// Returns `true` if `node` is a "logical line": a statement, or one of the
/// clause-level nodes (decorator, except handler, `elif`/`else` clause, match
/// case) that range formatting treats as a formatting boundary.
pub(crate) const fn is_logical_line(node: AnyNodeRef) -> bool {
    // Make sure to update [`FormatEnclosingLine`] when changing this.
    if node.is_statement() || node.is_decorator() || node.is_except_handler() {
        return true;
    }
    node.is_elif_else_clause() || node.is_match_case()
}
/// Snapshot of the visitor's indentation level, created by `enter_level` and
/// consumed by `leave_level`.
#[derive(Debug)]
struct SavedLevel {
    // The `NarrowRange::level` value to restore on leave.
    level: usize,
}
/// Whether the code currently being processed is covered by a suppression comment.
#[derive(Copy, Clone, Default, Debug)]
enum Suppressed {
    /// Code is not suppressed
    #[default]
    No,
    /// The node is suppressed by a suppression comment in the same body block.
    Yes,
}
impl Suppressed {
    /// Returns `true` if the code is not suppressed.
    const fn is_no(self) -> bool {
        match self {
            Suppressed::No => true,
            Suppressed::Yes => false,
        }
    }
    /// Returns `true` if the code is suppressed.
    const fn is_yes(self) -> bool {
        match self {
            Suppressed::Yes => true,
            Suppressed::No => false,
        }
    }
}
impl From<bool> for Suppressed {
    fn from(value: bool) -> Self {
        match value {
            true => Suppressed::Yes,
            false => Suppressed::No,
        }
    }
}
/// Asserts that both endpoints of `range` fall on UTF-8 character boundaries of `source`.
fn assert_valid_char_boundaries(range: TextRange, source: &str) {
    let start = usize::from(range.start());
    let end = usize::from(range.end());
    assert!(source.is_char_boundary(start));
    assert!(source.is_char_boundary(end));
}
/// Formats the logical line (or module root) node that encloses the range to format.
struct FormatEnclosingNode<'a> {
    root: AnyNodeRef<'a>,
}
impl Format<PyFormatContext<'_>> for FormatEnclosingNode<'_> {
    /// Dispatches to the concrete node's formatter. Panics for any node that is
    /// not a logical line or module root, because those are the only valid roots
    /// for range formatting.
    fn fmt(&self, f: &mut Formatter<PyFormatContext<'_>>) -> FormatResult<()> {
        // Note: It's important that this supports formatting all nodes for which `is_logical_line`
        // returns `true`, plus the root `Mod` nodes.
        match self.root {
            AnyNodeRef::ModModule(node) => node.format().fmt(f),
            AnyNodeRef::ModExpression(node) => node.format().fmt(f),
            AnyNodeRef::StmtFunctionDef(node) => node.format().fmt(f),
            AnyNodeRef::StmtClassDef(node) => node.format().fmt(f),
            AnyNodeRef::StmtReturn(node) => node.format().fmt(f),
            AnyNodeRef::StmtDelete(node) => node.format().fmt(f),
            AnyNodeRef::StmtTypeAlias(node) => node.format().fmt(f),
            AnyNodeRef::StmtAssign(node) => node.format().fmt(f),
            AnyNodeRef::StmtAugAssign(node) => node.format().fmt(f),
            AnyNodeRef::StmtAnnAssign(node) => node.format().fmt(f),
            AnyNodeRef::StmtFor(node) => node.format().fmt(f),
            AnyNodeRef::StmtWhile(node) => node.format().fmt(f),
            AnyNodeRef::StmtIf(node) => node.format().fmt(f),
            AnyNodeRef::StmtWith(node) => node.format().fmt(f),
            AnyNodeRef::StmtMatch(node) => node.format().fmt(f),
            AnyNodeRef::StmtRaise(node) => node.format().fmt(f),
            AnyNodeRef::StmtTry(node) => node.format().fmt(f),
            AnyNodeRef::StmtAssert(node) => node.format().fmt(f),
            AnyNodeRef::StmtImport(node) => node.format().fmt(f),
            AnyNodeRef::StmtImportFrom(node) => node.format().fmt(f),
            AnyNodeRef::StmtGlobal(node) => node.format().fmt(f),
            AnyNodeRef::StmtNonlocal(node) => node.format().fmt(f),
            AnyNodeRef::StmtExpr(node) => node.format().fmt(f),
            AnyNodeRef::StmtPass(node) => node.format().fmt(f),
            AnyNodeRef::StmtBreak(node) => node.format().fmt(f),
            AnyNodeRef::StmtContinue(node) => node.format().fmt(f),
            AnyNodeRef::StmtIpyEscapeCommand(node) => node.format().fmt(f),
            AnyNodeRef::ExceptHandlerExceptHandler(node) => node.format().fmt(f),
            AnyNodeRef::MatchCase(node) => node.format().fmt(f),
            AnyNodeRef::Decorator(node) => node.format().fmt(f),
            AnyNodeRef::ElifElseClause(node) => node.format().fmt(f),
            // Everything below is an expression-level (or sub-statement) node and
            // can never be the enclosing logical line of a formatting range.
            AnyNodeRef::ExprBoolOp(_)
            | AnyNodeRef::ExprNamed(_)
            | AnyNodeRef::ExprBinOp(_)
            | AnyNodeRef::ExprUnaryOp(_)
            | AnyNodeRef::ExprLambda(_)
            | AnyNodeRef::ExprIf(_)
            | AnyNodeRef::ExprDict(_)
            | AnyNodeRef::ExprSet(_)
            | AnyNodeRef::ExprListComp(_)
            | AnyNodeRef::ExprSetComp(_)
            | AnyNodeRef::ExprDictComp(_)
            | AnyNodeRef::ExprGenerator(_)
            | AnyNodeRef::ExprAwait(_)
            | AnyNodeRef::ExprYield(_)
            | AnyNodeRef::ExprYieldFrom(_)
            | AnyNodeRef::ExprCompare(_)
            | AnyNodeRef::ExprCall(_)
            | AnyNodeRef::InterpolatedElement(_)
            | AnyNodeRef::InterpolatedStringLiteralElement(_)
            | AnyNodeRef::InterpolatedStringFormatSpec(_)
            | AnyNodeRef::ExprFString(_)
            | AnyNodeRef::ExprTString(_)
            | AnyNodeRef::ExprStringLiteral(_)
            | AnyNodeRef::ExprBytesLiteral(_)
            | AnyNodeRef::ExprNumberLiteral(_)
            | AnyNodeRef::ExprBooleanLiteral(_)
            | AnyNodeRef::ExprNoneLiteral(_)
            | AnyNodeRef::ExprEllipsisLiteral(_)
            | AnyNodeRef::ExprAttribute(_)
            | AnyNodeRef::ExprSubscript(_)
            | AnyNodeRef::ExprStarred(_)
            | AnyNodeRef::ExprName(_)
            | AnyNodeRef::ExprList(_)
            | AnyNodeRef::ExprTuple(_)
            | AnyNodeRef::ExprSlice(_)
            | AnyNodeRef::ExprIpyEscapeCommand(_)
            | AnyNodeRef::FString(_)
            | AnyNodeRef::StringLiteral(_)
            | AnyNodeRef::TString(_)
            | AnyNodeRef::PatternMatchValue(_)
            | AnyNodeRef::PatternMatchSingleton(_)
            | AnyNodeRef::PatternMatchSequence(_)
            | AnyNodeRef::PatternMatchMapping(_)
            | AnyNodeRef::PatternMatchClass(_)
            | AnyNodeRef::PatternMatchStar(_)
            | AnyNodeRef::PatternMatchAs(_)
            | AnyNodeRef::PatternMatchOr(_)
            | AnyNodeRef::PatternArguments(_)
            | AnyNodeRef::PatternKeyword(_)
            | AnyNodeRef::Comprehension(_)
            | AnyNodeRef::Arguments(_)
            | AnyNodeRef::Parameters(_)
            | AnyNodeRef::Parameter(_)
            | AnyNodeRef::ParameterWithDefault(_)
            | AnyNodeRef::Keyword(_)
            | AnyNodeRef::Alias(_)
            | AnyNodeRef::WithItem(_)
            | AnyNodeRef::TypeParams(_)
            | AnyNodeRef::TypeParamTypeVar(_)
            | AnyNodeRef::TypeParamTypeVarTuple(_)
            | AnyNodeRef::TypeParamParamSpec(_)
            | AnyNodeRef::Identifier(_)
            | AnyNodeRef::BytesLiteral(_) => {
                panic!("Range formatting only supports formatting logical lines")
            }
        }
    }
}
/// Computes the level of indentation for `indentation` when using the configured [`IndentStyle`]
/// and [`IndentWidth`](ruff_formatter::IndentWidth).
///
/// Returns `None` if the indentation doesn't conform to the configured [`IndentStyle`] and
/// [`IndentWidth`](ruff_formatter::IndentWidth).
///
/// # Panics
/// If `offset` is outside of `source`.
fn indent_level(offset: TextSize, source: &str, options: &PyFormatOptions) -> Option<u16> {
    let indentation = indentation_at_offset(offset, source)?;
    let level = match options.indent_style() {
        // Tabs: every character must be a tab, one tab per level.
        IndentStyle::Tab => indentation
            .chars()
            .all(|c| c == '\t')
            .then(|| indentation.len()),
        // Spaces: every character must be a space and the width must be a
        // whole multiple of the configured indent width.
        IndentStyle::Space => {
            let indent_width = options.indent_width().value() as usize;
            let conforms =
                indentation.chars().all(|c| c == ' ') && indentation.len() % indent_width == 0;
            conforms.then(|| indentation.len() / indent_width)
        }
    }?;
    // Saturate rather than fail for absurdly deep indentation.
    Some(u16::try_from(level).unwrap_or(u16::MAX))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/cli.rs | crates/ruff_python_formatter/src/cli.rs | #![allow(clippy::print_stdout)]
use std::path::{Path, PathBuf};
use anyhow::{Context, Result};
use clap::{Parser, ValueEnum};
use ruff_formatter::SourceCode;
use ruff_python_ast::{PySourceType, PythonVersion};
use ruff_python_parser::{ParseOptions, parse};
use ruff_python_trivia::CommentRanges;
use ruff_text_size::Ranged;
use crate::comments::collect_comments;
use crate::{MagicTrailingComma, PreviewMode, PyFormatOptions, format_module_ast};
/// Destination for the formatted output, selected via `--emit`.
#[derive(ValueEnum, Clone, Debug)]
pub enum Emit {
    /// Write back to the original files
    Files,
    /// Write to stdout
    Stdout,
}
// NOTE: `///` doc comments on clap fields become `--help` text; use plain `//`
// comments here to document fields without changing the CLI output.
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
#[expect(clippy::struct_excessive_bools)] // It's only the dev cli anyways
pub struct Cli {
    /// Python files to format. If there are none, stdin will be used. `-` as stdin is not supported
    pub files: Vec<PathBuf>,
    // Where to write the result; see [`Emit`].
    #[clap(long)]
    pub emit: Option<Emit>,
    /// Run in 'check' mode. Exits with 0 if input is formatted correctly. Exits with 1 and prints
    /// a diff if formatting is required.
    #[clap(long)]
    pub check: bool,
    // Enable preview-style formatting.
    #[clap(long)]
    pub preview: bool,
    // Print the formatter's intermediate representation (IR) for debugging.
    #[clap(long)]
    pub print_ir: bool,
    // Print the collected comment decoration (preceding/following/enclosing nodes).
    #[clap(long)]
    pub print_comments: bool,
    // Ignore magic trailing commas (e.g. `(a, b,)`) instead of respecting them.
    #[clap(long, short = 'C')]
    pub skip_magic_trailing_comma: bool,
    // Python version used for parsing and formatting decisions.
    #[clap(long)]
    pub target_version: PythonVersion,
}
/// Parses `source`, formats it with options derived from `cli` and `source_path`,
/// optionally prints debug output (formatter IR via `--print-ir`, comment
/// decoration via `--print-comments`), and returns the formatted code.
///
/// # Errors
/// Returns an error if the source has a syntax error, or if formatting or
/// printing the formatter IR fails.
pub fn format_and_debug_print(source: &str, cli: &Cli, source_path: &Path) -> Result<String> {
    let source_type = PySourceType::from(source_path);
    // Parse the AST.
    let parsed = parse(
        source,
        ParseOptions::from(source_type).with_target_version(cli.target_version),
    )
    .context("Syntax error in input")?;
    let options = PyFormatOptions::from_extension(source_path)
        .with_preview(if cli.preview {
            PreviewMode::Enabled
        } else {
            PreviewMode::Disabled
        })
        .with_magic_trailing_comma(if cli.skip_magic_trailing_comma {
            MagicTrailingComma::Ignore
        } else {
            MagicTrailingComma::Respect
        })
        .with_target_version(cli.target_version);
    let source_code = SourceCode::new(source);
    let comment_ranges = CommentRanges::from(parsed.tokens());
    let formatted = format_module_ast(&parsed, &comment_ranges, source, options)
        .context("Failed to format node")?;
    if cli.print_ir {
        println!("{}", formatted.document().display(source_code));
    }
    if cli.print_comments {
        // Print preceding, following and enclosing nodes
        let decorated_comments = collect_comments(parsed.syntax(), source_code, &comment_ranges);
        if !decorated_comments.is_empty() {
            println!("# Comment decoration: Range, Preceding, Following, Enclosing, Comment");
        }
        for comment in decorated_comments {
            println!(
                "{:?}, {:?}, {:?}, {:?}, {:?}",
                comment.slice().range(),
                comment
                    .preceding_node()
                    .map(|node| (node.kind(), node.range())),
                comment
                    .following_node()
                    .map(|node| (node.kind(), node.range())),
                (
                    comment.enclosing_node().kind(),
                    comment.enclosing_node().range()
                ),
                comment.slice().text(source_code),
            );
        }
        println!("{:#?}", formatted.context().comments().debug(source_code));
    }
    Ok(formatted
        .print()
        .context("Failed to print the formatter IR")?
        .as_code()
        .to_string())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/builders.rs | crates/ruff_python_formatter/src/builders.rs | use ruff_formatter::{Argument, Arguments, format_args, write};
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::context::{NodeLevel, WithNodeLevel};
use crate::other::commas::has_magic_trailing_comma;
use crate::prelude::*;
/// Adds parentheses around `content` — and indents it — only when it doesn't fit
/// on a single line. Indentation can be disabled via
/// [`ParenthesizeIfExpands::with_indent`].
pub(crate) fn parenthesize_if_expands<'ast, T>(content: &T) -> ParenthesizeIfExpands<'_, 'ast>
where
    T: Format<PyFormatContext<'ast>>,
{
    let inner = Argument::new(content);
    ParenthesizeIfExpands { inner, indent: true }
}
/// Format element created by [`parenthesize_if_expands`]: emits parentheses around
/// `inner` only when the enclosing group breaks.
pub(crate) struct ParenthesizeIfExpands<'a, 'ast> {
    inner: Argument<'a, PyFormatContext<'ast>>,
    // Whether the parenthesized content gets indented when the group breaks.
    indent: bool,
}
impl ParenthesizeIfExpands<'_, '_> {
    /// Sets whether the content is indented when the parentheses are inserted.
    pub(crate) fn with_indent(self, indent: bool) -> Self {
        Self { indent, ..self }
    }
}
impl<'ast> Format<PyFormatContext<'ast>> for ParenthesizeIfExpands<'_, 'ast> {
    fn fmt(&self, f: &mut Formatter<PyFormatContext<'ast>>) -> FormatResult<()> {
        {
            // The content is formatted as a parenthesized expression so nested
            // elements know they're inside parentheses.
            let mut f = WithNodeLevel::new(NodeLevel::ParenthesizedExpression, f);
            if self.indent {
                // Parentheses, a soft line break, and indentation are only emitted
                // when the group identified by `parens_id` breaks.
                let parens_id = f.group_id("indented_parenthesize_if_expands");
                group(&format_args![
                    if_group_breaks(&token("(")),
                    indent_if_group_breaks(
                        &format_args![soft_line_break(), &Arguments::from(&self.inner)],
                        parens_id
                    ),
                    soft_line_break(),
                    if_group_breaks(&token(")"))
                ])
                .with_id(Some(parens_id))
                .fmt(&mut f)
            } else {
                // Same as above but without indenting or line-breaking the content.
                group(&format_args![
                    if_group_breaks(&token("(")),
                    Arguments::from(&self.inner),
                    if_group_breaks(&token(")")),
                ])
                .fmt(&mut f)
            }
        }
    }
}
/// Provides Python specific extensions to [`Formatter`].
pub(crate) trait PyFormatterExtensions<'ast, 'buf> {
/// A builder that separates each element by a `,` and a [`soft_line_break_or_space`].
/// It emits a trailing `,` that is only shown if the enclosing group expands. It forces the enclosing
/// group to expand if the last item has a trailing `comma` and the magical comma option is enabled.
fn join_comma_separated<'fmt>(
&'fmt mut self,
sequence_end: TextSize,
) -> JoinCommaSeparatedBuilder<'fmt, 'ast, 'buf>;
}
impl<'buf, 'ast> PyFormatterExtensions<'ast, 'buf> for PyFormatter<'ast, 'buf> {
    // Delegates to the builder constructor.
    fn join_comma_separated<'fmt>(
        &'fmt mut self,
        sequence_end: TextSize,
    ) -> JoinCommaSeparatedBuilder<'fmt, 'ast, 'buf> {
        JoinCommaSeparatedBuilder::new(self, sequence_end)
    }
}
/// Tracks how many entries have been written so far and where the last one ended.
#[derive(Copy, Clone, Debug)]
enum Entries {
    /// No previous entry
    None,
    /// One previous ending at the given position.
    One(TextSize),
    /// More than one entry, the last one ending at the specific position.
    MoreThanOne(TextSize),
}
impl Entries {
    /// End offset of the most recently written entry, if any.
    fn position(self) -> Option<TextSize> {
        if let Entries::One(position) | Entries::MoreThanOne(position) = self {
            Some(position)
        } else {
            None
        }
    }
    /// Returns `true` if at least one entry has been written.
    const fn is_one_or_more(self) -> bool {
        matches!(self, Entries::One(_) | Entries::MoreThanOne(_))
    }
    /// Returns `true` if more than one entry has been written.
    const fn is_more_than_one(self) -> bool {
        matches!(self, Entries::MoreThanOne(_))
    }
    /// Records another entry ending at `end_position`.
    const fn next(self, end_position: TextSize) -> Self {
        if self.is_one_or_more() {
            Entries::MoreThanOne(end_position)
        } else {
            Entries::One(end_position)
        }
    }
}
/// Controls when [`JoinCommaSeparatedBuilder::finish`] emits a trailing comma.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub(crate) enum TrailingComma {
    /// Add a trailing comma if the group breaks and there's more than one element (or if the last
    /// element has a trailing comma and the magical trailing comma option is enabled).
    #[default]
    MoreThanOne,
    /// Add a trailing comma if the group breaks (or if the last element has a trailing comma and
    /// the magical trailing comma option is enabled).
    OneOrMore,
}
/// Builder created by [`PyFormatterExtensions::join_comma_separated`] that writes
/// comma-separated entries and handles the (magic) trailing comma.
pub(crate) struct JoinCommaSeparatedBuilder<'fmt, 'ast, 'buf> {
    // First error encountered, if any; later entries are skipped once set.
    result: FormatResult<()>,
    fmt: &'fmt mut PyFormatter<'ast, 'buf>,
    // How many entries were written, and where the last one ended.
    entries: Entries,
    // End offset of the whole sequence, used to search for a magic trailing comma.
    sequence_end: TextSize,
    trailing_comma: TrailingComma,
}
impl<'fmt, 'ast, 'buf> JoinCommaSeparatedBuilder<'fmt, 'ast, 'buf> {
    fn new(f: &'fmt mut PyFormatter<'ast, 'buf>, sequence_end: TextSize) -> Self {
        Self {
            fmt: f,
            result: Ok(()),
            entries: Entries::None,
            sequence_end,
            trailing_comma: TrailingComma::default(),
        }
    }
    /// Set the trailing comma behavior for the builder. Trailing commas will only be inserted if
    /// the group breaks, and will _always_ be inserted if the last element has a trailing comma
    /// (and the magical trailing comma option is enabled). However, this setting dictates whether
    /// trailing commas are inserted for single element groups.
    pub(crate) fn with_trailing_comma(mut self, trailing_comma: TrailingComma) -> Self {
        self.trailing_comma = trailing_comma;
        self
    }
    /// Writes `content` as the next entry, separated from the previous entry by a
    /// comma and a [`soft_line_break_or_space`].
    pub(crate) fn entry<T>(
        &mut self,
        node: &T,
        content: &dyn Format<PyFormatContext<'ast>>,
    ) -> &mut Self
    where
        T: Ranged,
    {
        self.entry_with_line_separator(node, content, soft_line_break_or_space())
    }
    /// Like [`Self::entry`] but with a caller-provided separator after the comma.
    pub(crate) fn entry_with_line_separator<N, Separator>(
        &mut self,
        node: &N,
        content: &dyn Format<PyFormatContext<'ast>>,
        separator: Separator,
    ) -> &mut Self
    where
        N: Ranged,
        Separator: Format<PyFormatContext<'ast>>,
    {
        self.result = self.result.and_then(|()| {
            // Separate from the previous entry; no separator before the first one.
            if self.entries.is_one_or_more() {
                write!(self.fmt, [token(","), separator])?;
            }
            self.entries = self.entries.next(node.end());
            content.fmt(self.fmt)
        });
        self
    }
    /// Writes each `(node, content)` pair as an entry.
    pub(crate) fn entries<T, I, F>(&mut self, entries: I) -> &mut Self
    where
        T: Ranged,
        F: Format<PyFormatContext<'ast>>,
        I: IntoIterator<Item = (T, F)>,
    {
        for (node, content) in entries {
            self.entry(&node, &content);
        }
        self
    }
    /// Writes each node, formatted with its default formatting, as an entry.
    pub(crate) fn nodes<'a, T, I>(&mut self, entries: I) -> &mut Self
    where
        T: Ranged + AsFormat<PyFormatContext<'ast>> + 'a,
        I: IntoIterator<Item = &'a T>,
    {
        for node in entries {
            self.entry(node, &node.format());
        }
        self
    }
    /// Finishes the sequence, emitting the trailing comma according to
    /// [`TrailingComma`] and the magic trailing comma in the source (if any).
    pub(crate) fn finish(&mut self) -> FormatResult<()> {
        self.result.and_then(|()| {
            // Don't add a magic trailing comma when formatting an f-string or t-string expression
            // that always must be flat because the `expand_parent` forces enclosing
            // groups to expand, e.g. `print(f"{(a,)} ")` would format the f-string in
            // flat mode but the `print` call gets expanded because of the `expand_parent`.
            if self
                .fmt
                .context()
                .interpolated_string_state()
                .can_contain_line_breaks()
                == Some(false)
            {
                return Ok(());
            }
            if let Some(last_end) = self.entries.position() {
                // Search for a trailing comma between the last entry and the sequence end.
                let magic_trailing_comma = has_magic_trailing_comma(
                    TextRange::new(last_end, self.sequence_end),
                    self.fmt.context(),
                );
                // If there is a single entry, only keep the magic trailing comma, don't add it if
                // it wasn't there -- unless the trailing comma behavior is set to one-or-more.
                if magic_trailing_comma
                    || self.trailing_comma == TrailingComma::OneOrMore
                    || self.entries.is_more_than_one()
                {
                    if_group_breaks(&token(",")).fmt(self.fmt)?;
                }
                if magic_trailing_comma {
                    expand_parent().fmt(self.fmt)?;
                }
            }
            Ok(())
        })
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/options.rs | crates/ruff_python_formatter/src/options.rs | use std::fmt;
use std::path::Path;
use std::str::FromStr;
use ruff_formatter::printer::{LineEnding, PrinterOptions, SourceMapGeneration};
use ruff_formatter::{FormatOptions, IndentStyle, IndentWidth, LineWidth};
use ruff_macros::CacheKey;
use ruff_python_ast::{self as ast, PySourceType};
/// Resolved options for formatting one individual file. The difference to `FormatterSettings`
/// is that `FormatterSettings` stores the settings for multiple files (the entire project, a subdirectory, ..)
#[derive(Clone, Debug)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(default, deny_unknown_fields)
)]
pub struct PyFormatOptions {
    /// Whether we're in a `.py` file or `.pyi` file, which have different rules.
    source_type: PySourceType,
    /// The (minimum) Python version used to run the formatted code. This is used
    /// to determine the supported Python syntax.
    target_version: ast::PythonVersion,
    /// Specifies the indent style:
    /// * Either a tab
    /// * or a specific amount of spaces
    #[cfg_attr(feature = "serde", serde(default = "default_indent_style"))]
    indent_style: IndentStyle,
    /// The preferred line width at which the formatter should wrap lines.
    #[cfg_attr(feature = "serde", serde(default = "default_line_width"))]
    line_width: LineWidth,
    /// The visual width of a tab character.
    #[cfg_attr(feature = "serde", serde(default = "default_indent_width"))]
    indent_width: IndentWidth,
    /// The line ending style used in the formatted output.
    line_ending: LineEnding,
    /// The preferred quote style to use (single vs double quotes).
    quote_style: QuoteStyle,
    /// Whether to expand lists or elements if they have a trailing comma such as `(a, b,)`.
    magic_trailing_comma: MagicTrailingComma,
    /// Should the formatter generate a source map that allows mapping source positions to positions
    /// in the formatted document.
    source_map_generation: SourceMapGeneration,
    /// Whether to format code snippets in docstrings or not.
    ///
    /// By default this is disabled (opt-in), but the plan is to make this
    /// enabled by default (opt-out) in the future.
    docstring_code: DocstringCode,
    /// The preferred line width at which the formatter should wrap lines in
    /// docstring code examples. This only has an impact when `docstring_code`
    /// is enabled.
    docstring_code_line_width: DocstringCodeLineWidth,
    /// Whether preview style formatting is enabled or not
    preview: PreviewMode,
}
// The default line width of 88 columns (also used as the serde default).
fn default_line_width() -> LineWidth {
    LineWidth::try_from(88).unwrap()
}
// Spaces are the default indent style (also used as the serde default).
fn default_indent_style() -> IndentStyle {
    IndentStyle::Space
}
// The default indent width of 4 (also used as the serde default).
fn default_indent_width() -> IndentWidth {
    IndentWidth::try_from(4).unwrap()
}
impl Default for PyFormatOptions {
    fn default() -> Self {
        Self {
            source_type: PySourceType::default(),
            target_version: ast::PythonVersion::default(),
            // Reuse the serde default helpers so `Default` and deserialization agree.
            indent_style: default_indent_style(),
            line_width: default_line_width(),
            indent_width: default_indent_width(),
            quote_style: QuoteStyle::default(),
            line_ending: LineEnding::default(),
            magic_trailing_comma: MagicTrailingComma::default(),
            source_map_generation: SourceMapGeneration::default(),
            docstring_code: DocstringCode::default(),
            docstring_code_line_width: DocstringCodeLineWidth::default(),
            preview: PreviewMode::default(),
        }
    }
}
impl PyFormatOptions {
    /// Creates options with the source type derived from `path`'s extension;
    /// all other settings use their defaults. (This function is infallible;
    /// unknown extensions fall back to the default source type.)
    pub fn from_extension(path: &Path) -> Self {
        Self::from_source_type(PySourceType::from(path))
    }
    /// Creates options for `source_type`, leaving all other settings at their defaults.
    pub fn from_source_type(source_type: PySourceType) -> Self {
        Self {
            source_type,
            ..Self::default()
        }
    }
    pub const fn target_version(&self) -> ast::PythonVersion {
        self.target_version
    }
    pub const fn magic_trailing_comma(&self) -> MagicTrailingComma {
        self.magic_trailing_comma
    }
    pub const fn quote_style(&self) -> QuoteStyle {
        self.quote_style
    }
    pub const fn source_type(&self) -> PySourceType {
        self.source_type
    }
    pub const fn source_map_generation(&self) -> SourceMapGeneration {
        self.source_map_generation
    }
    pub const fn line_ending(&self) -> LineEnding {
        self.line_ending
    }
    pub const fn docstring_code(&self) -> DocstringCode {
        self.docstring_code
    }
    pub const fn docstring_code_line_width(&self) -> DocstringCodeLineWidth {
        self.docstring_code_line_width
    }
    pub const fn preview(&self) -> PreviewMode {
        self.preview
    }
    // Builder-style setters: each consumes `self`, overrides one setting, and
    // returns the updated options.
    #[must_use]
    pub fn with_target_version(mut self, target_version: ast::PythonVersion) -> Self {
        self.target_version = target_version;
        self
    }
    #[must_use]
    pub fn with_indent_width(mut self, indent_width: IndentWidth) -> Self {
        self.indent_width = indent_width;
        self
    }
    #[must_use]
    pub fn with_quote_style(mut self, style: QuoteStyle) -> Self {
        self.quote_style = style;
        self
    }
    #[must_use]
    pub fn with_magic_trailing_comma(mut self, trailing_comma: MagicTrailingComma) -> Self {
        self.magic_trailing_comma = trailing_comma;
        self
    }
    #[must_use]
    pub fn with_indent_style(mut self, indent_style: IndentStyle) -> Self {
        self.indent_style = indent_style;
        self
    }
    #[must_use]
    pub fn with_line_width(mut self, line_width: LineWidth) -> Self {
        self.line_width = line_width;
        self
    }
    #[must_use]
    pub fn with_line_ending(mut self, line_ending: LineEnding) -> Self {
        self.line_ending = line_ending;
        self
    }
    #[must_use]
    pub fn with_docstring_code(mut self, docstring_code: DocstringCode) -> Self {
        self.docstring_code = docstring_code;
        self
    }
    #[must_use]
    pub fn with_docstring_code_line_width(mut self, line_width: DocstringCodeLineWidth) -> Self {
        self.docstring_code_line_width = line_width;
        self
    }
    #[must_use]
    pub fn with_preview(mut self, preview: PreviewMode) -> Self {
        self.preview = preview;
        self
    }
    #[must_use]
    pub fn with_source_map_generation(mut self, source_map: SourceMapGeneration) -> Self {
        self.source_map_generation = source_map;
        self
    }
}
impl FormatOptions for PyFormatOptions {
    fn indent_style(&self) -> IndentStyle {
        self.indent_style
    }
    fn indent_width(&self) -> IndentWidth {
        self.indent_width
    }
    fn line_width(&self) -> LineWidth {
        self.line_width
    }
    // Projects the Python-specific options down to the printer-level subset.
    fn as_print_options(&self) -> PrinterOptions {
        PrinterOptions {
            indent_width: self.indent_width,
            line_width: self.line_width,
            line_ending: self.line_ending,
            indent_style: self.indent_style,
        }
    }
}
/// The quote character the formatter prefers for string literals.
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, CacheKey)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(rename_all = "kebab-case")
)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum QuoteStyle {
    Single,
    #[default]
    Double,
    Preserve,
}
impl QuoteStyle {
    /// Returns `true` for [`QuoteStyle::Preserve`].
    pub const fn is_preserve(self) -> bool {
        match self {
            QuoteStyle::Preserve => true,
            QuoteStyle::Single | QuoteStyle::Double => false,
        }
    }
    /// Returns the string representation of the quote style.
    pub const fn as_str(&self) -> &'static str {
        match self {
            QuoteStyle::Single => "single",
            QuoteStyle::Double => "double",
            QuoteStyle::Preserve => "preserve",
        }
    }
}
impl fmt::Display for QuoteStyle {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
impl FromStr for QuoteStyle {
    type Err = &'static str;
    /// Accepts the literal quote characters as well as capitalized and
    /// lowercase style names.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if matches!(s, "\"" | "double" | "Double") {
            Ok(Self::Double)
        } else if matches!(s, "'" | "single" | "Single") {
            Ok(Self::Single)
        } else if matches!(s, "preserve" | "Preserve") {
            Ok(Self::Preserve)
        } else {
            // TODO: replace this error with a diagnostic
            Err("Value not supported for QuoteStyle")
        }
    }
}
/// Whether a trailing comma in the source forces the formatter to keep the
/// surrounding collection expanded.
#[derive(Copy, Clone, Debug, Default, CacheKey)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(rename_all = "kebab-case")
)]
pub enum MagicTrailingComma {
    #[default]
    Respect,
    Ignore,
}
impl MagicTrailingComma {
    /// Returns `true` for [`MagicTrailingComma::Respect`].
    pub const fn is_respect(self) -> bool {
        match self {
            Self::Respect => true,
            Self::Ignore => false,
        }
    }
    /// Returns `true` for [`MagicTrailingComma::Ignore`].
    pub const fn is_ignore(self) -> bool {
        match self {
            Self::Ignore => true,
            Self::Respect => false,
        }
    }
}
impl fmt::Display for MagicTrailingComma {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let s = match self {
            MagicTrailingComma::Respect => "respect",
            MagicTrailingComma::Ignore => "ignore",
        };
        f.write_str(s)
    }
}
impl FromStr for MagicTrailingComma {
    type Err = &'static str;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if matches!(s, "respect" | "Respect") {
            Ok(Self::Respect)
        } else if matches!(s, "ignore" | "Ignore") {
            Ok(Self::Ignore)
        } else {
            // TODO: replace this error with a diagnostic
            Err("Value not supported for MagicTrailingComma")
        }
    }
}
/// Whether preview-style formatting is enabled.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default, CacheKey)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "lowercase"))]
pub enum PreviewMode {
    #[default]
    Disabled,
    Enabled,
}
impl PreviewMode {
    /// Returns `true` for [`PreviewMode::Enabled`].
    pub const fn is_enabled(self) -> bool {
        match self {
            PreviewMode::Enabled => true,
            PreviewMode::Disabled => false,
        }
    }
}
impl fmt::Display for PreviewMode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Self::Disabled => "disabled",
            Self::Enabled => "enabled",
        })
    }
}
/// Whether code snippets in docstrings are formatted.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default, CacheKey)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "lowercase"))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum DocstringCode {
    #[default]
    Disabled,
    Enabled,
}
impl DocstringCode {
    /// Returns `true` for [`DocstringCode::Enabled`].
    pub const fn is_enabled(self) -> bool {
        match self {
            DocstringCode::Enabled => true,
            DocstringCode::Disabled => false,
        }
    }
}
impl fmt::Display for DocstringCode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Self::Disabled => "disabled",
            Self::Enabled => "enabled",
        })
    }
}
/// The line width applied when formatting code examples inside docstrings.
#[derive(Copy, Clone, Default, Eq, PartialEq, CacheKey)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(untagged, rename_all = "lowercase")
)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum DocstringCodeLineWidth {
    /// Wrap docstring code examples at a fixed line width.
    #[cfg_attr(feature = "schemars", schemars(schema_with = "schema::fixed"))]
    Fixed(LineWidth),
    /// Respect the line length limit setting for the surrounding Python code.
    #[default]
    #[cfg_attr(
        feature = "serde",
        serde(deserialize_with = "deserialize_docstring_code_line_width_dynamic")
    )]
    #[cfg_attr(feature = "schemars", schemars(schema_with = "schema::dynamic"))]
    Dynamic,
}
#[cfg(feature = "schemars")]
mod schema {
    use ruff_formatter::LineWidth;
    use schemars::{Schema, SchemaGenerator};
    use serde_json::Value;
    /// A dummy type that is used to generate a schema for `DocstringCodeLineWidth::Dynamic`.
    pub(super) fn dynamic(_: &mut SchemaGenerator) -> Schema {
        schemars::json_schema!({ "const": "dynamic" })
    }
    // We use a manual schema for `fixed` even though it isn't strictly necessary according to the
    // JSON schema specification to work around a bug in Even Better TOML with `allOf`.
    // https://github.com/astral-sh/ruff/issues/15978#issuecomment-2639547101
    //
    // The only difference to the automatically derived schema is that we use `oneOf` instead of
    // `allOf`. There's no semantic difference between `allOf` and `oneOf` for single element lists.
    pub(super) fn fixed(generator: &mut SchemaGenerator) -> Schema {
        let schema = generator.subschema_for::<LineWidth>();
        let mut schema_object = Schema::default();
        let map = schema_object.ensure_object();
        map.insert(
            "description".to_string(),
            Value::String("Wrap docstring code examples at a fixed line width.".to_string()),
        );
        map.insert("oneOf".to_string(), Value::Array(vec![schema.into()]));
        schema_object
    }
}
impl fmt::Debug for DocstringCodeLineWidth {
    // Render `Fixed(n)` as the bare number and `Dynamic` as `dynamic`,
    // mirroring the (untagged) serialized representation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            DocstringCodeLineWidth::Fixed(width) => width.value().fmt(f),
            DocstringCodeLineWidth::Dynamic => f.write_str("dynamic"),
        }
    }
}
impl fmt::Display for DocstringCodeLineWidth {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Dynamic => f.write_str("dynamic"),
            // Delegate to `LineWidth`'s own `Display` for the numeric form.
            Self::Fixed(width) => fmt::Display::fmt(width, f),
        }
    }
}
/// Responsible for deserializing the `DocstringCodeLineWidth::Dynamic`
/// variant: accepts exactly the string `"dynamic"` and rejects anything else.
fn deserialize_docstring_code_line_width_dynamic<'de, D>(d: D) -> Result<(), D::Error>
where
    D: serde::Deserializer<'de>,
{
    use serde::{Deserialize, de::Error};
    let raw = String::deserialize(d)?;
    if raw == "dynamic" {
        Ok(())
    } else {
        Err(D::Error::invalid_value(
            serde::de::Unexpected::Str(raw.as_str()),
            &"dynamic",
        ))
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/main.rs | crates/ruff_python_formatter/src/main.rs | use std::io::{Read, Write, stdout};
use std::path::Path;
use std::{fs, io};
use anyhow::{Context, Result, bail};
use clap::Parser as ClapParser;
use ruff_python_formatter::cli::{Cli, Emit, format_and_debug_print};
/// Read a `String` from `stdin`.
pub(crate) fn read_from_stdin() -> Result<String> {
    let mut input = String::new();
    // Lock once so the whole read happens without re-acquiring the handle.
    io::stdin().lock().read_to_string(&mut input)?;
    Ok(input)
}
/// CLI entry point: formats stdin when no files are given, otherwise formats
/// each listed file (writing back in place or printing to stdout per `--emit`).
fn main() -> Result<()> {
    let cli: Cli = Cli::parse();
    if cli.files.is_empty() {
        // Reading from stdin: the only valid emit target is stdout.
        if !matches!(cli.emit, None | Some(Emit::Stdout)) {
            bail!(
                "Can only write to stdout when formatting from stdin, but you asked for {:?}",
                cli.emit
            );
        }
        let source = read_from_stdin()?;
        // It seems reasonable to give this a dummy name
        let formatted = format_and_debug_print(&source, &cli, Path::new("stdin.py"))?;
        if cli.check {
            // `--check` mode: succeed only if the input is already formatted.
            if formatted == source {
                return Ok(());
            }
            bail!("Content not correctly formatted")
        }
        stdout().lock().write_all(formatted.as_bytes())?;
    } else {
        for file in &cli.files {
            let source = fs::read_to_string(file)
                .with_context(|| format!("Could not read {}: ", file.display()))?;
            let formatted = format_and_debug_print(&source, &cli, file)?;
            match cli.emit {
                Some(Emit::Stdout) => stdout().lock().write_all(formatted.as_bytes())?,
                // Writing back to the input file is the default behavior.
                None | Some(Emit::Files) => {
                    fs::write(file, formatted.as_bytes()).with_context(|| {
                        format!("Could not write to {}, exiting", file.display())
                    })?;
                }
            }
        }
    }
    Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/verbatim.rs | crates/ruff_python_formatter/src/verbatim.rs | use std::borrow::Cow;
use std::iter::FusedIterator;
use std::slice::Iter;
use ruff_formatter::{FormatError, write};
use ruff_python_ast::AnyNodeRef;
use ruff_python_ast::Stmt;
use ruff_python_ast::token::{Token as AstToken, TokenKind};
use ruff_python_trivia::lines_before;
use ruff_source_file::LineRanges;
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::comments::format::{empty_lines, format_comment};
use crate::comments::{SourceComment, leading_comments, trailing_comments};
use crate::prelude::*;
use crate::statement::clause::ClauseHeader;
use crate::statement::suite::SuiteChildStatement;
use crate::statement::trailing_semicolon;
/// Returns `true` if the statements coming after `leading_or_trailing_comments` are suppressed.
///
/// The result is only correct if called for statement comments in a non-suppressed range.
///
/// # Panics
/// If `leading_or_trailing_comments` contain any range that's outside of `source`.
pub(crate) fn starts_suppression(
    leading_or_trailing_comments: &[SourceComment],
    source: &str,
) -> bool {
    let mut ranges = CommentRangeIter::outside_suppression(leading_or_trailing_comments, source);
    // Drain the iterator; its suppression state then reflects the last comment seen.
    while ranges.next().is_some() {}
    matches!(ranges.in_suppression, InSuppression::Yes)
}
/// Returns `true` if the statements coming after `leading_or_trailing_comments` are no longer suppressed.
///
/// The result is only correct if called for statement comments in a suppressed range.
///
/// # Panics
/// If `leading_or_trailing_comments` contain any range that's outside of `source`.
pub(crate) fn ends_suppression(
    leading_or_trailing_comments: &[SourceComment],
    source: &str,
) -> bool {
    let mut ranges = CommentRangeIter::in_suppression(leading_or_trailing_comments, source);
    // Drain the iterator; its suppression state then reflects the last comment seen.
    while ranges.next().is_some() {}
    !matches!(ranges.in_suppression, InSuppression::Yes)
}
/// Disables formatting for all statements between the `first_suppressed` that has a leading `fmt: off` comment
/// and the first trailing or leading `fmt: on` comment. The statements are formatted as they appear in the source code.
///
/// Returns the last formatted statement.
///
/// ## Panics
/// If `first_suppressed` has no leading suppression comment.
#[cold]
pub(crate) fn write_suppressed_statements_starting_with_leading_comment<'a>(
    // The first suppressed statement
    first_suppressed: SuiteChildStatement<'a>,
    statements: &mut std::slice::Iter<'a, Stmt>,
    f: &mut PyFormatter,
) -> FormatResult<&'a Stmt> {
    let comments = f.context().comments().clone();
    let source = f.context().source();
    let mut leading_comment_ranges =
        CommentRangeIter::outside_suppression(comments.leading(first_suppressed), source);
    // The first range must start a suppression (the caller guarantees a leading `fmt: off`).
    let before_format_off = leading_comment_ranges
        .next()
        .expect("Suppressed node to have leading comments");
    let (formatted_comments, format_off_comment) = before_format_off.unwrap_suppression_starts();
    // Format the leading comments before the fmt off
    // ```python
    // # leading comment that gets formatted
    // # fmt: off
    // statement
    // ```
    write!(
        f,
        [
            leading_comments(formatted_comments),
            // Format the off comment without adding any trailing new lines
            format_comment(format_off_comment)
        ]
    )?;
    format_off_comment.mark_formatted();
    // Now inside a suppressed range
    write_suppressed_statements(
        format_off_comment,
        first_suppressed,
        leading_comment_ranges.as_slice(),
        statements,
        f,
    )
}
/// Disables formatting for all statements between the `last_formatted` and the first trailing or leading `fmt: on` comment.
/// The statements are formatted as they appear in the source code.
///
/// Returns the last formatted statement.
///
/// ## Panics
/// If `last_formatted` has no trailing suppression comment.
#[cold]
pub(crate) fn write_suppressed_statements_starting_with_trailing_comment<'a>(
    last_formatted: SuiteChildStatement<'a>,
    statements: &mut std::slice::Iter<'a, Stmt>,
    f: &mut PyFormatter,
) -> FormatResult<&'a Stmt> {
    let comments = f.context().comments().clone();
    let source = f.context().source();
    let indentation = Indentation::from_stmt(last_formatted.statement(), source);
    let trailing_node_comments = comments.trailing(last_formatted);
    let mut trailing_comment_ranges =
        CommentRangeIter::outside_suppression(trailing_node_comments, source);
    // Formatted comments get formatted as part of the statement.
    let (_, mut format_off_comment) = trailing_comment_ranges
        .next()
        .expect("Suppressed statement to have trailing comments")
        .unwrap_suppression_starts();
    let maybe_suppressed = trailing_comment_ranges.as_slice();
    // Mark them as formatted so that calling the node's formatting doesn't format the comments.
    for comment in maybe_suppressed {
        comment.mark_formatted();
    }
    format_off_comment.mark_formatted();
    // Format the leading comments, the node, and the trailing comments up to the `fmt: off` comment.
    last_formatted.fmt(f)?;
    // Temporarily un-mark the `fmt: off` comment so it can be emitted verbatim below.
    format_off_comment.mark_unformatted();
    TrailingFormatOffComment(format_off_comment).fmt(f)?;
    for range in trailing_comment_ranges {
        match range {
            // A `fmt: off`..`fmt: on` sequence. Disable formatting for the in-between comments.
            // ```python
            // def test():
            //     pass
            //     # fmt: off
            //         # haha
            //     # fmt: on
            //     # fmt: off (maybe)
            // ```
            SuppressionComments::SuppressionEnds {
                suppressed_comments: _,
                format_on_comment,
                formatted_comments,
                format_off_comment: new_format_off_comment,
            } => {
                format_on_comment.mark_unformatted();
                for comment in formatted_comments {
                    comment.mark_unformatted();
                }
                write!(
                    f,
                    [
                        FormatVerbatimStatementRange {
                            verbatim_range: TextRange::new(
                                format_off_comment.end(),
                                format_on_comment.start(),
                            ),
                            indentation
                        },
                        trailing_comments(std::slice::from_ref(format_on_comment)),
                        trailing_comments(formatted_comments),
                    ]
                )?;
                // `fmt: off`..`fmt:on`..`fmt:off` sequence
                // ```python
                // def test():
                //     pass
                //     # fmt: off
                //         # haha
                //     # fmt: on
                //     # fmt: off
                // ```
                if let Some(new_format_off_comment) = new_format_off_comment {
                    new_format_off_comment.mark_unformatted();
                    TrailingFormatOffComment(new_format_off_comment).fmt(f)?;
                    format_off_comment = new_format_off_comment;
                } else {
                    // `fmt: off`..`fmt:on` sequence. The suppression ends here. Start formatting the nodes again.
                    return Ok(last_formatted.statement());
                }
            }
            // All comments in this range are suppressed
            SuppressionComments::Suppressed { comments: _ } => {}
            // SAFETY: Unreachable because the function returns as soon as it reaches the end of the suppressed range
            SuppressionComments::SuppressionStarts { .. }
            | SuppressionComments::Formatted { .. } => unreachable!(),
        }
    }
    // The statement with the suppression comment isn't the last statement in the suite.
    // Format the statements up to the first `fmt: on` comment (or end of the suite) as verbatim/suppressed.
    // ```python
    // a + b
    // # fmt: off
    //
    // def a():
    //     pass
    // ```
    if let Some(first_suppressed) = statements.next() {
        write_suppressed_statements(
            format_off_comment,
            SuiteChildStatement::Other(first_suppressed),
            comments.leading(first_suppressed),
            statements,
            f,
        )
    }
    // The suppression comment is the block's last node. Format any trailing comments as suppressed
    // ```python
    // def test():
    //     pass
    //     # fmt: off
    //     # a trailing comment
    // ```
    else if let Some(last_comment) = trailing_node_comments.last() {
        FormatVerbatimStatementRange {
            verbatim_range: TextRange::new(format_off_comment.end(), last_comment.end()),
            indentation,
        }
        .fmt(f)?;
        Ok(last_formatted.statement())
    }
    // The suppression comment is the very last code in the block. There's nothing more to format.
    // ```python
    // def test():
    //     pass
    //     # fmt: off
    // ```
    else {
        Ok(last_formatted.statement())
    }
}
/// Formats the statements from `first_suppressed` until the suppression ends (by a `fmt: on` comment)
/// as they appear in the source code.
fn write_suppressed_statements<'a>(
    // The `fmt: off` comment that starts the suppressed range. Can be a leading comment of `first_suppressed` or
    // a trailing comment of the previous node.
    format_off_comment: &SourceComment,
    // The first suppressed statement
    first_suppressed: SuiteChildStatement<'a>,
    // The leading comments of `first_suppressed` that come after the `format_off_comment`
    first_suppressed_leading_comments: &[SourceComment],
    // The remaining statements
    statements: &mut std::slice::Iter<'a, Stmt>,
    f: &mut PyFormatter,
) -> FormatResult<&'a Stmt> {
    let comments = f.context().comments().clone();
    let source = f.context().source();
    let mut statement = first_suppressed;
    let mut leading_node_comments = first_suppressed_leading_comments;
    let mut format_off_comment = format_off_comment;
    let indentation = Indentation::from_stmt(first_suppressed.statement(), source);
    // Walk statement by statement until a `fmt: on` comment ends the suppression
    // (or the suite runs out of statements).
    loop {
        for range in CommentRangeIter::in_suppression(leading_node_comments, source) {
            match range {
                // All leading comments are suppressed
                // ```python
                // # suppressed comment
                // statement
                // ```
                SuppressionComments::Suppressed { comments } => {
                    for comment in comments {
                        comment.mark_formatted();
                    }
                }
                // Node has a leading `fmt: on` comment and maybe another `fmt: off` comment
                // ```python
                // # suppressed comment (optional)
                // # fmt: on
                // # formatted comment (optional)
                // # fmt: off (optional)
                // statement
                // ```
                SuppressionComments::SuppressionEnds {
                    suppressed_comments,
                    format_on_comment,
                    formatted_comments,
                    format_off_comment: new_format_off_comment,
                } => {
                    for comment in suppressed_comments {
                        comment.mark_formatted();
                    }
                    write!(
                        f,
                        [
                            FormatVerbatimStatementRange {
                                verbatim_range: TextRange::new(
                                    format_off_comment.end(),
                                    format_on_comment.start(),
                                ),
                                indentation
                            },
                            leading_comments(std::slice::from_ref(format_on_comment)),
                            leading_comments(formatted_comments),
                        ]
                    )?;
                    if let Some(new_format_off_comment) = new_format_off_comment {
                        format_off_comment = new_format_off_comment;
                        format_comment(format_off_comment).fmt(f)?;
                        format_off_comment.mark_formatted();
                    } else {
                        // Suppression ends here. Test if the node has a trailing suppression comment and, if so,
                        // recurse and format the trailing comments and the following statements as suppressed.
                        return if comments
                            .trailing(statement)
                            .iter()
                            .any(|comment| comment.is_suppression_off_comment(source))
                        {
                            // Node has a trailing suppression comment, hell yeah, start all over again.
                            write_suppressed_statements_starting_with_trailing_comment(
                                statement, statements, f,
                            )
                        } else {
                            // Formats the trailing comments
                            statement.fmt(f)?;
                            Ok(statement.statement())
                        };
                    }
                }
                // Unreachable because the function exits as soon as it reaches the end of the suppression
                // and it already starts in a suppressed range.
                SuppressionComments::SuppressionStarts { .. } => unreachable!(),
                SuppressionComments::Formatted { .. } => unreachable!(),
            }
        }
        comments.mark_verbatim_node_comments_formatted(AnyNodeRef::from(statement));
        for range in CommentRangeIter::in_suppression(comments.trailing(statement), source) {
            match range {
                // All trailing comments are suppressed
                // ```python
                // statement
                // # suppressed
                // ```
                SuppressionComments::Suppressed { comments } => {
                    for comment in comments {
                        comment.mark_formatted();
                    }
                }
                // Node has a trailing `fmt: on` comment and maybe another `fmt: off` comment
                // ```python
                // statement
                // # suppressed comment (optional)
                // # fmt: on
                // # formatted comment (optional)
                // # fmt: off (optional)
                // ```
                SuppressionComments::SuppressionEnds {
                    suppressed_comments,
                    format_on_comment,
                    formatted_comments,
                    format_off_comment: new_format_off_comment,
                } => {
                    for comment in suppressed_comments {
                        comment.mark_formatted();
                    }
                    write!(
                        f,
                        [
                            FormatVerbatimStatementRange {
                                verbatim_range: TextRange::new(
                                    format_off_comment.end(),
                                    format_on_comment.start()
                                ),
                                indentation
                            },
                            format_comment(format_on_comment),
                            hard_line_break(),
                            trailing_comments(formatted_comments),
                        ]
                    )?;
                    format_on_comment.mark_formatted();
                    if let Some(new_format_off_comment) = new_format_off_comment {
                        format_off_comment = new_format_off_comment;
                        format_comment(format_off_comment).fmt(f)?;
                        format_off_comment.mark_formatted();
                    } else {
                        return Ok(statement.statement());
                    }
                }
                // Unreachable because the function exits as soon as it reaches the end of the suppression
                // and it already starts in a suppressed range.
                SuppressionComments::SuppressionStarts { .. } => unreachable!(),
                SuppressionComments::Formatted { .. } => unreachable!(),
            }
        }
        if let Some(next_statement) = statements.next() {
            statement = SuiteChildStatement::Other(next_statement);
            leading_node_comments = comments.leading(next_statement);
        } else {
            let mut current = AnyNodeRef::from(statement.statement());
            // Expand the range of the statement to include any trailing comments or semicolons.
            let end = loop {
                if let Some(comment) = comments.trailing(current).last() {
                    break comment.end();
                } else if let Some(child) = current.last_child_in_body() {
                    current = child;
                } else {
                    break trailing_semicolon(current, source)
                        .map_or(statement.end(), TextRange::end);
                }
            };
            FormatVerbatimStatementRange {
                verbatim_range: TextRange::new(format_off_comment.end(), end),
                indentation,
            }
            .fmt(f)?;
            return Ok(statement.statement());
        }
    }
}
/// Tracks whether `CommentRangeIter` is currently inside a `fmt: off`..`fmt: on` range.
#[derive(Copy, Clone, Debug)]
enum InSuppression {
    No,
    Yes,
}
/// One contiguous group of comments produced by [`CommentRangeIter`], classified
/// by how it interacts with `fmt: off`/`fmt: on` suppression.
#[derive(Debug)]
enum SuppressionComments<'a> {
    /// The first `fmt: off` comment.
    SuppressionStarts {
        /// The comments appearing before the `fmt: off` comment
        formatted_comments: &'a [SourceComment],
        format_off_comment: &'a SourceComment,
    },
    /// A `fmt: on` comment inside a suppressed range.
    SuppressionEnds {
        /// The comments before the `fmt: on` comment that should *not* be formatted.
        suppressed_comments: &'a [SourceComment],
        format_on_comment: &'a SourceComment,
        /// The comments after the `fmt: on` comment (if any), that should be formatted.
        formatted_comments: &'a [SourceComment],
        /// Any following `fmt: off` comment if any.
        /// * `None`: The suppression ends here (for good)
        /// * `Some`: A `fmt: off`..`fmt: on` .. `fmt: off` sequence. The suppression continues after
        ///   the `fmt: off` comment.
        format_off_comment: Option<&'a SourceComment>,
    },
    /// Comments that all fall into the suppressed range.
    Suppressed { comments: &'a [SourceComment] },
    /// Comments that all fall into the formatted range.
    Formatted {
        #[expect(unused)]
        comments: &'a [SourceComment],
    },
}
impl<'a> SuppressionComments<'a> {
    /// Returns the formatted comments and the `fmt: off` comment of a
    /// [`SuppressionComments::SuppressionStarts`] value.
    ///
    /// # Panics
    /// If `self` is any other variant.
    fn unwrap_suppression_starts(&self) -> (&'a [SourceComment], &'a SourceComment) {
        match self {
            SuppressionComments::SuppressionStarts {
                formatted_comments,
                format_off_comment,
            } => (formatted_comments, *format_off_comment),
            _ => panic!("Expected SuppressionStarts"),
        }
    }
}
/// Iterator that groups a slice of comments into [`SuppressionComments`] ranges
/// delimited by `fmt: off` / `fmt: on` comments.
struct CommentRangeIter<'a> {
    /// The comments that haven't been yielded yet.
    comments: &'a [SourceComment],
    source: &'a str,
    /// Whether the iterator is currently inside a suppressed range.
    in_suppression: InSuppression,
}
impl<'a> CommentRangeIter<'a> {
fn in_suppression(comments: &'a [SourceComment], source: &'a str) -> Self {
Self {
comments,
in_suppression: InSuppression::Yes,
source,
}
}
fn outside_suppression(comments: &'a [SourceComment], source: &'a str) -> Self {
Self {
comments,
in_suppression: InSuppression::No,
source,
}
}
/// Returns a slice containing the remaining comments.
fn as_slice(&self) -> &'a [SourceComment] {
self.comments
}
}
impl<'a> Iterator for CommentRangeIter<'a> {
    type Item = SuppressionComments<'a>;
    fn next(&mut self) -> Option<Self::Item> {
        if self.comments.is_empty() {
            return None;
        }
        Some(match self.in_suppression {
            // Inside of a suppressed range
            InSuppression::Yes => {
                // Search for the `fmt: on` that ends the suppression; everything before it stays suppressed.
                if let Some(format_on_position) = self
                    .comments
                    .iter()
                    .position(|comment| comment.is_suppression_on_comment(self.source))
                {
                    let (suppressed_comments, formatted) =
                        self.comments.split_at(format_on_position);
                    let (format_on_comment, rest) = formatted.split_first().unwrap();
                    // A following `fmt: off` re-enters suppression within the same comment run.
                    let (formatted_comments, format_off_comment) =
                        if let Some(format_off_position) = rest
                            .iter()
                            .position(|comment| comment.is_suppression_off_comment(self.source))
                        {
                            let (formatted_comments, suppressed_comments) =
                                rest.split_at(format_off_position);
                            let (format_off_comment, rest) =
                                suppressed_comments.split_first().unwrap();
                            self.comments = rest;
                            (formatted_comments, Some(format_off_comment))
                        } else {
                            self.in_suppression = InSuppression::No;
                            self.comments = &[];
                            (rest, None)
                        };
                    SuppressionComments::SuppressionEnds {
                        suppressed_comments,
                        format_on_comment,
                        formatted_comments,
                        format_off_comment,
                    }
                } else {
                    // No `fmt: on` in sight: the entire remainder is suppressed.
                    SuppressionComments::Suppressed {
                        comments: std::mem::take(&mut self.comments),
                    }
                }
            }
            // Outside of a suppression
            InSuppression::No => {
                if let Some(format_off_position) = self
                    .comments
                    .iter()
                    .position(|comment| comment.is_suppression_off_comment(self.source))
                {
                    self.in_suppression = InSuppression::Yes;
                    let (formatted_comments, suppressed) =
                        self.comments.split_at(format_off_position);
                    let format_off_comment = &suppressed[0];
                    self.comments = &suppressed[1..];
                    SuppressionComments::SuppressionStarts {
                        formatted_comments,
                        format_off_comment,
                    }
                } else {
                    // No suppression comment at all: everything remaining is formatted normally.
                    SuppressionComments::Formatted {
                        comments: std::mem::take(&mut self.comments),
                    }
                }
            }
        })
    }
}
// `next` keeps returning `None` once `comments` is empty, so the iterator is fused.
impl FusedIterator for CommentRangeIter<'_> {}
struct TrailingFormatOffComment<'a>(&'a SourceComment);
impl Format<PyFormatContext<'_>> for TrailingFormatOffComment<'_> {
fn fmt(&self, f: &mut Formatter<PyFormatContext<'_>>) -> FormatResult<()> {
debug_assert!(self.0.is_unformatted());
let lines_before_comment = lines_before(self.0.start(), f.context().source());
write!(
f,
[empty_lines(lines_before_comment), format_comment(self.0)]
)?;
self.0.mark_formatted();
Ok(())
}
}
/// Stores the indentation of a statement by storing the number of indentation characters.
/// Storing the number of indentation characters is sufficient because:
/// * Two indentations are equal if they result in the same column, regardless of the used tab size.
///   This implementation makes use of this fact and assumes a tab size of 1.
/// * The source document is correctly indented because it is valid Python code (or the formatter would have failed parsing the code).
#[derive(Copy, Clone)]
struct Indentation(u32);
impl Indentation {
    /// Measures the indentation of `stmt` by counting the whitespace characters
    /// between the start of its line and the statement itself.
    fn from_stmt(stmt: &Stmt, source: &str) -> Indentation {
        let line_start = source.line_start(stmt.start());
        let width = source[TextRange::new(line_start, stmt.start())]
            .chars()
            .take_while(|&c| is_indent_whitespace(c))
            .count();
        // A line's indentation always fits in `u32` because the source length does.
        Indentation(u32::try_from(width).expect("indentation to fit in u32"))
    }
    /// Strips up to `self.0` leading indentation characters from `ranged` and
    /// returns the narrowed range.
    fn trim_indent(self, ranged: impl Ranged, source: &str) -> TextRange {
        let range = ranged.range();
        // Space and tab are single-byte characters, so the char count equals the byte offset.
        let skipped = source[range]
            .chars()
            .take(self.0 as usize)
            .take_while(|&c| is_indent_whitespace(c))
            .count();
        TextRange::new(range.start() + TextSize::new(skipped as u32), range.end())
    }
}
/// Returns `true` for a space or tab character.
///
/// This is different than [`is_python_whitespace`](ruff_python_trivia::is_python_whitespace) in
/// that it returns `false` for a form feed character. Form feed characters are excluded because
/// they should be preserved in the suppressed output.
const fn is_indent_whitespace(c: char) -> bool {
    c == ' ' || c == '\t'
}
/// Formats a verbatim range where the top-level nodes are statements (or statement-level comments).
///
/// Formats each statement as written in the source code, but adds the right indentation to match
/// the indentation of formatted statements:
///
/// ```python
/// def test():
///   print("formatted")
///   # fmt: off
///   (
///         not_formatted + b
///     )
///   # fmt: on
/// ```
///
/// Gets formatted as
///
/// ```python
/// def test():
///     print("formatted")
///     # fmt: off
///     (
///         not_formatted + b
///     )
///     # fmt: on
/// ```
///
/// Notice how the `not_formatted + b` expression statement gets the same indentation as the `print` statement above,
/// but the indentation of the expression remains unchanged. It changes the indentation to:
/// * Prevent syntax errors because of different indentation levels between formatted and suppressed statements.
/// * Align with the `fmt: skip` where statements are indented as well, but inner expressions are formatted as is.
struct FormatVerbatimStatementRange {
    /// The source range that is reproduced verbatim.
    verbatim_range: TextRange,
    /// The shared indentation of the suppressed statements; stripped from every line.
    indentation: Indentation,
}
impl Format<PyFormatContext<'_>> for FormatVerbatimStatementRange {
    fn fmt(&self, f: &mut Formatter<PyFormatContext<'_>>) -> FormatResult<()> {
        // Split the verbatim range into logical lines so each can be re-indented individually.
        let logical_lines = LogicalLinesIter::new(
            f.context().tokens().in_range(self.verbatim_range).iter(),
            self.verbatim_range,
        );
        let mut first = true;
        for logical_line in logical_lines {
            let logical_line = logical_line?;
            let trimmed_line_range = self
                .indentation
                .trim_indent(&logical_line, f.context().source());
            // A line without any content, write an empty line, except for the first or last (indent only) line.
            if trimmed_line_range.is_empty() {
                if logical_line.has_trailing_newline {
                    if first {
                        hard_line_break().fmt(f)?;
                    } else {
                        empty_line().fmt(f)?;
                    }
                }
            } else {
                // Non empty line, write the text of the line
                write!(
                    f,
                    [
                        source_position(trimmed_line_range.start()),
                        verbatim_text(trimmed_line_range),
                        source_position(trimmed_line_range.end())
                    ]
                )?;
                // Write the line separator that terminates the line, except if it is the last line (that isn't separated by a hard line break).
                if logical_line.has_trailing_newline {
                    hard_line_break().fmt(f)?;
                }
            }
            first = false;
        }
        Ok(())
    }
}
/// Splits a token stream into [`LogicalLine`]s, treating newlines inside
/// parenthesized expressions as part of the same logical line.
struct LogicalLinesIter<'a> {
    tokens: Iter<'a, AstToken>,
    // The end of the last logical line
    last_line_end: TextSize,
    // The position where the content to lex ends.
    content_end: TextSize,
}
impl<'a> LogicalLinesIter<'a> {
    /// Creates an iterator over the logical lines covered by `verbatim_range`.
    fn new(tokens: Iter<'a, AstToken>, verbatim_range: TextRange) -> Self {
        let last_line_end = verbatim_range.start();
        let content_end = verbatim_range.end();
        Self {
            tokens,
            last_line_end,
            content_end,
        }
    }
}
impl Iterator for LogicalLinesIter<'_> {
    type Item = FormatResult<LogicalLine>;
    fn next(&mut self) -> Option<Self::Item> {
        // Bracket nesting depth; newlines inside brackets don't end a logical line.
        let mut parens = 0u32;
        let (content_end, full_end) = loop {
            match self.tokens.next() {
                Some(token) if token.kind() == TokenKind::Unknown => {
                    return Some(Err(FormatError::syntax_error(
                        "Unexpected token when lexing verbatim statement range.",
                    )));
                }
                Some(token) => match token.kind() {
                    TokenKind::Newline => break (token.start(), token.end()),
                    // Ignore if inside an expression
                    TokenKind::NonLogicalNewline if parens == 0 => {
                        break (token.start(), token.end());
                    }
                    TokenKind::Lbrace | TokenKind::Lpar | TokenKind::Lsqb => {
                        parens = parens.saturating_add(1);
                    }
                    TokenKind::Rbrace | TokenKind::Rpar | TokenKind::Rsqb => {
                        parens = parens.saturating_sub(1);
                    }
                    _ => {}
                },
                None => {
                    // Returns any content that comes after the last newline. This is mainly whitespace
                    // or characters that the `Lexer` skips, like a form-feed character.
                    return if self.last_line_end < self.content_end {
                        let content_start = self.last_line_end;
                        self.last_line_end = self.content_end;
                        Some(Ok(LogicalLine {
                            content_range: TextRange::new(content_start, self.content_end),
                            has_trailing_newline: false,
                        }))
                    } else {
                        None
                    };
                }
            }
        };
        let line_start = self.last_line_end;
        self.last_line_end = full_end;
        Some(Ok(LogicalLine {
            content_range: TextRange::new(line_start, content_end),
            has_trailing_newline: true,
        }))
    }
}
// Once the token stream and the trailing content are exhausted, `next` keeps returning `None`.
impl FusedIterator for LogicalLinesIter<'_> {}
/// A logical line or a comment (or form feed only) line
struct LogicalLine {
    /// The range of this lines content (excluding the trailing newline)
    content_range: TextRange,
    /// Does this logical line have a trailing newline or does it just happen to be the last line.
    has_trailing_newline: bool,
}
impl Ranged for LogicalLine {
    fn range(&self) -> TextRange {
        self.content_range
    }
}
/// Format element that copies a source range to the output unchanged.
/// Construct it with [`verbatim_text`].
pub(crate) struct VerbatimText {
    verbatim_range: TextRange,
}
/// Creates a [`VerbatimText`] covering `item`'s source range.
pub(crate) fn verbatim_text<T: Ranged>(item: T) -> VerbatimText {
    VerbatimText {
        verbatim_range: item.range(),
    }
}
impl Format<PyFormatContext<'_>> for VerbatimText {
    fn fmt(&self, f: &mut PyFormatter) -> FormatResult<()> {
        f.write_element(FormatElement::Tag(Tag::StartVerbatim(
            tag::VerbatimKind::Verbatim {
                length: self.verbatim_range.len(),
            },
        )));
        // Normalize `\r` line endings; only allocates when the slice actually contains one.
        match normalize_newlines(&f.context().source()[self.verbatim_range], ['\r']) {
            Cow::Borrowed(_) => {
                write!(f, [source_text_slice(self.verbatim_range)])?;
            }
            Cow::Owned(cleaned) => {
                text(&cleaned).fmt(f)?;
            }
        }
        f.write_element(FormatElement::Tag(Tag::EndVerbatim));
        Ok(())
    }
}
/// Disables formatting for `node` and instead uses the same formatting as the node has in source.
///
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/context.rs | crates/ruff_python_formatter/src/context.rs | use std::fmt::{Debug, Formatter};
use std::ops::{Deref, DerefMut};
use ruff_formatter::{Buffer, FormatContext, GroupId, IndentWidth, SourceCode};
use ruff_python_ast::str::Quote;
use ruff_python_ast::token::Tokens;
use crate::PyFormatOptions;
use crate::comments::Comments;
use crate::other::interpolated_string::InterpolatedStringContext;
/// The state threaded through the formatter while formatting a Python file.
pub struct PyFormatContext<'a> {
    /// The user-supplied formatting options.
    options: PyFormatOptions,
    /// The source text being formatted.
    contents: &'a str,
    /// The comments extracted from the source, keyed by their attached node.
    comments: Comments<'a>,
    /// The token stream of the source file.
    tokens: &'a Tokens,
    /// The kind of node currently being formatted.
    node_level: NodeLevel,
    /// The current indentation depth.
    indent_level: IndentLevel,
    /// Set to a non-None value when the formatter is running on a code
    /// snippet within a docstring. The value should be the quote character of the
    /// docstring containing the code snippet.
    ///
    /// Various parts of the formatter may inspect this state to change how it
    /// works. For example, multi-line strings will always be written with a
    /// quote style that is inverted from the one here in order to ensure that
    /// the formatted Python code will be valid.
    docstring: Option<Quote>,
    /// The state of the formatter with respect to f-strings and t-strings.
    interpolated_string_state: InterpolatedStringState,
}
impl<'a> PyFormatContext<'a> {
    /// Creates a new context for formatting `contents` with the given `options`.
    pub(crate) fn new(
        options: PyFormatOptions,
        contents: &'a str,
        comments: Comments<'a>,
        tokens: &'a Tokens,
    ) -> Self {
        Self {
            options,
            contents,
            comments,
            tokens,
            node_level: NodeLevel::TopLevel(TopLevelStatementPosition::Other),
            indent_level: IndentLevel::new(0),
            docstring: None,
            interpolated_string_state: InterpolatedStringState::Outside,
        }
    }
    /// Returns the source text being formatted.
    pub(crate) fn source(&self) -> &'a str {
        self.contents
    }
    /// Sets the kind of node currently being formatted.
    pub(crate) fn set_node_level(&mut self, level: NodeLevel) {
        self.node_level = level;
    }
    /// Returns the kind of node currently being formatted.
    pub(crate) fn node_level(&self) -> NodeLevel {
        self.node_level
    }
    /// Sets the current indentation depth.
    pub(crate) fn set_indent_level(&mut self, level: IndentLevel) {
        self.indent_level = level;
    }
    /// Returns the current indentation depth.
    pub(crate) fn indent_level(&self) -> IndentLevel {
        self.indent_level
    }
    /// Returns the comments extracted from the source file.
    pub(crate) fn comments(&self) -> &Comments<'a> {
        &self.comments
    }
    /// Returns the token stream of the source file.
    pub(crate) fn tokens(&self) -> &'a Tokens {
        self.tokens
    }
    /// Returns a non-None value only if the formatter is running on a code
    /// snippet within a docstring.
    ///
    /// The quote character returned corresponds to the quoting used for the
    /// docstring containing the code snippet currently being formatted.
    pub(crate) fn docstring(&self) -> Option<Quote> {
        self.docstring
    }
    /// Return a new context suitable for formatting code snippets within a
    /// docstring.
    ///
    /// The quote character given should correspond to the quote character used
    /// for the docstring containing the code snippets.
    pub(crate) fn in_docstring(self, quote: Quote) -> PyFormatContext<'a> {
        PyFormatContext {
            docstring: Some(quote),
            ..self
        }
    }
    /// Returns the current f-string/t-string nesting state.
    pub(crate) fn interpolated_string_state(&self) -> InterpolatedStringState {
        self.interpolated_string_state
    }
    /// Sets the current f-string/t-string nesting state.
    pub(crate) fn set_interpolated_string_state(
        &mut self,
        interpolated_string_state: InterpolatedStringState,
    ) {
        self.interpolated_string_state = interpolated_string_state;
    }
    /// Returns `true` if preview mode is enabled.
    pub(crate) const fn is_preview(&self) -> bool {
        self.options.preview().is_enabled()
    }
}
impl FormatContext for PyFormatContext<'_> {
    type Options = PyFormatOptions;
    fn options(&self) -> &Self::Options {
        &self.options
    }
    fn source_code(&self) -> SourceCode<'_> {
        SourceCode::new(self.contents)
    }
}
impl Debug for PyFormatContext<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Not all fields are included (`tokens`, `indent_level`, `docstring`,
        // `interpolated_string_state` are omitted), so signal the omission
        // with `finish_non_exhaustive` instead of `finish`.
        f.debug_struct("PyFormatContext")
            .field("options", &self.options)
            .field("comments", &self.comments.debug(self.source_code()))
            .field("node_level", &self.node_level)
            .field("source", &self.contents)
            .finish_non_exhaustive()
    }
}
#[derive(Clone, Copy, Debug, Default)]
pub(crate) enum InterpolatedStringState {
    /// The formatter is inside an f-string expression element i.e., between the
    /// curly brace in `f"foo {x}"`.
    ///
    /// The contained [`InterpolatedStringContext`] is the surrounding f-string context.
    InsideInterpolatedElement(InterpolatedStringContext),
    /// The formatter is inside more than one nested f-string, such as in `nested` in:
    ///
    /// ```py
    /// f"{f'''{'nested'} inner'''} outer"
    /// ```
    NestedInterpolatedElement(InterpolatedStringContext),
    /// The formatter is outside an f-string.
    #[default]
    Outside,
}
impl InterpolatedStringState {
pub(crate) fn can_contain_line_breaks(self) -> Option<bool> {
match self {
InterpolatedStringState::InsideInterpolatedElement(context)
| InterpolatedStringState::NestedInterpolatedElement(context) => {
Some(context.is_multiline())
}
InterpolatedStringState::Outside => None,
}
}
/// Returns `true` if the interpolated string state is [`Self::NestedInterpolatedElement`].
pub(crate) fn is_nested(self) -> bool {
matches!(self, Self::NestedInterpolatedElement(..))
}
}
/// The position of a top-level statement in the module.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
pub(crate) enum TopLevelStatementPosition {
    /// This is the last top-level statement in the module.
    Last,
    /// Any other top-level statement.
    ///
    /// The default, used when the position is unknown or irrelevant.
    #[default]
    Other,
}
/// What's the enclosing level of the outer node.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(crate) enum NodeLevel {
    /// Formatting statements on the module level.
    TopLevel(TopLevelStatementPosition),
    /// Formatting the body statements of a [compound statement](https://docs.python.org/3/reference/compound_stmts.html#compound-statements)
    /// (`if`, `while`, `match`, etc.).
    CompoundStatement,
    /// The root or any sub-expression.
    ///
    /// Holds the id of the enclosing group when there is one; a `Some` group
    /// id means the expression is considered parenthesized (see
    /// [`NodeLevel::is_parenthesized`]).
    Expression(Option<GroupId>),
    /// Formatting nodes that are enclosed by a parenthesized (any `[]`, `{}` or `()`) expression.
    ParenthesizedExpression,
}
impl Default for NodeLevel {
fn default() -> Self {
Self::TopLevel(TopLevelStatementPosition::Other)
}
}
impl NodeLevel {
    /// Returns `true` if the expression is in a parenthesized context.
    ///
    /// That's the case when formatting inside an explicit parenthesized
    /// expression or when the expression is part of an enclosing group.
    pub(crate) const fn is_parenthesized(self) -> bool {
        // Exhaustive match so that adding a variant forces a decision here.
        match self {
            NodeLevel::Expression(Some(_)) | NodeLevel::ParenthesizedExpression => true,
            NodeLevel::TopLevel(_) | NodeLevel::CompoundStatement | NodeLevel::Expression(None) => {
                false
            }
        }
    }

    /// Returns `true` if this is the last top-level statement in the module.
    pub(crate) const fn is_last_top_level_statement(self) -> bool {
        match self {
            NodeLevel::TopLevel(TopLevelStatementPosition::Last) => true,
            _ => false,
        }
    }
}
/// Change the [`NodeLevel`] of the formatter for the lifetime of this struct.
///
/// Guard type: the previous level is restored when the value is dropped.
pub(crate) struct WithNodeLevel<'ast, 'buf, B>
where
    B: Buffer<Context = PyFormatContext<'ast>>,
{
    buffer: &'buf mut B,
    /// The level that was active before this guard was created; restored in
    /// `Drop`.
    saved_level: NodeLevel,
}
impl<'ast, 'buf, B> WithNodeLevel<'ast, 'buf, B>
where
    B: Buffer<Context = PyFormatContext<'ast>>,
{
    /// Sets `level` as the formatter's current node level and returns a guard
    /// that restores the previous level when dropped.
    pub(crate) fn new(level: NodeLevel, buffer: &'buf mut B) -> Self {
        let context = buffer.state_mut().context_mut();
        // Remember the old level so `Drop` can restore it.
        let saved_level = context.node_level();
        context.set_node_level(level);
        Self {
            buffer,
            saved_level,
        }
    }
}
impl<'ast, B> Deref for WithNodeLevel<'ast, '_, B>
where
    B: Buffer<Context = PyFormatContext<'ast>>,
{
    type Target = B;

    // Let the guard be used wherever the wrapped buffer is expected.
    fn deref(&self) -> &Self::Target {
        self.buffer
    }
}
impl<'ast, B> DerefMut for WithNodeLevel<'ast, '_, B>
where
    B: Buffer<Context = PyFormatContext<'ast>>,
{
    // Mutable counterpart of `Deref`: write through to the wrapped buffer.
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.buffer
    }
}
impl<'ast, B> Drop for WithNodeLevel<'ast, '_, B>
where
    B: Buffer<Context = PyFormatContext<'ast>>,
{
    // Restore the node level that was active when the guard was created.
    fn drop(&mut self) {
        self.buffer
            .state_mut()
            .context_mut()
            .set_node_level(self.saved_level);
    }
}
/// The current indent level of the formatter.
///
/// One can determine the width of the indent itself (in number of ASCII
/// space characters) by multiplying the indent level by the configured indent
/// width.
///
/// This is specifically used inside the docstring code formatter for
/// implementing its "dynamic" line width mode. Namely, in the nested call to
/// the formatter, when "dynamic" mode is enabled, the line width is set to
/// `max(1, line_width - indent_level * indent_width)`, where `line_width` in
/// this context is the global line width setting.
/// (NOTE(review): this previously read `min(1, ...)`, which would cap the
/// nested line width at a single column; `max` matches the described intent
/// of shrinking the width while keeping it at least 1 — confirm against the
/// docstring code formatter.)
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(crate) struct IndentLevel {
    /// The numeric level. It is incremented for every whole indent in Python
    /// source code.
    ///
    /// Note that the first indentation level is actually 1, since this starts
    /// at 0 and is incremented when the first top-level statement is seen. So
    /// even though the first top-level statement in Python source will have no
    /// indentation, its indentation level is 1.
    level: u16,
}
impl IndentLevel {
/// Returns a new indent level for the given value.
pub(crate) fn new(level: u16) -> IndentLevel {
IndentLevel { level }
}
/// Returns the next indent level.
pub(crate) fn increment(self) -> IndentLevel {
IndentLevel {
level: self.level.saturating_add(1),
}
}
/// Convert this indent level into a specific number of ASCII whitespace
/// characters based on the given indent width.
pub(crate) fn to_ascii_spaces(self, width: IndentWidth) -> u16 {
let width = u16::try_from(width.value()).unwrap_or(u16::MAX);
// Why the subtraction? IndentLevel starts at 0 and asks for the "next"
// indent level before seeing the first top-level statement. So it's
// always 1 more than what we expect it to be.
let level = self.level.saturating_sub(1);
width.saturating_mul(level)
}
}
/// Change the [`IndentLevel`] of the formatter for the lifetime of this
/// struct.
///
/// Guard type: the previous level is restored when the value is dropped.
pub(crate) struct WithIndentLevel<'a, B, D>
where
    D: DerefMut<Target = B>,
    B: Buffer<Context = PyFormatContext<'a>>,
{
    buffer: D,
    /// The indent level that was active before this guard was created;
    /// restored in `Drop`.
    saved_level: IndentLevel,
}
impl<'a, B, D> WithIndentLevel<'a, B, D>
where
    D: DerefMut<Target = B>,
    B: Buffer<Context = PyFormatContext<'a>>,
{
    /// Sets `level` as the formatter's current indent level and returns a
    /// guard that restores the previous level when dropped.
    pub(crate) fn new(level: IndentLevel, mut buffer: D) -> Self {
        let context = buffer.state_mut().context_mut();
        // Remember the old level so `Drop` can restore it.
        let saved_level = context.indent_level();
        context.set_indent_level(level);
        Self {
            buffer,
            saved_level,
        }
    }
}
impl<'a, B, D> Deref for WithIndentLevel<'a, B, D>
where
    D: DerefMut<Target = B>,
    B: Buffer<Context = PyFormatContext<'a>>,
{
    type Target = B;

    // Let the guard be used wherever the wrapped buffer is expected.
    fn deref(&self) -> &Self::Target {
        &self.buffer
    }
}
impl<'a, B, D> DerefMut for WithIndentLevel<'a, B, D>
where
    D: DerefMut<Target = B>,
    B: Buffer<Context = PyFormatContext<'a>>,
{
    // Mutable counterpart of `Deref`: write through to the wrapped buffer.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.buffer
    }
}
impl<'a, B, D> Drop for WithIndentLevel<'a, B, D>
where
    D: DerefMut<Target = B>,
    B: Buffer<Context = PyFormatContext<'a>>,
{
    // Restore the indent level that was active when the guard was created.
    fn drop(&mut self) {
        self.buffer
            .state_mut()
            .context_mut()
            .set_indent_level(self.saved_level);
    }
}
/// Change the [`InterpolatedStringState`] of the formatter for the lifetime
/// of this struct.
///
/// Guard type: the previous state is restored when the value is dropped.
pub(crate) struct WithInterpolatedStringState<'a, B, D>
where
    D: DerefMut<Target = B>,
    B: Buffer<Context = PyFormatContext<'a>>,
{
    buffer: D,
    /// The state that was active before this guard was created; restored in
    /// `Drop`.
    saved_location: InterpolatedStringState,
}
impl<'a, B, D> WithInterpolatedStringState<'a, B, D>
where
    D: DerefMut<Target = B>,
    B: Buffer<Context = PyFormatContext<'a>>,
{
    /// Sets `expr_location` as the formatter's current interpolated-string
    /// state and returns a guard that restores the previous state when
    /// dropped.
    pub(crate) fn new(expr_location: InterpolatedStringState, mut buffer: D) -> Self {
        let context = buffer.state_mut().context_mut();
        // Remember the old state so `Drop` can restore it.
        let saved_location = context.interpolated_string_state();
        context.set_interpolated_string_state(expr_location);
        Self {
            buffer,
            saved_location,
        }
    }
}
impl<'a, B, D> Deref for WithInterpolatedStringState<'a, B, D>
where
    D: DerefMut<Target = B>,
    B: Buffer<Context = PyFormatContext<'a>>,
{
    type Target = B;

    // Let the guard be used wherever the wrapped buffer is expected.
    fn deref(&self) -> &Self::Target {
        &self.buffer
    }
}
impl<'a, B, D> DerefMut for WithInterpolatedStringState<'a, B, D>
where
    D: DerefMut<Target = B>,
    B: Buffer<Context = PyFormatContext<'a>>,
{
    // Mutable counterpart of `Deref`: write through to the wrapped buffer.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.buffer
    }
}
impl<'a, B, D> Drop for WithInterpolatedStringState<'a, B, D>
where
    D: DerefMut<Target = B>,
    B: Buffer<Context = PyFormatContext<'a>>,
{
    // Restore the interpolated-string state that was active when the guard
    // was created.
    fn drop(&mut self) {
        self.buffer
            .state_mut()
            .context_mut()
            .set_interpolated_string_state(self.saved_location);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/generated.rs | crates/ruff_python_formatter/src/generated.rs | //! This is a generated file. Don't modify it by hand! Run `crates/ruff_python_formatter/generate.py` to re-generate the file.
#![allow(unknown_lints, clippy::default_constructed_unit_structs)]
use crate::context::PyFormatContext;
use crate::{AsFormat, FormatNodeRule, IntoFormat, PyFormatter};
use ruff_formatter::{FormatOwnedWithRule, FormatRefWithRule, FormatResult, FormatRule};
use ruff_python_ast as ast;
impl FormatRule<ast::ModModule, PyFormatContext<'_>>
for crate::module::mod_module::FormatModModule
{
#[inline]
fn fmt(&self, node: &ast::ModModule, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::ModModule>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::ModModule {
type Format<'a> = FormatRefWithRule<
'a,
ast::ModModule,
crate::module::mod_module::FormatModModule,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(self, crate::module::mod_module::FormatModModule::default())
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::ModModule {
type Format = FormatOwnedWithRule<
ast::ModModule,
crate::module::mod_module::FormatModModule,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(self, crate::module::mod_module::FormatModModule::default())
}
}
impl FormatRule<ast::ModExpression, PyFormatContext<'_>>
for crate::module::mod_expression::FormatModExpression
{
#[inline]
fn fmt(&self, node: &ast::ModExpression, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::ModExpression>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::ModExpression {
type Format<'a> = FormatRefWithRule<
'a,
ast::ModExpression,
crate::module::mod_expression::FormatModExpression,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::module::mod_expression::FormatModExpression::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::ModExpression {
type Format = FormatOwnedWithRule<
ast::ModExpression,
crate::module::mod_expression::FormatModExpression,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::module::mod_expression::FormatModExpression::default(),
)
}
}
impl FormatRule<ast::StmtFunctionDef, PyFormatContext<'_>>
for crate::statement::stmt_function_def::FormatStmtFunctionDef
{
#[inline]
fn fmt(&self, node: &ast::StmtFunctionDef, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtFunctionDef>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtFunctionDef {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtFunctionDef,
crate::statement::stmt_function_def::FormatStmtFunctionDef,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_function_def::FormatStmtFunctionDef::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtFunctionDef {
type Format = FormatOwnedWithRule<
ast::StmtFunctionDef,
crate::statement::stmt_function_def::FormatStmtFunctionDef,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_function_def::FormatStmtFunctionDef::default(),
)
}
}
impl FormatRule<ast::StmtClassDef, PyFormatContext<'_>>
for crate::statement::stmt_class_def::FormatStmtClassDef
{
#[inline]
fn fmt(&self, node: &ast::StmtClassDef, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtClassDef>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtClassDef {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtClassDef,
crate::statement::stmt_class_def::FormatStmtClassDef,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_class_def::FormatStmtClassDef::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtClassDef {
type Format = FormatOwnedWithRule<
ast::StmtClassDef,
crate::statement::stmt_class_def::FormatStmtClassDef,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_class_def::FormatStmtClassDef::default(),
)
}
}
impl FormatRule<ast::StmtReturn, PyFormatContext<'_>>
for crate::statement::stmt_return::FormatStmtReturn
{
#[inline]
fn fmt(&self, node: &ast::StmtReturn, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtReturn>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtReturn {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtReturn,
crate::statement::stmt_return::FormatStmtReturn,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_return::FormatStmtReturn::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtReturn {
type Format = FormatOwnedWithRule<
ast::StmtReturn,
crate::statement::stmt_return::FormatStmtReturn,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_return::FormatStmtReturn::default(),
)
}
}
impl FormatRule<ast::StmtDelete, PyFormatContext<'_>>
for crate::statement::stmt_delete::FormatStmtDelete
{
#[inline]
fn fmt(&self, node: &ast::StmtDelete, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtDelete>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtDelete {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtDelete,
crate::statement::stmt_delete::FormatStmtDelete,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_delete::FormatStmtDelete::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtDelete {
type Format = FormatOwnedWithRule<
ast::StmtDelete,
crate::statement::stmt_delete::FormatStmtDelete,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_delete::FormatStmtDelete::default(),
)
}
}
impl FormatRule<ast::StmtTypeAlias, PyFormatContext<'_>>
for crate::statement::stmt_type_alias::FormatStmtTypeAlias
{
#[inline]
fn fmt(&self, node: &ast::StmtTypeAlias, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtTypeAlias>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtTypeAlias {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtTypeAlias,
crate::statement::stmt_type_alias::FormatStmtTypeAlias,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_type_alias::FormatStmtTypeAlias::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtTypeAlias {
type Format = FormatOwnedWithRule<
ast::StmtTypeAlias,
crate::statement::stmt_type_alias::FormatStmtTypeAlias,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_type_alias::FormatStmtTypeAlias::default(),
)
}
}
impl FormatRule<ast::StmtAssign, PyFormatContext<'_>>
for crate::statement::stmt_assign::FormatStmtAssign
{
#[inline]
fn fmt(&self, node: &ast::StmtAssign, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtAssign>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtAssign {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtAssign,
crate::statement::stmt_assign::FormatStmtAssign,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_assign::FormatStmtAssign::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtAssign {
type Format = FormatOwnedWithRule<
ast::StmtAssign,
crate::statement::stmt_assign::FormatStmtAssign,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_assign::FormatStmtAssign::default(),
)
}
}
impl FormatRule<ast::StmtAugAssign, PyFormatContext<'_>>
for crate::statement::stmt_aug_assign::FormatStmtAugAssign
{
#[inline]
fn fmt(&self, node: &ast::StmtAugAssign, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtAugAssign>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtAugAssign {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtAugAssign,
crate::statement::stmt_aug_assign::FormatStmtAugAssign,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_aug_assign::FormatStmtAugAssign::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtAugAssign {
type Format = FormatOwnedWithRule<
ast::StmtAugAssign,
crate::statement::stmt_aug_assign::FormatStmtAugAssign,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_aug_assign::FormatStmtAugAssign::default(),
)
}
}
impl FormatRule<ast::StmtAnnAssign, PyFormatContext<'_>>
for crate::statement::stmt_ann_assign::FormatStmtAnnAssign
{
#[inline]
fn fmt(&self, node: &ast::StmtAnnAssign, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtAnnAssign>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtAnnAssign {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtAnnAssign,
crate::statement::stmt_ann_assign::FormatStmtAnnAssign,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_ann_assign::FormatStmtAnnAssign::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtAnnAssign {
type Format = FormatOwnedWithRule<
ast::StmtAnnAssign,
crate::statement::stmt_ann_assign::FormatStmtAnnAssign,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_ann_assign::FormatStmtAnnAssign::default(),
)
}
}
impl FormatRule<ast::StmtFor, PyFormatContext<'_>> for crate::statement::stmt_for::FormatStmtFor {
#[inline]
fn fmt(&self, node: &ast::StmtFor, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtFor>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtFor {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtFor,
crate::statement::stmt_for::FormatStmtFor,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(self, crate::statement::stmt_for::FormatStmtFor::default())
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtFor {
type Format = FormatOwnedWithRule<
ast::StmtFor,
crate::statement::stmt_for::FormatStmtFor,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(self, crate::statement::stmt_for::FormatStmtFor::default())
}
}
impl FormatRule<ast::StmtWhile, PyFormatContext<'_>>
for crate::statement::stmt_while::FormatStmtWhile
{
#[inline]
fn fmt(&self, node: &ast::StmtWhile, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtWhile>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtWhile {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtWhile,
crate::statement::stmt_while::FormatStmtWhile,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_while::FormatStmtWhile::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtWhile {
type Format = FormatOwnedWithRule<
ast::StmtWhile,
crate::statement::stmt_while::FormatStmtWhile,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_while::FormatStmtWhile::default(),
)
}
}
impl FormatRule<ast::StmtIf, PyFormatContext<'_>> for crate::statement::stmt_if::FormatStmtIf {
#[inline]
fn fmt(&self, node: &ast::StmtIf, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtIf>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtIf {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtIf,
crate::statement::stmt_if::FormatStmtIf,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(self, crate::statement::stmt_if::FormatStmtIf::default())
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtIf {
type Format = FormatOwnedWithRule<
ast::StmtIf,
crate::statement::stmt_if::FormatStmtIf,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(self, crate::statement::stmt_if::FormatStmtIf::default())
}
}
impl FormatRule<ast::StmtWith, PyFormatContext<'_>>
for crate::statement::stmt_with::FormatStmtWith
{
#[inline]
fn fmt(&self, node: &ast::StmtWith, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtWith>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtWith {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtWith,
crate::statement::stmt_with::FormatStmtWith,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(self, crate::statement::stmt_with::FormatStmtWith::default())
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtWith {
type Format = FormatOwnedWithRule<
ast::StmtWith,
crate::statement::stmt_with::FormatStmtWith,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(self, crate::statement::stmt_with::FormatStmtWith::default())
}
}
impl FormatRule<ast::StmtMatch, PyFormatContext<'_>>
for crate::statement::stmt_match::FormatStmtMatch
{
#[inline]
fn fmt(&self, node: &ast::StmtMatch, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtMatch>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtMatch {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtMatch,
crate::statement::stmt_match::FormatStmtMatch,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_match::FormatStmtMatch::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtMatch {
type Format = FormatOwnedWithRule<
ast::StmtMatch,
crate::statement::stmt_match::FormatStmtMatch,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_match::FormatStmtMatch::default(),
)
}
}
impl FormatRule<ast::StmtRaise, PyFormatContext<'_>>
for crate::statement::stmt_raise::FormatStmtRaise
{
#[inline]
fn fmt(&self, node: &ast::StmtRaise, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtRaise>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtRaise {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtRaise,
crate::statement::stmt_raise::FormatStmtRaise,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_raise::FormatStmtRaise::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtRaise {
type Format = FormatOwnedWithRule<
ast::StmtRaise,
crate::statement::stmt_raise::FormatStmtRaise,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_raise::FormatStmtRaise::default(),
)
}
}
impl FormatRule<ast::StmtTry, PyFormatContext<'_>> for crate::statement::stmt_try::FormatStmtTry {
#[inline]
fn fmt(&self, node: &ast::StmtTry, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtTry>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtTry {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtTry,
crate::statement::stmt_try::FormatStmtTry,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(self, crate::statement::stmt_try::FormatStmtTry::default())
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtTry {
type Format = FormatOwnedWithRule<
ast::StmtTry,
crate::statement::stmt_try::FormatStmtTry,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(self, crate::statement::stmt_try::FormatStmtTry::default())
}
}
impl FormatRule<ast::StmtAssert, PyFormatContext<'_>>
for crate::statement::stmt_assert::FormatStmtAssert
{
#[inline]
fn fmt(&self, node: &ast::StmtAssert, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtAssert>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtAssert {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtAssert,
crate::statement::stmt_assert::FormatStmtAssert,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_assert::FormatStmtAssert::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtAssert {
type Format = FormatOwnedWithRule<
ast::StmtAssert,
crate::statement::stmt_assert::FormatStmtAssert,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_assert::FormatStmtAssert::default(),
)
}
}
impl FormatRule<ast::StmtImport, PyFormatContext<'_>>
for crate::statement::stmt_import::FormatStmtImport
{
#[inline]
fn fmt(&self, node: &ast::StmtImport, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtImport>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtImport {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtImport,
crate::statement::stmt_import::FormatStmtImport,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_import::FormatStmtImport::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtImport {
type Format = FormatOwnedWithRule<
ast::StmtImport,
crate::statement::stmt_import::FormatStmtImport,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_import::FormatStmtImport::default(),
)
}
}
impl FormatRule<ast::StmtImportFrom, PyFormatContext<'_>>
for crate::statement::stmt_import_from::FormatStmtImportFrom
{
#[inline]
fn fmt(&self, node: &ast::StmtImportFrom, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtImportFrom>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtImportFrom {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtImportFrom,
crate::statement::stmt_import_from::FormatStmtImportFrom,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_import_from::FormatStmtImportFrom::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtImportFrom {
type Format = FormatOwnedWithRule<
ast::StmtImportFrom,
crate::statement::stmt_import_from::FormatStmtImportFrom,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_import_from::FormatStmtImportFrom::default(),
)
}
}
impl FormatRule<ast::StmtGlobal, PyFormatContext<'_>>
for crate::statement::stmt_global::FormatStmtGlobal
{
#[inline]
fn fmt(&self, node: &ast::StmtGlobal, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtGlobal>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtGlobal {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtGlobal,
crate::statement::stmt_global::FormatStmtGlobal,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_global::FormatStmtGlobal::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtGlobal {
type Format = FormatOwnedWithRule<
ast::StmtGlobal,
crate::statement::stmt_global::FormatStmtGlobal,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_global::FormatStmtGlobal::default(),
)
}
}
impl FormatRule<ast::StmtNonlocal, PyFormatContext<'_>>
for crate::statement::stmt_nonlocal::FormatStmtNonlocal
{
#[inline]
fn fmt(&self, node: &ast::StmtNonlocal, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtNonlocal>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtNonlocal {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtNonlocal,
crate::statement::stmt_nonlocal::FormatStmtNonlocal,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_nonlocal::FormatStmtNonlocal::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtNonlocal {
type Format = FormatOwnedWithRule<
ast::StmtNonlocal,
crate::statement::stmt_nonlocal::FormatStmtNonlocal,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_nonlocal::FormatStmtNonlocal::default(),
)
}
}
impl FormatRule<ast::StmtExpr, PyFormatContext<'_>>
for crate::statement::stmt_expr::FormatStmtExpr
{
#[inline]
fn fmt(&self, node: &ast::StmtExpr, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtExpr>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtExpr {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtExpr,
crate::statement::stmt_expr::FormatStmtExpr,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(self, crate::statement::stmt_expr::FormatStmtExpr::default())
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtExpr {
type Format = FormatOwnedWithRule<
ast::StmtExpr,
crate::statement::stmt_expr::FormatStmtExpr,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(self, crate::statement::stmt_expr::FormatStmtExpr::default())
}
}
impl FormatRule<ast::StmtPass, PyFormatContext<'_>>
for crate::statement::stmt_pass::FormatStmtPass
{
#[inline]
fn fmt(&self, node: &ast::StmtPass, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtPass>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtPass {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtPass,
crate::statement::stmt_pass::FormatStmtPass,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(self, crate::statement::stmt_pass::FormatStmtPass::default())
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtPass {
type Format = FormatOwnedWithRule<
ast::StmtPass,
crate::statement::stmt_pass::FormatStmtPass,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(self, crate::statement::stmt_pass::FormatStmtPass::default())
}
}
impl FormatRule<ast::StmtBreak, PyFormatContext<'_>>
for crate::statement::stmt_break::FormatStmtBreak
{
#[inline]
fn fmt(&self, node: &ast::StmtBreak, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtBreak>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtBreak {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtBreak,
crate::statement::stmt_break::FormatStmtBreak,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_break::FormatStmtBreak::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtBreak {
type Format = FormatOwnedWithRule<
ast::StmtBreak,
crate::statement::stmt_break::FormatStmtBreak,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_break::FormatStmtBreak::default(),
)
}
}
impl FormatRule<ast::StmtContinue, PyFormatContext<'_>>
for crate::statement::stmt_continue::FormatStmtContinue
{
#[inline]
fn fmt(&self, node: &ast::StmtContinue, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtContinue>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtContinue {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtContinue,
crate::statement::stmt_continue::FormatStmtContinue,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_continue::FormatStmtContinue::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtContinue {
type Format = FormatOwnedWithRule<
ast::StmtContinue,
crate::statement::stmt_continue::FormatStmtContinue,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_continue::FormatStmtContinue::default(),
)
}
}
impl FormatRule<ast::StmtIpyEscapeCommand, PyFormatContext<'_>>
for crate::statement::stmt_ipy_escape_command::FormatStmtIpyEscapeCommand
{
#[inline]
fn fmt(&self, node: &ast::StmtIpyEscapeCommand, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::StmtIpyEscapeCommand>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::StmtIpyEscapeCommand {
type Format<'a> = FormatRefWithRule<
'a,
ast::StmtIpyEscapeCommand,
crate::statement::stmt_ipy_escape_command::FormatStmtIpyEscapeCommand,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::statement::stmt_ipy_escape_command::FormatStmtIpyEscapeCommand::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::StmtIpyEscapeCommand {
type Format = FormatOwnedWithRule<
ast::StmtIpyEscapeCommand,
crate::statement::stmt_ipy_escape_command::FormatStmtIpyEscapeCommand,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::statement::stmt_ipy_escape_command::FormatStmtIpyEscapeCommand::default(),
)
}
}
impl FormatRule<ast::ExprBoolOp, PyFormatContext<'_>>
for crate::expression::expr_bool_op::FormatExprBoolOp
{
#[inline]
fn fmt(&self, node: &ast::ExprBoolOp, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::ExprBoolOp>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::ExprBoolOp {
type Format<'a> = FormatRefWithRule<
'a,
ast::ExprBoolOp,
crate::expression::expr_bool_op::FormatExprBoolOp,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::expression::expr_bool_op::FormatExprBoolOp::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::ExprBoolOp {
type Format = FormatOwnedWithRule<
ast::ExprBoolOp,
crate::expression::expr_bool_op::FormatExprBoolOp,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::expression::expr_bool_op::FormatExprBoolOp::default(),
)
}
}
impl FormatRule<ast::ExprNamed, PyFormatContext<'_>>
for crate::expression::expr_named::FormatExprNamed
{
#[inline]
fn fmt(&self, node: &ast::ExprNamed, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::ExprNamed>::fmt(self, node, f)
}
}
impl<'ast> AsFormat<PyFormatContext<'ast>> for ast::ExprNamed {
type Format<'a> = FormatRefWithRule<
'a,
ast::ExprNamed,
crate::expression::expr_named::FormatExprNamed,
PyFormatContext<'ast>,
>;
fn format(&self) -> Self::Format<'_> {
FormatRefWithRule::new(
self,
crate::expression::expr_named::FormatExprNamed::default(),
)
}
}
impl<'ast> IntoFormat<PyFormatContext<'ast>> for ast::ExprNamed {
type Format = FormatOwnedWithRule<
ast::ExprNamed,
crate::expression::expr_named::FormatExprNamed,
PyFormatContext<'ast>,
>;
fn into_format(self) -> Self::Format {
FormatOwnedWithRule::new(
self,
crate::expression::expr_named::FormatExprNamed::default(),
)
}
}
impl FormatRule<ast::ExprBinOp, PyFormatContext<'_>>
for crate::expression::expr_bin_op::FormatExprBinOp
{
#[inline]
fn fmt(&self, node: &ast::ExprBinOp, f: &mut PyFormatter) -> FormatResult<()> {
FormatNodeRule::<ast::ExprBinOp>::fmt(self, node, f)
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/preview.rs | crates/ruff_python_formatter/src/preview.rs | //! Helpers to test if a specific preview style is enabled or not.
//!
//! The motivation for these functions isn't to avoid code duplication but to ease promoting preview styles
//! to stable. The challenge with directly using [`is_preview`](PyFormatContext::is_preview) is that it is unclear
//! for which specific feature this preview check is for. Having named functions simplifies the promotion:
//! Simply delete the function and let Rust tell you which checks you have to remove.
use crate::PyFormatContext;
/// Returns `true` if the [`hug_parens_with_braces_and_square_brackets`](https://github.com/astral-sh/ruff/issues/8279) preview style is enabled.
pub(crate) const fn is_hug_parens_with_braces_and_square_brackets_enabled(
context: &PyFormatContext,
) -> bool {
context.is_preview()
}
/// Returns `true` if the [`no_chaperone_for_escaped_quote_in_triple_quoted_docstring`](https://github.com/astral-sh/ruff/pull/17216) preview style is enabled.
pub(crate) const fn is_no_chaperone_for_escaped_quote_in_triple_quoted_docstring_enabled(
context: &PyFormatContext,
) -> bool {
context.is_preview()
}
/// Returns `true` if the [`blank_line_before_decorated_class_in_stub`](https://github.com/astral-sh/ruff/issues/18865) preview style is enabled.
pub(crate) const fn is_blank_line_before_decorated_class_in_stub_enabled(
context: &PyFormatContext,
) -> bool {
context.is_preview()
}
/// Returns `true` if the
/// [`remove_parens_around_except_types`](https://github.com/astral-sh/ruff/pull/20768) preview
/// style is enabled.
pub(crate) const fn is_remove_parens_around_except_types_enabled(
context: &PyFormatContext,
) -> bool {
context.is_preview()
}
/// Returns `true` if the
/// [`allow_newline_after_block_open`](https://github.com/astral-sh/ruff/pull/21110) preview style
/// is enabled.
pub(crate) const fn is_allow_newline_after_block_open_enabled(context: &PyFormatContext) -> bool {
context.is_preview()
}
/// Returns `true` if the
/// [`avoid_parens_for_long_as_captures`](https://github.com/astral-sh/ruff/pull/21176) preview
/// style is enabled.
pub(crate) const fn is_avoid_parens_for_long_as_captures_enabled(
context: &PyFormatContext,
) -> bool {
context.is_preview()
}
/// Returns `true` if the
/// [`parenthesize_lambda_bodies`](https://github.com/astral-sh/ruff/pull/21385) preview style is
/// enabled.
pub(crate) const fn is_parenthesize_lambda_bodies_enabled(context: &PyFormatContext) -> bool {
context.is_preview()
}
/// Returns `true` if the
/// [`fluent_layout_split_first_call`](https://github.com/astral-sh/ruff/pull/21369) preview
/// style is enabled.
pub(crate) const fn is_fluent_layout_split_first_call_enabled(context: &PyFormatContext) -> bool {
context.is_preview()
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/string/docstring.rs | crates/ruff_python_formatter/src/string/docstring.rs | // This gives tons of false positives in this file because of
// "reStructuredText."
#![allow(clippy::doc_markdown)]
use std::cmp::Ordering;
use std::sync::LazyLock;
use std::{borrow::Cow, collections::VecDeque};
use itertools::Itertools;
use regex::Regex;
use ruff_formatter::printer::SourceMapGeneration;
use ruff_python_ast::{AnyStringFlags, StringFlags, str::Quote};
use ruff_python_parser::ParseOptions;
use ruff_python_trivia::CommentRanges;
use {
ruff_formatter::{FormatOptions, IndentStyle, LineWidth, Printed, write},
ruff_python_trivia::{PythonWhitespace, is_python_whitespace},
ruff_text_size::{Ranged, TextLen, TextRange, TextSize},
};
use super::NormalizedString;
use crate::preview::is_no_chaperone_for_escaped_quote_in_triple_quoted_docstring_enabled;
use crate::string::StringQuotes;
use crate::{DocstringCodeLineWidth, FormatModuleError, prelude::*};
/// Format a docstring by trimming whitespace and adjusting the indentation.
///
/// Summary of changes we make:
/// * Normalize the string like all other strings
/// * Ignore docstring that have an escaped newline
/// * Trim all trailing whitespace, except for a chaperone space that avoids quotes or backslashes
/// in the last line.
/// * Trim leading whitespace on the first line, again except for a chaperone space
/// * If there is only content in the first line and after that only whitespace, collapse the
/// docstring into one line
/// * Adjust the indentation (see below)
///
/// # Docstring indentation
///
/// Unlike any other string, like black we change the indentation of docstring lines.
///
/// We want to preserve the indentation inside the docstring relative to the suite statement/block
/// indent that the docstring statement is in, but also want to apply the change of the outer
/// indentation in the docstring, e.g.
/// ```python
/// def sparkle_sky():
/// """Make a pretty sparkly sky.
/// * * ✨ *. .
/// * * ✨ .
/// . * . ✨ * . .
/// """
/// ```
/// should become
/// ```python
/// def sparkle_sky():
/// """Make a pretty sparkly sky.
/// * * ✨ *. .
/// * * ✨ .
/// . * . ✨ * . .
/// """
/// ```
/// We can't compute the full indentation here since we don't know what the block indent of
/// the doc comment will be yet and which we can only have added by formatting each line
/// separately with a hard line break. This means we need to strip shared indentation from
/// docstring while preserving the in-docstring bigger-than-suite-statement indentation. Example:
/// ```python
/// def f():
/// """first line
/// line a
/// line b
/// """
/// ```
/// The docstring indentation is 2, the block indents will change this to 4 (but we can't
/// determine this at this point). The indentation of line a is 2, so we trim ` line a`
/// to `line a`. For line b it's 5, so we trim it to `line b` and pad with 5-2=3 spaces to
/// ` line b`. The closing quotes, being on their own line, are stripped get only the
/// default indentation. Fully formatted:
/// ```python
/// def f():
/// """first line
/// line a
/// line b
/// """
/// ```
///
/// Tabs are counted by padding them to the next multiple of 8 according to
/// [`str.expandtabs`](https://docs.python.org/3/library/stdtypes.html#str.expandtabs).
///
/// Additionally, if any line in the docstring has less indentation than the docstring
/// (effectively a negative indentation wrt. to the current level), we pad all lines to the
/// level of the docstring with spaces.
/// ```python
/// def f():
/// """first line
/// line a
/// line b
/// line c
/// """
/// ```
/// Here line a is 3 columns negatively indented, so we pad all lines by an extra 3 spaces:
/// ```python
/// def f():
/// """first line
/// line a
/// line b
/// line c
/// """
/// ```
/// The indentation is rewritten to all-spaces when using [`IndentStyle::Space`].
/// The formatter preserves tab-indentations when using [`IndentStyle::Tab`], but doesn't convert
/// `indent-width * spaces` to tabs because doing so could break ASCII art and other docstrings
/// that use spaces for alignment.
pub(crate) fn format(normalized: &NormalizedString, f: &mut PyFormatter) -> FormatResult<()> {
let docstring = &normalized.text();
// Black doesn't change the indentation of docstrings that contain an escaped newline
if contains_unescaped_newline(docstring) {
return normalized.fmt(f);
}
// is_borrowed is unstable :/
let already_normalized = matches!(docstring, Cow::Borrowed(_));
// Use `split` instead of `lines` to preserve the closing quotes on their own line
// if they have no indentation (in which case the last line is `\n` which
// `lines` omit for the last element).
let mut lines = docstring.split('\n').peekable();
// Start the string
let kind = normalized.flags();
let quotes = StringQuotes::from(kind);
write!(f, [kind.prefix(), quotes])?;
// We track where in the source docstring we are (in source code byte offsets)
let mut offset = normalized.start();
// The first line directly after the opening quotes has different rules than the rest, mainly
// that we remove all leading whitespace as there's no indentation
let first = lines.next().unwrap_or_default();
// Black trims whitespace using [`str.strip()`](https://docs.python.org/3/library/stdtypes.html#str.strip)
// https://github.com/psf/black/blob/b4dca26c7d93f930bbd5a7b552807370b60d4298/src/black/strings.py#L77-L85
// So we use the unicode whitespace definition through `trim_{start,end}` instead of the python
// tokenizer whitespace definition in `trim_whitespace_{start,end}`.
let trim_end = first.trim_end();
let trim_both = trim_end.trim_start();
// Edge case: The first line is `""" "content`, so we need to insert chaperone space that keep
// inner quotes and closing quotes from getting to close to avoid `""""content`
if trim_both.starts_with(quotes.quote_char.as_char()) {
space().fmt(f)?;
}
if !trim_end.is_empty() {
// For the first line of the docstring we strip the leading and trailing whitespace, e.g.
// `""" content ` to `"""content`
let leading_whitespace = trim_end.text_len() - trim_both.text_len();
let trimmed_line_range =
TextRange::at(offset, trim_end.text_len()).add_start(leading_whitespace);
if already_normalized {
source_text_slice(trimmed_line_range).fmt(f)?;
} else {
text(trim_both).fmt(f)?;
}
}
offset += first.text_len();
// Check if we have a single line (or empty) docstring
if docstring[first.len()..].trim().is_empty() {
// For `"""\n"""` or other whitespace between the quotes, black keeps a single whitespace,
// but `""""""` doesn't get one inserted.
if needs_chaperone_space(normalized.flags(), trim_end, f.context())
|| (trim_end.is_empty() && !docstring.is_empty())
{
space().fmt(f)?;
}
quotes.fmt(f)?;
return Ok(());
}
hard_line_break().fmt(f)?;
// We know that the normalized string has \n line endings
offset += "\n".text_len();
// If some line of the docstring is less indented than the function body, we pad all lines to
// align it with the docstring statement. Conversely, if all lines are over-indented, we strip
// the extra indentation. We call this stripped indentation since it's relative to the block
// indent printer-made indentation.
let stripped_indentation = lines
.clone()
// We don't want to count whitespace-only lines as miss-indented
.filter(|line| !line.trim().is_empty())
.map(Indentation::from_str)
.min_by_key(|indentation| indentation.columns())
.unwrap_or_default();
DocstringLinePrinter {
f,
action_queue: VecDeque::new(),
offset,
stripped_indentation,
already_normalized,
quote_char: quotes.quote_char,
code_example: CodeExample::default(),
}
.add_iter(lines)?;
// Same special case in the last line as for the first line
let trim_end = docstring
.as_ref()
.trim_end_matches(|c: char| c.is_whitespace() && c != '\n');
if needs_chaperone_space(normalized.flags(), trim_end, f.context()) {
space().fmt(f)?;
}
write!(f, [quotes])
}
fn contains_unescaped_newline(haystack: &str) -> bool {
let mut rest = haystack;
while let Some(index) = memchr::memchr(b'\\', rest.as_bytes()) {
rest = rest[index + 1..].trim_whitespace_start();
if rest.starts_with('\n') {
return true;
}
}
false
}
/// An abstraction for printing each line of a docstring.
struct DocstringLinePrinter<'ast, 'buf, 'fmt, 'src> {
f: &'fmt mut PyFormatter<'ast, 'buf>,
/// A queue of actions to perform.
///
/// Whenever we process a line, it is possible for it to generate multiple
/// actions to take. The most basic, and most common case, is for the line
/// to just simply be printed as-is. But in some cases, a line is part of
/// a code example that we'd like to reformat. In those cases, the actions
/// can be more complicated.
///
/// Actions are pushed on to the end of the queue and popped from the
/// beginning.
action_queue: VecDeque<CodeExampleAddAction<'src>>,
/// The source offset of the beginning of the line that is currently being
/// printed.
offset: TextSize,
/// Indentation alignment based on the least indented line in the
/// docstring.
stripped_indentation: Indentation,
/// Whether the docstring is overall already considered normalized. When it
/// is, the formatter can take a fast path.
already_normalized: bool,
/// The quote character used by the docstring being printed.
quote_char: Quote,
/// The current code example detected in the docstring.
code_example: CodeExample<'src>,
}
impl<'src> DocstringLinePrinter<'_, '_, '_, 'src> {
/// Print all of the lines in the given iterator to this
/// printer's formatter.
///
/// Note that callers may treat the first line specially, such that the
/// iterator given contains all lines except for the first.
fn add_iter(
&mut self,
mut lines: std::iter::Peekable<std::str::Split<'src, char>>,
) -> FormatResult<()> {
while let Some(line) = lines.next() {
let line = InputDocstringLine {
line,
offset: self.offset,
next: lines.peek().copied(),
};
// We know that the normalized string has \n line endings.
self.offset += line.line.text_len() + "\n".text_len();
self.add_one(line)?;
}
self.code_example.finish(&mut self.action_queue);
self.run_action_queue()
}
/// Adds the given line to this printer.
///
/// Depending on what's in the line, this may or may not print the line
/// immediately to the underlying buffer. If the line starts or is part
/// of an existing code snippet, then the lines will get buffered until
/// the code snippet is complete.
fn add_one(&mut self, line: InputDocstringLine<'src>) -> FormatResult<()> {
// Just pass through the line as-is without looking for a code snippet
// when docstring code formatting is disabled. And also when we are
// formatting a code snippet so as to avoid arbitrarily nested code
// snippet formatting. We avoid this because it's likely quite tricky
// to get right 100% of the time, although perhaps not impossible. It's
// not clear that it's worth the effort to support.
if !self.f.options().docstring_code().is_enabled() || self.f.context().docstring().is_some()
{
return self.print_one(&line.as_output());
}
self.code_example.add(line, &mut self.action_queue);
self.run_action_queue()
}
/// Process any actions in this printer's queue until the queue is empty.
fn run_action_queue(&mut self) -> FormatResult<()> {
while let Some(action) = self.action_queue.pop_front() {
match action {
CodeExampleAddAction::Print { original } => {
self.print_one(&original.as_output())?;
}
CodeExampleAddAction::Kept => {}
CodeExampleAddAction::Reset { code } => {
for codeline in code {
self.print_one(&codeline.original.as_output())?;
}
}
CodeExampleAddAction::Format { mut kind } => {
let Some(formatted_lines) = self.format(&mut kind)? else {
// Since we've failed to emit these lines, we need to
// put them back in the queue but have them jump to the
// front of the queue to get processed before any other
// action.
self.action_queue.push_front(CodeExampleAddAction::Reset {
code: kind.into_code(),
});
continue;
};
self.already_normalized = false;
match kind {
CodeExampleKind::Doctest(CodeExampleDoctest { ps1_indent, .. }) => {
let mut lines = formatted_lines.into_iter();
let Some(first) = lines.next() else { continue };
self.print_one(
&first.map(|line| std::format!("{ps1_indent}>>> {line}")),
)?;
for docline in lines {
self.print_one(
&docline.map(|line| std::format!("{ps1_indent}... {line}")),
)?;
}
}
CodeExampleKind::Rst(litblock) => {
let Some(min_indent) = litblock.min_indent else {
continue;
};
// This looks suspicious, but it's consistent with the whitespace
// normalization that will occur anyway.
let indent = " ".repeat(min_indent.columns());
for docline in formatted_lines {
self.print_one(
&docline.map(|line| std::format!("{indent}{line}")),
)?;
}
}
CodeExampleKind::Markdown(fenced) => {
// This looks suspicious, but it's consistent with the whitespace
// normalization that will occur anyway.
let indent = " ".repeat(fenced.opening_fence_indent.columns());
for docline in formatted_lines {
self.print_one(
&docline.map(|line| std::format!("{indent}{line}")),
)?;
}
}
}
}
}
}
Ok(())
}
/// Prints the single line given.
///
/// This mostly just handles indentation and ensuring line breaks are
/// inserted as appropriate before passing it on to the formatter to
/// print to the buffer.
fn print_one(&mut self, line: &OutputDocstringLine<'_>) -> FormatResult<()> {
let trim_end = line.line.trim_end();
if trim_end.is_empty() {
return if line.is_last {
// If the doc string ends with ` """`, the last line is
// ` `, but we don't want to insert an empty line (but close
// the docstring).
Ok(())
} else {
empty_line().fmt(self.f)
};
}
let indent_offset = match self.f.options().indent_style() {
// Normalize all indent to spaces.
IndentStyle::Space => {
let tab_or_non_ascii_space = trim_end
.chars()
.take_while(|c| c.is_whitespace())
.any(|c| c != ' ');
if tab_or_non_ascii_space {
None
} else {
// It's guaranteed that the `indent` is all spaces because `tab_or_non_ascii_space` is
// `false` (indent contains neither tabs nor non-space whitespace).
let stripped_indentation_len = self.stripped_indentation.text_len();
// Take the string with the trailing whitespace removed, then also
// skip the leading whitespace.
Some(stripped_indentation_len)
}
}
IndentStyle::Tab => {
let line_indent = Indentation::from_str(trim_end);
let non_ascii_whitespace = trim_end
.chars()
.take_while(|c| c.is_whitespace())
.any(|c| !matches!(c, ' ' | '\t'));
let trimmed = line_indent.trim_start(self.stripped_indentation);
// Preserve tabs that are used for indentation, but only if the indent isn't
// * a mix of tabs and spaces
// * the `stripped_indentation` is a prefix of the line's indent
// * the trimmed indent isn't spaces followed by tabs because that would result in a
// mixed tab, spaces, tab indentation, resulting in instabilities.
let preserve_indent = !non_ascii_whitespace
&& trimmed.is_some_and(|trimmed| !trimmed.is_spaces_tabs());
preserve_indent.then_some(self.stripped_indentation.text_len())
}
};
if let Some(indent_offset) = indent_offset {
// Take the string with the trailing whitespace removed, then also
// skip the leading whitespace.
if self.already_normalized {
let trimmed_line_range =
TextRange::at(line.offset, trim_end.text_len()).add_start(indent_offset);
source_text_slice(trimmed_line_range).fmt(self.f)?;
} else {
text(&trim_end[indent_offset.to_usize()..]).fmt(self.f)?;
}
} else {
// We strip the indentation that is shared with the docstring
// statement, unless a line was indented less than the docstring
// statement, in which case we strip only this much indentation to
// implicitly pad all lines by the difference, or all lines were
// overindented, in which case we strip the additional whitespace
// (see example in [`format_docstring`] doc comment). We then
// prepend the in-docstring indentation to the string.
let indent_len =
Indentation::from_str(trim_end).columns() - self.stripped_indentation.columns();
let in_docstring_indent = " ".repeat(indent_len) + trim_end.trim_start();
text(&in_docstring_indent).fmt(self.f)?;
}
// We handled the case that the closing quotes are on their own line
// above (the last line is empty except for whitespace). If they are on
// the same line as content, we don't insert a line break.
if !line.is_last {
hard_line_break().fmt(self.f)?;
}
Ok(())
}
/// Given a code example, format them and return
/// the formatted code as a sequence of owned docstring lines.
///
/// This may mutate the code example in place if extracting the lines of
/// code requires adjusting which part of each line is used for the actual
/// code bit.
///
/// This routine generally only returns an error when the recursive call
/// to the formatter itself returns a `FormatError`. In all other cases
/// (for example, if the code snippet is invalid Python or even if the
/// resulting reformatted code snippet is invalid Python), then `Ok(None)`
/// is returned. In this case, callers should assume that a reformatted
/// code snippet is unavailable and bail out of trying to format it.
///
/// Currently, when the above cases happen and `Ok(None)` is returned, the
/// routine is silent about it. So from the user's perspective, this will
/// fail silently. Ideally, this would at least emit a warning message,
/// but at time of writing, it wasn't clear to me how to best do that.
fn format(
&mut self,
kind: &mut CodeExampleKind<'_>,
) -> FormatResult<Option<Vec<OutputDocstringLine<'static>>>> {
let line_width = match self.f.options().docstring_code_line_width() {
DocstringCodeLineWidth::Fixed(width) => width,
DocstringCodeLineWidth::Dynamic => {
let global_line_width = self.f.options().line_width().value();
let indent_width = self.f.options().indent_width();
let indent_level = self.f.context().indent_level();
let mut current_indent = indent_level
.to_ascii_spaces(indent_width)
.saturating_add(kind.extra_indent_ascii_spaces());
// Add the in-docstring indentation
current_indent = current_indent.saturating_add(
u16::try_from(
kind.indent()
.columns()
.saturating_sub(self.stripped_indentation.columns()),
)
.unwrap_or(u16::MAX),
);
let width = std::cmp::max(1, global_line_width.saturating_sub(current_indent));
LineWidth::try_from(width).expect("width should be capped at a minimum of 1")
}
};
let code = kind.code();
let (Some(unformatted_first), Some(unformatted_last)) = (code.first(), code.last()) else {
return Ok(None);
};
let codeblob = code
.iter()
.map(|line| line.code)
.collect::<Vec<&str>>()
.join("\n");
let options = self
.f
.options()
.clone()
.with_line_width(line_width)
// It's perhaps a little odd to be hard-coding the indent
// style here, but I believe it is necessary as a result
// of the whitespace normalization otherwise done in
// docstrings. Namely, tabs are rewritten with ASCII
// spaces. If code examples in docstrings are formatted
// with tabs and those tabs end up getting rewritten, this
// winds up screwing with the indentation in ways that
// results in formatting no longer being idempotent. Since
// tabs will get erased anyway, we just clobber them here
// instead of later, and as a result, get more consistent
// results.
.with_indent_style(IndentStyle::Space)
.with_source_map_generation(SourceMapGeneration::Disabled);
let printed = match docstring_format_source(options, self.quote_char, &codeblob) {
Ok(printed) => printed,
Err(FormatModuleError::FormatError(err)) => return Err(err),
Err(FormatModuleError::ParseError(_) | FormatModuleError::PrintError(_)) => {
return Ok(None);
}
};
// This is a little hokey, but we want to determine whether the
// reformatted code snippet will lead to an overall invalid docstring.
// So attempt to parse it as Python code, but ensure it is wrapped
// within a docstring using the same quotes as the docstring we're in
// right now.
//
// This is an unfortunate stop-gap to attempt to prevent us from
// writing invalid Python due to some oddity of the code snippet within
// a docstring. As we fix corner cases over time, we can perhaps
// remove this check. See the `doctest_invalid_skipped` tests in
// `docstring_code_examples.py` for when this check is relevant.
let wrapped = match self.quote_char {
Quote::Single => std::format!("'''{}'''", printed.as_code()),
Quote::Double => {
std::format!(r#""""{}""""#, printed.as_code())
}
};
let result =
ruff_python_parser::parse(&wrapped, ParseOptions::from(self.f.options().source_type()));
// If the resulting code is not valid, then reset and pass through
// the docstring lines as-is.
if result.is_err() {
return Ok(None);
}
let mut lines = printed
.as_code()
.lines()
.map(|line| OutputDocstringLine {
line: Cow::Owned(line.to_string()),
offset: unformatted_first.original.offset,
is_last: false,
})
.collect::<Vec<_>>();
if let Some(reformatted_last) = lines.last_mut() {
reformatted_last.is_last = unformatted_last.original.is_last();
}
Ok(Some(lines))
}
}
/// Represents a single line in a docstring.
///
/// This type is only used to represent the original lines in a docstring.
/// Specifically, the line contained in this type has no changes from the input
/// source.
#[derive(Clone, Copy, Debug)]
struct InputDocstringLine<'src> {
/// The actual text of the line, not including the line terminator.
///
/// In practice, this line is borrowed when it corresponds to an original
/// unformatted line in a docstring, and owned when it corresponds to a
/// reformatted line (e.g., from a code snippet) in a docstring.
line: &'src str,
/// The offset into the source document which this line corresponds to.
offset: TextSize,
/// For any input line that isn't the last line, this contains a reference
/// to the line immediately following this one.
///
/// This is `None` if and only if this is the last line in the docstring.
next: Option<&'src str>,
}
impl<'src> InputDocstringLine<'src> {
/// Borrow this input docstring line as an output docstring line.
fn as_output(&self) -> OutputDocstringLine<'src> {
OutputDocstringLine {
line: Cow::Borrowed(self.line),
offset: self.offset,
is_last: self.is_last(),
}
}
/// Whether this is the last line in the docstring or not.
fn is_last(&self) -> bool {
self.next.is_none()
}
}
/// Represents a single reformatted code line in a docstring.
///
/// An input source line may be cheaply converted to an output source line.
/// This is the common case: an input source line is printed pretty much as it
/// is, with perhaps some whitespace normalization applied. The less common
/// case is that the output docstring line owns its `line` because it was
/// produced by reformatting a code snippet.
#[derive(Clone, Debug)]
struct OutputDocstringLine<'src> {
/// The output line.
///
/// This is an owned variant in precisely the cases where it corresponds to
/// a line from a reformatted code snippet. In other cases, it is borrowed
/// from the input docstring line as-is.
line: Cow<'src, str>,
/// The offset into the source document which this line corresponds to.
/// Currently, this is an estimate.
offset: TextSize,
/// Whether this is the last line in a docstring or not. This is determined
/// by whether the last line in the code snippet was also the last line in
/// the docstring. If it was, then it follows that the last line in the
/// reformatted code snippet is also the last line in the docstring.
is_last: bool,
}
impl OutputDocstringLine<'_> {
/// Return this reformatted line, but with the given function applied to
/// the text of the line.
fn map(self, mut map: impl FnMut(&str) -> String) -> OutputDocstringLine<'static> {
OutputDocstringLine {
line: Cow::Owned(map(&self.line)),
..self
}
}
}
/// A single code example extracted from a docstring.
///
/// This represents an intermediate state from when the code example was first
/// found all the way up until the point at which the code example has finished
/// and is reformatted.
///
/// Its default state is "empty." That is, that no code example is currently
/// being collected.
#[derive(Debug, Default)]
struct CodeExample<'src> {
/// The kind of code example being collected, or `None` if no code example
/// has been observed.
///
/// The kind is split out into a separate type so that we can pass it
/// around and have a guarantee that a code example actually exists.
kind: Option<CodeExampleKind<'src>>,
}
impl<'src> CodeExample<'src> {
/// Attempt to add an original line from a docstring to this code example.
///
/// Based on the line and the internal state of whether a code example is
/// currently being collected or not, this will push an "action" to the
/// given queue for the caller to perform. The typical case is a "print"
/// action, which instructs the caller to just print the line as though it
/// were not part of a code snippet.
fn add(
&mut self,
original: InputDocstringLine<'src>,
queue: &mut VecDeque<CodeExampleAddAction<'src>>,
) {
match self.kind.take() {
// There's no existing code example being built, so we look for
// the start of one or otherwise tell the caller we couldn't find
// anything.
None => {
self.add_start(original, queue);
}
Some(CodeExampleKind::Doctest(doctest)) => {
let Some(doctest) = doctest.add_code_line(original, queue) else {
self.add_start(original, queue);
return;
};
self.kind = Some(CodeExampleKind::Doctest(doctest));
}
Some(CodeExampleKind::Rst(litblock)) => {
let Some(litblock) = litblock.add_code_line(original, queue) else {
self.add_start(original, queue);
return;
};
self.kind = Some(CodeExampleKind::Rst(litblock));
}
Some(CodeExampleKind::Markdown(fenced)) => {
let Some(fenced) = fenced.add_code_line(original, queue) else {
// For Markdown, the last line in a block should be printed
// as-is. Especially since the last line in many Markdown
// fenced code blocks is identical to the start of a code
// block. So if we try to start a new code block with
// the last line, we risk opening another Markdown block
// inappropriately.
return;
};
self.kind = Some(CodeExampleKind::Markdown(fenced));
}
}
}
/// Finish the code example by generating any final actions if applicable.
///
/// This typically adds an action when the end of a code example coincides
/// with the end of the docstring.
fn finish(&mut self, queue: &mut VecDeque<CodeExampleAddAction<'src>>) {
let Some(kind) = self.kind.take() else { return };
queue.push_back(CodeExampleAddAction::Format { kind });
}
/// Looks for the start of a code example. If one was found, then the given
/// line is kept and added as part of the code example. Otherwise, the line
/// is pushed onto the queue unchanged to be printed as-is.
///
/// # Panics
///
/// This panics when the existing code-example is any non-None value. That
/// is, this routine assumes that there is no ongoing code example being
/// collected and looks for the beginning of another code example.
fn add_start(
&mut self,
original: InputDocstringLine<'src>,
queue: &mut VecDeque<CodeExampleAddAction<'src>>,
) {
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/string/implicit.rs | crates/ruff_python_formatter/src/string/implicit.rs | use itertools::Itertools;
use ruff_formatter::{FormatContext, format_args, write};
use ruff_python_ast::str::{Quote, TripleQuotes};
use ruff_python_ast::str_prefix::{
AnyStringPrefix, ByteStringPrefix, FStringPrefix, StringLiteralPrefix, TStringPrefix,
};
use ruff_python_ast::{
AnyStringFlags, FString, InterpolatedStringElement, StringFlags, StringLike, StringLikePart,
TString,
};
use ruff_source_file::LineRanges;
use ruff_text_size::{Ranged, TextRange};
use std::borrow::Cow;
use crate::comments::{leading_comments, trailing_comments};
use crate::expression::parentheses::in_parentheses_only_soft_line_break_or_space;
use crate::other::interpolated_string::{InterpolatedStringContext, InterpolatedStringLayout};
use crate::other::interpolated_string_element::FormatInterpolatedElement;
use crate::prelude::*;
use crate::string::docstring::needs_chaperone_space;
use crate::string::normalize::{
QuoteMetadata, is_fstring_with_quoted_debug_expression,
is_fstring_with_triple_quoted_literal_expression_containing_quotes,
is_interpolated_string_with_quoted_format_spec_and_debug,
};
use crate::string::{StringLikeExtensions, StringNormalizer, StringQuotes, normalize_string};
/// Formats any implicitly concatenated string. This could be any valid combination
/// of string, bytes, f-string, or t-string literals.
pub(crate) struct FormatImplicitConcatenatedString<'a> {
string: StringLike<'a>,
}
impl<'a> FormatImplicitConcatenatedString<'a> {
pub(crate) fn new(string: impl Into<StringLike<'a>>) -> Self {
Self {
string: string.into(),
}
}
}
impl Format<PyFormatContext<'_>> for FormatImplicitConcatenatedString<'_> {
fn fmt(&self, f: &mut PyFormatter) -> FormatResult<()> {
let flat = FormatImplicitConcatenatedStringFlat::new(self.string, f.context());
let expanded = FormatImplicitConcatenatedStringExpanded::new(
self.string,
if flat.is_some() {
ImplicitConcatenatedLayout::MaybeFlat
} else {
ImplicitConcatenatedLayout::Multipart
},
);
// If the string can be joined, try joining the implicit concatenated string into a single string
// if it fits on the line. Otherwise, parenthesize the string parts and format each part on its
// own line.
if let Some(flat) = flat {
write!(
f,
[if_group_fits_on_line(&flat), if_group_breaks(&expanded)]
)
} else {
expanded.fmt(f)
}
}
}
/// Formats an implicit concatenated string where parts are separated by a space or line break.
pub(crate) struct FormatImplicitConcatenatedStringExpanded<'a> {
string: StringLike<'a>,
layout: ImplicitConcatenatedLayout,
}
impl<'a> FormatImplicitConcatenatedStringExpanded<'a> {
pub(crate) fn new(string: StringLike<'a>, layout: ImplicitConcatenatedLayout) -> Self {
assert!(string.is_implicit_concatenated());
Self { string, layout }
}
}
impl Format<PyFormatContext<'_>> for FormatImplicitConcatenatedStringExpanded<'_> {
fn fmt(&self, f: &mut Formatter<PyFormatContext<'_>>) -> FormatResult<()> {
let comments = f.context().comments().clone();
// Keep implicit concatenated strings expanded unless they're already written on a single line.
if matches!(self.layout, ImplicitConcatenatedLayout::Multipart)
&& self.string.parts().tuple_windows().any(|(a, b)| {
f.context()
.source()
.contains_line_break(TextRange::new(a.end(), b.start()))
})
{
expand_parent().fmt(f)?;
}
let mut joiner = f.join_with(in_parentheses_only_soft_line_break_or_space());
for part in self.string.parts() {
let format_part = format_with(|f: &mut PyFormatter| match part {
StringLikePart::String(part) => part.format().fmt(f),
StringLikePart::Bytes(bytes_literal) => bytes_literal.format().fmt(f),
StringLikePart::FString(part) => part.format().fmt(f),
StringLikePart::TString(part) => part.format().fmt(f),
});
let part_comments = comments.leading_dangling_trailing(part);
joiner.entry(&format_args![
leading_comments(part_comments.leading),
format_part,
trailing_comments(part_comments.trailing)
]);
}
joiner.finish()
}
}
#[derive(Copy, Clone, Debug)]
pub(crate) enum ImplicitConcatenatedLayout {
/// The string might get joined into a single string if it fits on a single line.
MaybeFlat,
/// The string will remain a multipart string.
Multipart,
}
/// Formats an implicit concatenated string where parts are joined into a single string if possible.
pub(crate) struct FormatImplicitConcatenatedStringFlat<'a> {
string: StringLike<'a>,
flags: AnyStringFlags,
docstring: bool,
}
impl<'a> FormatImplicitConcatenatedStringFlat<'a> {
/// Creates a new formatter. Returns `None` if the string can't be merged into a single string.
pub(crate) fn new(string: StringLike<'a>, context: &PyFormatContext) -> Option<Self> {
fn merge_flags(string: StringLike, context: &PyFormatContext) -> Option<AnyStringFlags> {
// Multiline strings can never fit on a single line.
if string.is_multiline(context) {
return None;
}
let first_part = string.parts().next()?;
// The string is either a regular string, f-string, t-string, or bytes string.
let normalizer = StringNormalizer::from_context(context);
// Some if a part requires preserving its quotes.
let mut preserve_quotes_requirement: Option<Quote> = None;
// Early exit if it's known that this string can't be joined
for part in string.parts() {
// Similar to Black, don't collapse triple quoted and raw strings.
// We could technically join strings that are raw-strings and use the same quotes but lets not do this for now.
// Joining triple quoted strings is more complicated because an
// implicit concatenated string could become a docstring (if it's the first string in a block).
// That means the joined string formatting would have to call into
// the docstring formatting or otherwise guarantee that the output
// won't change on a second run.
if part.flags().is_triple_quoted() || part.flags().is_raw_string() {
return None;
}
// For now, preserve comments documenting a specific part over possibly
// collapsing onto a single line. Collapsing could result in pragma comments
// now covering more code.
if context.comments().leading_trailing(&part).next().is_some() {
return None;
}
match part {
StringLikePart::FString(fstring) => {
if matches!(string, StringLike::TString(_)) {
// Don't concatenate t-strings and f-strings
return None;
}
if context.options().target_version().supports_pep_701() {
if is_interpolated_string_with_quoted_format_spec_and_debug(
&fstring.elements,
fstring.flags.into(),
context,
) {
if preserve_quotes_requirement
.is_some_and(|quote| quote != part.flags().quote_style())
{
return None;
}
preserve_quotes_requirement = Some(part.flags().quote_style());
}
}
// Avoid invalid syntax for pre Python 312:
// * When joining parts that have debug expressions with quotes: `f"{10 + len('bar')=}" f'{10 + len("bar")=}'
// * When joining parts that contain triple quoted strings with quotes: `f"{'''test ' '''}" f'{"""other " """}'`
else if is_fstring_with_quoted_debug_expression(fstring, context)
|| is_fstring_with_triple_quoted_literal_expression_containing_quotes(
fstring, context,
)
{
if preserve_quotes_requirement
.is_some_and(|quote| quote != part.flags().quote_style())
{
return None;
}
preserve_quotes_requirement = Some(part.flags().quote_style());
}
}
StringLikePart::TString(tstring) => {
if is_interpolated_string_with_quoted_format_spec_and_debug(
&tstring.elements,
tstring.flags.into(),
context,
) {
if preserve_quotes_requirement
.is_some_and(|quote| quote != part.flags().quote_style())
{
return None;
}
preserve_quotes_requirement = Some(part.flags().quote_style());
}
}
StringLikePart::Bytes(_) | StringLikePart::String(_) => {}
}
}
// The string is either a regular string, f-string, or bytes string.
let mut merged_quotes: Option<QuoteMetadata> = None;
// Only preserve the string type but disregard the `u` and `r` prefixes.
// * It's not necessary to preserve the `r` prefix because Ruff doesn't support joining raw strings (we shouldn't get here).
// * It's not necessary to preserve the `u` prefix because Ruff discards the `u` prefix (it's meaningless in Python 3+)
let prefix = match string {
StringLike::String(_) => AnyStringPrefix::Regular(StringLiteralPrefix::Empty),
StringLike::Bytes(_) => AnyStringPrefix::Bytes(ByteStringPrefix::Regular),
StringLike::FString(_) => AnyStringPrefix::Format(FStringPrefix::Regular),
StringLike::TString(_) => AnyStringPrefix::Template(TStringPrefix::Regular),
};
let quote = if let Some(quote) = preserve_quotes_requirement {
quote
} else {
// Only determining the preferred quote for the first string is sufficient
// because we don't support joining triple quoted strings with non triple quoted strings.
if let Ok(preferred_quote) =
Quote::try_from(normalizer.preferred_quote_style(first_part))
{
for part in string.parts() {
let part_quote_metadata =
QuoteMetadata::from_part(part, context, preferred_quote);
if let Some(merged) = merged_quotes.as_mut() {
*merged = part_quote_metadata.merge(merged)?;
} else {
merged_quotes = Some(part_quote_metadata);
}
}
merged_quotes?.choose(preferred_quote)
} else {
// Use the quotes of the first part if the quotes should be preserved.
first_part.flags().quote_style()
}
};
Some(AnyStringFlags::new(prefix, quote, TripleQuotes::No))
}
if !string.is_implicit_concatenated() {
return None;
}
Some(Self {
flags: merge_flags(string, context)?,
string,
docstring: false,
})
}
pub(crate) fn set_docstring(&mut self, is_docstring: bool) {
self.docstring = is_docstring;
}
pub(crate) fn string(&self) -> StringLike<'a> {
self.string
}
}
impl Format<PyFormatContext<'_>> for FormatImplicitConcatenatedStringFlat<'_> {
fn fmt(&self, f: &mut Formatter<PyFormatContext<'_>>) -> FormatResult<()> {
// Merges all string parts into a single string.
let quotes = StringQuotes::from(self.flags);
write!(f, [self.flags.prefix(), quotes])?;
let mut parts = self.string.parts().peekable();
// Trim implicit concatenated strings in docstring positions.
// Skip over any trailing parts that are all whitespace.
// Leading parts are handled as part of the formatting loop below.
if self.docstring {
for part in self.string.parts().rev() {
assert!(part.is_string_literal());
if f.context().source()[part.content_range()].trim().is_empty() {
// Don't format the part.
parts.next_back();
} else {
break;
}
}
}
let mut first_non_empty = self.docstring;
while let Some(part) = parts.next() {
match part {
StringLikePart::String(_) | StringLikePart::Bytes(_) => {
FormatLiteralContent {
range: part.content_range(),
flags: self.flags,
is_interpolated_string: false,
trim_start: first_non_empty && self.docstring,
trim_end: self.docstring && parts.peek().is_none(),
}
.fmt(f)?;
if first_non_empty {
first_non_empty = f.context().source()[part.content_range()]
.trim_start()
.is_empty();
}
}
StringLikePart::FString(FString { elements, .. })
| StringLikePart::TString(TString { elements, .. }) => {
for element in elements {
match element {
InterpolatedStringElement::Literal(literal) => {
FormatLiteralContent {
range: literal.range(),
flags: self.flags,
is_interpolated_string: true,
trim_end: false,
trim_start: false,
}
.fmt(f)?;
}
// Formatting the expression here and in the expanded version is safe **only**
// because we assert that the f/t-string never contains any comments.
InterpolatedStringElement::Interpolation(expression) => {
let context = InterpolatedStringContext::new(
self.flags,
InterpolatedStringLayout::from_interpolated_string_elements(
elements,
f.context().source(),
),
);
FormatInterpolatedElement::new(expression, context).fmt(f)?;
}
}
}
}
}
}
quotes.fmt(f)
}
}
struct FormatLiteralContent {
range: TextRange,
flags: AnyStringFlags,
is_interpolated_string: bool,
trim_start: bool,
trim_end: bool,
}
impl Format<PyFormatContext<'_>> for FormatLiteralContent {
fn fmt(&self, f: &mut PyFormatter) -> FormatResult<()> {
let content = &f.context().source()[self.range];
let mut normalized = normalize_string(
content,
0,
self.flags,
self.flags.is_interpolated_string() && !self.is_interpolated_string,
);
// Trim the start and end of the string if it's the first or last part of a docstring.
// This is rare, so don't bother with optimizing to use `Cow`.
if self.trim_start {
let trimmed = normalized.trim_start();
if trimmed.len() < normalized.len() {
normalized = trimmed.to_string().into();
}
}
if self.trim_end {
let trimmed = normalized.trim_end();
if trimmed.len() < normalized.len() {
normalized = trimmed.to_string().into();
}
}
if !normalized.is_empty() {
match &normalized {
Cow::Borrowed(_) => source_text_slice(self.range).fmt(f)?,
Cow::Owned(normalized) => text(normalized).fmt(f)?,
}
if self.trim_end && needs_chaperone_space(self.flags, &normalized, f.context()) {
space().fmt(f)?;
}
}
Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/string/mod.rs | crates/ruff_python_formatter/src/string/mod.rs | use memchr::memchr2;
pub(crate) use normalize::{NormalizedString, StringNormalizer, normalize_string};
use ruff_python_ast::StringLikePart;
use ruff_python_ast::str::{Quote, TripleQuotes};
use ruff_python_ast::{
self as ast, AnyStringFlags, StringFlags,
str_prefix::{AnyStringPrefix, StringLiteralPrefix},
};
use ruff_source_file::LineRanges;
use ruff_text_size::Ranged;
use crate::QuoteStyle;
use crate::prelude::*;
pub(crate) mod docstring;
pub(crate) mod implicit;
mod normalize;
impl Format<PyFormatContext<'_>> for AnyStringPrefix {
fn fmt(&self, f: &mut PyFormatter) -> FormatResult<()> {
// Remove the unicode prefix `u` if any because it is meaningless in Python 3+.
if !matches!(
self,
AnyStringPrefix::Regular(StringLiteralPrefix::Empty | StringLiteralPrefix::Unicode)
) {
token(self.as_str()).fmt(f)?;
}
Ok(())
}
}
#[derive(Copy, Clone, Debug)]
pub(crate) struct StringQuotes {
triple: bool,
quote_char: Quote,
}
impl Format<PyFormatContext<'_>> for StringQuotes {
fn fmt(&self, f: &mut PyFormatter) -> FormatResult<()> {
let quotes = match (self.quote_char, self.triple) {
(Quote::Single, false) => "'",
(Quote::Single, true) => "'''",
(Quote::Double, false) => "\"",
(Quote::Double, true) => "\"\"\"",
};
token(quotes).fmt(f)
}
}
impl From<AnyStringFlags> for StringQuotes {
fn from(value: AnyStringFlags) -> Self {
Self {
triple: value.is_triple_quoted(),
quote_char: value.quote_style(),
}
}
}
impl TryFrom<QuoteStyle> for Quote {
type Error = ();
fn try_from(style: QuoteStyle) -> Result<Quote, ()> {
match style {
QuoteStyle::Single => Ok(Quote::Single),
QuoteStyle::Double => Ok(Quote::Double),
QuoteStyle::Preserve => Err(()),
}
}
}
impl From<Quote> for QuoteStyle {
fn from(value: Quote) -> Self {
match value {
Quote::Single => QuoteStyle::Single,
Quote::Double => QuoteStyle::Double,
}
}
}
// Extension trait that adds formatter specific helper methods to `StringLike`.
pub(crate) trait StringLikeExtensions {
fn is_multiline(&self, context: &PyFormatContext) -> bool;
}
impl StringLikeExtensions for ast::StringLike<'_> {
fn is_multiline(&self, context: &PyFormatContext) -> bool {
// Helper for f-string and t-string parts
fn contains_line_break_or_comments(
elements: &ast::InterpolatedStringElements,
context: &PyFormatContext,
triple_quotes: TripleQuotes,
) -> bool {
elements.iter().any(|element| match element {
ast::InterpolatedStringElement::Literal(literal) => {
triple_quotes.is_yes() && context.source().contains_line_break(literal.range())
}
ast::InterpolatedStringElement::Interpolation(expression) => {
// Expressions containing comments can't be joined.
//
// Format specifiers needs to be checked as well. For example, the
// following should be considered multiline because the literal
// part of the format specifier contains a newline at the end
// (`.3f\n`):
//
// ```py
// x = f"hello {a + b + c + d:.3f
// } world"
// ```
context.comments().contains_comments(expression.into())
|| expression.format_spec.as_deref().is_some_and(|spec| {
contains_line_break_or_comments(&spec.elements, context, triple_quotes)
})
|| expression.debug_text.as_ref().is_some_and(|debug_text| {
memchr2(b'\n', b'\r', debug_text.leading.as_bytes()).is_some()
|| memchr2(b'\n', b'\r', debug_text.trailing.as_bytes()).is_some()
})
}
})
}
self.parts().any(|part| match part {
StringLikePart::String(_) | StringLikePart::Bytes(_) => {
part.flags().is_triple_quoted()
&& context.source().contains_line_break(part.range())
}
StringLikePart::FString(f_string) => contains_line_break_or_comments(
&f_string.elements,
context,
f_string.flags.triple_quotes(),
),
StringLikePart::TString(t_string) => contains_line_break_or_comments(
&t_string.elements,
context,
t_string.flags.triple_quotes(),
),
})
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.