Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
50 changes: 50 additions & 0 deletions crates/ruff_linter/src/rules/flake8_type_checking/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -517,6 +517,56 @@ mod tests {
",
"tc010_precedence_over_tc008"
)]
#[test_case(
r"
from __future__ import annotations

import importlib.abc
from typing import TYPE_CHECKING

if TYPE_CHECKING:
import importlib.machinery

class Foo(importlib.abc.MetaPathFinder):
def bar(self) -> importlib.machinery.ModuleSpec: ...
",
"github_issue_15723_regression_test"
)]
// FIXME: Currently, whether or not we emit a TC004 for importlib.abc
// depends on the order of the two overlapping imports. However,
// fixing this is not trivial, since we would need to group the
// overlapping bindings together, combine their references, and
// for each reference check which binding it should belong to.
Comment on lines +535 to +539
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Actually, it's still quite a bit more complex than that: bindings can overlap either partially or completely, so whether we can look at the references of a shadowing import depends both on how it overlaps with our import and on whether those overlapping imports are in a runtime context.

We also don't properly take into account the fact that the parent module will be fully imported by a submodule import. So for the parent module we would need to check whether there are any bindings in a runtime context at all, in order to avoid false negatives — e.g. given a runtime use of os.sep with a typing-only import of os.path, os.path should be flagged if there isn't also a runtime os import.

So for something like

import a
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    import a.b
    import a.b.d

x = a.b  # leads to TC004 for `import a.b`

import a.b.c

y = a.b  # fine
z = a.b.d  # leads to TC004 for `import a.b.d`

For reference x we would need to detect that the shadowed binding a for import a doesn't include a.b; but for reference y we would need to detect that import a.b.c fully covers the shadowed import a.b, so we don't have to worry about it. For z, however, import a.b.c doesn't fully cover import a.b.d, so we do need to look at the references to import a.b.d to check whether it has any runtime references. So the combinatorics quickly get out of hand here.

The approach chosen in this PR still seems like a reasonable compromise, since it will catch some things with submodule imports while incorrectly flagging fewer imports that shouldn't be flagged. I.e. we raise the false-negative rate slightly, but we get rid of the false positives we're most likely to encounter in the wild.

Anything more accurate would probably need multi-file analysis and some degree of type inference, so we actually know which attributes are currently available at runtime in the current scope and through which binding.

Another approach we could take is to just never emit TC004 if we're shadowing another binding. This would completely get rid of any false positives. But I feel the approach this PR has taken is more balanced.

#[test_case(
r"
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
import importlib.abc
import importlib.machinery

class Foo(importlib.abc.MetaPathFinder):
def bar(self) -> importlib.machinery.ModuleSpec: ...
",
"github_issue_15723_false_negative"
)]
#[test_case(
r"
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
import importlib.machinery
import importlib.abc

class Foo(importlib.abc.MetaPathFinder):
def bar(self) -> importlib.machinery.ModuleSpec: ...
",
"github_issue_15723_ideal_import_order"
)]
fn contents(contents: &str, snapshot: &str) {
let diagnostics = test_snippet(
contents,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ use rustc_hash::FxHashMap;

use ruff_diagnostics::{Diagnostic, Fix, FixAvailability, Violation};
use ruff_macros::{derive_message_formats, ViolationMetadata};
use ruff_python_ast::{self as ast, Expr};
use ruff_python_semantic::{Imported, NodeId, Scope, ScopeId};
use ruff_text_size::Ranged;

Expand Down Expand Up @@ -110,7 +111,7 @@ pub(crate) fn runtime_import_in_type_checking_block(checker: &Checker, scope: &S
.lookup_symbol_in_scope("__getattr__", ScopeId::global(), false)
.is_some();

for binding_id in scope.binding_ids() {
for (_, binding_id) in scope.all_bindings() {
let binding = checker.semantic().binding(binding_id);

let Some(import) = binding.as_any_import() else {
Expand All @@ -124,9 +125,45 @@ pub(crate) fn runtime_import_in_type_checking_block(checker: &Checker, scope: &S
if binding.context.is_typing()
&& binding.references().any(|reference_id| {
let reference = checker.semantic().reference(reference_id);
if reference.in_typing_context() {
return false;
}

if ignore_dunder_all_references && reference.in_dunder_all_definition() {
return false;
}

// for submodule imports we need to check if the reference
// actually refers to this submodule, or a different one
let Some(submodule_import) = import.as_submodule_import() else {
return true;
};

reference.in_runtime_context()
&& !(ignore_dunder_all_references && reference.in_dunder_all_definition())
let Some(expression_id) = reference.expression_id() else {
return false;
};
// references should generally point towards an `Expr::Name` node,
// so walking the parent expressions in tandem with the segments
// of the qualified name gives us a `starts_with` check for
// the reference against the import
for (segment, expr) in std::iter::zip(
submodule_import.qualified_name().segments(),
checker.semantic().expressions(expression_id),
)
// we discard the first pair, since it's guaranteed to match
// this also simplifies the loop logic, since we only have to
// handle `Expr::Attribute` nodes
.skip(1)
{
let Expr::Attribute(ast::ExprAttribute { attr, .. }) = expr else {
// we're past the attribute nodes, so we can stop
break;
};
if *segment != attr.as_str() {
return false;
}
}
true
})
{
let Some(node_id) = binding.source else {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
---
source: crates/ruff_linter/src/rules/flake8_type_checking/mod.rs
---

Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
---
source: crates/ruff_linter/src/rules/flake8_type_checking/mod.rs
---
<filename>:8:12: TC004 [*] Move import `importlib.abc` out of type-checking block. Import is used for more than type hinting.
|
6 | if TYPE_CHECKING:
7 | import importlib.machinery
8 | import importlib.abc
| ^^^^^^^^^^^^^ TC004
9 |
10 | class Foo(importlib.abc.MetaPathFinder):
|
= help: Move out of type-checking block

ℹ Unsafe fix
2 2 | from __future__ import annotations
3 3 |
4 4 | from typing import TYPE_CHECKING
5 |+import importlib.abc
5 6 |
6 7 | if TYPE_CHECKING:
7 8 | import importlib.machinery
8 |- import importlib.abc
9 9 |
10 10 | class Foo(importlib.abc.MetaPathFinder):
11 11 | def bar(self) -> importlib.machinery.ModuleSpec: ...
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
---
source: crates/ruff_linter/src/rules/flake8_type_checking/mod.rs
---

Loading