mirror of
https://github.com/saymrwulf/pytorch.git
synced 2026-05-14 20:57:59 +00:00
Enable ruff's unused variable checking everywhere in pytorch (#136965)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/136965
Approved by: https://github.com/cyyever, https://github.com/albanD
This commit is contained in:
parent 2293fe1024
commit f1cbf4b1b5
24 changed files with 23 additions and 47 deletions
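The fixes below follow a small number of recurring F841 patterns: prefix a deliberately kept binding with an underscore, drop the binding but keep the expression, and drop an unused `except ... as e` name. A minimal standalone sketch of the three patterns (all names here are illustrative, not taken from the PR):

# A minimal sketch of the three recurring fix patterns in this commit.
# All names below are illustrative, not taken from the PR.
import logging

logging.basicConfig()
log = logging.getLogger(__name__)


def flagged():
    total = sum(range(10))  # F841: `total` is assigned but never read
    try:
        int("not a number")
    except ValueError as e:  # F841: `e` is bound but never read
        log.exception("parse failed")


def fixed():
    _total = sum(range(10))  # pattern 1: underscore-prefix a value kept on purpose
    sum(range(10))  # pattern 2: evaluate without binding (used where only side effects matter)
    try:
        int("not a number")
    except ValueError:  # pattern 3: drop the name; log.exception() sees the active exception
        log.exception("parse failed")


flagged()
fixed()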
@@ -221,7 +221,7 @@ def run_single_backend_sdpa(
 ) -> ExperimentResults:
     backend_context = get_backend_context(backend)
     with backend_context:
-        device = torch.device("cuda")
+        _device = torch.device("cuda")
         eager_sdpa = generate_eager_sdpa(
             config.attn_type, config.shape, config.dtype, block_mask, score_mod
         )
@@ -372,8 +372,6 @@ def run_single_experiment(
         requires_grad=config.calculate_bwd_time,
         nested_tensors=config.attn_type == "document_mask",
     )
-    is_decoding = q_seq_len == 1
-
     score_mod = generate_score_mod(config.attn_type, config.shape)
     block_mask, mask_kwargs = generate_block_mask(config.attn_type, config.shape)
     kernel_options = get_kernel_options(config.attn_type, config.shape)
@@ -64,7 +64,6 @@ ignore = [
     "E741",
     "EXE001",
     "F405",
-    "F841",
     # these ignores are from flake8-logging-format; please fix!
     "G101",
     # these ignores are from ruff NPY; please fix!
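With "F841" deleted from this ignore list, ruff now enforces the rule repository-wide. Where a binding is genuinely wanted, two escape hatches remain; a hypothetical example (assuming ruff's default dummy-variable regex, which exempts underscore-prefixed names):

# Hypothetical example of the two escape hatches once F841 is enforced.
def handler(payload: dict) -> None:
    _raw = repr(payload)  # exempt: matches the default dummy-variable regex
    size = len(payload)  # noqa: F841 -- explicit per-line suppression


handler({"a": 1})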
@@ -2696,7 +2696,7 @@ class CPythonContextManagerTestCase(torch._dynamo.test_case.TestCase):

         @torch.compile(backend="eager", fullgraph=True)
         def fn(t):
-            y = t.sum()
+            _y = t.sum()
             with self.assertRaises(ZeroDivisionError):
                 with woohoo() as x:
                     self.assertEqual(state, [1])
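In these compiled tests the rename keeps the statement in place rather than deleting it, presumably so the `t.sum()` call is still traced and the graph under test is unchanged; only the unused name gains an underscore. A reduced sketch of the pattern (assumed rationale; not the PR's test):

# Reduced sketch of the rename pattern used in the compiled tests above.
import torch


@torch.compile(backend="eager", fullgraph=True)
def fn(t):
    _y = t.sum()  # statement kept so the op is still traced; name marked unused
    return t + 1


print(fn(torch.ones(3)))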
@@ -2718,7 +2718,7 @@ class CPythonContextManagerTestCase(torch._dynamo.test_case.TestCase):
         @torch.compile(backend="eager", fullgraph=True)
         def fn(t):
             nonlocal frames
-            y = t.sum()
+            _y = t.sum()
             try:
                 with f():
                     1 / 0
@@ -2745,7 +2745,7 @@ class CPythonContextManagerTestCase(torch._dynamo.test_case.TestCase):
         @torch.compile(backend="eager", fullgraph=True)
         def fn(t):
             nonlocal frames
-            y = t.sum()
+            _y = t.sum()
             try:
                 with f():
                     raise RuntimeErrorSubclass(42)
@@ -2777,7 +2777,7 @@ class CPythonContextManagerTestCase(torch._dynamo.test_case.TestCase):
         @torch.compile(backend="eager", fullgraph=True)
         def fn(t):
             nonlocal frames
-            y = t.sum()
+            _y = t.sum()
             try:
                 with f():
                     raise stop_exc
@@ -612,7 +612,7 @@ class GraphModule(torch.nn.Module):

         r1 = run(ones, train=False)
         r1.sum().backward()
-        g1 = weight.grad.clone()
+        weight.grad.clone()

     def test_dynamic(self):
         @mark_compile_region
@@ -376,7 +376,6 @@ class TestUnbackedSymints(InductorTestCase):
         sub = CustomSliceSubclass(t, slice_bounds=torch.tensor([2, 5], device=t.device))
         start = 2
         length = 3
-        ragged_idx = 1
         example_inputs = (sub, start, length)

         actual = torch.compile(fn, dynamic=dynamic, fullgraph=True)(*example_inputs)
@@ -952,7 +952,7 @@ class TestTorchDeviceType(TestCase):
     def test_dtypetensor_warnings(self, device):
         msg = 'The torch.cuda.*DtypeTensor constructors are no longer recommended'
         with self.assertWarnsOnceRegex(UserWarning, msg):
-            t = torch.cuda.FloatTensor([0])
+            torch.cuda.FloatTensor([0])

         with self.assertWarnsOnceRegex(UserWarning, msg):
             torch.cuda.DoubleTensor([0])
@@ -326,7 +326,7 @@ class TestTransformers(NNTestCase):
             e = None
             try:
                 encoder(test, src_key_padding_mask=pad_mask.to(torch.uint8))
-            except AssertionError as e:
+            except AssertionError:
                 continue
             self.assertFalse(e, "Failed to catch unsupported uint8 type exception")  # noqa: F821
@@ -84,7 +84,7 @@ class DocstringLinter(_linter.FileLinter):

         def next_token(start: int, token_type: int, error: str) -> int:  # type: ignore[return]
             for i in range(start, len(tokens)):
-                if (t := tokens[i]).type == token_type:
+                if tokens[i].type == token_type:
                     return i
             _linter.ParseError.check(False, tokens[-1], error)
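The walrus target `t` was written but never read again, so the plain subscript is behavior-identical. A standalone check of that equivalence:

# Standalone check that the two forms select the same indices.
tokens = ["a", "bb", "ccc"]

with_walrus = [i for i in range(len(tokens)) if (t := tokens[i]) == "bb"]
without = [i for i in range(len(tokens)) if tokens[i] == "bb"]

assert with_walrus == without == [1]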
@@ -137,12 +137,12 @@ class DocstringLinter(_linter.FileLinter):
                 print()
                 top = sorted(v, reverse=True)[:REPORT_TOP_RESULTS]
                 if len(top) == 1:
                     s = "s"
                     t = ""
-                else:
-                    s = ""
-                    t = f"{len(top)} "
-                print(f"Top {t}undocumented {k}s:")
+                else:
+                    s = "es" if k.endswith("s") else "s"
+                    t = ""
+                print(f"Top {t}undocumented {k}{s}:")
                 for lines, path, tname in top:
                     print(f"    {lines} lines: {path}:{tname}")
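The rewritten branch both retires the unused count prefix and fixes the pluralization; checking the new expression on the two kinds the linter reports matches the updated test expectations in the next hunk:

# Checking the new pluralization expression on the two reported kinds.
for k in ("class", "function"):
    s = "es" if k.endswith("s") else "s"
    print(f"Top undocumented {k}{s}:")
# Top undocumented classes:
# Top undocumented functions: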
@@ -46,11 +46,11 @@ tools/test/docstring_linter_testdata/python_code.py.txt:84: No docstring found f
 85 |    def short1(self):
 86 |        pass

-Top 3 undocumented classs:
+Top undocumented classes:
     12 lines: tools/test/docstring_linter_testdata/python_code.py.txt:NotDocstring
     6 lines: tools/test/docstring_linter_testdata/python_code.py.txt:LongWithShortDocstring
     6 lines: tools/test/docstring_linter_testdata/python_code.py.txt:Long

-Top 2 undocumented functions:
+Top undocumented functions:
     12 lines: tools/test/docstring_linter_testdata/python_code.py.txt:needs_docs
     11 lines: tools/test/docstring_linter_testdata/python_code.py.txt:not_short
@@ -3229,7 +3229,6 @@ class InliningInstructionTranslator(InstructionTranslatorBase):
     def inline_call_(self):
         parent = self.parent
         code = self.f_code
-        func = self.funcvar

         strict_ctx: Any = contextlib.nullcontext()
         if parent.strict_checks_fn:
@@ -90,9 +90,7 @@ class CUDATemplate(KernelTemplate):
             call_args,
             expected_args,
         )
-        extra_args = V.graph.sizevars.size_hints(
-            map(sympy.expand, call_args[len(expected_args) :])
-        )
+        V.graph.sizevars.size_hints(map(sympy.expand, call_args[len(expected_args) :]))
         size_args = V.graph.sizevars.size_hints(kernel.get_layout_args())

         kernel_hash_name = f"cuda_{self.name}_{next(self.index_counter)}"
@@ -1270,7 +1270,8 @@ class PythonWrapperCodegen(CodeGen):
         # conservatively use the sum of all allocated buffer sizes
         # in potentially nested scopes as the total allocated size
-        total_allocated_buffer_size = sum(
+        # FIXME(rec): not used
+        _total_allocated_buffer_size = sum(
             s.total_allocated_buffer_size for s in past_planning_states
         )
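This hunk shows a different pattern: the value looks intended for future use, so the commit keeps the computation under an underscore name with a FIXME instead of deleting it. A standalone sketch with made-up stand-in values:

# Made-up stand-ins for the planning states summed in the real code.
past_planning_states = [{"total_allocated_buffer_size": n} for n in (64, 128)]

# FIXME: not used -- retained so the intended accounting is easy to restore
_total_allocated_buffer_size = sum(
    s["total_allocated_buffer_size"] for s in past_planning_states
)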
@@ -189,7 +189,7 @@ class SubprocPool:
         data = b""
         try:
             job_id, data = _recv_msg(self.read_pipe)
-        except Exception as e:
+        except Exception:
             # Something went wrong during the read. There's no way we have a
             # valid job_id.
             log.exception("failure in subproc_pool._recv_msg")
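Dropping `as e` is safe here, and in the later except-clause hunks that repeat this change, because `Logger.exception()` called inside an `except` block pulls the active exception from `sys.exc_info()` and appends the traceback itself. A self-contained demonstration:

# Why the `as e` binding is unnecessary when only logging the exception.
import logging

logging.basicConfig()
log = logging.getLogger("demo")

try:
    raise OSError("broken pipe")
except Exception:
    # No `as e` binding needed: exception() pulls the active exception
    # from sys.exc_info() and appends the traceback automatically.
    log.exception("failure while reading")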
@@ -438,7 +438,6 @@ def scaled_mm_options_device_tma(  # type: ignore[no-untyped-def]
             "Expect scale_a and scale_b to be either both scalars (including single-element tensors) "
             f"or 1-dimensional tensors with the same size. Got scale_a: {len(size_a)} and scale_b: {len(size_b)}."
         )
-    NUM_SMS = torch.cuda.get_device_properties("cuda").multi_processor_count
     return dict(
         GROUP_M=8,
         EVEN_K=even_k_symbolic,
@@ -695,8 +695,6 @@ class TritonTemplateKernel(TritonKernel):
         lengths = [V.graph.sizevars.simplify(s) for s in input_node.get_size()]
         assert len(indices) == len(lengths)

-        stride = self.named_input_nodes[input_name].get_stride()
-
         index_symbols = [sympy.Symbol(x, integer=True) for x in indices]
         assert len(indices) == len(lengths)
@@ -722,7 +720,6 @@ class TritonTemplateKernel(TritonKernel):
             sympy.Integer(1), sympy_product(lengths)
         )
         xindex_range_root.set_name("xindex")
-        xindex_expr = xindex_range_root.expr

         # Note - ["None" override_mask]
         # MM Templates work by taking out of bounds index values and wrapping them around to 0
@@ -1123,10 +1123,6 @@ def convert(
     # for dynamic quant ops or weight only quant ops
     _run_weight_observers(model, backend_config)

-    graph_inputs: List[str] = [
-        node.name for node in model.graph.nodes if node.op == "placeholder"
-    ]
-
     # additional state to override inputs to be quantized, if specified
     # by the user
     placeholder_node_seen_cnt = 0
@@ -41,7 +41,7 @@ def _retry(max_retries: int, sleep_time: float) -> Callable:
             for i in range(max_retries):
                 try:
                     return func(*args, **kwargs)
-                except Exception as e:
+                except Exception:
                     logger.exception("Error running %s. Retrying...", func.__name__)
                     if i < max_retries - 1:
                         time.sleep(sleep_time)
@@ -277,7 +277,7 @@ class FileTimerServer:
         # thread, which will not block the process to stop.
         try:
             fd = open(self._file_path)
-        except Exception as e:
+        except Exception:
             logger.exception("Could not open the FileTimerServer pipe")
             raise
@@ -319,7 +319,6 @@ class _AllGatherRotater(_RingRotater):
         )

     def next_buffer(self) -> torch.Tensor:
-        size = dist.get_world_size(self._pg)
         rank = dist.get_rank(self._pg)
         idx = rank - self._idx
@@ -1548,10 +1548,6 @@ def _export_to_aten_ir_make_fx(
         strict=True,
         stack_weights=True,
     ), _ignore_backend_decomps(), _compiling_state_context():  # type: ignore[attr-defined]
-        param_len = len(dict(mod.named_parameters(remove_duplicate=False)))
-        buffer_len = len(dict(mod.named_buffers(remove_duplicate=False)))
-        params_len = param_len + buffer_len
-
         gm, graph_signature = transform(_make_fx_helper)(
             mod,
             fake_args,
@@ -2900,7 +2900,7 @@ class DimConstraints:
                 assert op == "==", t
                 try:
                     results[left]["eq"] = sympy.sympify(right)
-                except TypeError as e:  # rhs source is not linked to Dim name
+                except TypeError:  # rhs source is not linked to Dim name
                     pass

         # order forced specializations based on name
@@ -917,8 +917,6 @@ def chunk_default(func, *args, **kwargs):

     if operating_on_batch:
         chunks = new_kwargs["chunks"]
-        dim0_size = inp._size[0]
-        chunk_size = math.ceil(dim0_size / chunks)

         # get _offsets of the chunks
         lengths = inp._offsets.diff()
@@ -8752,7 +8752,6 @@ def sample_inputs_scaled_mm(op_info, device, dtype, requires_grad, **kwargs):
 def sample_inputs_scaled_dot_product_attention(op_info, device, dtype, requires_grad, **kwargs):
     make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
     batch, seq_q, seq_kv, num_heads, head_dim = 4, 3, 6, 4, 8
-    num_heads_q_gqa, num_heads_kv_gqa = 32, 8

     dim_3_q_shape = (batch, seq_q, head_dim)
     dim_3_kv_shape = (batch, seq_kv, head_dim)
@@ -1452,7 +1452,7 @@ class RpcTest(RpcAgentTestFixture, RpcTestCommon):
         model = torch.nn.parallel.DistributedDataParallel(model)

         with self.assertRaisesRegex(RuntimeError, 'Current RPC agent is not set! Did you initialize the RPC framework'):
-            params = [RRef(param) for param in model.parameters()]
+            [RRef(param) for param in model.parameters()]

     def test_world_size_one(self):
         self._world_size_one(
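The comprehension stays, unbound, because `assertRaisesRegex` only needs the expression to raise. A self-contained analogue:

# A side-effect-only expression inside an assertRaises context.
import unittest


class Demo(unittest.TestCase):
    def test_raises(self):
        with self.assertRaisesRegex(ZeroDivisionError, "division"):
            [1 / d for d in (2, 1, 0)]  # evaluated only for its raising side effect


if __name__ == "__main__":
    unittest.main()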
@@ -447,7 +447,6 @@ def reduction_reference(op, sample):
     assert op._extra_op_data.dim_args is not None
     single_dim_argname, dimlist_argname = op._extra_op_data.get_dim_argnames()
     assert single_dim_argname is not None
-    supports_dimlist = dimlist_argname is not None

     dim = sample.kwargs.get(
         dimlist_argname, sample.kwargs.get(single_dim_argname, None)
@@ -814,7 +813,6 @@ def sample_inputs_unary_dimwise(

 def batchwise_reference_chunk(op, sample):
     # reference for chunk() over dim=0
-    kwargs = sample.kwargs
     B = sample.input.size(0)
     num_chunks = sample.kwargs["chunks"]
     chunk_size = math.ceil(B / num_chunks)