mirror of
https://github.com/saymrwulf/pytorch.git
synced 2026-05-14 20:57:59 +00:00
Flake8 fixes (#48453)
Summary: Quiet errors from flake8. Only a couple of code changes for deprecated Python syntax from before 2.4. The rest is just adding noqa markers. Pull Request resolved: https://github.com/pytorch/pytorch/pull/48453 Reviewed By: mruberry Differential Revision: D25181871 Pulled By: ngimel fbshipit-source-id: f8d7298aae783b1bce2a46827b088fc390970641
This commit is contained in:
parent
55e225a2dc
commit
db1b0b06c4
6 changed files with 33 additions and 33 deletions
|
|
@@ -24,7 +24,7 @@ def main():
|
|||
args = parser.parse_args()
|
||||
|
||||
if len(args.file) != 2:
|
||||
raise "Must specify 2 files to diff"
|
||||
raise RuntimeError("Must specify 2 files to diff")
|
||||
|
||||
ja = load(args.file[0])
|
||||
jb = load(args.file[1])
|
||||
|
|
|
|||
|
|
@@ -124,7 +124,7 @@ def convert_tests(testcases, sets=1):
|
|||
input = gen_input(t)
|
||||
if (module_name != "FunctionalModule"):
|
||||
nn_module[module_name] |= 1
|
||||
except: # noqa: E722
|
||||
except: # noqa: E722,B001
|
||||
traceback.print_exc()
|
||||
if (module_name != "FunctionalModule"):
|
||||
nn_module[module_name] |= 2
|
||||
|
|
|
|||
|
|
@@ -3932,33 +3932,33 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
|
|||
|
||||
def test_comparison_ops_check_for_scalar_overflow(self):
|
||||
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) < (1 << 20)
|
||||
(1 << 20) < torch.tensor([1 << 5], dtype=torch.uint8)
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) <= (1 << 20)
|
||||
(1 << 20) <= torch.tensor([1 << 5], dtype=torch.uint8)
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) > (1 << 20)
|
||||
(1 << 20) > torch.tensor([1 << 5], dtype=torch.uint8)
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) >= (1 << 20)
|
||||
(1 << 20) >= torch.tensor([1 << 5], dtype=torch.uint8)
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) == (1 << 20)
|
||||
(1 << 20) == torch.tensor([1 << 5], dtype=torch.uint8)
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) != (1 << 20)
|
||||
(1 << 20) != torch.tensor([1 << 5], dtype=torch.uint8)
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) < (1 << 20) # noqa: B015
|
||||
(1 << 20) < torch.tensor([1 << 5], dtype=torch.uint8) # noqa: B015
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) <= (1 << 20) # noqa: B015
|
||||
(1 << 20) <= torch.tensor([1 << 5], dtype=torch.uint8) # noqa: B015
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) > (1 << 20) # noqa: B015
|
||||
(1 << 20) > torch.tensor([1 << 5], dtype=torch.uint8) # noqa: B015
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) >= (1 << 20) # noqa: B015
|
||||
(1 << 20) >= torch.tensor([1 << 5], dtype=torch.uint8) # noqa: B015
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) == (1 << 20) # noqa: B015
|
||||
(1 << 20) == torch.tensor([1 << 5], dtype=torch.uint8) # noqa: B015
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) != (1 << 20) # noqa: B015
|
||||
(1 << 20) != torch.tensor([1 << 5], dtype=torch.uint8) # noqa: B015
|
||||
|
||||
def test_comparison_ops_check_for_zerodim_tensor_overflow(self):
|
||||
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) < torch.tensor(1 << 20, dtype=torch.int32)
|
||||
torch.tensor(1 << 40, dtype=torch.int64) < torch.tensor([1 << 30], dtype=torch.int32)
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) <= torch.tensor(1 << 20, dtype=torch.int32)
|
||||
torch.tensor(1 << 40, dtype=torch.int64) <= torch.tensor([1 << 30], dtype=torch.int32)
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) > torch.tensor(1 << 20, dtype=torch.int32)
|
||||
torch.tensor(1 << 40, dtype=torch.int64) > torch.tensor([1 << 30], dtype=torch.int32)
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) >= torch.tensor(1 << 20, dtype=torch.int32)
|
||||
torch.tensor(1 << 40, dtype=torch.int64) >= torch.tensor([1 << 30], dtype=torch.int32)
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) == torch.tensor(1 << 20, dtype=torch.int32)
|
||||
torch.tensor(1 << 40, dtype=torch.int64) == torch.tensor([1 << 30], dtype=torch.int32)
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) != torch.tensor(1 << 20, dtype=torch.int32)
|
||||
torch.tensor(1 << 40, dtype=torch.int64) != torch.tensor([1 << 30], dtype=torch.int32)
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) < torch.tensor(1 << 20, dtype=torch.int32) # noqa: B015
|
||||
torch.tensor(1 << 40, dtype=torch.int64) < torch.tensor([1 << 30], dtype=torch.int32) # noqa: B015
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) <= torch.tensor(1 << 20, dtype=torch.int32) # noqa: B015
|
||||
torch.tensor(1 << 40, dtype=torch.int64) <= torch.tensor([1 << 30], dtype=torch.int32) # noqa: B015
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) > torch.tensor(1 << 20, dtype=torch.int32) # noqa: B015
|
||||
torch.tensor(1 << 40, dtype=torch.int64) > torch.tensor([1 << 30], dtype=torch.int32) # noqa: B015
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) >= torch.tensor(1 << 20, dtype=torch.int32) # noqa: B015
|
||||
torch.tensor(1 << 40, dtype=torch.int64) >= torch.tensor([1 << 30], dtype=torch.int32) # noqa: B015
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) == torch.tensor(1 << 20, dtype=torch.int32) # noqa: B015
|
||||
torch.tensor(1 << 40, dtype=torch.int64) == torch.tensor([1 << 30], dtype=torch.int32) # noqa: B015
|
||||
torch.tensor([1 << 5], dtype=torch.uint8) != torch.tensor(1 << 20, dtype=torch.int32) # noqa: B015
|
||||
torch.tensor(1 << 40, dtype=torch.int64) != torch.tensor([1 << 30], dtype=torch.int32) # noqa: B015
|
||||
|
||||
def test_bitwise_ops(self):
|
||||
x = torch.randn(5, 5).gt(0)
|
||||
|
|
|
|||
|
|
@@ -8,4 +8,4 @@ class BarModule(torch.nn.Module):
|
|||
pass
|
||||
|
||||
ml: torch.nn.ModuleList = torch.nn.ModuleList([FooModule(), BarModule()])
|
||||
ml[0].children() == []
|
||||
ml[0].children() == [] # noqa: B015
|
||||
|
|
|
|||
|
|
@@ -4,10 +4,10 @@ import torch
|
|||
t = torch.tensor([[3.0, 1.5], [2.0, 1.5]])
|
||||
|
||||
t_sort = t.sort()
|
||||
t_sort[0][0, 0] == 1.5
|
||||
t_sort.indices[0, 0] == 1
|
||||
t_sort.values[0, 0] == 1.5
|
||||
t_sort[0][0, 0] == 1.5 # noqa: B015
|
||||
t_sort.indices[0, 0] == 1 # noqa: B015
|
||||
t_sort.values[0, 0] == 1.5 # noqa: B015
|
||||
|
||||
t_qr = torch.qr(t)
|
||||
t_qr[0].shape == [2, 2]
|
||||
t_qr.Q.shape == [2, 2]
|
||||
t_qr[0].shape == [2, 2] # noqa: B015
|
||||
t_qr.Q.shape == [2, 2] # noqa: B015
|
||||
|
|
|
|||
|
|
@@ -33,7 +33,7 @@ convolution_notes = \
|
|||
|
||||
In other words, for an input of size :math:`(N, C_{in}, L_{in})`,
|
||||
a depthwise convolution with a depthwise multiplier `K` can be performed with the arguments
|
||||
:math:`(C_\\text{in}=C_\\text{in}, C_\\text{out}=C_\\text{in} \\times \\text{K}, ..., \\text{groups}=C_\\text{in})`."""} # noqa: W605
|
||||
:math:`(C_\\text{in}=C_\\text{in}, C_\\text{out}=C_\\text{in} \\times \\text{K}, ..., \\text{groups}=C_\\text{in})`."""} # noqa: W605,B950
|
||||
|
||||
|
||||
|
||||
|
|
|
|||
Loading…
Reference in a new issue