Upgrade lint. (#39483)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/39483

I fixed all of the new errors that occurred because of the upgrade.

Signed-off-by: Edward Z. Yang <ezyang@fb.com>
Test Plan: Imported from OSS
Differential Revision: D21884575
Pulled By: ezyang
fbshipit-source-id: 45c8e1f1ecb410c8d7c46dd3922ad70e982a0685
commit da2004e132 (parent fe684679b0)

12 changed files with 54 additions and 33 deletions
.flake8 (5 lines changed)
@@ -5,9 +5,12 @@ max-line-length = 120
 # E501 is not flexible enough, we're using B950 instead
 ignore =
     E203,E305,E402,E501,E721,E741,F403,F405,F821,F841,F999,W503,W504,C408,E302,W291,E303,
     # shebang has extra meaning in fbcode lints, so I think it's not worth trying
     # to line this up with executable bit
     EXE001,
+    # these ignores are from flake8-bugbear; please fix!
+    B007,B008,
+    # these ignores are from flake8-comprehensions; please fix!
+    C400,C401,C402,C403,C404,C405,C407,C411,C413,C414,C415
 per-file-ignores = __init__.py: F401
-exclude = docs/src,venv,third_party,caffe2,scripts,docs/caffe2,torch/lib/include,torch/lib/tmp_install,build,torch/include,*.pyi,.git
+exclude = docs/src,venv,third_party,caffe2,scripts,docs/caffe2,torch/lib/include,torch/lib/tmp_install,build,torch/include,*.pyi,.git,build,build_test_custom_build,build_code_analyzer
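For context: B007 ("loop control variable not used within the loop body") comes from flake8-bugbear, and the C4xx codes come from flake8-comprehensions; both sets are newly reported by the upgraded toolchain and parked in the ignore list for later fixing. A minimal illustration of what these checks flag (made-up snippet, not from the repository):

# B007: the loop variable `i` is never used inside the loop body;
# flake8-bugbear suggests renaming it to `_i` to mark it as unused.
for i in range(3):
    print("hello")

# C400: unnecessary generator passed to list(); rewrite as a comprehension.
squares = list(x * x for x in range(10))   # flagged by C400
squares = [x * x for x in range(10)]       # preferred form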
.github/workflows/lint.yml (vendored, 2 lines changed)
@@ -66,7 +66,7 @@ jobs:
       - name: Run flake8
         run: |
           set -eux
-          pip install flake8==3.7.9 flake8-mypy flake8-bugbear flake8-comprehensions flake8-executable flake8-pyi mccabe pycodestyle==2.5.0 pyflakes==2.1.1
+          pip install flake8==3.8.2 flake8-mypy flake8-bugbear flake8-comprehensions flake8-executable flake8-pyi==20.5.0 mccabe pycodestyle==2.6.0 pyflakes==2.2.0
           flake8 --version
           flake8 --exit-zero > ${GITHUB_WORKSPACE}/flake8-output.txt
           cat ${GITHUB_WORKSPACE}/flake8-output.txt
@@ -10,12 +10,12 @@ import operator_benchmark as op_bench
 # 2D pooling will have input matrix of rank 3 or 4
 qpool2d_long_configs = op_bench.config_list(
     attrs=(
-        #  C    H    W   k       s       p
-        (  1,   3,   3, (3, 3), (1, 1), (0, 0)),  # dummy        # noqa
-        (  3,  64,  64, (3, 3), (2, 2), (1, 1)),  # dummy        # noqa
-        # VGG16 pools with original input shape: (-1, 3, 224, 224)
-        ( 64, 224, 224, (2, 2), (2, 2), (0, 0)),  # MaxPool2d-4  # noqa
-        (256,  56,  56, (2, 2), (2, 2), (0, 0)),  # MaxPool2d-16 # noqa
+        #  C    H    W   k       s       p
+        (  1,   3,   3, (3, 3), (1, 1), (0, 0)),  # dummy        # noqa
+        (  3,  64,  64, (3, 3), (2, 2), (1, 1)),  # dummy        # noqa
+        # VGG16 pools with original input shape: (-1, 3, 224, 224)
+        ( 64, 224, 224, (2, 2), (2, 2), (0, 0)),  # MaxPool2d-4  # noqa
+        (256,  56,  56, (2, 2), (2, 2), (0, 0)),  # MaxPool2d-16 # noqa
     ),
     attr_names=('C', 'H', 'W',  # Input layout
                 'k', 's', 'p'),  # Pooling parameters
@@ -485,6 +485,7 @@ class TestClassType(JitTestCase):

     def test_interface(self):
+        global Foo, Bar, OneTwo, OneTwoThree, OneTwoWrong, NotMember, NotMember2

         @torch.jit.script
         class Foo(object):
             def __init__(self):

@@ -647,6 +648,7 @@ class TestClassType(JitTestCase):

     def test_overloaded_fn(self):
+        global Foo, MyClass  # see [local resolution in python]

         @torch.jit.script
         class Foo(object):
             def __init__(self, x):

@@ -802,6 +804,7 @@ class TestClassType(JitTestCase):

     def test_cast_overloads(self):
+        global Foo  # see [local resolution in python]

         @torch.jit.script
         class Foo(object):
             def __init__(self, val):
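The added `global` declarations follow the pattern the "[local resolution in python]" note refers to: a class decorated with @torch.jit.script inside a test method is promoted to module scope so TorchScript can resolve it by name. A hedged sketch of the pattern (the class and attribute names here are invented):

import torch

def build_counter():
    global Counter  # promote to module scope; see [local resolution in python]

    @torch.jit.script
    class Counter(object):
        def __init__(self, start: int):
            self.value = start

        def bump(self) -> int:
            self.value += 1
            return self.value

    return Counter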
@@ -47,11 +47,11 @@ class TestSuite:
         print(f"    avg_time: {self.total_time/test_count:.2f} seconds")
         if test_count > 2:
             print(f"    mean_time: {sorted_tests[test_count>>1].time:.2f} seconds")
-            print(f"    Three longest tests:")
+            print("    Three longest tests:")
             for idx in [-1, -2, -3]:
                 print(f"        {sorted_tests[idx].name} time: {sorted_tests[idx].time:.2f} seconds")
         elif test_count > 0:
-            print(f"    Longest test:")
+            print("    Longest test:")
             print(f"        {sorted_tests[-1].name} time: {sorted_tests[-1].time:.2f} seconds")
         print("")
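The two one-line changes above fix F541 ("f-string without any placeholders"), a check added in pyflakes 2.2: an f-prefix on a literal with no {} fields is almost always a mistake. For example:

name = "test_foo"
print(f"    Three longest tests:")       # F541: no placeholders, the f is useless
print("    Three longest tests:")        # plain literal, no warning
print(f"    {name} time: 1.23 seconds")  # real placeholder, f-string justified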
@@ -1401,7 +1401,8 @@ class TestQuantizedOps(TestCase):
                                                  mode=mode, align_corners=align_corners)
                 # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                 self.assertEqualIgnoreType(X_ref, qX_hat.int_repr(), atol=1.0, rtol=0,
-                                           msg="{} results are off".format(name, qX_hat.int_repr(), X_ref))
+                                           msg="{} results are off: qX_hat={} X_ref={}"
+                                           .format(name, qX_hat.int_repr(), X_ref))
                 self.assertEqual(scale, qX_hat.q_scale(),
                                  msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
                 self.assertEqual(zero_point, qX_hat.q_zero_point(),

@@ -1455,7 +1456,8 @@ class TestQuantizedOps(TestCase):
                                                  mode=mode, align_corners=align_corners)
                 # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                 self.assertEqualIgnoreType(X_ref, qX_hat.int_repr(), atol=1.0, rtol=0,
-                                           msg="{} results are off".format(name, qX_hat.int_repr(), X_ref))
+                                           msg="{} results are off: qX_hat={}, X_ref={}"
+                                           .format(name, qX_hat.int_repr(), X_ref))
                 self.assertEqual(scale, qX_hat.q_scale(),
                                  msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
                 self.assertEqual(zero_point, qX_hat.q_zero_point(),
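Both hunks fix the same latent bug, surfaced by the upgraded pyflakes as F523 ("format(...) has unused positional arguments"): str.format silently ignores extra arguments, so the old assertion message never showed the tensors it meant to print. A minimal reproduction:

name, qx, x_ref = "nearest", 7, 9
msg = "{} results are off".format(name, qx, x_ref)
# -> "nearest results are off"; qx and x_ref are silently dropped (F523)
msg = "{} results are off: qX_hat={} X_ref={}".format(name, qx, x_ref)
# -> "nearest results are off: qX_hat=7 X_ref=9"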
@@ -6072,6 +6072,7 @@ class TestAutogradDeviceType(TestCase):
     def test_simple_reentrant_cross_device(self, device):
         class ReentrantFunc(Function):
             _cpu_mode = True
+
             @staticmethod
             def forward(ctx, x):
                 return x * (x + 2)
@@ -3796,6 +3796,7 @@ class TestScript(JitTestCase):
     def test_bailout_loop_carried_deps_name_clash(self):
         with enable_profiling_mode_for_profiling_tests():
             NUM_ITERATIONS = 10
+
             @torch.jit.script
             def fct_loop(z, size):
                 # type: (int, int) -> Tuple[Tensor, List[int]]

@@ -3817,6 +3818,7 @@ class TestScript(JitTestCase):
     def test_bailout_loop_counter_transition(self):
         with enable_profiling_mode_for_profiling_tests():
             NUM_ITERATIONS = 10
+
             @torch.jit.script
             def fct_loop(z, size):
                 # type: (int, int) -> Tuple[Tensor, List[int]]
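The blank lines added in these hunks presumably address pycodestyle's blank-line checks (E306, "expected 1 blank line before a nested definition"), which the pycodestyle 2.6 bump started reporting at these sites. The shape of the fix, in an illustrative stand-in (not the real test):

# Without a blank line between the assignment and the nested definition,
# pycodestyle reports E306 on the `def` line.
def test_sketch():
    NUM_ITERATIONS = 10

    def fct_loop(z: int) -> int:   # E306 fires here if the blank line above is missing
        return z * NUM_ITERATIONS

    return fct_loop(2)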
@@ -7001,9 +7003,10 @@ a")
         self.checkScript(test_cast_float, (-1.,))

         with self.assertRaisesRegex(RuntimeError, r"Could not cast value of type Tuple\[int, int\] to bool"):  # noqa: W605
+
             @torch.jit.script
             def test_bad_conditional(x):
-                if (1, 2):
+                if (1, 2):  # noqa F634
                     return
                 else:
                     return 0
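F634 ("if test is a tuple, which is always True") is the check being silenced above: a non-empty tuple literal is always truthy, so such a condition is almost always an operator-precedence or stray-comma mistake. This test relies on the bogus condition on purpose, hence the noqa. Illustration:

if (1, 2):             # F634: a non-empty tuple is always truthy
    print("this branch is always taken")

x, y = 1, 2
if (x, y) == (1, 2):   # an explicit comparison is what is usually intended
    print("compare the tuple instead")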
@@ -7669,6 +7672,7 @@ a")

     def test_error_stacktrace_interface(self):
+        global IFace

         @torch.jit.script
         def baz(c, b):
             return c + b
@@ -8318,6 +8322,7 @@ a")
             return 4
         self.assertEqual(foo(4), 7)
         self.assertEqual(foo(None), 4)
+
         @torch.jit.script
         def foo2(a, b):
             # type: (Optional[int], Optional[int]) -> int
@@ -16082,7 +16087,7 @@ a")
         def identity(x1):  # noqa: F811
             # type: (str) -> str
             pass

         @torch.jit._overload  # noqa: F811
         def identity(x1):  # noqa: F811
             # type: (float) -> float
@@ -16,7 +16,7 @@ class TestScriptPy3(JitTestCase):
         def func(x):
             hello, test = "Hello", "test"
             print(f"{hello + ' ' + test}, I'm a {test}")  # noqa E999
-            print(f"format blank")
+            print(f"format blank")  # noqa F541
             hi = 'hi'
             print(f"stuff before {hi}")
             print(f"{hi} stuff after")
@@ -39,6 +39,7 @@ class TestScriptPy3(JitTestCase):
     @unittest.skipIf(sys.version_info[:2] < (3, 7), "`dataclasses` module not present on < 3.7")
     def test_dataclass_error(self):
         from dataclasses import dataclass
+
         @dataclass
         class NormalizationInfo(object):
             mean: float = 0.0
@@ -256,6 +257,7 @@ class TestScriptPy3(JitTestCase):
                 return str(type(args[0]))

         the_class = MyPythonClass()
+
         @torch.jit.script
         def fn(x):
             return the_class(x)
@@ -424,6 +426,7 @@ class TestScriptPy3(JitTestCase):

     def test_export_opnames_interface(self):
+        global OneTwoModule

         @torch.jit.interface
         class OneTwoModule(nn.Module):
             def one(self, x, y):
@@ -363,28 +363,32 @@ class TestTensorBoardSummary(BaseTestCase):
                                             self))

     def test_image_with_one_channel(self):
-        self.assertTrue(compare_image_proto(summary.image('dummy',
-                                                          tensor_N(shape=(1, 8, 8)),
-                                                          dataformats='CHW'),
-                                                          self))  # noqa E127
+        self.assertTrue(compare_image_proto(
+            summary.image('dummy',
+                          tensor_N(shape=(1, 8, 8)),
+                          dataformats='CHW'),
+            self))  # noqa E127

     def test_image_with_one_channel_batched(self):
-        self.assertTrue(compare_image_proto(summary.image('dummy',
-                                                          tensor_N(shape=(2, 1, 8, 8)),
-                                                          dataformats='NCHW'),
-                                                          self))  # noqa E127
+        self.assertTrue(compare_image_proto(
+            summary.image('dummy',
+                          tensor_N(shape=(2, 1, 8, 8)),
+                          dataformats='NCHW'),
+            self))  # noqa E127

     def test_image_with_3_channel_batched(self):
-        self.assertTrue(compare_image_proto(summary.image('dummy',
-                                                          tensor_N(shape=(2, 3, 8, 8)),
-                                                          dataformats='NCHW'),
-                                                          self))  # noqa E127
+        self.assertTrue(compare_image_proto(
+            summary.image('dummy',
+                          tensor_N(shape=(2, 3, 8, 8)),
+                          dataformats='NCHW'),
+            self))  # noqa E127

     def test_image_without_channel(self):
-        self.assertTrue(compare_image_proto(summary.image('dummy',
-                                                          tensor_N(shape=(8, 8)),
-                                                          dataformats='HW'),
-                                                          self))  # noqa E127
+        self.assertTrue(compare_image_proto(
+            summary.image('dummy',
+                          tensor_N(shape=(8, 8)),
+                          dataformats='HW'),
+            self))  # noqa E127

     def test_video(self):
         try:
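E127 flags a continuation line that is over-indented relative to the visual indent of its opening bracket. The rewrite above sidesteps the problem by breaking immediately after compare_image_proto( and hanging the whole argument list. A schematic before/after with stand-in names (check is hypothetical, standing in for compare_image_proto):

def check(image, case):
    return image is not None and case is not None

# Before: later arguments hang off the inner call, so the second line
# cannot align with any opening bracket (pycodestyle E127 territory).
ok = check("dummy",
                "case")   # over-indented continuation line

# After: break right after the outer call's parenthesis; every
# continuation uses one hanging indent and nothing misaligns.
ok = check(
    "dummy",
    "case")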
@@ -161,7 +161,7 @@ class _Formatter(object):
                 ret = '({{:.{}f}}{{}}{{:.{}f}}j)'.format(p, p).format(value.real, '+-'[value.imag < 0], abs(value.imag))
                 if not self.has_non_zero_decimal_val:
                     # complex tensor contains integer elements only
-                    ret = "({{:.0f}}.{{}}{{:.0f}}.j)".format(p, p).format(value.real, '+-'[value.imag < 0], abs(value.imag))
+                    ret = "({{:.0f}}.{{}}{{:.0f}}.j)".format(p, p).format(value.real, '+-'[value.imag < 0], abs(value.imag))  # noqa: F523
             else:
                 ret = '{}'.format(value)
             return (self.max_width - len(ret)) * ' ' + ret
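The noqa here is a suppression rather than a fix: _Formatter builds its template in two passes, and in the suppressed branch every brace is doubled, so the first .format(p, p) has no live placeholder and pyflakes reports its arguments as unused (F523), even though the sibling line uses the same two-pass idiom with live fields. The idiom, sketched:

p = 4
# First pass: the inner {} field consumes p; doubled braces survive as
# literal braces and become placeholders for the second pass.
template = '({{:.{}f}}j)'.format(p)   # -> '({:.4f}j)'
print(template.format(1.5))           # -> '(1.5000j)'

# In the suppressed line every brace is doubled, so .format(p, p) has no
# live field and its arguments really are unused, hence F523.
fixed = '({{:.0f}}j)'.format(p)       # p is ignored -> '({:.0f}j)'
print(fixed.format(1.5))              # -> '(2j)'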
@@ -31,5 +31,5 @@ def _get_device_index(device, optional=False):
             return torch.cuda.current_device()
         else:
             raise ValueError('Expected a cuda device with a specified index '
-                             'or an integer, but got: '.format(device))
+                             'or an integer, but got: {}'.format(device))
     return device_idx
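This last hunk fixes the user-visible variant of the same format bug: without a {} field, .format(device) is a no-op and the offending value never reaches the error message. For example:

device = "cuda"  # stand-in for whatever invalid argument was passed
msg_old = 'or an integer, but got: '.format(device)
# -> 'or an integer, but got: '  (the value is silently dropped)
msg_new = 'or an integer, but got: {}'.format(device)
# -> 'or an integer, but got: cuda'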