Reduce pytest blocklist (#96016)

`TestCase = object` or variations of it get switched to `TestCase = NoTest`.

unittest collects tests based on subclassing unittest.TestCase, so setting TestCase = object removes a class from unittest test collection.  pytest collects based on name (https://docs.pytest.org/en/7.1.x/reference/reference.html#confval-python_classes) but can be told to ignore a class (bottom of https://docs.pytest.org/en/7.1.x/example/pythoncollection.html#changing-naming-conventions)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/96016
Approved by: https://github.com/ZainRizvi, https://github.com/huydhn
This commit is contained in:
Catherine Lee 2023-03-07 18:30:27 +00:00 committed by PyTorch MergeBot
parent 30237e7aec
commit eea0733045
12 changed files with 26 additions and 32 deletions

View file

@ -13,6 +13,7 @@ from torch.testing._internal.common_utils import (
load_tests,
TEST_WITH_ROCM,
skip_but_pass_in_sandcastle_if,
NoTest,
)
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
@ -34,7 +35,7 @@ load_tests = load_tests
nGPUs = torch.cuda.device_count()
if not TEST_CUDA:
print("CUDA not available, skipping tests", file=sys.stderr)
TestCase = object # noqa: F811
TestCase = NoTest # noqa: F811
datatypes = [torch.float]

View file

@ -9,7 +9,7 @@ import torch
from typing import NamedTuple
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import skipIfRocm, skipCUDANonDefaultStreamIf
from torch.testing._internal.common_utils import skipIfRocm, skipCUDANonDefaultStreamIf, NoTest
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
@ -23,7 +23,7 @@ TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
# If GPU is not available, then do not run the tests
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
JitTestCase = object # noqa: F811
JitTestCase = NoTest # noqa: F811
TEST_LARGE_TENSOR = TEST_CUDA

View file

@ -6,6 +6,7 @@ import torch
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.jit._monkeytype_config import _IS_MONKEYTYPE_INSTALLED
from typing import List, Dict, Tuple, Any, Optional, NamedTuple # noqa: F401
from torch.testing._internal.common_utils import NoTest
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
@ -13,7 +14,7 @@ sys.path.append(pytorch_test_dir)
if not _IS_MONKEYTYPE_INSTALLED:
print("monkeytype is not installed. Skipping tests for Profile-Directed Typing", file=sys.stderr)
JitTestCase = object # type: ignore[misc, assignment] # noqa: F811
JitTestCase = NoTest # type: ignore[misc, assignment] # noqa: F811
if __name__ == "__main__":
raise RuntimeError(

View file

@ -887,21 +887,10 @@ CUSTOM_HANDLERS = {
PYTEST_BLOCKLIST = [
"test_package",
"test_nccl",
"inductor/test_torchinductor",
"test_cuda",
"test_quantization",
"test_cuda_nvml_based_avail",
"test_cuda_primary_ctx",
"test_cuda_sanitizer",
"test_cuda_trace",
"test_fx",
"test_jiterator",
"test_mps",
"test_cuda_trace",
"profiler/test_profiler",
"test_jit",
"test_jit_legacy",
"dynamo/test_repros", # skip_if_pytest
"dynamo/test_optimizers", # skip_if_pytest
"dynamo/test_dynamic_shapes", # needs change to check_if_enable for disabled test issues

View file

@ -28,7 +28,7 @@ from torch.utils.checkpoint import checkpoint_sequential
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY, \
get_cycles_per_ms, parametrize, instantiate_parametrized_tests, subtest, IS_JETSON, gcIfJetson
get_cycles_per_ms, parametrize, instantiate_parametrized_tests, subtest, IS_JETSON, gcIfJetson, NoTest
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
@ -44,7 +44,7 @@ TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
TestCase = NoTest # noqa: F811
TEST_CUDAMALLOCASYNC = TEST_CUDA and (torch.cuda.get_allocator_backend() == "cudaMallocAsync")
TEST_LARGE_TENSOR = TEST_CUDA

View file

@ -13,7 +13,7 @@ with patch.dict(os.environ, {"PYTORCH_NVML_BASED_CUDA_CHECK": "1"}):
# Before executing the desired tests, we need to disable CUDA initialization and fork_handler additions that would
# otherwise be triggered by the `torch.testing._internal.common_utils` module import
from torch.testing._internal.common_utils import (parametrize, instantiate_parametrized_tests, run_tests, TestCase,
IS_WINDOWS, IS_JETSON)
IS_WINDOWS, IS_JETSON, NoTest)
# NOTE: Because `remove_device_and_dtype_suffixes` initializes CUDA context (triggered via the import of
# `torch.testing._internal.common_device_type` which imports `torch.testing._internal.common_cuda`) we need
# to bypass that method here which should be irrelevant to the parameterized tests in this module.
@ -22,7 +22,7 @@ with patch.dict(os.environ, {"PYTORCH_NVML_BASED_CUDA_CHECK": "1"}):
TEST_CUDA = torch.cuda.is_available()
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # type: ignore[misc, assignment] # noqa: F811
TestCase = NoTest # type: ignore[misc, assignment] # noqa: F811
class TestExtendedCUDAIsAvail(TestCase):

View file

@ -1,7 +1,7 @@
# Owner(s): ["module: cuda"]
import torch
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocmVersionLessThan
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocmVersionLessThan, NoTest
import sys
import unittest
@ -16,7 +16,7 @@ TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
TestCase = NoTest # noqa: F811
class TestCudaPrimaryCtx(TestCase):

View file

@ -8,7 +8,7 @@ from typing import List
import torch
import torch.cuda._sanitizer as csan
from torch.cuda._sanitizer import StreamId, DataPtr, EventId
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_utils import TestCase, run_tests, NoTest
# We cannot import TEST_CUDA from torch.testing._internal.common_cuda here,
@ -19,7 +19,7 @@ TEST_CUDA = torch.cuda.is_available()
if not TEST_CUDA:
print("CUDA not available, skipping tests", file=sys.stderr)
TestCase = object # noqa: F811
TestCase = NoTest # noqa: F811
class TestArgumentHandler(TestCase):

View file

@ -6,7 +6,7 @@ import unittest.mock
import torch
import torch.utils._cuda_trace as cuda_trace
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_utils import TestCase, run_tests, NoTest
# NOTE: Each test needs to be run in a brand new process, to reset the registered hooks
# and make sure the CUDA streams are initialized for each test that uses them.
@ -19,7 +19,7 @@ TEST_CUDA = torch.cuda.is_available()
if not TEST_CUDA:
print("CUDA not available, skipping tests", file=sys.stderr)
TestCase = object # noqa: F811
TestCase = NoTest # noqa: F811
class TestCudaTrace(TestCase):

View file

@ -5,7 +5,7 @@ from torch.cuda.jiterator import _create_jit_fn as create_jit_fn
from torch.cuda.jiterator import _create_multi_output_jit_fn as create_multi_output_jit_fn
import sys
from itertools import product
from torch.testing._internal.common_utils import TestCase, parametrize, run_tests, TEST_CUDA
from torch.testing._internal.common_utils import TestCase, parametrize, run_tests, TEST_CUDA, NoTest
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_device_type import (
skipCUDAIfRocm, skipCUDAIf, instantiate_device_type_tests, dtypes, toleranceOverride, tol)
@ -13,7 +13,7 @@ from torch.testing._internal.common_cuda import _get_torch_cuda_version
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
TestCase = NoTest # noqa: F811
code_string = "template <typename T> T my_fused_kernel(T x, T y, T alpha, T beta) { return alpha * x + beta * y; }"

View file

@ -22,7 +22,7 @@ from torch import inf
from torch.nn import Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI,
(gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, NoTest,
TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings)
from torch.testing import make_tensor
from torch.testing._comparison import TensorLikePair
@ -175,8 +175,8 @@ def mps_ops_modifier(ops):
# Same logic as test_cuda.py
if not torch.backends.mps.is_available():
print('MPS not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
NNTestCase = object # noqa: F811
TestCase = NoTest # noqa: F811
NNTestCase = NoTest # noqa: F811
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]))

View file

@ -1686,8 +1686,6 @@ def remove_device_and_dtype_suffixes(test_name: str) -> str:
def check_if_enable(test: unittest.TestCase):
test_suite = str(test.__class__).split('\'')[1]
if "USING_PYTEST" in os.environ:
test_suite = f"__main__.{test_suite.split('.')[1]}"
raw_test_name = f'{test._testMethodName} ({test_suite})'
if raw_test_name in slow_tests_dict:
getattr(test, test._testMethodName).__dict__['slow_test'] = True
@ -1976,6 +1974,11 @@ def set_warn_always_context(new_val: bool):
torch.set_warn_always(old_val)
class NoTest:
    # pytest skips collecting any class whose ``__test__`` attribute is
    # falsy, so assigning ``TestCase = NoTest`` (instead of ``object``)
    # hides a module's tests from both unittest and pytest collection.
    __test__ = False
class TestCase(expecttest.TestCase):
# NOTE: "precision" lets classes and generated tests set minimum
# atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for