unify TEST_CUDA (#106685)

Fixes #ISSUE_NUMBER
As the title says, this unifies the duplicated `TEST_CUDA` definitions by importing the shared flag instead of redefining it per test file.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/106685
Approved by: https://github.com/zou3519
This commit is contained in:
shibo19 2023-08-10 09:01:36 +00:00 committed by PyTorch MergeBot
parent 2b560d3c3a
commit bb2fcc7659
9 changed files with 13 additions and 39 deletions

View file

@ -9,14 +9,12 @@ import torch
from typing import NamedTuple
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import skipIfRocm, skipCUDANonDefaultStreamIf, NoTest
from torch.testing._internal.common_utils import skipIfRocm, skipCUDANonDefaultStreamIf, NoTest, TEST_CUDA
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
# Check if GPU is available
TEST_CUDA = torch.cuda.is_available()
# Check if multiple GPUs are available
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2

View file

@ -13,7 +13,7 @@ from torch.testing import FileCheck
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_quantized import override_quantized_engine
from torch.testing._internal.common_utils import set_default_dtype, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM
from torch.testing._internal.common_cuda import TEST_CUDNN
from torch.testing._internal.common_cuda import TEST_CUDNN, TEST_CUDA
from torch.testing._internal.jit_utils import JitTestCase
from torch.utils import mkldnn as mkldnn_utils
@ -29,7 +29,6 @@ if __name__ == '__main__':
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
TEST_CUDA = torch.cuda.is_available()
TEST_ROCM = torch.cuda.is_available() and torch.version.hip is not None
def removeExceptions(graph):

View file

@ -2,14 +2,9 @@
import torch
import torch._dynamo as torchdynamo
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_CUDA
import unittest
# We cannot import TEST_CUDA from torch.testing._internal.common_cuda here, because if we do that,
# the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed multiple times
# as well during the execution of this test suite, and it will cause
# CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
try:
import tabulate # noqa: F401 # type: ignore[import]
from torch.utils.benchmark.utils.compile import bench_all

View file

@ -11,7 +11,7 @@ import subprocess
import glob
import torch.testing._internal.common_utils as common
from torch.testing._internal.common_cuda import TEST_CUDNN
from torch.testing._internal.common_cuda import TEST_CUDNN, TEST_CUDA
import torch
import torch.backends.cudnn
import torch.utils.cpp_extension
@ -20,8 +20,8 @@ from torch.testing._internal.common_utils import gradcheck
import torch.multiprocessing as mp
TEST_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
TEST_ROCM = torch.cuda.is_available() and torch.version.hip is not None and ROCM_HOME is not None
TEST_CUDA = TEST_CUDA and CUDA_HOME is not None
TEST_ROCM = TEST_CUDA and torch.version.hip is not None and ROCM_HOME is not None
TEST_MPS = torch.backends.mps.is_available()
IS_WINDOWS = sys.platform == "win32"

View file

@ -8,14 +8,14 @@ import tempfile
import unittest
import torch.testing._internal.common_utils as common
from torch.testing._internal.common_utils import IS_ARM64
from torch.testing._internal.common_utils import IS_ARM64, TEST_CUDA
import torch
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
TEST_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
TEST_ROCM = torch.cuda.is_available() and torch.version.hip is not None and ROCM_HOME is not None
TEST_CUDA = TEST_CUDA and CUDA_HOME is not None
TEST_ROCM = TEST_CUDA and torch.version.hip is not None and ROCM_HOME is not None
def remove_build_path():

View file

@ -8,15 +8,9 @@ from typing import List
import torch
import torch.cuda._sanitizer as csan
from torch.cuda._sanitizer import StreamId, DataPtr, EventId
from torch.testing._internal.common_utils import TestCase, run_tests, NoTest
from torch.testing._internal.common_utils import TestCase, run_tests, NoTest, TEST_CUDA
# We cannot import TEST_CUDA from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
if not TEST_CUDA:
print("CUDA not available, skipping tests", file=sys.stderr)
TestCase = NoTest # noqa: F811

View file

@ -6,17 +6,11 @@ import unittest.mock
import torch
import torch.utils._cuda_trace as cuda_trace
from torch.testing._internal.common_utils import TestCase, run_tests, NoTest
from torch.testing._internal.common_utils import TestCase, run_tests, NoTest, TEST_CUDA
# NOTE: Each test needs to be run in a brand new process, to reset the registered hooks
# and make sure the CUDA streams are initialized for each test that uses them.
# We cannot import TEST_CUDA from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
if not TEST_CUDA:
print("CUDA not available, skipping tests", file=sys.stderr)
TestCase = NoTest # noqa: F811

View file

@ -35,7 +35,7 @@ from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS, IS_JETSON,
IS_CI, NO_MULTIPROCESSING_SPAWN, skipIfRocm, slowTest,
load_tests, TEST_WITH_ASAN, TEST_WITH_TSAN, IS_SANDCASTLE,
IS_MACOS)
IS_MACOS, TEST_CUDA)
try:
@ -74,11 +74,6 @@ skipIfNoNumpy = unittest.skipIf(not HAS_NUMPY, "no NumPy")
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA from torch.testing._internal.common_cuda here, because if we do that,
# the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed multiple times
# as well during the execution of this test suite, and it will cause
# CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
if TEST_CUDA:
torch.cuda.memory._set_allocator_settings('expandable_segments:False')

View file

@ -3,7 +3,7 @@ r"""This file is allowed to initialize CUDA context when imported."""
import functools
import torch
import torch.cuda
from torch.testing._internal.common_utils import LazyVal, TEST_NUMBA, TEST_WITH_ROCM
from torch.testing._internal.common_utils import LazyVal, TEST_NUMBA, TEST_WITH_ROCM, TEST_CUDA
import inspect
import contextlib
@ -11,7 +11,6 @@ import contextlib
CUDA_ALREADY_INITIALIZED_ON_IMPORT = torch.cuda.is_initialized()
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
CUDA_DEVICE = torch.device("cuda:0") if TEST_CUDA else None
# note: if ROCm is targeted, TEST_CUDNN is code for TEST_MIOPEN