Disable tests failing on test2 in ROCm CI (#37427)

Summary:
This pull request disables the unit tests that were observed to be failing once `test2` was enabled. These tests will be one by one looked at and fixed at the earliest, but until then disabling them to unblock `test2`
The pull request also disables fftPlanDestroy for rocFFT to avoid double-freeing FFT handles

cc: ezyang jeffdaily
Pull Request resolved: https://github.com/pytorch/pytorch/pull/37427

Differential Revision: D21302909

Pulled By: ezyang

fbshipit-source-id: ecadda3778e65b7f4f97e24b932b96b9ce928616
This commit is contained in:
ashishfarmer 2020-04-29 09:53:30 -07:00 committed by Facebook GitHub Bot
parent 58a46a174e
commit bbd2350c99
5 changed files with 11 additions and 1 deletions

View file

@ -55,9 +55,12 @@ static inline void setCuFFTParams(CuFFTParams* params,
struct CuFFTHandleDeleter {
void operator()(cufftHandle* x) {
// Not using fftDestroy() for rocFFT to work around double freeing of handles
#ifndef __HIP_PLATFORM_HCC__
if (x != nullptr) {
CUFFT_CHECK(cufftDestroy(*x));
}
#endif
}
};

View file

@ -107,6 +107,7 @@ ROCM_BLACKLIST = [
'test_jit_legacy',
'test_jit_fuser_legacy',
'test_tensorexpr',
'test_type_hints',
]
# These tests are slow enough that it's worth calculating whether the patch

View file

@ -1813,6 +1813,7 @@ class TestCuda(TestCase):
counted = t.bincount(minlength=65536)
self.assertEqual(torch.sum(counted), 10)
@skipIfRocm
def test_tiny_half_norm_(self):
a = torch.arange(25).cuda().float()
a /= 100000000

View file

@ -1031,6 +1031,7 @@ class TestSparse(TestCase):
ab = a.bmm(b)
@cuda_only
@skipIfRocm
@unittest.skipIf(
(torch.version.cuda
and [int(x) for x in torch.version.cuda.split(".")] >= [10, 1]),
@ -1210,6 +1211,7 @@ class TestSparse(TestCase):
self._test_spadd_shape(10, [50, 30, 20], [2, 0])
@cuda_only
@skipIfRocm
@unittest.skipIf(not TEST_WITH_ROCM, "runs only on ROCm")
def test_sparse_add_out_bfloat16(self):
# fp32

View file

@ -12655,6 +12655,7 @@ class TestTorchDeviceType(TestCase):
self.assertRaises(RuntimeError, torch.istft, torch.zeros((0, 3, 2)), 2)
@onlyOnCPUAndCUDA
@skipIfRocm
@dtypes(torch.double)
def test_istft_of_sine(self, device, dtype):
def _test(amplitude, L, n):
@ -12688,6 +12689,7 @@ class TestTorchDeviceType(TestCase):
_test(amplitude=99, L=10, n=7)
@onlyOnCPUAndCUDA
@skipIfRocm
@dtypes(torch.double)
def test_istft_linearity(self, device, dtype):
num_trials = 100
@ -12752,7 +12754,7 @@ class TestTorchDeviceType(TestCase):
_test(data_size, kwargs)
@onlyOnCPUAndCUDA
@skipCUDAIfRocm
@skipIfRocm
def test_batch_istft(self, device):
original = torch.tensor([
[[4., 0.], [4., 0.], [4., 0.], [4., 0.], [4., 0.]],
@ -16081,6 +16083,7 @@ scipy_lobpcg | {:10.2e} | {:10.2e} | {:6} | N/A
# dtype's dynamic range. This can (and should) cause undefined behavior
# errors with UBSAN. These casts are deliberate in PyTorch, however, and
# NumPy has the same behavior.
@skipIfRocm
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
@dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_float_to_int_conversion_finite(self, device, dtype):