[CI] Fix test failures at TestTensorCreationCPU.test_float_to_int_conversion_finite_cpu_uint8 (#98916)

This PR fixes divergent value issues in converting float32 to uint8. The failures of `TestTensorCreationCPU.test_float_to_int_conversion_finite_cpu_uint8` came from divergent values between PyTorch and numpy across platforms. This PR makes two changes:

- Enhance `_float_to_int_conversion_helper()` to accept explicitly given reference values, so tests can supply a stable, platform-independent reference
- Omit the test case for `float.max` since PyTorch's results diverge across platforms (e.g. `float.max` -> `uint8` yields 0 on x86_64 but 255 on s390x)

Fixes #97794

Pull Request resolved: https://github.com/pytorch/pytorch/pull/98916
Approved by: https://github.com/dagitses
This commit is contained in:
Kazuaki Ishizaki 2023-04-18 15:05:07 +00:00 committed by PyTorch MergeBot
parent 24d20ea194
commit e7a5cb99e2

View file

@ -941,17 +941,19 @@ class TestTensorCreation(TestCase):
a = np.array(val, dtype=torch_to_numpy_dtype_dict[dtype])
self.assertEqual(t, torch.from_numpy(a))
def _float_to_int_conversion_helper(self, vals, device, dtype):
a = np.array(vals, dtype=np.float32).astype(torch_to_numpy_dtype_dict[dtype])
def _float_to_int_conversion_helper(self, vals, device, dtype, refs=None):
if refs is None:
a = np.array(vals, dtype=np.float32).astype(torch_to_numpy_dtype_dict[dtype])
refs = torch.from_numpy(a)
t = torch.tensor(vals, device=device, dtype=torch.float).to(dtype)
self.assertEqual(torch.from_numpy(a), t.cpu())
self.assertEqual(refs, t.cpu())
# Checks that float->integer casts don't produce undefined behavior errors.
# Note: In C++, casting from a floating value to an integral dtype
# is undefined if the floating point value is not within the integral
# dtype's dynamic range. This can (and should) cause undefined behavior
# errors with UBSAN. These casts are deliberate in PyTorch, however, and
# NumPy has the same behavior.
# NumPy may have the same behavior.
@onlyNativeDeviceTypes
@unittest.skipIf(IS_MACOS or IS_JETSON, "Test is broken on MacOS and Jetson, \
see https://github.com/pytorch/pytorch/issues/38752")
@ -963,14 +965,21 @@ class TestTensorCreation(TestCase):
# Note: CUDA max float -> integer conversion is divergent on some dtypes
vals = (min, -2, -1.5, -.5, 0, .5, 1.5, 2, max)
refs = None
if self.device_type == 'cuda':
if torch.version.hip:
# HIP min float -> int64 conversion is divergent
vals = (-2, -1.5, -.5, 0, .5, 1.5, 2)
else:
vals = (min, -2, -1.5, -.5, 0, .5, 1.5, 2)
elif dtype == torch.uint8:
# Note: CPU max float -> uint8 conversion is divergent
vals = (min, -2, -1.5, -.5, 0, .5, 1.5, 2)
# Note: numpy -2.0 or -1.5 -> uint8 conversion is undefined
# see https://github.com/pytorch/pytorch/issues/97794
refs = (0, 254, 255, 0, 0, 0, 1, 2)
self._float_to_int_conversion_helper(vals, device, dtype)
self._float_to_int_conversion_helper(vals, device, dtype, refs)
# Note: CUDA will fail this test on most dtypes, often dramatically.
@onlyCPU