From 16005852192a3c0d0bb50ae0c447581dcdfd0657 Mon Sep 17 00:00:00 2001
From: PyTorch MergeBot
Date: Mon, 24 Jul 2023 14:18:44 +0000
Subject: [PATCH] Revert "Fix test failure in TestCudaMultiGPU.test_cuda_device_memory_allocated (#105501)"

This reverts commit e6fd8ca3eef2b85b821936829e86beb7d832575c.

Reverted https://github.com/pytorch/pytorch/pull/105501 on behalf of
https://github.com/zou3519 due to We've agreed that the PR is wrong. It
didn't actually break anything.
([comment](https://github.com/pytorch/pytorch/pull/105501#issuecomment-1648005842))
---
 test/test_cuda_multigpu.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/test_cuda_multigpu.py b/test/test_cuda_multigpu.py
index b00b7e501b4..9a118c26ec9 100644
--- a/test/test_cuda_multigpu.py
+++ b/test/test_cuda_multigpu.py
@@ -1285,7 +1285,7 @@
         t2.start()
         device_count = torch.cuda.device_count()
         current_alloc = [memory_allocated(idx) for idx in range(device_count)]
         x = torch.ones(10, device="cuda:0")
-        self.assertGreaterEqual(memory_allocated(0), current_alloc[0])
+        self.assertGreater(memory_allocated(0), current_alloc[0])
         self.assertTrue(all(memory_allocated(torch.cuda.device(idx)) == current_alloc[idx]
                             for idx in range(1, device_count)))
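
Note (not part of the patch): a minimal sketch of the behavior the restored strict
assertion relies on, assuming a machine with at least one CUDA device. Allocating a
new tensor on cuda:0 should strictly increase that device's allocated memory, while
the other devices stay unchanged; memory_allocated in the test is a local alias for
torch.cuda.memory_allocated.

    # Sketch only; assumes at least one CUDA device is available.
    import torch

    before = torch.cuda.memory_allocated(0)
    x = torch.ones(10, device="cuda:0")    # new allocation on device 0
    after = torch.cuda.memory_allocated(0)
    assert after > before                  # strictly greater, not merely >=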