[BE] reduce log spew from test_triton_kernels.py (#145895)

One of the tests in this file was setting `torch._logging.set_logs(output_code=True)` - which would cause logs to be printed for the rest of the tests in this file.

This PR puts the log-setting in a context manager so that the old behavior is restored afterwards.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/145895
Approved by: https://github.com/nmacchioni
This commit is contained in:
David Berard 2025-01-31 11:20:51 -08:00 committed by PyTorch MergeBot
parent 5f53889850
commit 7997ecf809

View file

@ -26,7 +26,7 @@ from torch.testing._internal.common_utils import (
TEST_WITH_ROCM,
)
from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CUDA, HAS_GPU, HAS_XPU
from torch.testing._internal.logging_utils import logs_to_string
from torch.testing._internal.logging_utils import log_settings, logs_to_string
# Defines all the kernels for tests
from torch.testing._internal.triton_utils import * # noqa: F403
@ -3474,8 +3474,10 @@ class CustomOpTests(torch._inductor.test_case.TestCase):
w = torch.randn(K, N, device=GPU_TYPE)
torch._dynamo.decorators.mark_unbacked(x, 0)
torch._logging.set_logs(output_code=True)
with self.assertLogs(logger="torch._inductor", level=logging.DEBUG) as log:
with log_settings("+output_code"), self.assertLogs(
logger="torch._inductor", level=logging.DEBUG
) as log:
foo(x, w)
output = "\n".join(record.getMessage() for record in log.records)