diff --git a/caffe2/CMakeLists.txt b/caffe2/CMakeLists.txt
index d8ea2c06de5..44b1d0213ee 100644
--- a/caffe2/CMakeLists.txt
+++ b/caffe2/CMakeLists.txt
@@ -1559,7 +1559,7 @@ if(USE_CUDA)
   # FIXME: If kineto is linked with CUPTI it pollutes torch_cpu with CUDA dependencies
   # Even worse, it never declares that it depends on cudart, but calls the API, see
   # https://github.com/pytorch/kineto/blob/aef2f5c0f15e3be52406ac0b885e8689de6bc9f6/libkineto/src/CudaDeviceProperties.cpp#L24
-  if(USE_KINETO AND NOT MSVC AND NOT LIBKINETO_NOCUPTI)
+  if(USE_KINETO AND NOT LIBKINETO_NOCUPTI)
     target_link_libraries(torch_cpu PRIVATE torch::cudart)
   endif()
   target_link_libraries(torch_cuda INTERFACE torch::cudart)
diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
index 9342555d9bc..114ce44adb9 100644
--- a/cmake/Dependencies.cmake
+++ b/cmake/Dependencies.cmake
@@ -1568,7 +1568,7 @@ if(USE_KINETO AND INTERN_BUILD_MOBILE AND USE_LITE_INTERPRETER_PROFILER AND (USE
 endif()
 
 if(USE_KINETO)
-  if((NOT USE_CUDA) OR MSVC)
+  if(NOT USE_CUDA)
     set(LIBKINETO_NOCUPTI ON CACHE STRING "" FORCE)
   else()
     set(LIBKINETO_NOCUPTI OFF CACHE STRING "")
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py
index 79bf93e63cb..a8a7e559db2 100644
--- a/test/profiler/test_profiler.py
+++ b/test/profiler/test_profiler.py
@@ -289,8 +289,7 @@ class TestProfiler(TestCase):
             )
         )
 
-        # TODO: https://github.com/pytorch/kineto/issues/617
-        if kineto_available() and not IS_WINDOWS:
+        if kineto_available():
             with TemporaryFileName(mode="w+") as fname:
                 p.export_chrome_trace(fname)
                 with open(fname) as f:
@@ -1360,11 +1359,7 @@ class TestProfiler(TestCase):
         finally:
             torch._C._profiler._set_fwd_bwd_enabled_val(True)
 
-    # This test is broken on Windows, the likely reason is that kineto/CUPTI
-    # is not supported that particular environment. Once the CI stabilizes
-    # we can narrow the condition so Windows is checked as well (TODO)
     @unittest.skipIf(not kineto_available(), "Kineto is required")
-    @unittest.skipIf(IS_WINDOWS, "Test does not work on Windows")
     @unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
     def test_profiler_cuda_sync_events(self):
         device = torch.device("cuda:0")