Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/39759

Caffe2 has a mode where it uses PyTorch's caching allocator. Somehow we were not calling the initialization explicitly. I have no idea why it worked before; it is probably worth running a bisect separately.

Reviewed By: houseroad

Differential Revision: D21962331

fbshipit-source-id: f16ad6b27a67dbe0bda93939cca8c94620d22a09
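A hedged illustration of the mechanism the summary relies on, assuming a CUDA build of PyTorch (the variable names here are illustrative, not from the test): PyTorch's caching allocator tracks the GPU memory it hands out, and torch.cuda.memory_allocated() reports the bytes currently allocated through it. The test below uses exactly this counter to detect that Caffe2's 'thc' memory-pool mode routed an allocation through PyTorch's allocator.

import torch

# Illustrative sketch: allocations made through the Torch API go through
# the caching allocator, so the allocated-bytes counter increases.
before = torch.cuda.memory_allocated()
t = torch.empty(5, 5, device='cuda')
assert torch.cuda.memory_allocated() > before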
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import unittest

import torch

from caffe2.python import core, workspace


# This is a standalone test that doesn't use test_util as we're testing
# initialization and thus we should be the ones calling GlobalInit
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
class TestGPUInit(unittest.TestCase):
    def testTHCAllocator(self):
        core.GlobalInit(['caffe2', '--caffe2_cuda_memory_pool=thc'])
        # Just run one operator. It's important not to call anything from the
        # Torch API here; even torch.cuda.memory_allocated() would initialize
        # the CUDA context.
        workspace.RunOperatorOnce(core.CreateOperator(
            'ConstantFill', [], ["x"], shape=[5, 5], value=1.0,
            device_option=core.DeviceOption(workspace.GpuDeviceType)
        ))
        # Make sure we actually used the THC allocator.
        self.assertGreater(torch.cuda.memory_allocated(), 0)
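A follow-up sketch of the ordering pitfall the comments above warn about. The claim that a Torch CUDA API call initializes the CUDA context is taken from those comments; this snippet is an illustrative anti-pattern, not part of the test:

import torch
from caffe2.python import core, workspace

core.GlobalInit(['caffe2', '--caffe2_cuda_memory_pool=thc'])
# Anti-pattern: touching the Torch CUDA API before the Caffe2 operator
# runs initializes the CUDA context up front, so the test would no longer
# exercise the lazy-initialization path that this change fixed.
torch.cuda.memory_allocated()
# ...running ConstantFill after this point proves less than the test intends.

Keeping the first CUDA touch inside the Caffe2 operator is what makes the final assertion meaningful: a nonzero counter then shows that the Caffe2 operator's allocation itself initialized and used the THC caching allocator.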