2020-09-24 00:55:24 +00:00
|
|
|
|
caffe2::DeviceType -> at::DeviceType (#11254)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11254
Previously we used DeviceType from caffe2.proto directly, but it's an `enum` and has implicit conversion to int, which does not provide type safety; e.g. we have to explicitly check that a device type is valid in event.h:
```
template <int d>
struct EventCreateFunctionRegisterer {
explicit EventCreateFunctionRegisterer(EventCreateFunction f) {
static_assert(d < MaxDeviceTypes, "");
Event::event_creator_[d] = f;
}
};
```
at::DeviceType is an `enum class`, and it does not have implicit conversion to int, and provides better type safety guarantees. In this diff we have done the following refactor(taking CPU as an example):
1. caffe2::DeviceType → caffe2::DeviceTypeProto
2. caffe2::CPU → caffe2::PROTO_CPU
3. caffe2::DeviceType = at::DeviceType
4. caffe2::CPU = at::DeviceType::CPU
codemod -d caffe2/caffe2 --extensions h,cc,cpp 'device_type\(\), ' 'device_type(), PROTO_'
+ some manual changes
In short, after this diff, in c++, caffe2::CPU refers to the at::DeviceType::CPU and the old proto caffe2::CPU will be caffe2::PROTO_CPU.
On the Python side, we have a temporary workaround that aliases `caffe2_pb2.CPU = caffe2_pb2.PROTO_CPU` to make the change easier to review; this will be removed later.
Reviewed By: ezyang
Differential Revision: D9545704
fbshipit-source-id: 461a28a4ca74e616d3ee183a607078a717fd38a7
2018-09-05 23:13:54 +00:00
|
|
|
from caffe2.proto import caffe2_pb2
|
2018-12-17 23:18:15 +00:00
|
|
|
import os
|
|
|
|
|
import sys
|
caffe2::DeviceType -> at::DeviceType (#11254)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11254
Previously we used DeviceType from caffe2.proto directly, but it's an `enum` and has implicit conversion to int, which does not provide type safety; e.g. we have to explicitly check that a device type is valid in event.h:
```
template <int d>
struct EventCreateFunctionRegisterer {
explicit EventCreateFunctionRegisterer(EventCreateFunction f) {
static_assert(d < MaxDeviceTypes, "");
Event::event_creator_[d] = f;
}
};
```
at::DeviceType is an `enum class`, and it does not have implicit conversion to int, and provides better type safety guarantees. In this diff we have done the following refactor(taking CPU as an example):
1. caffe2::DeviceType → caffe2::DeviceTypeProto
2. caffe2::CPU → caffe2::PROTO_CPU
3. caffe2::DeviceType = at::DeviceType
4. caffe2::CPU = at::DeviceType::CPU
codemod -d caffe2/caffe2 --extensions h,cc,cpp 'device_type\(\), ' 'device_type(), PROTO_'
+ some manual changes
In short, after this diff, in c++, caffe2::CPU refers to the at::DeviceType::CPU and the old proto caffe2::CPU will be caffe2::PROTO_CPU.
On the Python side, we have a temporary workaround that aliases `caffe2_pb2.CPU = caffe2_pb2.PROTO_CPU` to make the change easier to review; this will be removed later.
Reviewed By: ezyang
Differential Revision: D9545704
fbshipit-source-id: 461a28a4ca74e616d3ee183a607078a717fd38a7
2018-09-05 23:13:54 +00:00
|
|
|
# TODO: refactor & remove the following alias
# Re-expose the renamed PROTO_* device-type constants under their historical
# names so existing callers of e.g. caffe2_pb2.CPU keep working unchanged.
for _device_type_name in (
    'CPU',
    'CUDA',
    'MKLDNN',
    'OPENGL',
    'OPENCL',
    'IDEEP',
    'HIP',
    'COMPILE_TIME_MAX_DEVICE_TYPES',
):
    setattr(caffe2_pb2, _device_type_name,
            getattr(caffe2_pb2, 'PROTO_' + _device_type_name))
del _device_type_name
|
2018-12-17 23:18:15 +00:00
|
|
|
|
2021-02-12 04:07:32 +00:00
|
|
|
# On Windows, native DLL dependencies (torch libs, NVTX, the CUDA runtime)
# are not resolved via sys.path, so locate the relevant directories and
# eagerly load every DLL shipped in torch/lib before extension modules need
# them.
if sys.platform == "win32":
    # A 'conda-meta' directory under sys.prefix marks a conda environment.
    # NOTE(review): not referenced later in this block — presumably used by
    # code outside this view; confirm before removing.
    is_conda = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))
    # Conda-style Python layouts keep native DLLs in <python>/Library/bin.
    py_dll_path = os.path.join(os.path.dirname(sys.executable), 'Library', 'bin')
    # Three directory levels up from this file sits the 'torch' package,
    # whose 'lib' subdirectory holds the shared libraries.
    th_root = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'torch')
    th_dll_path = os.path.join(th_root, 'lib')

    # If nvToolsExt is bundled neither with torch nor with the Python
    # install, fall back to the standalone NVIDIA NvToolsExt installation
    # (overridable via NVTOOLSEXT_PATH).
    if not os.path.exists(os.path.join(th_dll_path, 'nvToolsExt64_1.dll')) and \
            not os.path.exists(os.path.join(py_dll_path, 'nvToolsExt64_1.dll')):
        nvtoolsext_dll_path = os.path.join(
            os.getenv('NVTOOLSEXT_PATH', 'C:\\Program Files\\NVIDIA Corporation\\NvToolsExt'), 'bin', 'x64')
    else:
        nvtoolsext_dll_path = ''

    import importlib.util
    import glob
    # Execute torch/version.py as a standalone module (without importing
    # torch itself) to learn which CUDA version this build targets.
    spec = importlib.util.spec_from_file_location('torch_version', os.path.join(th_root, 'version.py'))
    torch_version = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(torch_version)
    # If the CUDA runtime DLL is not bundled with torch or Python, point at
    # the system-wide CUDA toolkit matching the build's CUDA version.
    if torch_version.cuda and len(glob.glob(os.path.join(th_dll_path, 'cudart64*.dll'))) == 0 and \
            len(glob.glob(os.path.join(py_dll_path, 'cudart64*.dll'))) == 0:
        cuda_version = torch_version.cuda
        cuda_version_1 = cuda_version.replace('.', '_')
        cuda_path_var = 'CUDA_PATH_V' + cuda_version_1  # e.g. CUDA_PATH_V10_2
        default_path = 'C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v' + cuda_version
        cuda_path = os.path.join(os.getenv(cuda_path_var, default_path), 'bin')
    else:
        cuda_path = ''

    import ctypes
    kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True)
    # Keep only the candidate directories that actually exist.
    dll_paths = list(filter(os.path.exists, [th_dll_path, py_dll_path, nvtoolsext_dll_path, cuda_path]))
    # AddDllDirectory only exists on systems with the updated loader; its
    # presence gates the LoadLibraryExW search-flag path below.
    with_load_library_flags = hasattr(kernel32, 'AddDllDirectory')
    # 0x0001 == SEM_FAILCRITICALERRORS: suppress error dialog boxes during
    # the loads below; the previous mode is restored at the end of the block.
    prev_error_mode = kernel32.SetErrorMode(0x0001)

    # ctypes defaults to a C int restype, which would truncate 64-bit
    # HMODULE return values — declare pointer-sized restypes explicitly.
    kernel32.LoadLibraryW.restype = ctypes.c_void_p
    if with_load_library_flags:
        kernel32.AddDllDirectory.restype = ctypes.c_void_p
        kernel32.LoadLibraryExW.restype = ctypes.c_void_p

    # Register each existing directory with the loader's DLL search path.
    for dll_path in dll_paths:
        if sys.version_info >= (3, 8):
            # Python 3.8+ wraps AddDllDirectory natively.
            os.add_dll_directory(dll_path)
        elif with_load_library_flags:
            res = kernel32.AddDllDirectory(dll_path)
            if res is None:
                err = ctypes.WinError(ctypes.get_last_error())
                err.strerror += ' Error adding "{}" to the DLL directories.'.format(dll_path)
                raise err

    # Pre-load every DLL in torch/lib so later extension imports resolve.
    dlls = glob.glob(os.path.join(th_dll_path, '*.dll'))
    path_patched = False
    for dll in dlls:
        is_loaded = False
        if with_load_library_flags:
            # 0x00001100 == LOAD_LIBRARY_SEARCH_DEFAULT_DIRS |
            # LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR: search the directories
            # registered above plus the DLL's own directory.
            res = kernel32.LoadLibraryExW(dll, None, 0x00001100)
            last_error = ctypes.get_last_error()
            # 126 == ERROR_MOD_NOT_FOUND: a dependency was not found; fall
            # through to the PATH-based retry instead of failing outright.
            if res is None and last_error != 126:
                err = ctypes.WinError(last_error)
                err.strerror += ' Error loading "{}" or one of its dependencies.'.format(dll)
                raise err
            elif res is not None:
                is_loaded = True
        if not is_loaded:
            # Fallback: prepend the DLL directories to PATH (once) and use
            # the legacy LoadLibraryW search to find the dependencies.
            if not path_patched:
                os.environ['PATH'] = ';'.join(dll_paths + [os.environ['PATH']])
                path_patched = True
            res = kernel32.LoadLibraryW(dll)
            if res is None:
                err = ctypes.WinError(ctypes.get_last_error())
                err.strerror += ' Error loading "{}" or one of its dependencies.'.format(dll)
                raise err

    # Restore the caller's error mode.
    kernel32.SetErrorMode(prev_error_mode)
|