mirror of
https://github.com/saymrwulf/pytorch.git
synced 2026-05-14 20:57:59 +00:00
Cleanup Windows warning suppression in CMake and fix some warnings in the source code (#94927)
This PR does two things: 1. It moves some Windows warning suppression from various CMake files into the main CMakeLists.txt, following the conventions of gcc and clang. 2. It fixes some Windows warnings in the source code. Most importantly, it fixes lots of dll warnings by adjusting C10_API to TORCH_API or TORCH_PYTHON_API. There are still some dll warnings because some TORCH_API functions are actually built as part of libtorch_python. Pull Request resolved: https://github.com/pytorch/pytorch/pull/94927 Approved by: https://github.com/malfet
This commit is contained in:
parent
d950f45577
commit
f27e09de04
27 changed files with 92 additions and 118 deletions
|
|
@ -479,13 +479,6 @@ if(MSVC)
|
|||
string(REGEX REPLACE "/Z[iI]" "/Z7" ${flag_var} "${${flag_var}}")
|
||||
endif(${flag_var} MATCHES "/Z[iI]")
|
||||
endif(MSVC_Z7_OVERRIDE)
|
||||
# Turn off warnings on Windows. In an ideal world we'd be warning
|
||||
# clean on Windows too, but this is too much work for our
|
||||
# non-Windows developers.
|
||||
|
||||
# Turn off warnings (Windows build is currently extremely warning
|
||||
# unclean and the warnings aren't telling us anything useful.)
|
||||
string(APPEND ${flag_var} " /w")
|
||||
|
||||
if(${CAFFE2_USE_MSVC_STATIC_RUNTIME})
|
||||
if(${flag_var} MATCHES "/MD")
|
||||
|
|
@ -907,6 +900,35 @@ if(NOT MSVC)
|
|||
append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
|
||||
append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
|
||||
append_cxx_flag_if_supported("-Werror=cast-function-type" CMAKE_CXX_FLAGS)
|
||||
else()
|
||||
# skip unwanted includes from windows.h
|
||||
add_compile_definitions(WIN32_LEAN_AND_MEAN)
|
||||
# Windows SDK broke compatibility since version 25131, but introduced this
|
||||
# define for backward compatibility.
|
||||
add_compile_definitions(_UCRT_LEGACY_INFINITY)
|
||||
# disable min/max macros
|
||||
add_compile_definitions(NOMINMAX)
|
||||
# The source code is in utf-8 encoding
|
||||
append_cxx_flag_if_supported("/utf-8" CMAKE_CXX_FLAGS)
|
||||
# Turn off these warnings on Windows.
|
||||
# destructor was implicitly defined as delete
|
||||
append_cxx_flag_if_supported("/wd4624" CMAKE_CXX_FLAGS)
|
||||
# unknown pragma
|
||||
append_cxx_flag_if_supported("/wd4068" CMAKE_CXX_FLAGS)
|
||||
# unexpected tokens following preprocessor directive - expected a newline
|
||||
append_cxx_flag_if_supported("/wd4067" CMAKE_CXX_FLAGS)
|
||||
# conversion from 'size_t' to 'unsigned int', possible loss of data
|
||||
append_cxx_flag_if_supported("/wd4267" CMAKE_CXX_FLAGS)
|
||||
# no suitable definition provided for explicit template instantiation request
|
||||
append_cxx_flag_if_supported("/wd4661" CMAKE_CXX_FLAGS)
|
||||
# recursive on all control paths, function will cause runtime stack overflow
|
||||
append_cxx_flag_if_supported("/wd4717" CMAKE_CXX_FLAGS)
|
||||
# conversion from '_Ty' to '_Ty', possible loss of data
|
||||
append_cxx_flag_if_supported("/wd4244" CMAKE_CXX_FLAGS)
|
||||
# unsafe use of type 'bool' in operation
|
||||
append_cxx_flag_if_supported("/wd4804" CMAKE_CXX_FLAGS)
|
||||
# inconsistent dll linkage
|
||||
append_cxx_flag_if_supported("/wd4273" CMAKE_CXX_FLAGS)
|
||||
endif()
|
||||
|
||||
if(USE_ASAN)
|
||||
|
|
|
|||
|
|
@ -30,17 +30,17 @@ std::vector<Argument> createArgumentVector(c10::ArrayRef<ArgumentDef> args) {
|
|||
}
|
||||
// This is intentionally a separate function and in a .cpp file
|
||||
// because then the template is smaller and that benefits binary size
|
||||
C10_EXPORT FunctionSchema make_function_schema(std::string&& name, std::string&& overload_name, c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns) {
|
||||
FunctionSchema make_function_schema(std::string&& name, std::string&& overload_name, c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns) {
|
||||
return FunctionSchema(std::move(name), std::move(overload_name), createArgumentVector(arguments), createArgumentVector(returns));
|
||||
}
|
||||
|
||||
C10_EXPORT FunctionSchema make_function_schema(c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns) {
|
||||
FunctionSchema make_function_schema(c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns) {
|
||||
return make_function_schema("", "", arguments, returns);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
C10_EXPORT c10::optional<std::string> findSchemaDifferences(const FunctionSchema& lhs, const FunctionSchema& rhs) {
|
||||
c10::optional<std::string> findSchemaDifferences(const FunctionSchema& lhs, const FunctionSchema& rhs) {
|
||||
if (lhs.arguments().size() != rhs.arguments().size()) {
|
||||
return "The number of arguments is different. " + guts::to_string(lhs.arguments().size()) +
|
||||
" vs " + guts::to_string(rhs.arguments().size()) + ".";
|
||||
|
|
|
|||
|
|
@ -108,8 +108,8 @@ struct createSingleReturn {
|
|||
}
|
||||
};
|
||||
|
||||
C10_API FunctionSchema make_function_schema(std::string&& name, std::string&& overload_name, c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
|
||||
C10_API FunctionSchema make_function_schema(c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
|
||||
TORCH_API FunctionSchema make_function_schema(std::string&& name, std::string&& overload_name, c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
|
||||
TORCH_API FunctionSchema make_function_schema(c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
|
||||
|
||||
/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a
|
||||
/// function. Flattens std::tuple returns into multiple return types
|
||||
|
|
|
|||
|
|
@ -197,7 +197,7 @@ struct TORCH_API CUDAHooksInterface {
|
|||
// for the "..." in a variadic macro"
|
||||
struct TORCH_API CUDAHooksArgs {};
|
||||
|
||||
C10_DECLARE_REGISTRY(CUDAHooksRegistry, CUDAHooksInterface, CUDAHooksArgs);
|
||||
TORCH_DECLARE_REGISTRY(CUDAHooksRegistry, CUDAHooksInterface, CUDAHooksArgs);
|
||||
#define REGISTER_CUDA_HOOKS(clsname) \
|
||||
C10_REGISTER_CLASS(CUDAHooksRegistry, clsname, clsname)
|
||||
|
||||
|
|
|
|||
|
|
@ -60,7 +60,7 @@ struct TORCH_API HIPHooksInterface {
|
|||
// for the "..." in a variadic macro"
|
||||
struct TORCH_API HIPHooksArgs {};
|
||||
|
||||
C10_DECLARE_REGISTRY(HIPHooksRegistry, HIPHooksInterface, HIPHooksArgs);
|
||||
TORCH_DECLARE_REGISTRY(HIPHooksRegistry, HIPHooksInterface, HIPHooksArgs);
|
||||
#define REGISTER_HIP_HOOKS(clsname) \
|
||||
C10_REGISTER_CLASS(HIPHooksRegistry, clsname, clsname)
|
||||
|
||||
|
|
|
|||
|
|
@ -63,7 +63,7 @@ struct TORCH_API MPSHooksInterface {
|
|||
|
||||
struct TORCH_API MPSHooksArgs {};
|
||||
|
||||
C10_DECLARE_REGISTRY(MPSHooksRegistry, MPSHooksInterface, MPSHooksArgs);
|
||||
TORCH_DECLARE_REGISTRY(MPSHooksRegistry, MPSHooksInterface, MPSHooksArgs);
|
||||
#define REGISTER_MPS_HOOKS(clsname) \
|
||||
C10_REGISTER_CLASS(MPSHooksRegistry, clsname, clsname)
|
||||
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ struct TORCH_API ORTHooksInterface {
|
|||
// for the "..." in a variadic macro"
|
||||
struct TORCH_API ORTHooksArgs {};
|
||||
|
||||
C10_DECLARE_REGISTRY(ORTHooksRegistry, ORTHooksInterface, ORTHooksArgs);
|
||||
TORCH_DECLARE_REGISTRY(ORTHooksRegistry, ORTHooksInterface, ORTHooksArgs);
|
||||
#define REGISTER_ORT_HOOKS(clsname) \
|
||||
C10_REGISTER_CLASS(ORTHooksRegistry, clsname, clsname)
|
||||
|
||||
|
|
|
|||
|
|
@ -208,7 +208,7 @@ C10_DECLARE_REGISTRY(C10FlagsRegistry, C10FlagParser, const std::string&);
|
|||
C10_DEFINE_typed_var(std::string, name, default_value, help_str)
|
||||
|
||||
// DECLARE_typed_var should be used in header files and in the global namespace.
|
||||
#define C10_DECLARE_typed_var(type, name) C10_IMPORT extern type FLAGS_##name
|
||||
#define C10_DECLARE_typed_var(type, name) C10_API extern type FLAGS_##name
|
||||
|
||||
#define C10_DECLARE_int(name) C10_DECLARE_typed_var(int, name)
|
||||
#define C10_DECLARE_int32(name) C10_DECLARE_int(name)
|
||||
|
|
|
|||
|
|
@ -207,11 +207,18 @@ class Registerer {
|
|||
// dllexport are mixed, but the warning is fine and linker will be properly
|
||||
// exporting the symbol. Same thing happens in the gflags flag declaration and
|
||||
// definition cases.
|
||||
#define C10_DECLARE_TYPED_REGISTRY( \
|
||||
RegistryName, SrcType, ObjectType, PtrType, ...) \
|
||||
C10_IMPORT ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
||||
RegistryName(); \
|
||||
typedef ::c10::Registerer<SrcType, PtrType<ObjectType>, ##__VA_ARGS__> \
|
||||
#define C10_DECLARE_TYPED_REGISTRY( \
|
||||
RegistryName, SrcType, ObjectType, PtrType, ...) \
|
||||
C10_API ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
||||
RegistryName(); \
|
||||
typedef ::c10::Registerer<SrcType, PtrType<ObjectType>, ##__VA_ARGS__> \
|
||||
Registerer##RegistryName
|
||||
|
||||
#define TORCH_DECLARE_TYPED_REGISTRY( \
|
||||
RegistryName, SrcType, ObjectType, PtrType, ...) \
|
||||
TORCH_API ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
||||
RegistryName(); \
|
||||
typedef ::c10::Registerer<SrcType, PtrType<ObjectType>, ##__VA_ARGS__> \
|
||||
Registerer##RegistryName
|
||||
|
||||
#define C10_DEFINE_TYPED_REGISTRY( \
|
||||
|
|
@ -268,6 +275,10 @@ class Registerer {
|
|||
C10_DECLARE_TYPED_REGISTRY( \
|
||||
RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)
|
||||
|
||||
#define TORCH_DECLARE_REGISTRY(RegistryName, ObjectType, ...) \
|
||||
TORCH_DECLARE_TYPED_REGISTRY( \
|
||||
RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)
|
||||
|
||||
#define C10_DEFINE_REGISTRY(RegistryName, ObjectType, ...) \
|
||||
C10_DEFINE_TYPED_REGISTRY( \
|
||||
RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)
|
||||
|
|
@ -280,6 +291,10 @@ class Registerer {
|
|||
C10_DECLARE_TYPED_REGISTRY( \
|
||||
RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)
|
||||
|
||||
#define TORCH_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \
|
||||
TORCH_DECLARE_TYPED_REGISTRY( \
|
||||
RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)
|
||||
|
||||
#define C10_DEFINE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \
|
||||
C10_DEFINE_TYPED_REGISTRY( \
|
||||
RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)
|
||||
|
|
|
|||
|
|
@ -1319,15 +1319,11 @@ target_include_directories(torch_cpu INTERFACE $<INSTALL_INTERFACE:include>)
|
|||
target_include_directories(torch_cpu PRIVATE ${Caffe2_CPU_INCLUDE})
|
||||
target_include_directories(torch_cpu SYSTEM PRIVATE "${Caffe2_DEPENDENCY_INCLUDE}")
|
||||
|
||||
target_compile_options(torch_cpu PRIVATE "-DCAFFE2_BUILD_MAIN_LIB")
|
||||
target_compile_definitions(torch_cpu PRIVATE CAFFE2_BUILD_MAIN_LIB)
|
||||
if(USE_CUDA)
|
||||
target_compile_options(torch_cuda PRIVATE "-DTORCH_CUDA_BUILD_MAIN_LIB")
|
||||
# NB: This must be target_compile_definitions, not target_compile_options,
|
||||
# as the latter is not respected by nvcc
|
||||
target_compile_definitions(torch_cuda PRIVATE "-DTORCH_CUDA_BUILD_MAIN_LIB")
|
||||
target_compile_definitions(torch_cuda PRIVATE TORCH_CUDA_BUILD_MAIN_LIB)
|
||||
elseif(USE_ROCM)
|
||||
target_compile_options(torch_hip PRIVATE "-DTORCH_HIP_BUILD_MAIN_LIB")
|
||||
target_compile_definitions(torch_hip PRIVATE "-DTORCH_HIP_BUILD_MAIN_LIB")
|
||||
target_compile_definitions(torch_hip PRIVATE TORCH_HIP_BUILD_MAIN_LIB)
|
||||
endif()
|
||||
|
||||
if(USE_EXPERIMENTAL_CUDNN_V8_API)
|
||||
|
|
|
|||
|
|
@ -145,9 +145,11 @@ uint32_t crc32_16bytes_prefetch(const void* data, size_t length, uint32_t previo
|
|||
#ifdef __GNUC__
|
||||
#define PREFETCH(location) __builtin_prefetch(location)
|
||||
#else
|
||||
#ifndef PREFETCH
|
||||
// no prefetching
|
||||
#define PREFETCH(location) ;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// abort if byte order is undefined
|
||||
#ifndef __BYTE_ORDER
|
||||
|
|
|
|||
|
|
@ -84,45 +84,6 @@ if(CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO AND NOT INTERN_BUILD_MOBILE)
|
|||
enable_ubsan()
|
||||
endif()
|
||||
|
||||
# For MSVC,
|
||||
# 1. Remove /Zi, /ZI and /Z7 for Release, MinSizeRel and Default builds
|
||||
# 2. Switch off incremental linking in debug builds
|
||||
# 3. If MSVC_Z7_OVERRIDE is ON, then /Zi and /ZI will be replaced with /Z7
|
||||
# for Debug and RelWithDebInfo builds
|
||||
if(MSVC)
|
||||
# skip unwanted includes from windows.h
|
||||
add_definitions(-DWIN32_LEAN_AND_MEAN)
|
||||
|
||||
# Windows SDK broke compatibility since version 25131, but introduced this define for backward compatibility.
|
||||
add_definitions(-D_UCRT_LEGACY_INFINITY)
|
||||
|
||||
foreach(flag_var
|
||||
CMAKE_C_FLAGS CMAKE_C_FLAGS_RELEASE CMAKE_C_FLAGS_MINSIZEREL
|
||||
CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_MINSIZEREL)
|
||||
if(${flag_var} MATCHES "/Z[iI7]")
|
||||
string(REGEX REPLACE "/Z[iI7]" "" ${flag_var} "${${flag_var}}")
|
||||
endif()
|
||||
endforeach(flag_var)
|
||||
if(MSVC_Z7_OVERRIDE)
|
||||
foreach(flag_var
|
||||
CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELWITHDEBINFO
|
||||
CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELWITHDEBINFO)
|
||||
if(${flag_var} MATCHES "/Z[iI]")
|
||||
string(REGEX REPLACE "/Z[iI]" "/Z7" ${flag_var} "${${flag_var}}")
|
||||
endif()
|
||||
endforeach(flag_var)
|
||||
endif(MSVC_Z7_OVERRIDE)
|
||||
foreach(flag_var
|
||||
CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO
|
||||
CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO
|
||||
CMAKE_SHARED_LINKER_FLAGS_DEBUG CMAKE_STATIC_LINKER_FLAGS_DEBUG
|
||||
CMAKE_EXE_LINKER_FLAGS_DEBUG CMAKE_MODULE_LINKER_FLAGS_DEBUG)
|
||||
if(${flag_var} MATCHES "/INCREMENTAL" AND NOT ${flag_var} MATCHES "/INCREMENTAL:NO")
|
||||
string(REGEX REPLACE "/INCREMENTAL" "/INCREMENTAL:NO" ${flag_var} "${${flag_var}}")
|
||||
endif()
|
||||
endforeach(flag_var)
|
||||
endif(MSVC)
|
||||
|
||||
# ---[ Threads
|
||||
find_package(Threads REQUIRED)
|
||||
if(TARGET Threads::Threads)
|
||||
|
|
|
|||
|
|
@ -429,18 +429,6 @@ function(torch_compile_options libname)
|
|||
${MSVC_RUNTIME_LIBRARY_OPTION}
|
||||
$<$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>:${MSVC_DEBINFO_OPTION}>
|
||||
/EHsc
|
||||
/DNOMINMAX
|
||||
/wd4267
|
||||
/wd4251
|
||||
/wd4522
|
||||
/wd4522
|
||||
/wd4838
|
||||
/wd4305
|
||||
/wd4244
|
||||
/wd4190
|
||||
/wd4101
|
||||
/wd4996
|
||||
/wd4275
|
||||
/bigobj>
|
||||
)
|
||||
else()
|
||||
|
|
|
|||
7
setup.py
7
setup.py
|
|
@ -825,12 +825,7 @@ def configure_extension_build():
|
|||
# /MD links against DLL runtime
|
||||
# and matches the flags set for protobuf and ONNX
|
||||
# /EHsc is about standard C++ exception handling
|
||||
# /DNOMINMAX removes builtin min/max functions
|
||||
# /wdXXXX disables warning no. XXXX
|
||||
extra_compile_args = ['/MD', '/FS', '/EHsc', '/DNOMINMAX',
|
||||
'/wd4267', '/wd4251', '/wd4522', '/wd4522', '/wd4838',
|
||||
'/wd4305', '/wd4244', '/wd4190', '/wd4101', '/wd4996',
|
||||
'/wd4275']
|
||||
extra_compile_args = ['/MD', '/FS', '/EHsc']
|
||||
else:
|
||||
extra_link_args = []
|
||||
extra_compile_args = [
|
||||
|
|
|
|||
|
|
@ -305,7 +305,7 @@ struct IndexError : public PyTorchError {
|
|||
// Translates to Python TypeError
|
||||
struct TypeError : public PyTorchError {
|
||||
using PyTorchError::PyTorchError;
|
||||
TORCH_API TypeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
|
||||
TORCH_PYTHON_API TypeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
|
||||
PyObject* python_type() override {
|
||||
return PyExc_TypeError;
|
||||
}
|
||||
|
|
@ -358,9 +358,9 @@ struct PyWarningHandler {
|
|||
|
||||
public:
|
||||
/// See NOTE [ Conversion Cpp Python Warning ] for noexcept justification
|
||||
TORCH_API PyWarningHandler() noexcept(true);
|
||||
TORCH_PYTHON_API PyWarningHandler() noexcept(true);
|
||||
// NOLINTNEXTLINE(bugprone-exception-escape)
|
||||
TORCH_API ~PyWarningHandler() noexcept(false);
|
||||
TORCH_PYTHON_API ~PyWarningHandler() noexcept(false);
|
||||
|
||||
/** Call if an exception has been thrown
|
||||
|
||||
|
|
|
|||
|
|
@ -1244,11 +1244,7 @@ class WeakTensorRef {
|
|||
}
|
||||
};
|
||||
|
||||
extern "C"
|
||||
#ifdef _WIN32
|
||||
__declspec(dllexport)
|
||||
#endif
|
||||
TORCH_API PyObject* initModule();
|
||||
extern "C" C10_EXPORT PyObject* initModule();
|
||||
// separate decl and defn for msvc error C2491
|
||||
PyObject* initModule() {
|
||||
HANDLE_TH_ERRORS
|
||||
|
|
|
|||
|
|
@ -23,11 +23,11 @@ struct THPVariable {
|
|||
PyObject* backward_hooks = nullptr;
|
||||
};
|
||||
|
||||
TORCH_API void registerPythonTensorClass(
|
||||
TORCH_PYTHON_API void registerPythonTensorClass(
|
||||
const std::string& device,
|
||||
PyObject* python_tensor_class);
|
||||
|
||||
TORCH_API void activateCUDATrace();
|
||||
TORCH_PYTHON_API void activateCUDATrace();
|
||||
|
||||
TORCH_PYTHON_API extern PyObject* THPVariableClass;
|
||||
TORCH_PYTHON_API extern PyObject* ParameterClass;
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ class TORCH_API GlooDeviceFactory {
|
|||
const std::string& hostname);
|
||||
};
|
||||
|
||||
C10_DECLARE_SHARED_REGISTRY(
|
||||
TORCH_DECLARE_SHARED_REGISTRY(
|
||||
GlooDeviceRegistry,
|
||||
::gloo::transport::Device,
|
||||
const std::string&, /* interface */
|
||||
|
|
|
|||
|
|
@ -71,5 +71,5 @@ class TORCH_API Timer {
|
|||
}
|
||||
};
|
||||
|
||||
C10_DECLARE_TYPED_REGISTRY(TimerRegistry, c10::DeviceType, Timer, std::unique_ptr, c10::Device);
|
||||
TORCH_DECLARE_TYPED_REGISTRY(TimerRegistry, c10::DeviceType, Timer, std::unique_ptr, c10::Device);
|
||||
} // namespace c10d
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ struct TORCH_API NNCKernel {
|
|||
virtual int execute(void** /* args */) = 0;
|
||||
};
|
||||
|
||||
C10_DECLARE_REGISTRY(NNCKernelRegistry, NNCKernel);
|
||||
TORCH_DECLARE_REGISTRY(NNCKernelRegistry, NNCKernel);
|
||||
|
||||
#define REGISTER_NNC_KERNEL(id, kernel, ...) \
|
||||
extern "C" { \
|
||||
|
|
|
|||
|
|
@ -59,12 +59,12 @@ namespace jit {
|
|||
|
||||
void clear_registered_instances(void* ptr);
|
||||
|
||||
TORCH_API IValue toIValue(
|
||||
TORCH_PYTHON_API IValue toIValue(
|
||||
py::handle obj,
|
||||
const TypePtr& type,
|
||||
c10::optional<int32_t> N = c10::nullopt);
|
||||
|
||||
TORCH_API py::object toPyObject(IValue ivalue);
|
||||
TORCH_PYTHON_API py::object toPyObject(IValue ivalue);
|
||||
|
||||
// Hack to overload the behavior of toIValue to accept Python
|
||||
// numbers in places where a Tensor is expected
|
||||
|
|
@ -701,10 +701,6 @@ inline void guardAgainstNamedTensor(const T& var) {
|
|||
"workaround please drop names via `tensor = tensor.rename(None)`.");
|
||||
}
|
||||
|
||||
// Defined in pybind_utils.cpp to break a circular dependency with
|
||||
// python_ivalue.h
|
||||
IValue toIValue(py::handle obj, const TypePtr& type, c10::optional<int32_t> N);
|
||||
|
||||
// Extract custom class registered with torchbind
|
||||
template <typename T>
|
||||
c10::intrusive_ptr<T> toCustomClass(py::handle obj) {
|
||||
|
|
@ -1095,18 +1091,18 @@ inline py::object invokeScriptMethodFromPython(
|
|||
});
|
||||
}
|
||||
|
||||
TORCH_API std::pair<std::shared_ptr<Operator>, Stack> getOpWithStack(
|
||||
TORCH_PYTHON_API std::pair<std::shared_ptr<Operator>, Stack> getOpWithStack(
|
||||
const std::vector<std::shared_ptr<Operator>>& operations,
|
||||
py::args args,
|
||||
const py::kwargs& kwargs);
|
||||
|
||||
TORCH_API py::object invokeOperatorFromPython(
|
||||
TORCH_PYTHON_API py::object invokeOperatorFromPython(
|
||||
const std::vector<std::shared_ptr<Operator>>& operations,
|
||||
py::args args,
|
||||
const py::kwargs& kwargs,
|
||||
c10::optional<c10::DispatchKey> dk = c10::nullopt);
|
||||
|
||||
TORCH_API py::object _get_operation_for_overload_or_packet(
|
||||
TORCH_PYTHON_API py::object _get_operation_for_overload_or_packet(
|
||||
const std::vector<std::shared_ptr<Operator>>& operations,
|
||||
Symbol symbol,
|
||||
py::args args,
|
||||
|
|
|
|||
|
|
@ -32,7 +32,7 @@ struct SROperatorFunctor {
|
|||
virtual ~SROperatorFunctor() = default;
|
||||
};
|
||||
|
||||
C10_DECLARE_REGISTRY(SROperatorRegistry, SROperatorFunctor);
|
||||
TORCH_DECLARE_REGISTRY(SROperatorRegistry, SROperatorFunctor);
|
||||
|
||||
#define REGISTER_OPERATOR_FUNCTOR(name, id, ...) \
|
||||
struct SROperatorFunctor_##id : public SROperatorFunctor { \
|
||||
|
|
@ -43,7 +43,7 @@ C10_DECLARE_REGISTRY(SROperatorRegistry, SROperatorFunctor);
|
|||
}; \
|
||||
C10_REGISTER_CLASS(SROperatorRegistry, name, SROperatorFunctor_##id);
|
||||
|
||||
C10_DECLARE_REGISTRY(SRNativeOperatorRegistry, SROperatorFunctor);
|
||||
TORCH_DECLARE_REGISTRY(SRNativeOperatorRegistry, SROperatorFunctor);
|
||||
#define REGISTER_NATIVE_OPERATOR_FUNCTOR(name, id, ...) \
|
||||
struct SRNativeOperatorFunctor_##id : public SROperatorFunctor { \
|
||||
const SROpFunctor fn = __VA_ARGS__; \
|
||||
|
|
|
|||
|
|
@ -204,7 +204,7 @@ void AccessInfo::dumpDOT(std::ostream& os) const {
|
|||
os << "n" << id_ << " [\n";
|
||||
os << "label = \"" << AccessToString(type_) << " (#" << id_ << ")\\n";
|
||||
os << "buf : " << *var_ << "\\n";
|
||||
os << "bounds : \[";
|
||||
os << "bounds : [";
|
||||
if (!bounds_.empty()) {
|
||||
for (size_t i = 0; i < bounds_.size() - 1; ++i) {
|
||||
os << "(" << *bounds_[i].start << ", " << *bounds_[i].end << "), ";
|
||||
|
|
|
|||
|
|
@ -76,7 +76,9 @@ class TORCH_API Dtype {
|
|||
}
|
||||
|
||||
private:
|
||||
friend std::ostream& operator<<(std::ostream& stream, const Dtype& dtype);
|
||||
friend TORCH_API std::ostream& operator<<(
|
||||
std::ostream& stream,
|
||||
const Dtype& dtype);
|
||||
ScalarType scalar_type_;
|
||||
int lanes_; // the width of the element for a vector time
|
||||
};
|
||||
|
|
|
|||
|
|
@ -1,12 +1,12 @@
|
|||
#pragma once
|
||||
#include <c10/macros/Export.h>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <torch/csrc/Export.h>
|
||||
#include <torch/csrc/utils/pybind.h>
|
||||
|
||||
namespace torch {
|
||||
namespace lazy {
|
||||
|
||||
TORCH_API void initLazyBindings(PyObject* module);
|
||||
TORCH_PYTHON_API void initLazyBindings(PyObject* module);
|
||||
|
||||
} // namespace lazy
|
||||
} // namespace torch
|
||||
|
|
|
|||
|
|
@ -1,15 +1,15 @@
|
|||
#pragma once
|
||||
#include <c10/macros/Export.h>
|
||||
#include <c10/util/Optional.h>
|
||||
#include <torch/csrc/Export.h>
|
||||
#include <torch/csrc/lazy/core/ir_metadata.h>
|
||||
#include <vector>
|
||||
|
||||
namespace torch {
|
||||
namespace lazy {
|
||||
|
||||
c10::optional<SourceLocation> TORCH_API GetPythonFrameTop();
|
||||
c10::optional<SourceLocation> TORCH_PYTHON_API GetPythonFrameTop();
|
||||
|
||||
std::vector<SourceLocation> TORCH_API GetPythonFrames();
|
||||
std::vector<SourceLocation> TORCH_PYTHON_API GetPythonFrames();
|
||||
|
||||
} // namespace lazy
|
||||
} // namespace torch
|
||||
|
|
|
|||
|
|
@ -46,6 +46,7 @@
|
|||
#include <torch/csrc/Dtype.h>
|
||||
#include <torch/csrc/DynamicTypes.h>
|
||||
#include <torch/csrc/Exceptions.h>
|
||||
#include <torch/csrc/Export.h>
|
||||
#include <torch/csrc/Generator.h>
|
||||
#include <torch/csrc/Layout.h>
|
||||
#include <torch/csrc/MemoryFormat.h>
|
||||
|
|
@ -1127,7 +1128,7 @@ auto handle_torch_function(
|
|||
// PythonArgParser to get overloaded_args.
|
||||
enum class TorchFunctionName { TorchFunction, TorchDispatch };
|
||||
|
||||
auto TORCH_API handle_torch_function_no_python_arg_parser(
|
||||
auto TORCH_PYTHON_API handle_torch_function_no_python_arg_parser(
|
||||
at::ArrayRef<py::handle> overloaded_args,
|
||||
PyObject* args,
|
||||
PyObject* kwargs,
|
||||
|
|
|
|||
Loading…
Reference in a new issue