Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/25252

Our model going forward for extensions will be that you will have to get an allocation of an ID in our system. This is how things work in practice today; we're just simplifying our underlying registration, since there is no need to have distributed registration. There are some codemods in this diff:

```
codemod --extensions cpp,h,cc,cuh,py,in --exclude-paths=c10/core/TensorTypeId.h '([A-Za-z]+?)TensorId\(\)' 'TensorTypeId::\1TensorId'
codemod --extensions cpp,h,cc,cuh,py,in 'TensorTypeIds::undefined\(\)' 'TensorTypeId::UndefinedTensorId'
codemod --extensions cpp 'TensorType1\(\)' 'TensorTypeId::CPUTensorId'
codemod --extensions cpp 'TensorType2\(\)' 'TensorTypeId::CUDATensorId'
codemod --extensions cpp 'TensorType3\(\)' 'TensorTypeId::XLATensorId'
codemod --extensions cpp 'TensorType1' 'CPUTensorId'
codemod --extensions cpp 'TensorType2' 'CUDATensorId'
codemod --extensions cpp 'TensorType3' 'XLATensorId'
```

The main hand-written changes are in c10/core/TensorTypeId.h. Other manual fixes:

- aten/src/ATen/core/op_registration/op_registration.cpp - stop using std::string operator+
- aten/src/ATen/function_wrapper.py - handle a hardcoded TypeId() that wasn't caught by the codemod
- torch/csrc/tensor/python_tensor.h - fix a now-incorrect forward declaration of TensorTypeId
- aten/src/ATen/core/op_registration/ - remove out-of-line registration

Differential Revision: D17072001
Test Plan: ossci and sandcastle
Pulled By: ezyang
fbshipit-source-id: c641515fd0604c045c54fbb1d6b1b950f45e89d1
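Concretely, the codemods move call sites from per-backend free functions like `CPUTensorId()` to scoped constants on a single, centrally maintained type. A minimal sketch of the shape c10/core/TensorTypeId.h takes after this change (illustrative only: the enumerator names come from the codemods above, everything else is an assumption about the header):

```cpp
// Sketch: every backend ID is allocated in one central enum,
// replacing the old distributed registration of ID functions.
#include <cstdint>

enum class TensorTypeId : uint8_t {
  UndefinedTensorId,  // was TensorTypeIds::undefined()
  CPUTensorId,        // was CPUTensorId()
  CUDATensorId,       // was CUDATensorId()
  XLATensorId,        // was XLATensorId()
  ComplexCPUTensorId  // used by the extension file below
};
```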
56 lines · 1.8 KiB · C++
```cpp
#include <torch/extension.h>

#include <c10/core/Allocator.h>
#include <ATen/CPUGenerator.h>
#include <ATen/DeviceGuard.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Utils.h>
#include <ATen/WrapDimUtils.h>
#include <c10/util/Half.h>
#include <c10/core/TensorImpl.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <c10/util/Optional.h>
#include <ATen/core/ATenDispatch.h>

#include <cstddef>
#include <functional>
#include <memory>
#include <utility>

#include <ATen/Config.h>

namespace at {

static Tensor empty_complex(IntArrayRef size, const TensorOptions& options, c10::optional<c10::MemoryFormat> optional_memory_format) {
  TORCH_CHECK(!optional_memory_format.has_value(), "memory format is not supported");
  AT_ASSERT(options.device().is_cpu());

  for (auto x : size) {
    TORCH_CHECK(x >= 0, "Trying to create tensor using size with negative dimension: ", size);
  }
  auto* allocator = at::getCPUAllocator();
  int64_t nelements = at::prod_intlist(size);
  auto dtype = options.dtype();
  auto storage_impl = c10::make_intrusive<StorageImpl>(
      dtype,
      nelements,
      allocator->allocate(nelements * dtype.itemsize()),
      allocator,
      /*resizable=*/true);

  // Tag the tensor with the centrally allocated ComplexCPUTensorId so the
  // dispatcher can route calls to this backend.
  auto tensor = detail::make_tensor<TensorImpl>(storage_impl, at::TensorTypeId::ComplexCPUTensorId);
  // Default TensorImpl has size [0]
  if (size.size() != 1 || size[0] != 0) {
    tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
  }
  return tensor;
}

// Register empty_complex as the ComplexCPU implementation of aten::empty.
static auto& complex_empty_registration = globalATenDispatch().registerOp(
    Backend::ComplexCPU,
    "aten::empty.memory_format(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor",
    &empty_complex);

} // namespace at

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {}
```
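For reference, here is a hedged sketch of how the registered kernel might be exercised once this translation unit is linked against ATen. The `at::kComplexFloat` constant and the dispatch behavior of `at::empty` are assumptions about the contemporary ATen API, not something this file establishes:

```cpp
// Minimal driver, assuming the static registration above has run and
// ATen headers/libraries are available at build time.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  // Requesting a complex dtype on CPU should make the dispatcher compute
  // Backend::ComplexCPU, the backend empty_complex was registered for.
  at::Tensor t = at::empty({2, 2}, at::TensorOptions().dtype(at::kComplexFloat));
  std::cout << "sizes: " << t.sizes() << std::endl;
  return 0;
}
```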