pytorch/test/cpp_extensions/complex_registration_extension.cpp
Sebastian Messmer ee920b92c4 Move complex extension test to c10 (#28208)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/28208

Backend extensions should call torch::RegisterOperators, not globalATenDispatch().
If the op is still on globalATenDispatch, then torch::RegisterOperators will do the right thing and forward it to globalATenDispatch.
ghstack-source-id: 92436988

Test Plan: waitforsandcastle

Differential Revision: D17975369

fbshipit-source-id: 0d4bd5e4e5b86e6dcfba527a7d11c25508896ac1
2019-10-23 01:33:47 -07:00

59 lines
2 KiB
C++

#include <torch/extension.h>
#include <c10/core/Allocator.h>
#include <ATen/CPUGenerator.h>
#include <ATen/DeviceGuard.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Utils.h>
#include <ATen/WrapDimUtils.h>
#include <c10/util/Half.h>
#include <c10/core/TensorImpl.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <c10/util/Optional.h>
#include <ATen/core/ATenDispatch.h>
#include <ATen/core/op_registration/op_registration.h>
#include <cstddef>
#include <functional>
#include <memory>
#include <utility>
#include <ATen/Config.h>
namespace at {
namespace {
// Allocates an uninitialized complex tensor on the CPU with the requested
// sizes. Registered below as the ComplexCPUTensorId backend kernel for
// aten::empty.memory_format.
//
// size:    requested dimensions; every entry must be non-negative.
// options: must name a CPU device; its dtype determines the per-element size.
// optional_memory_format: must be empty — memory formats are not supported
//                         by this test backend.
Tensor empty_complex(IntArrayRef size, const TensorOptions & options, c10::optional<c10::MemoryFormat> optional_memory_format) {
  // Fixed: the original TORCH_CHECK was missing its trailing semicolon and
  // only compiled by accident of the macro's expansion.
  TORCH_CHECK(!optional_memory_format.has_value(), "memory format is not supported");
  AT_ASSERT(options.device().is_cpu());
  for (const auto dim : size) {
    TORCH_CHECK(dim >= 0, "Trying to create tensor using size with negative dimension: ", size);
  }

  // Allocate raw storage: element count times bytes per element. The storage
  // is left uninitialized, matching aten::empty semantics.
  auto* allocator = at::getCPUAllocator();
  const int64_t nelements = at::prod_intlist(size);
  const auto dtype = options.dtype();
  auto storage_impl = c10::make_intrusive<StorageImpl>(
      dtype,
      nelements,
      allocator->allocate(nelements * dtype.itemsize()),
      allocator,
      /*resizable=*/true);

  // Wrap the storage in a TensorImpl tagged with the complex-CPU dispatch id
  // so subsequent ops dispatch back to this extension.
  auto tensor = detail::make_tensor<TensorImpl>(storage_impl, at::TensorTypeId::ComplexCPUTensorId);
  // A default-constructed TensorImpl already has size [0]; only overwrite the
  // sizes when the caller asked for something different.
  if (size.size() != 1 || size[0] != 0) {
    tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
  }
  return tensor;
}
}
// Static registration object: its constructor runs at library-load time and
// registers empty_complex as the implementation of aten::empty.memory_format
// for the ComplexCPUTensorId dispatch key (backend extensions should use
// torch::RegisterOperators rather than globalATenDispatch directly).
static auto complex_empty_registration = torch::RegisterOperators()
.op(torch::RegisterOperators::options()
// The schema string must match the core aten::empty.memory_format
// declaration exactly, or registration will fail at load time.
.schema("aten::empty.memory_format(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor")
// Unboxed-only kernel registration — presumably because this schema cannot
// go through the boxed calling convention yet; confirm against the
// op_registration API if this is updated.
.impl_unboxedOnlyKernel<decltype(empty_complex), &empty_complex>(TensorTypeId::ComplexCPUTensorId)
// Aliasing behavior is derived from the schema string above.
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA));
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { }