2019-02-01 18:55:00 +00:00
|
|
|
#include <torch/extension.h>
|
|
|
|
|
|
|
|
|
|
#include <ATen/ExtensionBackendRegistration.h>
|
|
|
|
|
|
|
|
|
|
using namespace at;
|
|
|
|
|
|
|
|
|
|
// Records which override was dispatched most recently
// (0 = zeros, 1 = add, 2 = sum, 3 = kl_div, 4 = kl_div_backward);
// read back from Python via get_test_int().
static int test_int;
|
|
|
|
|
|
2019-02-15 21:44:18 +00:00
|
|
|
// Builds an empty MSNPU tensor that carries only dtype/device metadata.
// The storage has a null data pointer and no allocator, so the result can
// never hold real data -- it exists purely so the test overrides below can
// return something with the right type tag.
Tensor get_dtype_tensor(caffe2::TypeMeta dtype) {
  Storage storage(
      dtype,
      /*size=*/0,
      at::DataPtr(nullptr, Device(DeviceType::MSNPU, 0)),
      /*allocator=*/nullptr,
      /*resizable=*/false);
  auto impl = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(
      std::move(storage), MSNPUTensorId());
  return Tensor(std::move(impl));
}
|
|
|
|
|
|
Rename IntList to IntArrayRef. (#16751)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/16751
This was made more complicated by the fact that ivalue::IntList
is a thing. So I had to fix all of the sites where we referring
to IValue post facto.
The following codemods were run, in this order:
```
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in IntList IntArrayRef
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in IntArrayRef::create IntList::create
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in ivalue::IntArrayRef ivalue::IntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in Tag::IntArrayRef Tag::IntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in isIntArrayRef isIntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in toIntArrayRef toIntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in 'Shared<IntArrayRef>' 'Shared<IntList>'
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in 'intrusive_ptr<IntArrayRef>' 'intrusive_ptr<IntList>'
```
Some manual fixups were done afterwards; they can be reviewed separately
at https://github.com/pytorch/pytorch/pull/16752
Reviewed By: dzhulgakov
Differential Revision: D13954363
fbshipit-source-id: b5c40aacba042402155a2f5a229fa6db7992ac64
2019-02-05 22:39:43 +00:00
|
|
|
Tensor zeros_override(IntArrayRef size, const TensorOptions & options) {
|
2019-02-01 18:55:00 +00:00
|
|
|
test_int = 0;
|
2019-02-15 21:44:18 +00:00
|
|
|
return get_dtype_tensor(options.dtype());
|
2019-02-01 18:55:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// MSNPU override for at::add(Tensor, Tensor, Scalar). Inputs b and c are
// ignored; the stub result takes its dtype from a.
Tensor add_override(const Tensor & a, const Tensor & b , Scalar c) {
  test_int = 1;  // mark add as the last op dispatched to this backend
  const auto result_dtype = a.dtype();
  return get_dtype_tensor(result_dtype);
}
|
|
|
|
|
|
2019-07-04 02:29:08 +00:00
|
|
|
Tensor sum_override(const Tensor & self, ScalarType dtype) {
|
2019-02-01 18:55:00 +00:00
|
|
|
test_int = 2;
|
2019-02-23 02:33:18 +00:00
|
|
|
return get_dtype_tensor(self.dtype());
|
2019-02-01 18:55:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// needed for sum backwards
|
Rename IntList to IntArrayRef. (#16751)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/16751
This was made more complicated by the fact that ivalue::IntList
is a thing. So I had to fix all of the sites where we referring
to IValue post facto.
The following codemods were run, in this order:
```
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in IntList IntArrayRef
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in IntArrayRef::create IntList::create
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in ivalue::IntArrayRef ivalue::IntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in Tag::IntArrayRef Tag::IntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in isIntArrayRef isIntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in toIntArrayRef toIntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in 'Shared<IntArrayRef>' 'Shared<IntList>'
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in 'intrusive_ptr<IntArrayRef>' 'intrusive_ptr<IntList>'
```
Some manual fixups were done afterwards; they can be reviewed separately
at https://github.com/pytorch/pytorch/pull/16752
Reviewed By: dzhulgakov
Differential Revision: D13954363
fbshipit-source-id: b5c40aacba042402155a2f5a229fa6db7992ac64
2019-02-05 22:39:43 +00:00
|
|
|
Tensor expand_override(const Tensor & self, IntArrayRef size, bool implicit) {
|
2019-02-23 02:33:18 +00:00
|
|
|
return get_dtype_tensor(self.dtype());
|
2019-02-01 18:55:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// MSNPU override for at::kl_div. target and reduction are ignored; the
// stub result takes its dtype from self.
Tensor kl_div_override(
    const Tensor & self, const Tensor & target, int64_t reduction) {
  test_int = 3;  // mark kl_div as the last op dispatched to this backend
  const auto result_dtype = self.dtype();
  return get_dtype_tensor(result_dtype);
}
|
|
|
|
|
|
|
|
|
|
// MSNPU override for at::kl_div_backward. Only self's dtype matters for
// the stub result; the remaining arguments are ignored.
Tensor kl_div_backward_override(
    const Tensor & grad_output,
    const Tensor & self,
    const Tensor & target,
    int64_t reduction) {
  test_int = 4;  // mark kl_div_backward as the last op dispatched here
  const auto result_dtype = self.dtype();
  return get_dtype_tensor(result_dtype);
}
|
|
|
|
|
|
Remove Variable::Impl and DifferentiableViewImpl (#17072)
Summary:
As part of the Variable/Tensor merge work: https://github.com/pytorch/pytorch/issues/13638, we make the following changes in this PR:
1. Remove the `Variable::Impl` class and the `DifferentiableViewImpl` class
2. Change all `Variable.data()` call sites to either use `Variable` directly, or use `Variable.tensor_data()`
3. Remove `Variable.data()` API
3. Add `Variable.variable_data()` that matches `tensor.data` in Python API, which creates a new `Variable` that shares the same storage and tensor metadata with the original `Variable`, but with a completely new autograd history.
After this PR, Variable doesn't wrap a Tensor internally anymore, and both Variable and Tensor use the same TensorImpl class as its `impl_`. The only difference is that Variable always has AutogradMeta in its TensorImpl, but Tensor doesn't.
**Note that this PR is BC-breaking in the following use cases:**
**Use Case 1:**
Previously, `x.data = y` works even if `x` and `y` are of different TensorImpl type (e.g. `x` is a CPU dense tensor whose impl is of type TensorImpl, while `y` is a CPU sparse tensor whose impl is of type SparseTensorImpl). However, after this PR, `x.data = y` doesn't work anymore if `x` and `y` are of different TensorImpl type, because the underlying implementation `variable.set_data(tensor)` no longer works if `variable` and `tensor` have different TensorImpl type.
**Use Case 2:**
If a tensor `x`'s `grad` is sparse, accumulating dense gradients to `x` will change the tensor that `x.grad` is pointing to. This is better illustrated with the following example:
```python
params = torch.tensor([1.5, 1.5]).requires_grad_()
with torch.no_grad():
# Change gradient to a sparse tensor
params.grad = torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.]))
grad_saved = params.grad
params.backward(torch.tensor([1.5, 1.5]))
assert id(grad_saved) == id(params.grad) # This will fail after this PR
```
The assertion in the last line will fail after this PR, because adding dense gradients to sparse gradients will change the `params.grad` tensor reference.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/17072
Differential Revision: D14075257
Pulled By: yf225
fbshipit-source-id: 0e681df641270dea586042dd26db59f2e76b5957
2019-05-24 04:03:29 +00:00
|
|
|
// ones_like is needed for autograd backwards
|
2019-02-01 18:55:00 +00:00
|
|
|
Tensor ones_like_override(const Tensor & self, const TensorOptions & options) {
|
2019-02-23 02:33:18 +00:00
|
|
|
return get_dtype_tensor(options.dtype());
|
2019-02-01 18:55:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Registers every MSNPU override above with the extension backend
// dispatcher, keyed by its ATen schema string. Exposed to Python via the
// pybind module below; the overrides only take effect after this runs.
void init_msnpu_extension() {
  register_extension_backend_op(
    Backend::MSNPU,
    "zeros(IntArrayRef size, TensorOptions options) -> Tensor", &zeros_override);
  register_extension_backend_op(
    Backend::MSNPU,
    "add(Tensor self, Tensor other, Scalar alpha) -> Tensor", &add_override);
  register_extension_backend_op(
    Backend::MSNPU,
    "sum(Tensor self, ScalarType dtype) -> Tensor", &sum_override);
  register_extension_backend_op(
    Backend::MSNPU,
    // expand is needed for sum backwards (see expand_override above)
    "expand(Tensor self, IntArrayRef size, bool implicit) -> Tensor",
    &expand_override);
  register_extension_backend_op(
    Backend::MSNPU,
    "kl_div(Tensor self, Tensor target, int64_t reduction) -> Tensor",
    &kl_div_override);
  register_extension_backend_op(
    Backend::MSNPU,
    "kl_div_backward(Tensor grad_output, Tensor self, Tensor target, int64_t reduction) -> Tensor",
    &kl_div_backward_override);
  register_extension_backend_op(
    Backend::MSNPU,
    // ones_like is needed for autograd backwards (see ones_like_override)
    "ones_like(Tensor self, TensorOptions options) -> Tensor",
    &ones_like_override);
}
|
|
|
|
|
|
2019-03-20 20:47:41 +00:00
|
|
|
// TODO: Extend this to exercise multi-device setting. In that case,
|
|
|
|
|
// we need to add a thread local variable to track the current device.
|
|
|
|
|
struct MSNPUGuardImpl final : public c10::impl::DeviceGuardImplInterface {
|
|
|
|
|
static constexpr DeviceType static_type = DeviceType::MSNPU;
|
|
|
|
|
MSNPUGuardImpl() {}
|
|
|
|
|
MSNPUGuardImpl(DeviceType t) {
|
|
|
|
|
AT_ASSERT(t == DeviceType::MSNPU);
|
|
|
|
|
}
|
|
|
|
|
DeviceType type() const override {
|
|
|
|
|
return DeviceType::MSNPU;
|
|
|
|
|
}
|
|
|
|
|
Device exchangeDevice(Device d) const override {
|
|
|
|
|
AT_ASSERT(d.type() == DeviceType::MSNPU);
|
|
|
|
|
AT_ASSERT(d.index() == 0);
|
|
|
|
|
return d;
|
|
|
|
|
}
|
|
|
|
|
Device getDevice() const override {
|
|
|
|
|
return Device(DeviceType::MSNPU, 0);
|
|
|
|
|
}
|
|
|
|
|
void setDevice(Device d) const override {
|
|
|
|
|
AT_ASSERT(d.type() == DeviceType::MSNPU);
|
|
|
|
|
AT_ASSERT(d.index() == 0);
|
|
|
|
|
}
|
|
|
|
|
void uncheckedSetDevice(Device d) const noexcept override {
|
|
|
|
|
}
|
|
|
|
|
Stream getStream(Device d) const noexcept override {
|
|
|
|
|
return Stream(Stream::DEFAULT, Device(DeviceType::MSNPU, 0));
|
|
|
|
|
}
|
|
|
|
|
Stream exchangeStream(Stream s) const noexcept override {
|
|
|
|
|
return Stream(Stream::DEFAULT, Device(DeviceType::MSNPU, 0));
|
|
|
|
|
}
|
2019-03-26 16:42:41 +00:00
|
|
|
DeviceIndex deviceCount() const noexcept override {
|
2019-03-20 20:47:41 +00:00
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
// Out-of-line definition of the static constexpr member (required for
// ODR-use under pre-C++17 rules).
constexpr DeviceType MSNPUGuardImpl::static_type;

// Register the guard implementation so c10 device guards work for MSNPU.
C10_REGISTER_GUARD_IMPL(MSNPU, MSNPUGuardImpl);
|
|
|
|
|
|
2019-02-01 18:55:00 +00:00
|
|
|
int get_test_int() {
|
|
|
|
|
return test_int;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Python entry points for the test extension.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  // Query which override ran last.
  m.def("get_test_int", &get_test_int);
  // Register all MSNPU op overrides with the dispatcher.
  m.def("init_msnpu_extension", &init_msnpu_extension);
}
|