From 3aeb70db0be11c20b02c28c29e0c8672daf283de Mon Sep 17 00:00:00 2001
From: Mike Ruberry
Date: Sun, 30 Aug 2020 15:42:19 -0700
Subject: [PATCH] Documents sub properly, adds subtract alias (#43850)

Summary:
`torch.sub` was undocumented, so this PR adds documentation for it, analogous
to `torch.add`'s documentation, and adds `torch.subtract` as an alias for
`torch.sub`. The alias comes from NumPy (see
https://numpy.org/doc/stable/reference/generated/numpy.subtract.html?highlight=subtract#numpy.subtract).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/43850

Reviewed By: ngimel

Differential Revision: D23416908

Pulled By: mruberry

fbshipit-source-id: 6c4d2ebaf6ecae91f3a6efe484ce6c4dad96f016
---
 aten/src/ATen/core/aten_interned_strings.h |  2 -
 aten/src/ATen/core/interned_strings.h      |  4 ++
 aten/src/ATen/native/BinaryOps.cpp         | 49 +++++++++++++++-------
 aten/src/ATen/native/native_functions.yaml | 22 +++++++++-
 docs/source/tensors.rst                    |  2 +
 docs/source/torch.rst                      |  2 +
 test/test_op_aliases.py                    |  8 ++++
 torch/_tensor_docs.py                      | 24 ++++++-----
 torch/_torch_docs.py                       | 34 +++++++++++++++
 torch/csrc/jit/passes/normalize_ops.cpp    |  1 +
 torch/overrides.py                         |  1 +
 11 files changed, 122 insertions(+), 27 deletions(-)

diff --git a/aten/src/ATen/core/aten_interned_strings.h b/aten/src/ATen/core/aten_interned_strings.h
index e2d2cd3675b..57a36b135ea 100644
--- a/aten/src/ATen/core/aten_interned_strings.h
+++ b/aten/src/ATen/core/aten_interned_strings.h
@@ -654,8 +654,6 @@ _(aten, stft) \
 _(aten, storage_offset) \
 _(aten, stride) \
 _(aten, strides) \
-_(aten, sub) \
-_(aten, sub_) \
 _(aten, rsub) \
 _(aten, sum) \
 _(aten, sum_to_size) \
diff --git a/aten/src/ATen/core/interned_strings.h b/aten/src/ATen/core/interned_strings.h
index d543260239a..9738bfaabb8 100644
--- a/aten/src/ATen/core/interned_strings.h
+++ b/aten/src/ATen/core/interned_strings.h
@@ -214,6 +214,10 @@ namespace c10 {
   _(aten, list) \
   _(aten, wait) \
   _(aten, save) \
+  _(aten, sub) \
+  _(aten, sub_) \
+  _(aten, subtract) \
+  _(aten, subtract_) \
   _(aten, keys) \
   _(aten, ord) \
   _(aten, chr) \
diff --git a/aten/src/ATen/native/BinaryOps.cpp b/aten/src/ATen/native/BinaryOps.cpp
index ea12fd86bc3..bccba591a52 100644
--- a/aten/src/ATen/native/BinaryOps.cpp
+++ b/aten/src/ATen/native/BinaryOps.cpp
@@ -48,6 +48,12 @@ DEFINE_DISPATCH(lcm_stub);
 DEFINE_DISPATCH(hypot_stub);
 DEFINE_DISPATCH(nextafter_stub);
 
+static Tensor wrapped_scalar_tensor(Scalar scalar) {
+  auto tensor = scalar_to_tensor(scalar);
+  tensor.unsafeGetTensorImpl()->set_wrapped_number(true);
+  return tensor;
+}
+
 Tensor& add_out(Tensor& result, const Tensor& self, const Tensor& other, Scalar alpha) {
   auto iter = TensorIterator::binary_op(result, self, other);
   alpha_check(iter.dtype(), alpha);
@@ -275,6 +281,35 @@ Tensor& sub_(Tensor& self, const Tensor& other, Scalar alpha) {
   return native::sub_out(self, self, other, alpha);
 }
 
+Tensor sub(const Tensor& self, Scalar other, Scalar alpha) {
+  return native::sub(self, wrapped_scalar_tensor(other), alpha);
+}
+
+Tensor& sub_(Tensor& self, Scalar other, Scalar alpha) {
+  return native::sub_(self, wrapped_scalar_tensor(other), alpha);
+}
+
+// subtract, alias for sub
+Tensor& subtract_out(Tensor& result, const Tensor& self, const Tensor& other, Scalar alpha) {
+  return at::sub_out(result, self, other, alpha);
+}
+
+Tensor subtract(const Tensor& self, const Tensor& other, Scalar alpha) {
+  return self.sub(other, alpha);
+}
+
+Tensor& subtract_(Tensor& self, const Tensor& other, Scalar alpha) {
+  return self.sub_(other, alpha);
+}
+
+Tensor subtract(const Tensor& self, Scalar other, Scalar alpha) {
+  return self.sub(other, alpha);
+}
+
+Tensor& subtract_(Tensor& self, Scalar other, Scalar alpha) {
+  return self.sub_(other, alpha);
+}
+
 Tensor& sigmoid_backward_out(Tensor& result, const Tensor& grad_output, const Tensor& output) {
   auto iter = TensorIterator::binary_op(result, grad_output, output);
   sigmoid_backward_stub(iter.device_type(), iter);
@@ -346,12 +381,6 @@ Tensor& atan2_(Tensor& self, const Tensor& other) {
 // types (int, float, etc.) to Tensor (only to Scalar). They're not exposed
 // to Python.
 
-static Tensor wrapped_scalar_tensor(Scalar scalar) {
-  auto tensor = scalar_to_tensor(scalar);
-  tensor.unsafeGetTensorImpl()->set_wrapped_number(true);
-  return tensor;
-}
-
 static void check_convert(Scalar scalar, ScalarType scalarType) {
   // Validate that is possible to convert scalar to tensor dtype without overflow
   AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::BFloat16, at::ScalarType::Half, scalarType, "check_convert", [&]{
@@ -418,14 +447,6 @@ Tensor& mul_(Tensor& self, Scalar other) {
   return native::mul_(self, wrapped_scalar_tensor(other));
 }
 
-Tensor sub(const Tensor& self, Scalar other, Scalar alpha) {
-  return native::sub(self, wrapped_scalar_tensor(other), alpha);
-}
-
-Tensor& sub_(Tensor& self, Scalar other, Scalar alpha) {
-  return native::sub_(self, wrapped_scalar_tensor(other), alpha);
-}
-
 Tensor rsub(const Tensor& self, Scalar other, Scalar alpha) {
   return native::rsub(self, wrapped_scalar_tensor(other), alpha);
 }
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 1a480e1eae9..d4faba56132 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -3609,6 +3609,26 @@
   use_c10_dispatcher: full
   variants: method
 
+# subtract, alias for sub
+- func: subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+
+- func: subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+  use_c10_dispatcher: full
+  variants: function, method
+
+- func: subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
+  use_c10_dispatcher: full
+  variants: method
+
+# For C++ only, until we have conversion from C++ numbers to Tensor
+- func: subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+  use_c10_dispatcher: full
+  variants: function, method
+
+- func: subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
+  use_c10_dispatcher: full
+  variants: method
+
 - func: rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
   use_c10_dispatcher: full
   variants: function
@@ -5756,7 +5776,7 @@
     CUDA: foreach_tensor_add_scalar_kernel_cuda
 
 - func: _foreach_add_.Scalar(Tensor[](a!) self, Scalar scalar) -> ()
-  device_guard: False
+  device_guard: False
   variants: function
   dispatch:
     CPU: foreach_tensor_add_scalar_kernel_slow_
diff --git a/docs/source/tensors.rst b/docs/source/tensors.rst
index b4ad2756c72..c6a7f29036e 100644
--- a/docs/source/tensors.rst
+++ b/docs/source/tensors.rst
@@ -541,6 +541,8 @@ view of a storage and defines numeric operations on it.
    .. automethod:: stride
    .. automethod:: sub
    .. automethod:: sub_
+   .. automethod:: subtract
+   .. automethod:: subtract_
    .. automethod:: sum
    .. automethod:: sum_to_size
    .. automethod:: svd
diff --git a/docs/source/torch.rst b/docs/source/torch.rst
index fa03b904d8b..7ab9b5a61c6 100644
--- a/docs/source/torch.rst
+++ b/docs/source/torch.rst
@@ -325,6 +325,8 @@ Pointwise Ops
     sinh
     sqrt
     square
+    sub
+    subtract
     tan
     tanh
     true_divide
diff --git a/test/test_op_aliases.py b/test/test_op_aliases.py
index 7d5f2249da5..e00c0f7e759 100644
--- a/test/test_op_aliases.py
+++ b/test/test_op_aliases.py
@@ -80,6 +80,14 @@ alias_infos = (
               lambda d: torch.clamp(torch.randn(20, device=d), -1, 1)),
     AliasInfo('arctanh_', torch.Tensor.arctanh_, 'atanh_', torch.Tensor.atanh_,
               lambda d: torch.clamp(torch.randn(20, device=d), -1, 1)),
+    AliasInfo('subtract', torch.subtract, 'sub', torch.sub,
+              lambda d: torch.randn(20, device=d),
+              get_args=lambda d: (torch.randn(20, device=d),),
+              decorators=(onlyCPU,)),
+    AliasInfo('subtract_', torch.Tensor.subtract_, 'sub_', torch.Tensor.sub_,
+              lambda d: torch.randn(20, device=d),
+              get_args=lambda d: (torch.randn(20, device=d),),
+              decorators=(onlyCPU,)),
 )
 
 # Placeholder test class for validating that aliases are correctly
diff --git a/torch/_tensor_docs.py b/torch/_tensor_docs.py
index 18cd3825c61..cdd1086bac3 100644
--- a/torch/_tensor_docs.py
+++ b/torch/_tensor_docs.py
@@ -3167,18 +3167,10 @@ Example::
 
 """)
 
-add_docstr_all('sub',
-               r"""
+add_docstr_all('sub', r"""
 sub(other, *, alpha=1) -> Tensor
 
-Subtracts a scalar or tensor from :attr:`self` tensor. If both :attr:`alpha`
-and :attr:`other` are specified, each element of :attr:`other` is scaled by
-:attr:`alpha` before being used.
-
-When :attr:`other` is a tensor, the shape of :attr:`other` must be
-:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
-tensor.
-
+See :func:`torch.sub`.
 """)
 
 add_docstr_all('sub_',
@@ -3188,6 +3180,18 @@ sub_(other, *, alpha=1) -> Tensor
 
 In-place version of :meth:`~Tensor.sub`
 """)
 
+add_docstr_all('subtract', r"""
+subtract(other, *, alpha=1) -> Tensor
+
+See :func:`torch.subtract`.
+""")
+
+add_docstr_all('subtract_', r"""
+subtract_(other, *, alpha=1) -> Tensor
+
+In-place version of :meth:`~Tensor.subtract`.
+""")
+
 add_docstr_all('sum',
                r"""
 sum(dim=None, keepdim=False, dtype=None) -> Tensor
diff --git a/torch/_torch_docs.py b/torch/_torch_docs.py
index 32d8f1b3248..c86ea77afa0 100644
--- a/torch/_torch_docs.py
+++ b/torch/_torch_docs.py
@@ -6707,6 +6707,40 @@ Example::
     (tensor([0.9110, 0.8197, 1.2552, 1.0608]), tensor([-0.6871, 0.6229, 0.2169, -0.9058]))
 """.format(**multi_dim_common))
 
+add_docstr(torch.sub, r"""
+sub(input, other, *, alpha=1, out=None) -> Tensor
+
+Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.
+
+.. math::
+    \text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i
+""" + r"""
+
+Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
+:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
+
+Args:
+    {input}
+    other (Tensor or Scalar): the tensor or scalar to subtract from :attr:`input`
+
+Keyword args:
+    alpha (Scalar): the scalar multiplier for :attr:`other`
+    {out}
+
+Example::
+
+    >>> a = torch.tensor((1, 2))
+    >>> b = torch.tensor((0, 1))
+    >>> torch.sub(a, b, alpha=2)
+    tensor([1, 0])
+""".format(**common_args))
+
+add_docstr(torch.subtract, r"""
+subtract(input, other, *, alpha=1, out=None) -> Tensor
+
+Alias for :func:`torch.sub`.
+""")
+
 add_docstr(torch.sum, r"""
 sum(input, dtype=None) -> Tensor
 
diff --git a/torch/csrc/jit/passes/normalize_ops.cpp b/torch/csrc/jit/passes/normalize_ops.cpp
index e60149b3bfb..15ffeb0ce70 100644
--- a/torch/csrc/jit/passes/normalize_ops.cpp
+++ b/torch/csrc/jit/passes/normalize_ops.cpp
@@ -19,6 +19,7 @@ static const std::unordered_map<Symbol, Symbol> alias_map = {
     {aten::arctanh, aten::atanh}, {aten::arctanh_, aten::atanh_},
     {aten::fix, aten::trunc}, {aten::fix_, aten::trunc_},
     {aten::negative, aten::neg}, {aten::negative_, aten::neg_},
+    {aten::subtract, aten::sub}, {aten::subtract_, aten::sub_},
 };
 
 void replaceNodeWithNewSymbol(Node* node, Symbol new_symbol) {
diff --git a/torch/overrides.py b/torch/overrides.py
index f0e6599b5f9..6532fd4b257 100644
--- a/torch/overrides.py
+++ b/torch/overrides.py
@@ -707,6 +707,7 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
     torch.stft: (lambda input, n_fft, hop_length=None, win_length=None, window=None,
                  center=True, pad_mode='reflect', normalized=False, onesided=True: -1),
     torch.sub: lambda input, other, out=None: -1,
+    torch.subtract: lambda input, other, out=None: -1,
     torch.sum: lambda input, dim=None: -1,
     torch.nansum: lambda input, dim=None: -1,
     torch.svd: lambda input, some=True, compute_uv=True, out=None: -1,