Add alias torch.negative to torch.neg. (#43400)

Summary:
xref https://github.com/pytorch/pytorch/issues/42515

Pull Request resolved: https://github.com/pytorch/pytorch/pull/43400

Reviewed By: albanD

Differential Revision: D23266011

Pulled By: mruberry

fbshipit-source-id: ca20b30d99206a255cf26438b09c3ca1f99445c6
This commit is contained in:
Hameer Abbasi 2020-08-24 01:12:55 -07:00 committed by Facebook GitHub Bot
parent 1f0cfbaaad
commit c4e841654d
11 changed files with 50 additions and 3 deletions

View file

@@ -519,7 +519,6 @@ _(aten, native_resize_as) \
_(aten, native_tensor) \
_(aten, native_zero) \
_(aten, ne) \
_(aten, neg) \
_(aten, nextafter) \
_(aten, bitwise_and) \
_(aten, bitwise_not) \

View file

@@ -213,6 +213,10 @@ namespace c10 {
_(aten, trunc_) \
_(aten, fix) \
_(aten, fix_) \
_(aten, neg) \
_(aten, neg_) \
_(aten, negative) \
_(aten, negative_) \
_(aten, setdefault) \
_(aten, bin) \
_(aten, pop) \

View file

@@ -382,6 +382,10 @@ Tensor& neg_out(Tensor& result, const Tensor& self) {
// Out-of-place negation; routes through the shared unary_op_impl dispatcher
// so allocation/dtype handling is common to all unary ops.
Tensor neg(const Tensor& self) { return unary_op_impl(self, at::neg_out); }
// In-place negation, expressed in terms of the same at::neg_out kernel.
Tensor& neg_(Tensor& self) { return unary_op_impl_(self, at::neg_out); }
// `negative`, `negative_`, and `negative_out` are pure aliases for the
// corresponding `neg` entry points: each forwards directly to the at::native
// implementation so both operator names share a single kernel.
Tensor& negative_out(Tensor& result, const Tensor& self) { return at::native::neg_out(result, self); }
Tensor negative(const Tensor& self) { return at::native::neg(self); }
Tensor& negative_(Tensor& self) { return at::native::neg_(self); }
Tensor logical_not(const Tensor& self) {
Tensor result = at::empty({0}, self.options().dtype(kBool));
return at::logical_not_out(result, self);

View file

@@ -2554,8 +2554,17 @@
variants: function, method
- func: neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: neg_out
# Alias for neg
- func: negative(Tensor self) -> Tensor
use_c10_dispatcher: full
variants: function, method
- func: negative_(Tensor(a!) self) -> Tensor(a!)
use_c10_dispatcher: full
variants: function, method
- func: negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: repeat(Tensor self, int[] repeats) -> Tensor
use_c10_dispatcher: full

View file

@@ -438,6 +438,8 @@ view of a storage and defines numeric operations on it.
.. automethod:: ne_
.. automethod:: neg
.. automethod:: neg_
.. automethod:: negative
.. automethod:: negative_
.. automethod:: nelement
.. automethod:: nextafter
.. automethod:: nextafter_

View file

@@ -306,6 +306,7 @@ Pointwise Ops
mul
mvlgamma
neg
negative
nextafter
polygamma
pow

View file

@@ -68,6 +68,10 @@ alias_infos = (
lambda d: 10 * torch.randn(20, device=d)),
AliasInfo('fix_', torch.Tensor.fix_, 'trunc_', torch.Tensor.trunc_,
lambda d: 10 * torch.randn(20, device=d)),
AliasInfo('negative', torch.negative, 'neg', torch.neg,
lambda d: 10 * torch.randn(20, device=d)),
AliasInfo('negative_', torch.Tensor.negative_, 'neg_', torch.Tensor.neg_,
lambda d: 10 * torch.randn(20, device=d)),
)
# Placeholder test class for validating that aliases are correctly

View file

@@ -2211,6 +2211,13 @@ neg() -> Tensor
See :func:`torch.neg`
""")
add_docstr_all('negative',
r"""
negative() -> Tensor
See :func:`torch.negative`
""")
add_docstr_all('neg_',
r"""
neg_() -> Tensor
@@ -2218,6 +2225,13 @@ neg_() -> Tensor
In-place version of :meth:`~Tensor.neg`
""")
add_docstr_all('negative_',
r"""
negative_() -> Tensor
In-place version of :meth:`~Tensor.negative`
""")
add_docstr_all('nelement',
r"""
nelement() -> int

View file

@@ -4897,6 +4897,13 @@ Example::
tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940])
""".format(**common_args))
add_docstr(torch.negative,
r"""
negative(input, *, out=None) -> Tensor
Alias for :func:`torch.neg`
""".format(**common_args))
add_docstr(torch.nextafter,
r"""
nextafter(input, other, *, out=None) -> Tensor

View file

@@ -24,6 +24,8 @@ static const std::unordered_map<Symbol, Symbol> alias_map = {
{aten::arctan_, aten::atan_},
{aten::fix, aten::trunc},
{aten::fix_, aten::trunc_},
{aten::negative, aten::neg},
{aten::negative_, aten::neg_},
};
void replaceNodeWithNewSymbol(Node* node, Symbol new_symbol) {

View file

@@ -479,6 +479,7 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
torch.native_norm: lambda input, p=2, dim=None, keepdim=False, dtype=None: -1,
torch.ne: lambda input, other, out=None: -1,
torch.neg: lambda input, out=None: -1,
torch.negative: lambda input, out=None: -1,
torch.nextafter: lambda input, other, out=None: -1,
torch.nn.functional.adaptive_avg_pool2d: lambda input, output_size: -1,
torch.nn.functional.adaptive_avg_pool3d: lambda input, output_size: -1,