Adds inequality testing aliases for better NumPy compatibility (#43870)

Summary:
This PR adds the following aliases:

- not_equal for torch.ne
- greater for torch.gt
- greater_equal for torch.ge
- less for torch.lt
- less_equal for torch.le

These aliases are consistent with NumPy's naming for these functions.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/43870

Reviewed By: zou3519

Differential Revision: D23498975

Pulled By: mruberry

fbshipit-source-id: 78560df98c9f7747e804a420c1e53fd1dd225002
This commit is contained in:
Mike Ruberry 2020-09-06 09:34:46 -07:00 committed by Facebook GitHub Bot
parent 671160a963
commit 83a6e7d342
11 changed files with 387 additions and 87 deletions

View file

@ -341,7 +341,6 @@ _(aten, full) \
_(aten, full_like) \
_(aten, gather) \
_(aten, gcd) \
_(aten, ge) \
_(aten, gelu) \
_(aten, geometric) \
_(aten, geqrf) \
@ -357,7 +356,6 @@ _(aten, grid_sampler_3d_backward) \
_(aten, group_norm) \
_(aten, gru) \
_(aten, gru_cell) \
_(aten, gt) \
_(aten, hamming_window) \
_(aten, hann_window) \
_(aten, hardshrink) \
@ -410,7 +408,6 @@ _(aten, l1_loss_backward) \
_(aten, l1_loss_forward) \
_(aten, layer_norm) \
_(aten, lcm) \
_(aten, le) \
_(aten, leaky_relu) \
_(aten, leaky_relu_backward) \
_(aten, leaky_relu_forward) \
@ -437,7 +434,6 @@ _(aten, logsumexp) \
_(aten, lstm) \
_(aten, lstm_cell) \
_(aten, lstsq) \
_(aten, lt) \
_(aten, lu_solve) \
_(aten, margin_ranking_loss) \
_(aten, masked_fill) \
@ -519,7 +515,6 @@ _(aten, native_pow) \
_(aten, native_resize_as) \
_(aten, native_tensor) \
_(aten, native_zero) \
_(aten, ne) \
_(aten, nextafter) \
_(aten, bitwise_and) \
_(aten, bitwise_not) \

View file

@ -193,12 +193,27 @@ namespace c10 {
_(aten, addmv_) \
_(aten, addr_) \
_(aten, baddbmm_) \
_(aten, ge) \
_(aten, ge_) \
_(aten, greater_equal) \
_(aten, greater_equal_) \
_(aten, gt) \
_(aten, gt_) \
_(aten, greater) \
_(aten, greater_) \
_(aten, le) \
_(aten, le_) \
_(aten, less_equal) \
_(aten, less_equal_) \
_(aten, lerp_) \
_(aten, lt) \
_(aten, lt_) \
_(aten, less) \
_(aten, less_) \
_(aten, ne) \
_(aten, ne_) \
_(aten, not_equal) \
_(aten, not_equal_) \
_(aten, _ger) \
_(aten, ger) \
_(aten, outer) \

View file

@ -704,6 +704,14 @@ Tensor& lt_out(Tensor& result, const Tensor& self, Scalar other) { return compar
Tensor lt(const Tensor& self, Scalar other) { return comparison_op(self, other, static_cast<OutFunc>(at::lt_out)); }
Tensor& lt_(Tensor& self, Scalar other) { return comparison_op_(self, other, static_cast<OutFunc>(at::lt_out)); }
// less, alias for torch.lt
Tensor& less_out(Tensor& result, const Tensor& self, const Tensor& other) { return at::lt_out(result, self, other); }
Tensor less(const Tensor& self, const Tensor& other) { return self.lt(other); }
Tensor& less_(Tensor& self, const Tensor& other) { return self.lt_(other); }
Tensor& less_out(Tensor& result, const Tensor& self, Scalar other) { return at::lt_out(result, self, other); }
Tensor less(const Tensor& self, Scalar other) { return self.lt(other); }
Tensor& less_(Tensor& self, Scalar other) { return self.lt_(other); }
Tensor& le_out(Tensor& result, const Tensor& self, const Tensor& other) { return comparison_op_out(result, self, other, le_stub); }
Tensor le(const Tensor& self, const Tensor& other) { return comparison_op(self, other, static_cast<OutFunc>(at::le_out)); }
Tensor& le_(Tensor& self, const Tensor& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::le_out)); }
@ -711,6 +719,14 @@ Tensor& le_out(Tensor& result, const Tensor& self, Scalar other) { return compar
Tensor le(const Tensor& self, Scalar other) { return comparison_op(self, other, static_cast<OutFunc>(at::le_out)); }
Tensor& le_(Tensor& self, Scalar other) { return comparison_op_(self, other, static_cast<OutFunc>(at::le_out)); }
// less_equal, alias for torch.le
Tensor& less_equal_out(Tensor& result, const Tensor& self, const Tensor& other) { return at::le_out(result, self, other); }
Tensor less_equal(const Tensor& self, const Tensor& other) { return self.le(other); }
Tensor& less_equal_(Tensor& self, const Tensor& other) { return self.le_(other); }
Tensor& less_equal_out(Tensor& result, const Tensor& self, Scalar other) { return at::le_out(result, self, other); }
Tensor less_equal(const Tensor& self, Scalar other) { return self.le(other); }
Tensor& less_equal_(Tensor& self, Scalar other) { return self.le_(other); }
Tensor& gt_out(Tensor& result, const Tensor& self, const Tensor& other) { return comparison_op_out(result, self, other, gt_stub); }
Tensor gt(const Tensor& self, const Tensor& other) { return comparison_op(self, other, static_cast<OutFunc>(at::gt_out)); }
Tensor& gt_(Tensor& self, const Tensor& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::gt_out)); }
@ -718,6 +734,14 @@ Tensor& gt_out(Tensor& result, const Tensor& self, Scalar other) { return compar
Tensor gt(const Tensor& self, Scalar other) { return comparison_op(self, other, static_cast<OutFunc>(at::gt_out)); }
Tensor& gt_(Tensor& self, Scalar other) { return comparison_op_(self, other, static_cast<OutFunc>(at::gt_out)); }
// greater, alias for torch.gt
Tensor& greater_out(Tensor& result, const Tensor& self, const Tensor& other) { return at::gt_out(result, self, other); }
Tensor greater(const Tensor& self, const Tensor& other) { return self.gt(other); }
Tensor& greater_(Tensor& self, const Tensor& other) { return self.gt_(other); }
Tensor& greater_out(Tensor& result, const Tensor& self, Scalar other) { return at::gt_out(result, self, other); }
Tensor greater(const Tensor& self, Scalar other) { return self.gt(other); }
Tensor& greater_(Tensor& self, Scalar other) { return self.gt_(other); }
Tensor& ge_out(Tensor& result, const Tensor& self, const Tensor& other) { return comparison_op_out(result, self, other, ge_stub); }
Tensor ge(const Tensor& self, const Tensor& other) { return comparison_op(self, other, static_cast<OutFunc>(at::ge_out)); }
Tensor& ge_(Tensor& self, const Tensor& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::ge_out)); }
@ -725,6 +749,14 @@ Tensor& ge_out(Tensor& result, const Tensor& self, Scalar other) { return compar
Tensor ge(const Tensor& self, Scalar other) { return comparison_op(self, other, static_cast<OutFunc>(at::ge_out)); }
Tensor& ge_(Tensor& self, Scalar other) { return comparison_op_(self, other, static_cast<OutFunc>(at::ge_out)); }
// greater_equal, alias for torch.ge
Tensor& greater_equal_out(Tensor& result, const Tensor& self, const Tensor& other) { return at::ge_out(result, self, other); }
Tensor greater_equal(const Tensor& self, const Tensor& other) { return self.ge(other); }
Tensor& greater_equal_(Tensor& self, const Tensor& other) { return self.ge_(other); }
Tensor& greater_equal_out(Tensor& result, const Tensor& self, Scalar other) { return at::ge_out(result, self, other); }
Tensor greater_equal(const Tensor& self, Scalar other) { return self.ge(other); }
Tensor& greater_equal_(Tensor& self, Scalar other) { return self.ge_(other); }
Tensor& eq_out(Tensor& result, const Tensor& self, const Tensor& other) { return comparison_op_out(result, self, other, eq_stub); }
Tensor eq(const Tensor& self, const Tensor& other) { return comparison_op(self, other, static_cast<OutFunc>(at::eq_out)); }
Tensor& eq_(Tensor& self, const Tensor& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::eq_out)); }
@ -739,6 +771,14 @@ Tensor& ne_out(Tensor& result, const Tensor& self, Scalar other) { return compar
Tensor ne(const Tensor& self, Scalar other) { return comparison_op(self, other, static_cast<OutFunc>(at::ne_out)); }
Tensor& ne_(Tensor& self, Scalar other) { return comparison_op_(self, other, static_cast<OutFunc>(at::ne_out)); }
// not_equal, alias for torch.ne
Tensor& not_equal_out(Tensor& result, const Tensor& self, const Tensor& other) { return at::ne_out(result, self, other); }
Tensor not_equal(const Tensor& self, const Tensor& other) { return self.ne(other); }
Tensor& not_equal_(Tensor& self, const Tensor& other) { return self.ne_(other); }
Tensor& not_equal_out(Tensor& result, const Tensor& self, Scalar other) { return at::ne_out(result, self, other); }
Tensor not_equal(const Tensor& self, Scalar other) { return self.ne(other); }
Tensor& not_equal_(Tensor& self, Scalar other) { return self.ne_(other); }
Tensor& logical_and_out(Tensor& result, const Tensor& self, const Tensor& other) { return comparison_op_out(result, self, other, logical_and_stub); }
Tensor logical_and(const Tensor& self, const Tensor& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_and_out)); }
Tensor& logical_and_(Tensor& self, const Tensor& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_and_out)); }

View file

@ -4530,38 +4530,6 @@
- func: scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
variants: function, method
- func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
@ -4570,14 +4538,6 @@
use_c10_dispatcher: full
variants: method
- func: ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
@ -4969,6 +4929,35 @@
CPU, CUDA: ne
QuantizedCPU: ne_quantized_cpu
- func: ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
# not_equal, alias for torch.ne
- func: not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: not_equal.Scalar(Tensor self, Scalar other) -> Tensor
use_c10_dispatcher: full
variants: method, function
- func: not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: not_equal.Tensor(Tensor self, Tensor other) -> Tensor
use_c10_dispatcher: full
variants: method, function
- func: not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: eq_out
@ -5017,6 +5006,35 @@
CPU, CUDA: ge
QuantizedCPU: ge_quantized_cpu
- func: ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
# greater_equal, alias for torch.ge
- func: greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
use_c10_dispatcher: full
variants: method, function
- func: greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: greater_equal.Tensor(Tensor self, Tensor other) -> Tensor
use_c10_dispatcher: full
variants: method, function
- func: greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: le_out
@ -5041,6 +5059,35 @@
CPU, CUDA: le
QuantizedCPU: le_quantized_cpu
- func: le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
# less_equal, alias for torch.le
- func: less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: less_equal.Scalar(Tensor self, Scalar other) -> Tensor
use_c10_dispatcher: full
variants: method, function
- func: less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: less_equal.Tensor(Tensor self, Tensor other) -> Tensor
use_c10_dispatcher: full
variants: method, function
- func: less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: gt_out
@ -5065,6 +5112,35 @@
CPU, CUDA: gt
QuantizedCPU: gt_quantized_cpu
- func: gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
# greater, alias for torch.gt
- func: greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: greater.Scalar(Tensor self, Scalar other) -> Tensor
use_c10_dispatcher: full
variants: method, function
- func: greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: greater.Tensor(Tensor self, Tensor other) -> Tensor
use_c10_dispatcher: full
variants: method, function
- func: greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: lt_out
@ -5089,6 +5165,35 @@
CPU, CUDA: lt
QuantizedCPU: lt_quantized_cpu
- func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
# less, alias for torch.lt
- func: less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: less.Scalar(Tensor self, Scalar other) -> Tensor
use_c10_dispatcher: full
variants: method, function
- func: less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: less.Tensor(Tensor self, Tensor other) -> Tensor
use_c10_dispatcher: full
variants: method, function
- func: less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
use_c10_dispatcher: full
variants: method
- func: take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: legacy::cpu::_th_take_out

View file

@ -326,12 +326,16 @@ view of a storage and defines numeric operations on it.
.. automethod:: gcd_
.. automethod:: ge
.. automethod:: ge_
.. automethod:: greater_equal
.. automethod:: greater_equal_
.. automethod:: geometric_
.. automethod:: geqrf
.. automethod:: ger
.. automethod:: get_device
.. automethod:: gt
.. automethod:: gt_
.. automethod:: greater
.. automethod:: greater_
.. automethod:: half
.. automethod:: hardshrink
.. automethod:: heaviside
@ -379,6 +383,8 @@ view of a storage and defines numeric operations on it.
.. automethod:: lcm_
.. automethod:: le
.. automethod:: le_
.. automethod:: less_equal
.. automethod:: less_equal_
.. automethod:: lerp
.. automethod:: lerp_
.. automethod:: lgamma
@ -410,6 +416,8 @@ view of a storage and defines numeric operations on it.
.. automethod:: lstsq
.. automethod:: lt
.. automethod:: lt_
.. automethod:: less
.. automethod:: less_
.. automethod:: lu
.. automethod:: lu_solve
.. automethod:: as_subclass
@ -443,6 +451,8 @@ view of a storage and defines numeric operations on it.
.. automethod:: ndimension
.. automethod:: ne
.. automethod:: ne_
.. automethod:: not_equal
.. automethod:: not_equal_
.. automethod:: neg
.. automethod:: neg_
.. automethod:: negative

View file

@ -375,7 +375,9 @@ Comparison Ops
eq
equal
ge
greater_equal
gt
greater
isclose
isfinite
isinf
@ -385,10 +387,13 @@ Comparison Ops
isreal
kthvalue
le
less_equal
lt
less
maximum
minimum
ne
not_equal
sort
topk

View file

@ -88,6 +88,46 @@ alias_infos = (
lambda d: torch.randn(20, device=d),
get_args=lambda d: (torch.randn(20, device=d),),
decorators=(onlyCPU,)),
AliasInfo('greater_equal', torch.greater_equal, 'ge', torch.ge,
lambda d: torch.randn(20, device=d),
get_args=lambda d: (torch.randn(20, device=d),),
decorators=(onlyCPU,)),
AliasInfo('greater_equal_', torch.Tensor.greater_equal_, 'ge_', torch.Tensor.ge_,
lambda d: torch.randn(20, device=d),
get_args=lambda d: (torch.randn(20, device=d),),
decorators=(onlyCPU,)),
AliasInfo('greater', torch.greater, 'gt', torch.gt,
lambda d: torch.randn(20, device=d),
get_args=lambda d: (torch.randn(20, device=d),),
decorators=(onlyCPU,)),
AliasInfo('greater_', torch.Tensor.greater_, 'gt_', torch.Tensor.gt_,
lambda d: torch.randn(20, device=d),
get_args=lambda d: (torch.randn(20, device=d),),
decorators=(onlyCPU,)),
AliasInfo('less_equal', torch.less_equal, 'le', torch.le,
lambda d: torch.randn(20, device=d),
get_args=lambda d: (torch.randn(20, device=d),),
decorators=(onlyCPU,)),
AliasInfo('less_equal_', torch.Tensor.less_equal_, 'le_', torch.Tensor.le_,
lambda d: torch.randn(20, device=d),
get_args=lambda d: (torch.randn(20, device=d),),
decorators=(onlyCPU,)),
AliasInfo('less', torch.less, 'lt', torch.lt,
lambda d: torch.randn(20, device=d),
get_args=lambda d: (torch.randn(20, device=d),),
decorators=(onlyCPU,)),
AliasInfo('less_', torch.Tensor.less_, 'lt_', torch.Tensor.lt_,
lambda d: torch.randn(20, device=d),
get_args=lambda d: (torch.randn(20, device=d),),
decorators=(onlyCPU,)),
AliasInfo('not_equal', torch.not_equal, 'ne', torch.ne,
lambda d: torch.randn(20, device=d),
get_args=lambda d: (torch.randn(20, device=d),),
decorators=(onlyCPU,)),
AliasInfo('not_equal_', torch.Tensor.not_equal_, 'ne_', torch.Tensor.ne_,
lambda d: torch.randn(20, device=d),
get_args=lambda d: (torch.randn(20, device=d),),
decorators=(onlyCPU,)),
)
# Placeholder test class for validating that aliases are correctly

View file

@ -1404,18 +1404,28 @@ gcd_(other) -> Tensor
In-place version of :meth:`~Tensor.gcd`
""")
add_docstr_all('ge',
r"""
add_docstr_all('ge', r"""
ge(other) -> Tensor
See :func:`torch.ge`
See :func:`torch.ge`.
""")
add_docstr_all('ge_',
r"""
add_docstr_all('ge_', r"""
ge_(other) -> Tensor
In-place version of :meth:`~Tensor.ge`
In-place version of :meth:`~Tensor.ge`.
""")
add_docstr_all('greater_equal', r"""
greater_equal(other) -> Tensor
See :func:`torch.greater_equal`.
""")
add_docstr_all('greater_equal_', r"""
greater_equal_(other) -> Tensor
In-place version of :meth:`~Tensor.greater_equal`.
""")
add_docstr_all('geometric_',
@ -1523,18 +1533,28 @@ See also :meth:`Tensor.indices`.
:meth:`Tensor.coalesce` for details.
""")
add_docstr_all('gt',
r"""
add_docstr_all('gt', r"""
gt(other) -> Tensor
See :func:`torch.gt`
See :func:`torch.gt`.
""")
add_docstr_all('gt_',
r"""
add_docstr_all('gt_', r"""
gt_(other) -> Tensor
In-place version of :meth:`~Tensor.gt`
In-place version of :meth:`~Tensor.gt`.
""")
add_docstr_all('greater', r"""
greater(other) -> Tensor
See :func:`torch.greater`.
""")
add_docstr_all('greater_', r"""
greater_(other) -> Tensor
In-place version of :meth:`~Tensor.greater`.
""")
add_docstr_all('has_names',
@ -1873,18 +1893,28 @@ lcm_(other) -> Tensor
In-place version of :meth:`~Tensor.lcm`
""")
add_docstr_all('le',
r"""
add_docstr_all('le', r"""
le(other) -> Tensor
See :func:`torch.le`
See :func:`torch.le`.
""")
add_docstr_all('le_',
r"""
add_docstr_all('le_', r"""
le_(other) -> Tensor
In-place version of :meth:`~Tensor.le`
In-place version of :meth:`~Tensor.le`.
""")
add_docstr_all('less_equal', r"""
less_equal(other) -> Tensor
See :func:`torch.less_equal`.
""")
add_docstr_all('less_equal_', r"""
less_equal_(other) -> Tensor
In-place version of :meth:`~Tensor.less_equal`.
""")
add_docstr_all('lerp',
@ -2011,18 +2041,28 @@ lstsq(A) -> (Tensor, Tensor)
See :func:`torch.lstsq`
""")
add_docstr_all('lt',
r"""
add_docstr_all('lt', r"""
lt(other) -> Tensor
See :func:`torch.lt`
See :func:`torch.lt`.
""")
add_docstr_all('lt_',
r"""
add_docstr_all('lt_', r"""
lt_(other) -> Tensor
In-place version of :meth:`~Tensor.lt`
In-place version of :meth:`~Tensor.lt`.
""")
add_docstr_all('less', r"""
less(other) -> Tensor
See :func:`torch.less`.
""")
add_docstr_all('less_', r"""
less_(other) -> Tensor
In-place version of :meth:`~Tensor.less`.
""")
add_docstr_all('lu_solve',
@ -2268,18 +2308,28 @@ ndimension() -> int
Alias for :meth:`~Tensor.dim()`
""")
add_docstr_all('ne',
r"""
add_docstr_all('ne', r"""
ne(other) -> Tensor
See :func:`torch.ne`
See :func:`torch.ne`.
""")
add_docstr_all('ne_',
r"""
add_docstr_all('ne_', r"""
ne_(other) -> Tensor
In-place version of :meth:`~Tensor.ne`
In-place version of :meth:`~Tensor.ne`.
""")
add_docstr_all('not_equal', r"""
not_equal(other) -> Tensor
See :func:`torch.not_equal`.
""")
add_docstr_all('not_equal_', r"""
not_equal_(other) -> Tensor
In-place version of :meth:`~Tensor.not_equal`.
""")
add_docstr_all('neg',

View file

@ -2939,6 +2939,12 @@ Example::
tensor([[True, True], [False, True]])
""".format(**common_args))
add_docstr(torch.greater_equal, r"""
greater_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.ge`.
""")
add_docstr(torch.geqrf,
r"""
geqrf(input, *, out=None) -> (Tensor, Tensor)
@ -3115,6 +3121,12 @@ Example::
tensor([[False, True], [False, False]])
""".format(**common_args))
add_docstr(torch.greater, r"""
greater(input, other, *, out=None) -> Tensor
Alias for :func:`torch.gt`.
""")
add_docstr(torch.histc,
r"""
histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor
@ -3569,6 +3581,12 @@ Example::
tensor([[True, False], [True, True]])
""".format(**common_args))
add_docstr(torch.less_equal, r"""
less_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.le`.
""")
add_docstr(torch.lerp,
r"""
lerp(input, end, weight, *, out=None)
@ -4097,6 +4115,12 @@ Example::
tensor([[False, False], [True, False]])
""".format(**common_args))
add_docstr(torch.less, r"""
less(input, other, *, out=None) -> Tensor
Alias for :func:`torch.lt`.
""")
add_docstr(torch.lu_solve,
r"""
lu_solve(input, LU_data, LU_pivots, *, out=None) -> Tensor
@ -5200,6 +5224,12 @@ Example::
tensor([[False, True], [True, False]])
""".format(**common_args))
add_docstr(torch.not_equal, r"""
not_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.ne`.
""")
add_docstr(torch.neg,
r"""
neg(input, *, out=None) -> Tensor

View file

@ -8,18 +8,23 @@ namespace {
// map from op alias -> normalized op
static const std::unordered_map<Symbol, Symbol> alias_map = {
{aten::absolute, aten::abs}, {aten::absolute_, aten::abs_},
{aten::clip, aten::clamp}, {aten::clip_, aten::clamp_},
{aten::linalg_det, aten::det}, {aten::outer, aten::ger},
{aten::arccos, aten::acos}, {aten::arccos_, aten::acos_},
{aten::arcsin, aten::asin}, {aten::arcsin_, aten::asin_},
{aten::arctan, aten::atan}, {aten::arctan_, aten::atan_},
{aten::arccosh, aten::acosh}, {aten::arccosh_, aten::acosh_},
{aten::arcsinh, aten::asinh}, {aten::arcsinh_, aten::asinh_},
{aten::arctanh, aten::atanh}, {aten::arctanh_, aten::atanh_},
{aten::fix, aten::trunc}, {aten::fix_, aten::trunc_},
{aten::negative, aten::neg}, {aten::negative_, aten::neg_},
{aten::subtract, aten::sub}, {aten::subtract_, aten::sub_},
{aten::absolute, aten::abs}, {aten::absolute_, aten::abs_},
{aten::clip, aten::clamp}, {aten::clip_, aten::clamp_},
{aten::linalg_det, aten::det}, {aten::outer, aten::ger},
{aten::arccos, aten::acos}, {aten::arccos_, aten::acos_},
{aten::arcsin, aten::asin}, {aten::arcsin_, aten::asin_},
{aten::arctan, aten::atan}, {aten::arctan_, aten::atan_},
{aten::arccosh, aten::acosh}, {aten::arccosh_, aten::acosh_},
{aten::arcsinh, aten::asinh}, {aten::arcsinh_, aten::asinh_},
{aten::arctanh, aten::atanh}, {aten::arctanh_, aten::atanh_},
{aten::fix, aten::trunc}, {aten::fix_, aten::trunc_},
{aten::negative, aten::neg}, {aten::negative_, aten::neg_},
{aten::subtract, aten::sub}, {aten::subtract_, aten::sub_},
{aten::greater_equal, aten::ge}, {aten::greater_equal_, aten::ge_},
{aten::greater, aten::gt}, {aten::greater_, aten::gt_},
{aten::less_equal, aten::le}, {aten::less_equal_, aten::le_},
{aten::less, aten::lt}, {aten::less_, aten::lt_},
{aten::not_equal, aten::ne}, {aten::not_equal_, aten::ne_},
};
void replaceNodeWithNewSymbol(Node* node, Symbol new_symbol) {

View file

@ -368,6 +368,7 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
torch.gather: lambda input, dim, index, out=None, sparse_grad=False: -1,
torch.gcd: lambda input, other, out=None: -1,
torch.ge: lambda input, other, out=None: -1,
torch.greater_equal: lambda input, other, out=None: -1,
torch.geqrf: lambda input, out=None: -1,
torch.i0: lambda input, out=None: -1,
torch.outer: lambda input, vec2, out=None: -1, # alias for torch.ger
@ -379,6 +380,7 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
torch.gru: lambda input, hx, params, has_biases, num_layers, gropout, train, bidirectional, batch_first: -1,
torch.gru_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.gt: lambda input, other, out=None: -1,
torch.greater: lambda input, other, out=None: -1,
torch.hardshrink: lambda input, lambd=0.5: -1,
torch.heaviside: lambda input, values, out=None: -1,
torch.hinge_embedding_loss: lambda input, target, margin=1.0, size_average=None, reduce=None, reduction='mean': -1,
@ -418,6 +420,7 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
torch.layer_norm: lambda input, normalized_shape, weight=None, bias=None, esp=1e-05, cudnn_enabled=True: -1,
torch.lcm: lambda input, other, out=None: -1,
torch.le: lambda input, other, out=None: -1,
torch.less_equal: lambda input, other, out=None: -1,
torch.lerp: lambda input, end, weight, out=None: -1,
torch.lgamma: lambda input, out=None: -1,
torch.lobpcg: lambda input, k=None, B=None, X=None, n=None, iK=None, niter=None, tol=None, largest=None, method=None,
@ -441,6 +444,7 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
torch.lstm_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.lstsq: lambda input, A, out=None: -1,
torch.lt: lambda input, other, out=None: -1,
torch.less: lambda input, other, out=None: -1,
torch.lu: lambda A, pivot=True, get_infos=False, out=None: -1,
torch.lu_solve: lambda input, LU_data, LU_pivots, out=None: -1,
torch.margin_ranking_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1,
@ -487,6 +491,7 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
torch.native_norm: lambda input, p=2: -1,
torch.native_norm: lambda input, p=2, dim=None, keepdim=False, dtype=None: -1,
torch.ne: lambda input, other, out=None: -1,
torch.not_equal: lambda input, other, out=None: -1,
torch.neg: lambda input, out=None: -1,
torch.negative: lambda input, out=None: -1,
torch.nextafter: lambda input, other, out=None: -1,