From adbb50ea67ab87f2dd4a09698d2e229a83877e09 Mon Sep 17 00:00:00 2001 From: Rahul Nambiar Date: Wed, 21 Oct 2020 19:59:47 -0700 Subject: [PATCH] Enabling alias annotation checks for all operations during autograd tests (#46601) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/46601 * except excluded tests and magic methods. https://github.com/pytorch/pytorch/issues/38731 Previously, we'd only run these tests for inplace operations. Since this is a lot more tests, fixed these issues that came up when running them - - Updated schema of conj() to reflect existing behaviour. - Updated deepEquals method in check_alias_annotation.cpp to re-use the overloaded == operator. Previous implementation did not cover all types of IValues. - Corrected the order inputs are passed in during autograd testing of 'view' & 'reshape'. - Subbed out aten::ger with the function it's aliased to, aten::outer, for testing. The alias annotation checking code doesn't handle aliased operators properly. ghstack-source-id: 114830903 Test Plan: Ran all tests in test:jit and verified they pass. 
Reviewed By: eellison Differential Revision: D24424955 fbshipit-source-id: 382d7e2585911b81b1573f21fff1d54a5e9a2054 --- aten/src/ATen/native/native_functions.yaml | 2 +- .../check_backward_compatibility.py | 1 + test/test_jit.py | 2 +- torch/csrc/jit/passes/normalize_ops.cpp | 55 ++++++++++--------- torch/csrc/jit/passes/normalize_ops.h | 2 + .../passes/utils/check_alias_annotation.cpp | 23 ++++---- .../_internal/common_methods_invocations.py | 4 +- 7 files changed, 49 insertions(+), 40 deletions(-) diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml index 8b668d82712..cf0defa3245 100644 --- a/aten/src/ATen/native/native_functions.yaml +++ b/aten/src/ATen/native/native_functions.yaml @@ -314,7 +314,7 @@ use_c10_dispatcher: full variants: function -- func: conj(Tensor self) -> Tensor +- func: conj(Tensor(a) self) -> Tensor(a) use_c10_dispatcher: full variants: function, method diff --git a/test/backward_compatibility/check_backward_compatibility.py b/test/backward_compatibility/check_backward_compatibility.py index 10cd793f96c..73f77d698bc 100644 --- a/test/backward_compatibility/check_backward_compatibility.py +++ b/test/backward_compatibility/check_backward_compatibility.py @@ -128,6 +128,7 @@ allow_list = [ ("aten::_foreach_addcdiv_", datetime.date(2020, 10, 15)), ("aten::_foreach_addcdiv", datetime.date(2020, 10, 15)), ("aten::_foreach_addcmul", datetime.date(2020, 10, 15)), + ("aten::conj", datetime.date(2020, 11, 10)), ] def allow_listed(schema, allow_list): diff --git a/test/test_jit.py b/test/test_jit.py index e36558207a2..f7583432ada 100644 --- a/test/test_jit.py +++ b/test/test_jit.py @@ -15734,7 +15734,7 @@ def add_autograd_test( check_types=check_types) # alias annotation testing - if is_inplace and test_name not in EXCLUDE_SCRIPT: + if not is_magic_method and test_name not in EXCLUDE_SCRIPT: check_alias_annotation(name, (self_variable,) + args_variable, kwargs_variable) check(name) diff --git 
a/torch/csrc/jit/passes/normalize_ops.cpp b/torch/csrc/jit/passes/normalize_ops.cpp index 75ad2a4499c..9d9ac0203b9 100644 --- a/torch/csrc/jit/passes/normalize_ops.cpp +++ b/torch/csrc/jit/passes/normalize_ops.cpp @@ -6,30 +6,6 @@ namespace jit { namespace { -// map from op alias -> normalized op -static const std::unordered_map alias_map = { - {aten::absolute, aten::abs}, {aten::absolute_, aten::abs_}, - {aten::clip, aten::clamp}, {aten::clip_, aten::clamp_}, - {aten::linalg_det, aten::det}, {aten::ger, aten::outer}, - {aten::arccos, aten::acos}, {aten::arccos_, aten::acos_}, - {aten::arcsin, aten::asin}, {aten::arcsin_, aten::asin_}, - {aten::arctan, aten::atan}, {aten::arctan_, aten::atan_}, - {aten::arccosh, aten::acosh}, {aten::arccosh_, aten::acosh_}, - {aten::arcsinh, aten::asinh}, {aten::arcsinh_, aten::asinh_}, - {aten::arctanh, aten::atanh}, {aten::arctanh_, aten::atanh_}, - {aten::fix, aten::trunc}, {aten::fix_, aten::trunc_}, - {aten::negative, aten::neg}, {aten::negative_, aten::neg_}, - {aten::subtract, aten::sub}, {aten::subtract_, aten::sub_}, - {aten::greater_equal, aten::ge}, {aten::greater_equal_, aten::ge_}, - {aten::greater, aten::gt}, {aten::greater_, aten::gt_}, - {aten::less_equal, aten::le}, {aten::less_equal_, aten::le_}, - {aten::less, aten::lt}, {aten::less_, aten::lt_}, - {aten::not_equal, aten::ne}, {aten::not_equal_, aten::ne_}, - {aten::divide, aten::div}, {aten::divide_, aten::div_}, - {aten::multiply, aten::mul}, {aten::multiply_, aten::mul_}, - {aten::true_divide, aten::div}, {aten::true_divide_, aten::div_}, -}; - void replaceNodeWithNewSymbol(Node* node, Symbol new_symbol) { WithInsertPoint insert_guard{node}; auto graph = node->owningGraph(); @@ -53,8 +29,8 @@ void replaceNodeWithNewSymbol(Node* node, Symbol new_symbol) { // difficult to consumer for downstream user of the IR, such as our own // optimization passes here, we convert op aliases into a standard form bool normalizeOpAliases(graph_node_list_iterator& iter) { - auto 
alias = alias_map.find(iter->kind()); - if (alias != alias_map.end()) { + auto alias = getOperatorAliasMap().find(iter->kind()); + if (alias != getOperatorAliasMap().end()) { replaceNodeWithNewSymbol(*iter, alias->second); iter.destroyCurrent(); return true; @@ -79,6 +55,33 @@ void NormalizeOps(Block* block) { } // namespace +const std::unordered_map& getOperatorAliasMap() { + // map from op alias -> normalized op + static const std::unordered_map alias_map = { + {aten::absolute, aten::abs}, {aten::absolute_, aten::abs_}, + {aten::clip, aten::clamp}, {aten::clip_, aten::clamp_}, + {aten::linalg_det, aten::det}, {aten::ger, aten::outer}, + {aten::arccos, aten::acos}, {aten::arccos_, aten::acos_}, + {aten::arcsin, aten::asin}, {aten::arcsin_, aten::asin_}, + {aten::arctan, aten::atan}, {aten::arctan_, aten::atan_}, + {aten::arccosh, aten::acosh}, {aten::arccosh_, aten::acosh_}, + {aten::arcsinh, aten::asinh}, {aten::arcsinh_, aten::asinh_}, + {aten::arctanh, aten::atanh}, {aten::arctanh_, aten::atanh_}, + {aten::fix, aten::trunc}, {aten::fix_, aten::trunc_}, + {aten::negative, aten::neg}, {aten::negative_, aten::neg_}, + {aten::subtract, aten::sub}, {aten::subtract_, aten::sub_}, + {aten::greater_equal, aten::ge}, {aten::greater_equal_, aten::ge_}, + {aten::greater, aten::gt}, {aten::greater_, aten::gt_}, + {aten::less_equal, aten::le}, {aten::less_equal_, aten::le_}, + {aten::less, aten::lt}, {aten::less_, aten::lt_}, + {aten::not_equal, aten::ne}, {aten::not_equal_, aten::ne_}, + {aten::divide, aten::div}, {aten::divide_, aten::div_}, + {aten::multiply, aten::mul}, {aten::multiply_, aten::mul_}, + {aten::true_divide, aten::div}, {aten::true_divide_, aten::div_}, + }; + return alias_map; +} + void NormalizeOps(const std::shared_ptr& graph) { NormalizeOps(graph->block()); } diff --git a/torch/csrc/jit/passes/normalize_ops.h b/torch/csrc/jit/passes/normalize_ops.h index 03963cd26a8..4d630392ca4 100644 --- a/torch/csrc/jit/passes/normalize_ops.h +++ 
b/torch/csrc/jit/passes/normalize_ops.h @@ -12,5 +12,7 @@ namespace jit { // Currently only handles normalization of op aliases. TORCH_API void NormalizeOps(const std::shared_ptr& graph); +const std::unordered_map& getOperatorAliasMap(); + } // namespace jit } // namespace torch diff --git a/torch/csrc/jit/passes/utils/check_alias_annotation.cpp b/torch/csrc/jit/passes/utils/check_alias_annotation.cpp index 20e4c6874e6..40aa7db7e5e 100644 --- a/torch/csrc/jit/passes/utils/check_alias_annotation.cpp +++ b/torch/csrc/jit/passes/utils/check_alias_annotation.cpp @@ -1,5 +1,6 @@ #include #include +#include #include namespace torch { @@ -61,19 +62,11 @@ Stack deepCopy(const Stack& stack) { } bool deepEquals(const IValue& lhs, const IValue& rhs) { - if (lhs.isInt() && rhs.isInt()) { - return lhs.toInt() == rhs.toInt(); - } else if (lhs.isDouble() && rhs.isDouble()) { - return lhs.toDouble() == rhs.toDouble(); - } else if (lhs.isNone() && rhs.isNone()) { - return true; - } else if (lhs.isIntList() && rhs.isIntList()) { - return lhs.toIntVector() == rhs.toIntVector(); - } else if (lhs.isTensor() && rhs.isTensor()) { + if (lhs.isTensor() && rhs.isTensor()) { return lhs.toTensor().equal(rhs.toTensor()); } - throw std::runtime_error("Deep equals not implemented for type"); + return lhs == rhs; } struct AliasAndIValue { @@ -146,6 +139,16 @@ const Node* findNodeForOp( return node; } } + + // Check for alias-ed operator names + const auto aliasOp = torch::jit::getOperatorAliasMap().find(opName); + AT_ASSERT(aliasOp != torch::jit::getOperatorAliasMap().end()); + for (const auto node : g.nodes()) { + if (node->kind() == aliasOp->second) { + return node; + } + } + AT_ASSERT(false); } diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py index 1c7b43d0be9..4152d0aa0ce 100644 --- a/torch/testing/_internal/common_methods_invocations.py +++ b/torch/testing/_internal/common_methods_invocations.py @@ -586,13 +586,13 @@ 
def method_tests(): ('transpose', (S, S, S), (2, 0), '3d', (False,)), ('t', (1, 2), NO_ARGS, '', (False,)), ('view', (S, S, S), (S * S, S), '', (False,)), - ('view', (S, S, S), (torch.Size([S * S, S]),), 'size', (False,)), + ('view', (torch.Size([S * S, S]),), (S, S, S), 'size', (False,)), ('view', (S,), (S,), '1d', (False,)), ('view', (), (dont_convert(()),), 'scalar_to_scalar', (False,)), ('view', (), (1,), 'scalar_to_1d', (False,)), ('ravel', (S, S, S), NO_ARGS, '', (False,)), ('reshape', (S, S, S), (S * S, S), '', (False,)), - ('reshape', (S, S, S), (torch.Size([S * S, S]),), 'size', (False,)), + ('reshape', (torch.Size([S * S, S]),), (S, S, S), 'size', (False,)), ('reshape', (S,), (S,), '1d', (False,)), ('reshape', (), (dont_convert(()),), 'scalar_to_scalar', (False,)), ('reshape', (), (1,), 'scalar_to_1d', (False,)),