diff --git a/aten/src/ATen/core/NamedRegistrations.cpp b/aten/src/ATen/core/NamedRegistrations.cpp
index 419569d6edb..33e4ebcfc7d 100644
--- a/aten/src/ATen/core/NamedRegistrations.cpp
+++ b/aten/src/ATen/core/NamedRegistrations.cpp
@@ -37,9 +37,9 @@ TORCH_LIBRARY_IMPL(aten, Named, m) {
   m.impl("add.out", CppFunction::makeFallthrough());
   m.impl("add_.Scalar", CppFunction::makeFallthrough());
   m.impl("add_.Tensor", CppFunction::makeFallthrough());
-  m.impl("add_relu.Tensor", CppFunction::makeFallthrough());
-  m.impl("add_relu.out", CppFunction::makeFallthrough());
-  m.impl("add_relu_.Tensor", CppFunction::makeFallthrough());
+  m.impl("_add_relu.Tensor", CppFunction::makeFallthrough());
+  m.impl("_add_relu.out", CppFunction::makeFallthrough());
+  m.impl("_add_relu_.Tensor", CppFunction::makeFallthrough());
   m.impl("addcdiv", CppFunction::makeFallthrough());
   m.impl("addcdiv.out", CppFunction::makeFallthrough());
   m.impl("addcdiv_", CppFunction::makeFallthrough());
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index cf0defa3245..b143d39e67e 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -388,19 +388,19 @@
     SparseCUDA: add_out_sparse_cuda
     MkldnnCPU: mkldnn_add_out
 
-- func: add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+- func: _add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
   use_c10_dispatcher: full
   variants: function
   dispatch:
     CPU: add_relu
 
-- func: add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
+- func: _add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
   use_c10_dispatcher: full
   variants: function
   dispatch:
     CPU: add_relu_
 
-- func: add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+- func: _add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
   variants: function
   dispatch:
     CPU: add_relu_out
diff --git a/test/backward_compatibility/check_backward_compatibility.py b/test/backward_compatibility/check_backward_compatibility.py
index 73f77d698bc..a8537feeed1 100644
--- a/test/backward_compatibility/check_backward_compatibility.py
+++ b/test/backward_compatibility/check_backward_compatibility.py
@@ -129,6 +129,8 @@ allow_list = [
     ("aten::_foreach_addcdiv", datetime.date(2020, 10, 15)),
     ("aten::_foreach_addcmul", datetime.date(2020, 10, 15)),
     ("aten::conj", datetime.date(2020, 11, 10)),
+    ("aten::add_relu", datetime.date(2020, 10, 28)),
+    ("aten::add_relu_", datetime.date(2020, 10, 28)),
 ]
 
 def allow_listed(schema, allow_list):
diff --git a/test/test_jit.py b/test/test_jit.py
index ba15d9b8a1c..79ac3452a8e 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -584,7 +584,7 @@ class TestJit(JitTestCase):
         m = torch.jit.load(buffer)
         new_res = m(a, b, c)
         FileCheck().check_not("aten::relu(") \
-            .check("aten::add_relu(") \
+            .check("aten::_add_relu(") \
             .run(m.graph)
         torch.testing.assert_allclose(orig_res, new_res)
 
@@ -603,7 +603,7 @@ class TestJit(JitTestCase):
         m = torch.jit.load(buffer)
         new_res = m(a, b, c)
         FileCheck().check_not("aten::relu_(") \
-            .check("aten::add_relu(") \
+            .check("aten::_add_relu(") \
             .run(m.graph)
         torch.testing.assert_allclose(orig_res, new_res)
 
@@ -634,10 +634,10 @@ class TestJit(JitTestCase):
         new_res = m(a_copy, b)
         FileCheck().check_not("aten::add_(") \
             .check_not("aten::relu_(") \
-            .check("aten::add_relu_(") \
+            .check("aten::_add_relu_(") \
             .run(m.graph)
         torch.testing.assert_allclose(orig_res, new_res)
-        # Since add_relu_ does inplace mutation ensure
+        # Since _add_relu_ does inplace mutation ensure
         # a_copy is modified
         torch.testing.assert_allclose(orig_res, a_copy)
 
@@ -672,10 +672,10 @@ class TestJit(JitTestCase):
         new_res = m(a_copy, b)
         FileCheck().check_not("aten::add(") \
             .check_not("aten::relu_(") \
-            .check("aten::add_relu(") \
+            .check("aten::_add_relu(") \
             .run(m.graph)
         torch.testing.assert_allclose(orig_res, new_res)
-        # Since add_relu_ with out=a does inplace mutation ensure
+        # Since _add_relu_ with out=a does inplace mutation ensure
         # a_copy is modified
         torch.testing.assert_allclose(orig_res, a_copy)
 
diff --git a/test/test_mobile_optimizer.py b/test/test_mobile_optimizer.py
index 11235edac7c..3a2c4867dfc 100644
--- a/test/test_mobile_optimizer.py
+++ b/test/test_mobile_optimizer.py
@@ -95,7 +95,7 @@ class TestOptimizer(unittest.TestCase):
                    .check_count("prepacked::linear_clamp_run", 1, exactly=True) \
                    .check_not("aten::add(") \
                    .check_not("aten::relu(") \
-                   .check_count("aten::add_relu(", 1, exactly=True) \
+                   .check_count("aten::_add_relu(", 1, exactly=True) \
                    .run(optimized_scripted_model.graph)
         torch.testing.assert_allclose(initial_result, optimized_result, rtol=1e-2, atol=1e-3)
 
diff --git a/test/test_nn.py b/test/test_nn.py
index dd8097e912c..101280526a4 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -9210,7 +9210,7 @@ class TestAddRelu(TestCase):
         a = a + 5
         add_res = a + b
         relu_res = torch.relu(add_res)
-        add_relu_res = torch.add_relu(a, b)
+        add_relu_res = torch._VF._add_relu(a, b)
 
         self.assertTrue(torch.allclose(add_relu_res, relu_res))
 
diff --git a/tools/code_analyzer/default_op_deps.yaml b/tools/code_analyzer/default_op_deps.yaml
index 8a71f33bcca..c2adb0dbb80 100644
--- a/tools/code_analyzer/default_op_deps.yaml
+++ b/tools/code_analyzer/default_op_deps.yaml
@@ -1903,7 +1903,7 @@
   - name: aten::resize_as_
   - name: aten::scalar_tensor
   - name: aten::to
-- name: aten::add_relu
+- name: aten::_add_relu
   depends:
   - name: aten::as_strided_
   - name: aten::copy_
@@ -1915,7 +1915,7 @@
   - name: aten::resize_
   - name: aten::resize_as_
   - name: aten::to
-- name: aten::add_relu_
+- name: aten::_add_relu_
   depends:
   - name: aten::as_strided_
   - name: aten::copy_
diff --git a/torch/csrc/jit/passes/fuse_relu.cpp b/torch/csrc/jit/passes/fuse_relu.cpp
index 8c3fe1e6e71..02e8f9f2bef 100644
--- a/torch/csrc/jit/passes/fuse_relu.cpp
+++ b/torch/csrc/jit/passes/fuse_relu.cpp
@@ -17,7 +17,7 @@ void fuseAddReluImpl(std::shared_ptr<Graph>& graph) {
         return (%res))";
   std::string add_relu_fused = R"(
     graph(%a, %b, %alpha):
-        %res = aten::add_relu(%a, %b, %alpha)
+        %res = aten::_add_relu(%a, %b, %alpha)
         return (%res))";
 
   rewriter.RegisterRewritePattern(add_relu_0, add_relu_fused);
@@ -35,7 +35,7 @@ void fuseAddReluImpl(std::shared_ptr<Graph>& graph) {
         return (%res))";
   std::string add_inplace_relu_fused = R"(
     graph(%a, %b, %alpha):
-        %res = aten::add_relu_(%a, %b, %alpha)
+        %res = aten::_add_relu_(%a, %b, %alpha)
         return (%res))";
 
   rewriter.RegisterRewritePattern(add_inplace_relu_1, add_inplace_relu_fused);
@@ -46,7 +46,7 @@ void fuseAddReluImpl(std::shared_ptr<Graph>& graph) {
         return (%res))";
   std::string add_out_relu_fused = R"(
     graph(%a, %b, %alpha, %out):
-        %res = aten::add_relu(%a, %b, %alpha, %out)
+        %res = aten::_add_relu(%a, %b, %alpha, %out)
         return (%res))";
 
   rewriter.RegisterRewritePattern(add_out_relu, add_out_relu_fused);
diff --git a/torch/overrides.py b/torch/overrides.py
index 3bd34b4c835..8224944d9ff 100644
--- a/torch/overrides.py
+++ b/torch/overrides.py
@@ -209,7 +209,6 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
         torch.arccos: lambda input, out=None: -1,
         torch.acosh: lambda input, out=None: -1,
         torch.arccosh: lambda input, out=None: -1,
-        torch.add_relu: lambda input, other, out=None: -1,
         torch.add: lambda input, other, out=None: -1,
         torch.addbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
         torch.addcdiv: lambda input, tensor1, tensor2, value=1, out=None: -1,