From fe7e1bd1ceee405eb14ba54993ea90ddb463cf08 Mon Sep 17 00:00:00 2001
From: Don Jang
Date: Mon, 28 Feb 2022 00:29:16 -0800
Subject: [PATCH] [Static Runtime] Add auto-generated out variant dispatchers
 (#72603)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/72603

This change adds the out variant dispatchers generated by the previous diff.

This diff generates 133 out variant dispatchers, which increases out variant coverage by 309% (current: 43; after this diff: 43 + 133 = 176). This number is expected to grow substantially as the generation script is extended to cover more ops.

Test Plan:
**Unittest** Confirmed that

```
buck run //caffe2/benchmarks/static_runtime:static_runtime_cpptest
```

is passing.

Reviewed By: swolchok

Differential Revision: D33373928

fbshipit-source-id: 4d94d788282f3f313bb36f2f9452edecd9862246
(cherry picked from commit e4ce8b386d1fcc47b86cb9c9016a70e7a31b452c)
---
 benchmarks/static_runtime/CMakeLists.txt      |    1 +
 .../static_runtime/test_generated_ops.cc      | 4445 +++++++++++++++++
 .../static_runtime/test_static_runtime.cc     |    2 +-
 tools/build_variables.bzl                     |    1 +
 tools/codegen/static_runtime/config.py        |   68 +-
 .../csrc/jit/runtime/static/generated_ops.cpp | 2772 ++++++++++
 6 files changed, 7274 insertions(+), 15 deletions(-)
 create mode 100644 benchmarks/static_runtime/test_generated_ops.cc
 create mode 100644 torch/csrc/jit/runtime/static/generated_ops.cpp

diff --git a/benchmarks/static_runtime/CMakeLists.txt b/benchmarks/static_runtime/CMakeLists.txt
index d248fe2a557..1fba0256677 100644
--- a/benchmarks/static_runtime/CMakeLists.txt
+++ b/benchmarks/static_runtime/CMakeLists.txt
@@ -6,4 +6,5 @@ list(APPEND STATIC_RUNTIME_TEST_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/deep_wide_pt.cc
 list(APPEND STATIC_RUNTIME_TEST_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/test_utils.cc)
 list(APPEND STATIC_RUNTIME_TEST_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/test_static_runtime.cc)
 list(APPEND STATIC_RUNTIME_TEST_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/test_static_module.cc)
+list(APPEND STATIC_RUNTIME_TEST_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/test_generated_ops.cc)
 set(STATIC_RUNTIME_TEST_SRCS ${STATIC_RUNTIME_TEST_SRCS} PARENT_SCOPE)
diff --git a/benchmarks/static_runtime/test_generated_ops.cc b/benchmarks/static_runtime/test_generated_ops.cc
new file mode 100644
index 00000000000..8b21035f603
--- /dev/null
+++ b/benchmarks/static_runtime/test_generated_ops.cc
@@ -0,0 +1,4445 @@
+// @lint-ignore-every CLANGTIDY HOWTOEVEN
+#include <gtest/gtest.h>
+#include <torch/csrc/jit/runtime/static/impl.h>
+#include <torch/torch.h>
+
+#include "test_utils.h"
+
+using namespace caffe2;
+using namespace torch;
+using namespace torch::jit;
+using namespace torch::jit::test;
+using c10::IValue;
+
+TEST(StaticRuntime, autogen_sgn) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::sgn(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_acos) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::acos(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
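+  // Note (applies to every autogen test in this file): the first
+  // testStaticRuntime() call runs the graph with one set of inputs, and
+  // the second call re-runs it with differently sized inputs (args2);
+  // with /*check_resize=*/true the harness can also verify that the out
+  // variant correctly resizes its preallocated output between runs.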
testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_addmv) { + const std::string script = R"IR( + graph(%self: Tensor, %mat: Tensor, %vec: Tensor, %beta: int, %alpha: int): + %bias: None = prim::Constant() + %ret = aten::addmv(%self, %mat, %vec, %beta, %alpha) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({2}); + auto mat0 = at::rand({2, 2}); + auto vec0 = at::rand({2}); + auto beta0 = 2; + auto alpha0 = 2; + std::vector args{self0, mat0, vec0, beta0, alpha0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({35}); + auto mat1 = at::rand({35, 35}); + auto vec1 = at::rand({35}); + auto beta1 = 2; + auto alpha1 = 2; + std::vector args2{self1, mat1, vec1, beta1, alpha1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_argmax) { + const std::string script = R"IR( + graph(%self: Tensor, %dim: int?, %keepdim: bool): + %bias: None = prim::Constant() + %ret = aten::argmax(%self, %dim, %keepdim) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto dim0 = 1; + auto keepdim0 = false; + std::vector args{self0, dim0, keepdim0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto dim1 = 1; + auto keepdim1 = false; + std::vector args2{self1, dim1, keepdim1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_acosh) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::acosh(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({2, 2, 2}) + at::ones({2, 2, 2}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({5, 5, 5}) + at::ones({5, 5, 5}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_asinh) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::asinh(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_atanh) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::atanh(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + 
std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_asin) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::asin(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_atan) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::atan(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_baddbmm) { + const std::string script = R"IR( + graph(%self: Tensor, %batch1: Tensor, %batch2: Tensor, %beta: int, %alpha: int): + %bias: None = prim::Constant() + %ret = aten::baddbmm(%self, %batch1, %batch2, %beta, %alpha) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto batch10 = at::rand({6, 6, 6}); + auto batch20 = at::rand({6, 6, 6}); + auto beta0 = 2; + auto alpha0 = 2; + std::vector args{self0, batch10, batch20, beta0, alpha0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto batch11 = at::rand({22, 22, 22}); + auto batch21 = at::rand({22, 22, 22}); + auto beta1 = 2; + auto alpha1 = 2; + std::vector args2{self1, batch11, batch21, beta1, alpha1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_bitwise_not) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::bitwise_not(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::randint(1, 100, {6, 6, 6}, at::kInt); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::randint(1, 100, {22, 22, 22}, at::kInt); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_copysign_Tensor) { + const std::string script = R"IR( + graph(%self: Tensor, %other: Tensor): + %bias: None = prim::Constant() + %ret = aten::copysign(%self, %other) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto other0 = 
at::rand({6, 6, 6}); + std::vector args{self0, other0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto other1 = at::rand({22, 22, 22}); + std::vector args2{self1, other1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_ceil) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::ceil(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_cos) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::cos(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_cosh) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::cosh(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_cumprod) { + const std::string script = R"IR( + graph(%self: Tensor, %dim: int, %dtype: int?): + %bias: None = prim::Constant() + %ret = aten::cumprod(%self, %dim, %dtype) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto dim0 = 1; + auto dtype0 = at::ScalarType::Float; + std::vector args{self0, dim0, dtype0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto dim1 = 1; + auto dtype1 = at::ScalarType::Float; + std::vector args2{self1, dim1, dtype1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_erf) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::erf(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + 
args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_erfc) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::erfc(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_exp) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::exp(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_exp2) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::exp2(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_expm1) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::expm1(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_floor) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::floor(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_frac) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::frac(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + 
auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_gcd) { + const std::string script = R"IR( + graph(%self: Tensor, %other: Tensor): + %bias: None = prim::Constant() + %ret = aten::gcd(%self, %other) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::randint(1, 100, {6, 6, 6}, at::kInt); + auto other0 = at::randint(1, 100, {6, 6, 6}, at::kInt); + std::vector args{self0, other0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::randint(1, 100, {22, 22, 22}, at::kInt); + auto other1 = at::randint(1, 100, {22, 22, 22}, at::kInt); + std::vector args2{self1, other1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_lcm) { + const std::string script = R"IR( + graph(%self: Tensor, %other: Tensor): + %bias: None = prim::Constant() + %ret = aten::lcm(%self, %other) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::randint(1, 100, {6, 6, 6}, at::kInt); + auto other0 = at::randint(1, 100, {6, 6, 6}, at::kInt); + std::vector args{self0, other0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::randint(1, 100, {22, 22, 22}, at::kInt); + auto other1 = at::randint(1, 100, {22, 22, 22}, at::kInt); + std::vector args2{self1, other1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_index_copy) { + const std::string script = R"IR( + graph(%self: Tensor, %dim: int, %index: Tensor, %source: Tensor): + %bias: None = prim::Constant() + %ret = aten::index_copy(%self, %dim, %index, %source) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({2}); + auto dim0 = 0; + auto index0 = at::randint(0, 1, {2}, at::kLong); + auto source0 = at::rand({2}); + std::vector args{self0, dim0, index0, source0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({32}); + auto dim1 = 0; + auto index1 = at::randint(0, 10, {32}, at::kLong); + auto source1 = at::rand({32}); + std::vector args2{self1, dim1, index1, source1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_isin_Tensor_Tensor) { + const std::string script = R"IR( + graph(%elements: Tensor, %test_elements: Tensor, %assume_unique: bool, %invert: bool): + %bias: None = prim::Constant() + %ret = aten::isin(%elements, %test_elements, %assume_unique, %invert) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto elements0 = at::rand({6, 6, 6}); + auto test_elements0 = at::rand({6, 6, 6}); + auto assume_unique0 = false; + auto invert0 = false; + std::vector args{elements0, test_elements0, assume_unique0, invert0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto elements1 = at::rand({22, 22, 22}); + auto test_elements1 = at::rand({22, 22, 22}); + auto assume_unique1 = false; + auto invert1 = 
false; + std::vector args2{elements1, test_elements1, assume_unique1, invert1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_isin_Tensor_Scalar) { + const std::string script = R"IR( + graph(%elements: Tensor, %test_element: int, %assume_unique: bool, %invert: bool): + %bias: None = prim::Constant() + %ret = aten::isin(%elements, %test_element, %assume_unique, %invert) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto elements0 = at::rand({6, 6, 6}); + auto test_element0 = 2; + auto assume_unique0 = false; + auto invert0 = false; + std::vector args{elements0, test_element0, assume_unique0, invert0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto elements1 = at::rand({22, 22, 22}); + auto test_element1 = 2; + auto assume_unique1 = false; + auto invert1 = false; + std::vector args2{elements1, test_element1, assume_unique1, invert1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_isin_Scalar_Tensor) { + const std::string script = R"IR( + graph(%element: int, %test_elements: Tensor, %assume_unique: bool, %invert: bool): + %bias: None = prim::Constant() + %ret = aten::isin(%element, %test_elements, %assume_unique, %invert) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto element0 = 2; + auto test_elements0 = at::rand({6, 6, 6}); + auto assume_unique0 = false; + auto invert0 = false; + std::vector args{element0, test_elements0, assume_unique0, invert0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/false); + + auto element1 = 2; + auto test_elements1 = at::rand({22, 22, 22}); + auto assume_unique1 = false; + auto invert1 = false; + std::vector args2{element1, test_elements1, assume_unique1, invert1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/false); +} + +TEST(StaticRuntime, autogen_log10) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::log10(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_log1p) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::log1p(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_log2) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = 
aten::log2(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_logaddexp) { + const std::string script = R"IR( + graph(%self: Tensor, %other: Tensor): + %bias: None = prim::Constant() + %ret = aten::logaddexp(%self, %other) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto other0 = at::rand({6, 6, 6}); + std::vector args{self0, other0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto other1 = at::rand({22, 22, 22}); + std::vector args2{self1, other1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_logaddexp2) { + const std::string script = R"IR( + graph(%self: Tensor, %other: Tensor): + %bias: None = prim::Constant() + %ret = aten::logaddexp2(%self, %other) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto other0 = at::rand({6, 6, 6}); + std::vector args{self0, other0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto other1 = at::rand({22, 22, 22}); + std::vector args2{self1, other1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_xlogy_Tensor) { + const std::string script = R"IR( + graph(%self: Tensor, %other: Tensor): + %bias: None = prim::Constant() + %ret = aten::xlogy(%self, %other) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto other0 = at::rand({6, 6, 6}); + std::vector args{self0, other0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto other1 = at::rand({22, 22, 22}); + std::vector args2{self1, other1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen__log_softmax) { + const std::string script = R"IR( + graph(%self: Tensor, %dim: int, %half_to_float: bool): + %bias: None = prim::Constant() + %ret = aten::_log_softmax(%self, %dim, %half_to_float) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto dim0 = 1; + auto half_to_float0 = false; + std::vector args{self0, dim0, half_to_float0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto dim1 = 1; + auto half_to_float1 = false; + std::vector args2{self1, dim1, half_to_float1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen__log_softmax_backward_data) { 
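+  // For context, each autogen test exercises a dispatcher that the codegen
+  // emits into torch/csrc/jit/runtime/static/generated_ops.cpp. A minimal
+  // sketch of the shape of one such dispatcher, shown for aten::sgn
+  // (illustrative only; the exact generated code may differ):
+  //
+  //   REGISTER_OPERATOR_FUNCTOR(aten::sgn, aten_sgn, [](Node* n) -> SROperator {
+  //     if (n->matches(torch::schema("aten::sgn(Tensor self) -> Tensor"))) {
+  //       return [](ProcessedNode* p_node) {
+  //         const auto& self = p_node->Input(0).toTensor();
+  //         if (p_node->Output(0).isNone()) {
+  //           p_node->Output(0) = at::cpu::sgn(self);  // first run: allocate
+  //           return;
+  //         }
+  //         auto& out = p_node->Output(0).toTensor();
+  //         fastResizeToZero(out);
+  //         at::cpu::sgn_out(out, self);  // later runs: write into out
+  //       };
+  //     }
+  //     LogAndDumpSchema(n);
+  //     return nullptr;
+  //   });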
+ const std::string script = R"IR( + graph(%grad_output: Tensor, %output: Tensor, %dim: int, %input_dtype: int): + %bias: None = prim::Constant() + %ret = aten::_log_softmax_backward_data(%grad_output, %output, %dim, %input_dtype) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto grad_output0 = at::rand({6, 6, 6}); + auto output0 = at::rand({6, 6, 6}); + auto dim0 = 1; + auto input_dtype0 = at::ScalarType::Float; + std::vector args{grad_output0, output0, dim0, input_dtype0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto grad_output1 = at::rand({22, 22, 22}); + auto output1 = at::rand({22, 22, 22}); + auto dim1 = 1; + auto input_dtype1 = at::ScalarType::Float; + std::vector args2{grad_output1, output1, dim1, input_dtype1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_mm) { + const std::string script = R"IR( + graph(%self: Tensor, %mat2: Tensor): + %bias: None = prim::Constant() + %ret = aten::mm(%self, %mat2) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({8, 8}); + auto mat20 = at::rand({8, 8}); + std::vector args{self0, mat20}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({32, 32}); + auto mat21 = at::rand({32, 32}); + std::vector args2{self1, mat21}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_reciprocal) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::reciprocal(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_neg) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::neg(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_round) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::round(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_round_decimals) 
{ + const std::string script = R"IR( + graph(%self: Tensor, %decimals: int): + %bias: None = prim::Constant() + %ret = aten::round(%self, %decimals) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto decimals0 = 1; + std::vector args{self0, decimals0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto decimals1 = 1; + std::vector args2{self1, decimals1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_gelu) { + const std::string script = R"IR( + graph(%self: Tensor, %approximate: str): + %bias: None = prim::Constant() + %ret = aten::gelu(%self, %approximate) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto approximate0 = "tanh"; + std::vector args{self0, approximate0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto approximate1 = "tanh"; + std::vector args2{self1, approximate1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_gelu_backward) { + const std::string script = R"IR( + graph(%grad_output: Tensor, %self: Tensor, %approximate: str): + %bias: None = prim::Constant() + %ret = aten::gelu_backward(%grad_output, %self, %approximate) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto grad_output0 = at::rand({6, 6, 6}); + auto self0 = at::rand({6, 6, 6}); + auto approximate0 = "tanh"; + std::vector args{grad_output0, self0, approximate0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto grad_output1 = at::rand({22, 22, 22}); + auto self1 = at::rand({22, 22, 22}); + auto approximate1 = "tanh"; + std::vector args2{grad_output1, self1, approximate1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_hardshrink) { + const std::string script = R"IR( + graph(%self: Tensor, %lambd: int): + %bias: None = prim::Constant() + %ret = aten::hardshrink(%self, %lambd) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto lambd0 = 2; + std::vector args{self0, lambd0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto lambd1 = 2; + std::vector args2{self1, lambd1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_hardshrink_backward) { + const std::string script = R"IR( + graph(%grad_out: Tensor, %self: Tensor, %lambd: int): + %bias: None = prim::Constant() + %ret = aten::hardshrink_backward(%grad_out, %self, %lambd) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto grad_out0 = at::rand({6, 6, 6}); + auto self0 = at::rand({6, 6, 6}); + auto lambd0 = 2; + std::vector args{grad_out0, self0, lambd0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + 
/*check_resize=*/true); + + auto grad_out1 = at::rand({22, 22, 22}); + auto self1 = at::rand({22, 22, 22}); + auto lambd1 = 2; + std::vector args2{grad_out1, self1, lambd1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_rsqrt) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::rsqrt(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_silu) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::silu(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_silu_backward) { + const std::string script = R"IR( + graph(%grad_output: Tensor, %self: Tensor): + %bias: None = prim::Constant() + %ret = aten::silu_backward(%grad_output, %self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto grad_output0 = at::rand({6, 6, 6}); + auto self0 = at::rand({6, 6, 6}); + std::vector args{grad_output0, self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto grad_output1 = at::rand({22, 22, 22}); + auto self1 = at::rand({22, 22, 22}); + std::vector args2{grad_output1, self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_mish) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::mish(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_sin) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::sin(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_sinc) { + 
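+  // The IR pattern shared by these tests ends in aten::clone(%ret, %bias),
+  // where %bias is a None constant standing in for clone's optional
+  // memory_format argument; cloning keeps %ret an intermediate value
+  // rather than a graph output, so the tensor produced by the op under
+  // test can be managed by the static runtime's memory planner.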
const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::sinc(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_sinh) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::sinh(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen__softmax) { + const std::string script = R"IR( + graph(%self: Tensor, %dim: int, %half_to_float: bool): + %bias: None = prim::Constant() + %ret = aten::_softmax(%self, %dim, %half_to_float) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto dim0 = 1; + auto half_to_float0 = false; + std::vector args{self0, dim0, half_to_float0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto dim1 = 1; + auto half_to_float1 = false; + std::vector args2{self1, dim1, half_to_float1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen__softmax_backward_data) { + const std::string script = R"IR( + graph(%grad_output: Tensor, %output: Tensor, %dim: int, %input_dtype: int): + %bias: None = prim::Constant() + %ret = aten::_softmax_backward_data(%grad_output, %output, %dim, %input_dtype) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto grad_output0 = at::rand({6, 6, 6}); + auto output0 = at::rand({6, 6, 6}); + auto dim0 = 1; + auto input_dtype0 = at::ScalarType::Float; + std::vector args{grad_output0, output0, dim0, input_dtype0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto grad_output1 = at::rand({22, 22, 22}); + auto output1 = at::rand({22, 22, 22}); + auto dim1 = 1; + auto input_dtype1 = at::ScalarType::Float; + std::vector args2{grad_output1, output1, dim1, input_dtype1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_sqrt) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::sqrt(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + 
/*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_prod_dim_int) { + const std::string script = R"IR( + graph(%self: Tensor, %dim: int, %keepdim: bool, %dtype: int?): + %bias: None = prim::Constant() + %ret = aten::prod(%self, %dim, %keepdim, %dtype) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto dim0 = 1; + auto keepdim0 = false; + auto dtype0 = at::ScalarType::Float; + std::vector args{self0, dim0, keepdim0, dtype0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto dim1 = 1; + auto keepdim1 = false; + auto dtype1 = at::ScalarType::Float; + std::vector args2{self1, dim1, keepdim1, dtype1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_tan) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::tan(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_threshold) { + const std::string script = R"IR( + graph(%self: Tensor, %threshold: int, %value: int): + %bias: None = prim::Constant() + %ret = aten::threshold(%self, %threshold, %value) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto threshold0 = 2; + auto value0 = 2; + std::vector args{self0, threshold0, value0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto threshold1 = 2; + auto value1 = 2; + std::vector args2{self1, threshold1, value1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_threshold_backward) { + const std::string script = R"IR( + graph(%grad_output: Tensor, %self: Tensor, %threshold: int): + %bias: None = prim::Constant() + %ret = aten::threshold_backward(%grad_output, %self, %threshold) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto grad_output0 = at::rand({6, 6, 6}); + auto self0 = at::rand({6, 6, 6}); + auto threshold0 = 2; + std::vector args{grad_output0, self0, threshold0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto grad_output1 = at::rand({22, 22, 22}); + auto self1 = at::rand({22, 22, 22}); + auto threshold1 = 2; + std::vector args2{grad_output1, self1, threshold1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_trunc) { + const std::string script = R"IR( + graph(%self: Tensor): + %bias: None = prim::Constant() + %ret = aten::trunc(%self) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + std::vector args{self0}; + 
testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + std::vector args2{self1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_heaviside) { + const std::string script = R"IR( + graph(%self: Tensor, %values: Tensor): + %bias: None = prim::Constant() + %ret = aten::heaviside(%self, %values) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto values0 = at::rand({6, 6, 6}); + std::vector args{self0, values0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto values1 = at::rand({22, 22, 22}); + std::vector args2{self1, values1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_index_add) { + const std::string script = R"IR( + graph(%self: Tensor, %dim: int, %index: Tensor, %source: Tensor, %alpha: int): + %bias: None = prim::Constant() + %ret = aten::index_add(%self, %dim, %index, %source, %alpha) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({2}); + auto dim0 = 0; + auto index0 = at::randint(0, 1, {2}, at::kInt); + auto source0 = at::rand({2}); + auto alpha0 = 2; + std::vector args{self0, dim0, index0, source0, alpha0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/false); + + auto self1 = at::rand({16}); + auto dim1 = 0; + auto index1 = at::randint(0, 10, {16}, at::kInt); + auto source1 = at::rand({16}); + auto alpha1 = 2; + std::vector args2{self1, dim1, index1, source1, alpha1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/false); +} + +TEST(StaticRuntime, autogen_scatter_src) { + const std::string script = R"IR( + graph(%self: Tensor, %dim: int, %index: Tensor, %src: Tensor): + %bias: None = prim::Constant() + %ret = aten::scatter(%self, %dim, %index, %src) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::randint(1, 100, {2, 2, 2}, torch::kInt64); + auto dim0 = 1; + auto index0 = at::randint(0, 1, {2, 2, 2}, torch::kInt64); + auto src0 = at::randint(1, 100, {2, 2, 2}, torch::kInt64); + std::vector args{self0, dim0, index0, src0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::randint(1, 100, {5, 5, 5}, torch::kInt64); + auto dim1 = 1; + auto index1 = at::randint(0, 1, {5, 5, 5}, torch::kInt64); + auto src1 = at::randint(1, 100, {5, 5, 5}, torch::kInt64); + std::vector args2{self1, dim1, index1, src1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_scatter_value) { + const std::string script = R"IR( + graph(%self: Tensor, %dim: int, %index: Tensor, %value: int): + %bias: None = prim::Constant() + %ret = aten::scatter(%self, %dim, %index, %value) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::randint(1, 100, {2, 2, 2}, torch::kInt64); + auto dim0 = 1; + auto index0 = at::randint(0, 1, {2, 2, 2}, torch::kInt64); + auto value0 = 
2; + auto src0 = at::randint(1, 100, {2, 2, 2}, torch::kInt64); + std::vector args{self0, dim0, index0, value0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::randint(1, 100, {5, 5, 5}, torch::kInt64); + auto dim1 = 1; + auto index1 = at::randint(0, 1, {5, 5, 5}, torch::kInt64); + auto value1 = 2; + auto src1 = at::randint(1, 100, {5, 5, 5}, torch::kInt64); + std::vector args2{self1, dim1, index1, value1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_scatter_reduce) { + const std::string script = R"IR( + graph(%self: Tensor, %dim: int, %index: Tensor, %src: Tensor, %reduce: str): + %bias: None = prim::Constant() + %ret = aten::scatter(%self, %dim, %index, %src, %reduce) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::randint(1, 100, {2, 2, 2}, torch::kInt64); + auto dim0 = 1; + auto index0 = at::randint(0, 1, {2, 2, 2}, torch::kInt64); + auto src0 = at::randint(1, 100, {2, 2, 2}, torch::kInt64); + auto reduce0 = "add"; + std::vector args{self0, dim0, index0, src0, reduce0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::randint(1, 100, {5, 5, 5}, torch::kInt64); + auto dim1 = 1; + auto index1 = at::randint(0, 1, {5, 5, 5}, torch::kInt64); + auto src1 = at::randint(1, 100, {5, 5, 5}, torch::kInt64); + auto reduce1 = "add"; + std::vector args2{self1, dim1, index1, src1, reduce1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_scatter_value_reduce) { + const std::string script = R"IR( + graph(%self: Tensor, %dim: int, %index: Tensor, %value: int, %reduce: str): + %bias: None = prim::Constant() + %ret = aten::scatter(%self, %dim, %index, %value, %reduce) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::randint(1, 100, {2, 2, 2}, torch::kInt64); + auto dim0 = 1; + auto index0 = at::randint(0, 1, {2, 2, 2}, torch::kInt64); + auto value0 = 2; + auto reduce0 = "add"; + auto src0 = at::randint(1, 100, {2, 2, 2}, torch::kInt64); + std::vector args{self0, dim0, index0, value0, reduce0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::randint(1, 100, {5, 5, 5}, torch::kInt64); + auto dim1 = 1; + auto index1 = at::randint(0, 1, {5, 5, 5}, torch::kInt64); + auto value1 = 2; + auto reduce1 = "add"; + auto src1 = at::randint(1, 100, {5, 5, 5}, torch::kInt64); + std::vector args2{self1, dim1, index1, value1, reduce1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_scatter_add) { + const std::string script = R"IR( + graph(%self: Tensor, %dim: int, %index: Tensor, %src: Tensor): + %bias: None = prim::Constant() + %ret = aten::scatter_add(%self, %dim, %index, %src) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::randint(1, 100, {2, 2, 2}, torch::kInt64); + auto dim0 = 1; + auto index0 = at::randint(0, 1, {2, 2, 2}, torch::kInt64); + auto src0 = at::randint(1, 100, {2, 2, 2}, torch::kInt64); + std::vector args{self0, dim0, index0, src0}; + testStaticRuntime( + script, + args, + {}, 
+ /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::randint(1, 100, {5, 5, 5}, torch::kInt64); + auto dim1 = 1; + auto index1 = at::randint(0, 1, {5, 5, 5}, torch::kInt64); + auto src1 = at::randint(1, 100, {5, 5, 5}, torch::kInt64); + std::vector args2{self1, dim1, index1, src1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_eq_Scalar) { + const std::string script = R"IR( + graph(%self: Tensor, %other: int): + %bias: None = prim::Constant() + %ret = aten::eq(%self, %other) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto other0 = 2; + std::vector args{self0, other0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto other1 = 2; + std::vector args2{self1, other1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_eq_Tensor) { + const std::string script = R"IR( + graph(%self: Tensor, %other: Tensor): + %bias: None = prim::Constant() + %ret = aten::eq(%self, %other) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto other0 = at::rand({6, 6, 6}); + std::vector args{self0, other0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto other1 = at::rand({22, 22, 22}); + std::vector args2{self1, other1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_bitwise_and_Tensor) { + const std::string script = R"IR( + graph(%self: Tensor, %other: Tensor): + %bias: None = prim::Constant() + %ret = aten::bitwise_and(%self, %other) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::randint(1, 100, {6, 6, 6}, at::kInt); + auto other0 = at::randint(1, 100, {6, 6, 6}, at::kInt); + std::vector args{self0, other0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::randint(1, 100, {22, 22, 22}, at::kInt); + auto other1 = at::randint(1, 100, {22, 22, 22}, at::kInt); + std::vector args2{self1, other1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_bitwise_or_Tensor) { + const std::string script = R"IR( + graph(%self: Tensor, %other: Tensor): + %bias: None = prim::Constant() + %ret = aten::bitwise_or(%self, %other) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::randint(1, 100, {6, 6, 6}, at::kInt); + auto other0 = at::randint(1, 100, {6, 6, 6}, at::kInt); + std::vector args{self0, other0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::randint(1, 100, {22, 22, 22}, at::kInt); + auto other1 = at::randint(1, 100, {22, 22, 22}, at::kInt); + std::vector args2{self1, other1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, 
autogen_bitwise_xor_Tensor) { + const std::string script = R"IR( + graph(%self: Tensor, %other: Tensor): + %bias: None = prim::Constant() + %ret = aten::bitwise_xor(%self, %other) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::randint(1, 100, {6, 6, 6}, at::kInt); + auto other0 = at::randint(1, 100, {6, 6, 6}, at::kInt); + std::vector args{self0, other0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::randint(1, 100, {22, 22, 22}, at::kInt); + auto other1 = at::randint(1, 100, {22, 22, 22}, at::kInt); + std::vector args2{self1, other1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_bitwise_left_shift_Tensor) { + const std::string script = R"IR( + graph(%self: Tensor, %other: Tensor): + %bias: None = prim::Constant() + %ret = aten::bitwise_left_shift(%self, %other) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto other0 = at::rand({6, 6, 6}); + std::vector args{self0, other0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto other1 = at::rand({22, 22, 22}); + std::vector args2{self1, other1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_bitwise_right_shift_Tensor) { + const std::string script = R"IR( + graph(%self: Tensor, %other: Tensor): + %bias: None = prim::Constant() + %ret = aten::bitwise_right_shift(%self, %other) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto other0 = at::rand({6, 6, 6}); + std::vector args{self0, other0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto other1 = at::rand({22, 22, 22}); + std::vector args2{self1, other1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_tril) { + const std::string script = R"IR( + graph(%self: Tensor, %diagonal: int): + %bias: None = prim::Constant() + %ret = aten::tril(%self, %diagonal) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto diagonal0 = 1; + std::vector args{self0, diagonal0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto diagonal1 = 1; + std::vector args2{self1, diagonal1}; + testStaticRuntime( + script, + args, + args2, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); +} + +TEST(StaticRuntime, autogen_triu) { + const std::string script = R"IR( + graph(%self: Tensor, %diagonal: int): + %bias: None = prim::Constant() + %ret = aten::triu(%self, %diagonal) + %cloned = aten::clone(%ret, %bias) + return (%cloned) + )IR"; + + auto self0 = at::rand({6, 6, 6}); + auto diagonal0 = 1; + std::vector args{self0, diagonal0}; + testStaticRuntime( + script, + args, + {}, + /*use_allclose=*/false, + /*use_equalnan=*/false, + /*check_resize=*/true); + + auto self1 = at::rand({22, 22, 22}); + auto 
+  auto diagonal1 = 1;
+  std::vector<IValue> args2{self1, diagonal1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_digamma) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::digamma(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_lerp_Scalar) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %end: Tensor, %weight: int):
+        %bias: None = prim::Constant()
+        %ret = aten::lerp(%self, %end, %weight)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto end0 = at::rand({6, 6, 6});
+  auto weight0 = 2;
+  std::vector<IValue> args{self0, end0, weight0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto end1 = at::rand({22, 22, 22});
+  auto weight1 = 2;
+  std::vector<IValue> args2{self1, end1, weight1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_lerp_Tensor) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %end: Tensor, %weight: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::lerp(%self, %end, %weight)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto end0 = at::rand({6, 6, 6});
+  auto weight0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, end0, weight0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto end1 = at::rand({22, 22, 22});
+  auto weight1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, end1, weight1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_ne_Scalar) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: int):
+        %bias: None = prim::Constant()
+        %ret = aten::ne(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = 2;
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = 2;
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_ne_Tensor) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::ne(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_ge_Scalar) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: int):
+        %bias: None = prim::Constant()
+        %ret = aten::ge(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = 2;
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = 2;
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_ge_Tensor) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::ge(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_le_Scalar) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: int):
+        %bias: None = prim::Constant()
+        %ret = aten::le(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = 2;
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = 2;
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_le_Tensor) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::le(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_gt_Scalar) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: int):
+        %bias: None = prim::Constant()
+        %ret = aten::gt(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = 2;
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = 2;
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_gt_Tensor) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::gt(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_lt_Scalar) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: int):
+        %bias: None = prim::Constant()
+        %ret = aten::lt(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = 2;
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = 2;
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_lt_Tensor) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::lt(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_gather) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %dim: int, %index: Tensor, %sparse_grad: bool):
+        %bias: None = prim::Constant()
+        %ret = aten::gather(%self, %dim, %index, %sparse_grad)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::randint(1, 100, {2, 2, 2}, at::kInt);
+  auto dim0 = 1;
+  auto index0 = at::randint(0, 1, {2, 2, 2}, torch::kInt64);
+  auto sparse_grad0 = false;
+  std::vector<IValue> args{self0, dim0, index0, sparse_grad0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::randint(1, 100, {5, 5, 5}, at::kInt);
+  auto dim1 = 1;
+  auto index1 = at::randint(0, 4, {5, 5, 5}, torch::kInt64);
+  auto sparse_grad1 = false;
+  std::vector<IValue> args2{self1, dim1, index1, sparse_grad1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_addcmul) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %tensor1: Tensor, %tensor2: Tensor, %value: int):
+        %bias: None = prim::Constant()
+        %ret = aten::addcmul(%self, %tensor1, %tensor2, %value)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto tensor10 = at::rand({6, 6, 6});
+  auto tensor20 = at::rand({6, 6, 6});
+  auto value0 = 2;
+  std::vector<IValue> args{self0, tensor10, tensor20, value0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto tensor11 = at::rand({22, 22, 22});
+  auto tensor21 = at::rand({22, 22, 22});
+  auto value1 = 2;
+  std::vector<IValue> args2{self1, tensor11, tensor21, value1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_addcdiv) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %tensor1: Tensor, %tensor2: Tensor, %value: int):
+        %bias: None = prim::Constant()
+        %ret = aten::addcdiv(%self, %tensor1, %tensor2, %value)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto tensor10 = at::rand({6, 6, 6});
+  auto tensor20 = at::rand({6, 6, 6});
+  auto value0 = 2;
+  std::vector<IValue> args{self0, tensor10, tensor20, value0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto tensor11 = at::rand({22, 22, 22});
+  auto tensor21 = at::rand({22, 22, 22});
+  auto value1 = 2;
+  std::vector<IValue> args2{self1, tensor11, tensor21, value1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_lgamma) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::lgamma(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_polygamma) {
+  const std::string script = R"IR(
+    graph(%n: int, %self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::polygamma(%n, %self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto n0 = 1;
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{n0, self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto n1 = 1;
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{n1, self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_erfinv) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::erfinv(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_i0) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::i0(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_signbit) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::signbit(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_atan2) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::atan2(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_hypot) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::hypot(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_igamma) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::igamma(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_igammac) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::igammac(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_nextafter) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::nextafter(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_fmin) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::fmin(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_fmax) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::fmax(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_maximum) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::maximum(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_minimum) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::minimum(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_renorm) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %p: int, %dim: int, %maxnorm: int):
+        %bias: None = prim::Constant()
+        %ret = aten::renorm(%self, %p, %dim, %maxnorm)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto p0 = 2;
+  auto dim0 = 1;
+  auto maxnorm0 = 2;
+  std::vector<IValue> args{self0, p0, dim0, maxnorm0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto p1 = 2;
+  auto dim1 = 1;
+  auto maxnorm1 = 2;
+  std::vector<IValue> args2{self1, p1, dim1, maxnorm1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen__convert_indices_from_coo_to_csr) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %size: int, %out_int32: bool):
+        %bias: None = prim::Constant()
+        %ret = aten::_convert_indices_from_coo_to_csr(%self, %size, %out_int32)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::randint(0, 3, {2}, at::kInt);
+  auto size0 = 10;
+  auto out_int320 = false;
+  std::vector<IValue> args{self0, size0, out_int320};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::randint(0, 3, {12}, at::kInt);
+  auto size1 = 24;
+  auto out_int321 = false;
+  std::vector<IValue> args2{self1, size1, out_int321};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen__convert_indices_from_csr_to_coo) {
+  const std::string script = R"IR(
+    graph(%crow_indices: Tensor, %col_indices: Tensor, %out_int32: bool, %transpose: bool):
+        %bias: None = prim::Constant()
+        %ret = aten::_convert_indices_from_csr_to_coo(%crow_indices, %col_indices, %out_int32, %transpose)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto crow_indices0 = torch::tensor({1}, torch::kInt32);
+  auto col_indices0 = torch::tensor({0, 1, 0}, torch::kInt32);
+  auto out_int320 = false;
+  auto transpose0 = false;
+  std::vector<IValue> args{crow_indices0, col_indices0, out_int320, transpose0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto crow_indices1 = torch::tensor({0, 1}, torch::kInt32);
+  auto col_indices1 = torch::tensor({0, 1, 0, 2, 1, 2}, torch::kInt32);
+  auto out_int321 = false;
+  auto transpose1 = false;
+  std::vector<IValue> args2{
+      crow_indices1, col_indices1, out_int321, transpose1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_nll_loss_backward) {
+  const std::string script = R"IR(
+    graph(%grad_output: Tensor, %self: Tensor, %target: Tensor, %weight: Tensor?, %reduction: int, %ignore_index: int, %total_weight: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::nll_loss_backward(%grad_output, %self, %target, %weight, %reduction, %ignore_index, %total_weight)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto grad_output0 = at::rand({});
+  auto self0 = at::rand({6});
+  auto target0 = at::randint(0, 5, {6}, torch::kInt64);
+  auto weight0 = at::rand({6});
+  auto reduction0 = 1;
+  auto ignore_index0 = 1;
+  auto total_weight0 = at::rand({});
+  std::vector<IValue> args{
+      grad_output0,
+      self0,
+      target0,
+      weight0,
+      reduction0,
+      ignore_index0,
+      total_weight0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto grad_output1 = at::rand({});
+  auto self1 = at::rand({36});
+  auto target1 = at::randint(0, 11, {36}, torch::kInt64);
+  auto weight1 = at::rand({36});
+  auto reduction1 = 1;
+  auto ignore_index1 = 1;
+  auto total_weight1 = at::rand({});
+  std::vector<IValue> args2{
+      grad_output1,
+      self1,
+      target1,
+      weight1,
+      reduction1,
+      ignore_index1,
+      total_weight1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_elu) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %alpha: int, %scale: int, %input_scale: int):
+        %bias: None = prim::Constant()
+        %ret = aten::elu(%self, %alpha, %scale, %input_scale)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto alpha0 = 2;
+  auto scale0 = 2;
+  auto input_scale0 = 2;
+  std::vector<IValue> args{self0, alpha0, scale0, input_scale0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto alpha1 = 2;
+  auto scale1 = 2;
+  auto input_scale1 = 2;
+  std::vector<IValue> args2{self1, alpha1, scale1, input_scale1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_elu_backward) {
+  const std::string script = R"IR(
+    graph(%grad_output: Tensor, %alpha: int, %scale: int, %input_scale: int, %is_result: bool, %self_or_result: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::elu_backward(%grad_output, %alpha, %scale, %input_scale, %is_result, %self_or_result)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto grad_output0 = at::rand({6, 6, 6});
+  auto alpha0 = 2;
+  auto scale0 = 2;
+  auto input_scale0 = 2;
+  auto is_result0 = false;
+  auto self_or_result0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{
+      grad_output0, alpha0, scale0, input_scale0, is_result0, self_or_result0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto grad_output1 = at::rand({22, 22, 22});
+  auto alpha1 = 2;
+  auto scale1 = 2;
+  auto input_scale1 = 2;
+  auto is_result1 = false;
+  auto self_or_result1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{
+      grad_output1, alpha1, scale1, input_scale1, is_result1, self_or_result1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_glu) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %dim: int):
+        %bias: None = prim::Constant()
+        %ret = aten::glu(%self, %dim)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto dim0 = 1;
+  std::vector<IValue> args{self0, dim0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto dim1 = 1;
+  std::vector<IValue> args2{self1, dim1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_hardsigmoid) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::hardsigmoid(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_hardsigmoid_backward) {
+  const std::string script = R"IR(
+    graph(%grad_output: Tensor, %self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::hardsigmoid_backward(%grad_output, %self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto grad_output0 = at::rand({6, 6, 6});
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{grad_output0, self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto grad_output1 = at::rand({22, 22, 22});
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{grad_output1, self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_leaky_relu_backward) {
+  const std::string script = R"IR(
+    graph(%grad_output: Tensor, %self: Tensor, %negative_slope: int, %self_is_result: bool):
+        %bias: None = prim::Constant()
+        %ret = aten::leaky_relu_backward(%grad_output, %self, %negative_slope, %self_is_result)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto grad_output0 = at::rand({6, 6, 6});
+  auto self0 = at::rand({6, 6, 6});
+  auto negative_slope0 = 2;
+  auto self_is_result0 = false;
+  std::vector<IValue> args{
+      grad_output0, self0, negative_slope0, self_is_result0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto grad_output1 = at::rand({22, 22, 22});
+  auto self1 = at::rand({22, 22, 22});
+  auto negative_slope1 = 2;
+  auto self_is_result1 = false;
+  std::vector<IValue> args2{
+      grad_output1, self1, negative_slope1, self_is_result1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_softplus) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %beta: int, %threshold: int):
+        %bias: None = prim::Constant()
+        %ret = aten::softplus(%self, %beta, %threshold)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto beta0 = 2;
+  auto threshold0 = 2;
+  std::vector<IValue> args{self0, beta0, threshold0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto beta1 = 2;
+  auto threshold1 = 2;
+  std::vector<IValue> args2{self1, beta1, threshold1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_softplus_backward) {
+  const std::string script = R"IR(
+    graph(%grad_output: Tensor, %self: Tensor, %beta: int, %threshold: int):
+        %bias: None = prim::Constant()
+        %ret = aten::softplus_backward(%grad_output, %self, %beta, %threshold)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto grad_output0 = at::rand({6, 6, 6});
+  auto self0 = at::rand({6, 6, 6});
+  auto beta0 = 2;
+  auto threshold0 = 2;
+  std::vector<IValue> args{grad_output0, self0, beta0, threshold0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto grad_output1 = at::rand({22, 22, 22});
+  auto self1 = at::rand({22, 22, 22});
+  auto beta1 = 2;
+  auto threshold1 = 2;
+  std::vector<IValue> args2{grad_output1, self1, beta1, threshold1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_softshrink) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %lambd: int):
+        %bias: None = prim::Constant()
+        %ret = aten::softshrink(%self, %lambd)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto lambd0 = 2;
+  std::vector<IValue> args{self0, lambd0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto lambd1 = 2;
+  std::vector<IValue> args2{self1, lambd1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_softshrink_backward) {
+  const std::string script = R"IR(
+    graph(%grad_output: Tensor, %self: Tensor, %lambd: int):
+        %bias: None = prim::Constant()
+        %ret = aten::softshrink_backward(%grad_output, %self, %lambd)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto grad_output0 = at::rand({6, 6, 6});
+  auto self0 = at::rand({6, 6, 6});
+  auto lambd0 = 2;
+  std::vector<IValue> args{grad_output0, self0, lambd0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto grad_output1 = at::rand({22, 22, 22});
+  auto self1 = at::rand({22, 22, 22});
+  auto lambd1 = 2;
+  std::vector<IValue> args2{grad_output1, self1, lambd1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_adaptive_max_pool2d_backward) {
+  const std::string script = R"IR(
+    graph(%grad_output: Tensor, %self: Tensor, %indices: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::adaptive_max_pool2d_backward(%grad_output, %self, %indices)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto grad_output0 = at::randint(-3, 2, {2, 2, 2});
+  auto self0 = at::randint(-3, 2, {2, 2, 2});
+  auto indices0 = at::randint(0, 1, {2, 2, 2}, at::kLong);
+  std::vector<IValue> args{grad_output0, self0, indices0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto grad_output1 = at::randint(-3, 3, {3, 3, 3});
+  auto self1 = at::randint(-3, 2, {3, 3, 3});
+  auto indices1 = at::randint(0, 1, {3, 3, 3}, at::kLong);
+  std::vector<IValue> args2{grad_output1, self1, indices1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_adaptive_max_pool3d_backward) {
+  const std::string script = R"IR(
+    graph(%grad_output: Tensor, %self: Tensor, %indices: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::adaptive_max_pool3d_backward(%grad_output, %self, %indices)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto grad_output0 = at::randint(-3, 2, {2, 2, 2, 2});
+  auto self0 = at::randint(-3, 2, {2, 2, 2, 2});
+  auto indices0 = at::randint(0, 1, {2, 2, 2, 2}, at::kLong);
+  std::vector<IValue> args{grad_output0, self0, indices0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto grad_output1 = at::randint(-3, 3, {3, 3, 3, 3});
+  auto self1 = at::randint(-3, 2, {3, 3, 3, 3});
+  auto indices1 = at::randint(0, 1, {3, 3, 3, 3}, at::kLong);
+  std::vector<IValue> args2{grad_output1, self1, indices1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_sigmoid_backward) {
+  const std::string script = R"IR(
+    graph(%grad_output: Tensor, %output: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::sigmoid_backward(%grad_output, %output)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto grad_output0 = at::rand({6, 6, 6});
+  auto output0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{grad_output0, output0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto grad_output1 = at::rand({22, 22, 22});
+  auto output1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{grad_output1, output1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_tanh_backward) {
+  const std::string script = R"IR(
+    graph(%grad_output: Tensor, %output: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::tanh_backward(%grad_output, %output)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto grad_output0 = at::rand({6, 6, 6});
+  auto output0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{grad_output0, output0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto grad_output1 = at::rand({22, 22, 22});
+  auto output1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{grad_output1, output1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_isposinf) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::isposinf(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_isneginf) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::isneginf(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_special_entr) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::special_entr(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_special_ndtri) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::special_ndtri(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_special_erfcx) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::special_erfcx(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_special_xlog1py) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::special_xlog1py(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_special_zeta) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::special_zeta(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({2, 2, 2}, at::kDouble) + at::ones({2, 2, 2});
+  auto other0 = at::rand({2, 2, 2}, at::kDouble) + at::ones({2, 2, 2});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({5, 5, 5}, at::kDouble) + at::ones({5, 5, 5});
+  auto other1 = at::rand({5, 5, 5}, at::kDouble) + at::ones({5, 5, 5});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_special_i0e) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::special_i0e(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_special_i1) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::special_i1(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_special_i1e) {
+  const std::string script = R"IR(
+    graph(%self: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::special_i1e(%self)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
+TEST(StaticRuntime, autogen_linalg_cross) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor, %dim: int):
+        %bias: None = prim::Constant()
+        %ret = aten::linalg_cross(%self, %other, %dim)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 3, 6});
+  auto other0 = at::rand({6, 3, 6});
+  auto dim0 = 1;
+  std::vector<IValue> args{self0, other0, dim0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 3, 22});
+  auto other1 = at::rand({22, 3, 22});
+  auto dim1 = 1;
+  std::vector<IValue> args2{self1, other1, dim1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
diff --git a/benchmarks/static_runtime/test_static_runtime.cc b/benchmarks/static_runtime/test_static_runtime.cc
index 37bf93b645e..b64e3d8d0d6 100644
--- a/benchmarks/static_runtime/test_static_runtime.cc
+++ b/benchmarks/static_runtime/test_static_runtime.cc
@@ -2160,7 +2160,7 @@ TEST(StaticRuntime, Where) {
   )JIT";
 
   std::vector<IValue> args1 = {at::randn({2, 2}), at::randn({2, 2})};
-  std::vector<IValue> args2 = {at::randn({3, 6}), at::randn({3, 6})};
+  std::vector<IValue> args2 = {at::randn({8, 10}), at::randn({8, 10})};
 
   testStaticRuntime(where_script, args1);
   testStaticRuntime(where_script, args1, args2);
diff --git a/tools/build_variables.bzl b/tools/build_variables.bzl
index 8a579a22a75..8813544588a 100644
--- a/tools/build_variables.bzl
+++ b/tools/build_variables.bzl
@@ -373,6 +373,7 @@ core_sources_full_mobile = core_sources_full_mobile_no_backend_interface + [
 
 core_sources_full = core_sources_full_mobile + [
     "torch/csrc/jit/runtime/static/fusion.cpp",
+    "torch/csrc/jit/runtime/static/generated_ops.cpp",
     "torch/csrc/jit/runtime/static/impl.cpp",
     "torch/csrc/jit/runtime/static/memory_planner.cpp",
    "torch/csrc/jit/runtime/static/native_ops.cpp",
diff --git a/tools/codegen/static_runtime/config.py b/tools/codegen/static_runtime/config.py
index 959c16e8b37..8b5569bbbe2 100644
--- a/tools/codegen/static_runtime/config.py
+++ b/tools/codegen/static_runtime/config.py
@@ -32,20 +32,6 @@ def override_test_values(arg_map: Dict[str, str], op_name: str, index: int) -> N
         else:
             arg_map["self"] = "at::rand({5, 5, 5}) + at::ones({5, 5, 5})"
         return
-    if op_name == "index_add":
-        if index == 0:
-            arg_map["self"] = "at::rand({2})"
-            arg_map["dim"] = "0"
-            arg_map["index"] = "at::randint(0, 1, {2}, at::kInt)"
-            arg_map["source"] = "at::rand({2})"
-            arg_map["alpha"] = "2"
-        else:
-            arg_map["self"] = "at::rand({16})"
-            arg_map["dim"] = "0"
-            arg_map["index"] = "at::randint(0, 10, {16}, at::kInt)"
-            arg_map["source"] = "at::rand({16})"
-            arg_map["alpha"] = "2"
-        return
     if op_name == "adaptive_max_pool2d_backward":
         if index == 0:
             arg_map["grad_output"] = "at::randint(-3, 2, {2,2,2})"
@@ -78,6 +64,60 @@ def override_test_values(arg_map: Dict[str, str], op_name: str, index: int) -> N
             arg_map["index"] = "at::randint(0, 4, {5,5,5}, torch::kInt64)"
             arg_map["sparse_grad"] = "false"
         return
+    if op_name == "gelu":
+        if index == 0:
+            arg_map["self"] = "at::rand({6, 6, 6})"
+            arg_map["approximate"] = "\"tanh\""
+        else:
+            arg_map["self"] = "at::rand({22, 22, 22})"
+            arg_map["approximate"] = "\"tanh\""
+        return
+    if op_name == "gelu_backward":
+        if index == 0:
+            arg_map["grad_output"] = "at::rand({6, 6, 6})"
+            arg_map["self"] = "at::rand({6, 6, 6})"
+            arg_map["approximate"] = "\"tanh\""
+        else:
+            arg_map["grad_output"] = "at::rand({22, 22, 22})"
+            arg_map["self"] = "at::rand({22, 22, 22})"
+            arg_map["approximate"] = "\"tanh\""
+        return
+    if op_name == "index_add":
+        if index == 0:
+            arg_map["self"] = "at::rand({2})"
+            arg_map["dim"] = "0"
+            arg_map["index"] = "at::randint(0, 1, {2}, at::kInt)"
+            arg_map["source"] = "at::rand({2})"
+            arg_map["alpha"] = "2"
+        else:
+            arg_map["self"] = "at::rand({16})"
+            arg_map["dim"] = "0"
+            arg_map["index"] = "at::randint(0, 10, {16}, at::kInt)"
+            arg_map["source"] = "at::rand({16})"
+            arg_map["alpha"] = "2"
+        return
+    if op_name == "index_copy":
+        if index == 0:
+            arg_map["self"] = "at::rand({2})"
+            arg_map["dim"] = "0"
+            arg_map["index"] = "at::randint(0, 1, {2}, at::kLong)"
+            arg_map["source"] = "at::rand({2})"
+        else:
+            arg_map["self"] = "at::rand({32})"
+            arg_map["dim"] = "0"
+            arg_map["index"] = "at::randint(0, 10, {32}, at::kLong)"
+            arg_map["source"] = "at::rand({32})"
+        return
+    if op_name == "linalg_cross":
+        if index == 0:
+            arg_map["self"] = "at::rand({6, 3, 6})"
+            arg_map["other"] = "at::rand({6, 3, 6})"
+            arg_map["dim"] = "1"
+        else:
+            arg_map["self"] = "at::rand({22, 3, 22})"
+            arg_map["other"] = "at::rand({22, 3, 22})"
+            arg_map["dim"] = "1"
+        return
     if op_name == "nll_loss_backward":
         if index == 0:
             arg_map["grad_output"] = "at::rand({})"
diff --git a/torch/csrc/jit/runtime/static/generated_ops.cpp b/torch/csrc/jit/runtime/static/generated_ops.cpp
new file mode 100644
index 00000000000..2e33c1c41fc
--- /dev/null
+++ b/torch/csrc/jit/runtime/static/generated_ops.cpp
@@ -0,0 +1,2772 @@
+// @lint-ignore-every CLANGTIDY HOWTOEVEN
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace torch {
+namespace jit {
+
+REGISTER_OPERATOR_FUNCTOR(aten::sgn, aten_sgn, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::sgn(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::sgn(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::sgn_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::acos, aten_acos, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::acos(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::acos(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::acos_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::addmv, aten_addmv, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& mat = p_node->Input(1).toTensor();
+      const auto& vec = p_node->Input(2).toTensor();
+      const auto beta = p_node->Input(3).toScalar();
+      const auto alpha = p_node->Input(4).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::addmv(self, mat, vec, beta, alpha);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::addmv_out(out, self, mat, vec, beta, alpha);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::argmax, aten_argmax, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto dim = p_node->Input(1).toOptional<int64_t>();
+      const auto keepdim = p_node->Input(2).toBool();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::argmax(self, dim, keepdim);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::argmax_out(out, self, dim, keepdim);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::acosh, aten_acosh, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::acosh(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::acosh(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::acosh_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::asinh, aten_asinh, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::asinh(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::asinh(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::asinh_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::atanh, aten_atanh, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::atanh(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::atanh(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::atanh_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::asin, aten_asin, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::asin(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::asin(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::asin_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::atan, aten_atan, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::atan(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::atan(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::atan_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::baddbmm, aten_baddbmm, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& batch1 = p_node->Input(1).toTensor();
+      const auto& batch2 = p_node->Input(2).toTensor();
+      const auto beta = p_node->Input(3).toScalar();
+      const auto alpha = p_node->Input(4).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) =
+            at::cpu::baddbmm(self, batch1, batch2, beta, alpha);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::baddbmm_out(out, self, batch1, batch2, beta, alpha);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::bitwise_not,
+    aten_bitwise_not,
+    [](Node* n) -> SROperator {
+      if (n->matches(
+              torch::schema("aten::bitwise_not(Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::bitwise_not(self);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::bitwise_not_out(out, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::copysign,
+    aten_copysign,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::copysign(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::copysign_out(out, self, other);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::ceil, aten_ceil, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::ceil(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::ceil(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::ceil_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::cos, aten_cos, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::cos(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::cos(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::cos_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::cosh, aten_cosh, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::cosh(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::cosh(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::cosh_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::cumprod, aten_cumprod, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+REGISTER_OPERATOR_FUNCTOR(aten::cumprod, aten_cumprod, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto dim = p_node->Input(1).toInt();
+      const auto dtype = p_node->Input(2).toOptional<at::ScalarType>();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::cumprod(self, dim, dtype);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::cumprod_out(out, self, dim, dtype);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::erf, aten_erf, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::erf(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::erf(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::erf_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::erfc, aten_erfc, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::erfc(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::erfc(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::erfc_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::exp, aten_exp, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::exp(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::exp(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::exp_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::exp2, aten_exp2, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::exp2(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::exp2(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::exp2_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::expm1, aten_expm1, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::expm1(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::expm1(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::expm1_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::floor, aten_floor, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::floor(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::floor(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::floor_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::frac, aten_frac, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::frac(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::frac(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::frac_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::gcd, aten_gcd, [](Node* n) -> SROperator {
+  if (n->matches(
+          torch::schema("aten::gcd(Tensor self, Tensor other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& other = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::gcd(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::gcd_out(out, self, other);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::lcm, aten_lcm, [](Node* n) -> SROperator {
+  if (n->matches(
+          torch::schema("aten::lcm(Tensor self, Tensor other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& other = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::lcm(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::lcm_out(out, self, other);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::index_copy, aten_index_copy, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto dim = p_node->Input(1).toInt();
+      const auto& index = p_node->Input(2).toTensor();
+      const auto& source = p_node->Input(3).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::index_copy(self, dim, index, source);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::index_copy_out(out, self, dim, index, source);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
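+// aten::isin registers three overloads under one functor: the schemas are
+// tried in source order and the first match supplies the lambda, so the
+// Tensor_Tensor, Tensor_Scalar, and Scalar_Tensor variants share a single
+// registration. If no schema matches, LogAndDumpSchema(n) records the
+// unhandled schema and the functor returns nullptr, letting Static Runtime
+// fall back to the default JIT implementation of the op.
+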
+REGISTER_OPERATOR_FUNCTOR(aten::isin, aten_isin, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& elements = p_node->Input(0).toTensor();
+      const auto& test_elements = p_node->Input(1).toTensor();
+      const auto assume_unique = p_node->Input(2).toBool();
+      const auto invert = p_node->Input(3).toBool();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) =
+            at::cpu::isin(elements, test_elements, assume_unique, invert);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::isin_out(out, elements, test_elements, assume_unique, invert);
+    };
+  }
+
+  if (n->matches(torch::schema(
+          "aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& elements = p_node->Input(0).toTensor();
+      const auto test_element = p_node->Input(1).toScalar();
+      const auto assume_unique = p_node->Input(2).toBool();
+      const auto invert = p_node->Input(3).toBool();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) =
+            at::cpu::isin(elements, test_element, assume_unique, invert);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::isin_out(out, elements, test_element, assume_unique, invert);
+    };
+  }
+
+  if (n->matches(torch::schema(
+          "aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto element = p_node->Input(0).toScalar();
+      const auto& test_elements = p_node->Input(1).toTensor();
+      const auto assume_unique = p_node->Input(2).toBool();
+      const auto invert = p_node->Input(3).toBool();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) =
+            at::cpu::isin(element, test_elements, assume_unique, invert);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::isin_out(out, element, test_elements, assume_unique, invert);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::log10, aten_log10, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::log10(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::log10(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::log10_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::log1p, aten_log1p, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::log1p(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::log1p(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::log1p_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::log2, aten_log2, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::log2(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::log2(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::log2_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::logaddexp,
+    aten_logaddexp,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::logaddexp(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::logaddexp(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::logaddexp_out(out, self, other);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::logaddexp2,
+    aten_logaddexp2,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::logaddexp2(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::logaddexp2(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::logaddexp2_out(out, self, other);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::xlogy, aten_xlogy, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& other = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::xlogy(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::xlogy_out(out, self, other);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::_log_softmax,
+    aten__log_softmax,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto dim = p_node->Input(1).toInt();
+          const auto half_to_float = p_node->Input(2).toBool();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::_log_softmax(self, dim, half_to_float);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::_log_softmax_out(out, self, dim, half_to_float);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
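+// Backward ops go through the same machinery; the only cosmetic difference
+// is that the reused output tensor is named grad_input, mirroring the out
+// argument of the corresponding *_backward_out kernel.
+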
+REGISTER_OPERATOR_FUNCTOR(
+    aten::_log_softmax_backward_data,
+    aten__log_softmax_backward_data,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& grad_output = p_node->Input(0).toTensor();
+          const auto& output = p_node->Input(1).toTensor();
+          const auto dim = p_node->Input(2).toInt();
+          const auto input_dtype = p_node->Input(3).toScalarType();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::_log_softmax_backward_data(
+                grad_output, output, dim, input_dtype);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::_log_softmax_backward_data_out(
+              out, grad_output, output, dim, input_dtype);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::mm, aten_mm, [](Node* n) -> SROperator {
+  if (n->matches(
+          torch::schema("aten::mm(Tensor self, Tensor mat2) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& mat2 = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::mm(self, mat2);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::mm_out(out, self, mat2);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::reciprocal,
+    aten_reciprocal,
+    [](Node* n) -> SROperator {
+      if (n->matches(
+              torch::schema("aten::reciprocal(Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::reciprocal(self);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::reciprocal_out(out, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::neg, aten_neg, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::neg(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::neg(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::neg_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::round, aten_round, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::round(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::round(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::round_out(out, self);
+    };
+  }
+
+  if (n->matches(torch::schema(
+          "aten::round.decimals(Tensor self, *, int decimals) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto decimals = p_node->Input(1).toInt();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::round(self, decimals);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::round_out(out, self, decimals);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::gelu, aten_gelu, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::gelu(Tensor self, *, str approximate='none') -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto approximate = p_node->Input(1).toStringView();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::gelu(self, approximate);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::gelu_out(out, self, approximate);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::gelu_backward,
+    aten_gelu_backward,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& grad_output = p_node->Input(0).toTensor();
+          const auto& self = p_node->Input(1).toTensor();
+          const auto approximate = p_node->Input(2).toStringView();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) =
+                at::cpu::gelu_backward(grad_output, self, approximate);
+            return;
+          }
+          auto& grad_input = p_node->Output(0).toTensor();
+          fastResizeToZero(grad_input);
+          at::cpu::gelu_backward_out(
+              grad_input, grad_output, self, approximate);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::hardshrink,
+    aten_hardshrink,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto lambd = p_node->Input(1).toScalar();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::hardshrink(self, lambd);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::hardshrink_out(out, self, lambd);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::hardshrink_backward,
+    aten_hardshrink_backward,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& grad_out = p_node->Input(0).toTensor();
+          const auto& self = p_node->Input(1).toTensor();
+          const auto lambd = p_node->Input(2).toScalar();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) =
+                at::cpu::hardshrink_backward(grad_out, self, lambd);
+            return;
+          }
+          auto& grad_input = p_node->Output(0).toTensor();
+          fastResizeToZero(grad_input);
+          at::cpu::hardshrink_backward_out(grad_input, grad_out, self, lambd);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::rsqrt, aten_rsqrt, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::rsqrt(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::rsqrt(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::rsqrt_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::silu, aten_silu, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::silu(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::silu(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::silu_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::silu_backward,
+    aten_silu_backward,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& grad_output = p_node->Input(0).toTensor();
+          const auto& self = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::silu_backward(grad_output, self);
+            return;
+          }
+          auto& grad_input = p_node->Output(0).toTensor();
+          fastResizeToZero(grad_input);
+          at::cpu::silu_backward_out(grad_input, grad_output, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::mish, aten_mish, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::mish(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::mish(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::mish_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::sin, aten_sin, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::sin(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::sin(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::sin_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::sinc, aten_sinc, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::sinc(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::sinc(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::sinc_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::sinh, aten_sinh, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::sinh(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::sinh(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::sinh_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::_softmax, aten__softmax, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto dim = p_node->Input(1).toInt();
+      const auto half_to_float = p_node->Input(2).toBool();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::_softmax(self, dim, half_to_float);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::_softmax_out(out, self, dim, half_to_float);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::_softmax_backward_data,
+    aten__softmax_backward_data,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& grad_output = p_node->Input(0).toTensor();
+          const auto& output = p_node->Input(1).toTensor();
+          const auto dim = p_node->Input(2).toInt();
+          const auto input_dtype = p_node->Input(3).toScalarType();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::_softmax_backward_data(
+                grad_output, output, dim, input_dtype);
+            return;
+          }
+          auto& grad_input = p_node->Output(0).toTensor();
+          fastResizeToZero(grad_input);
+          at::cpu::_softmax_backward_data_out(
+              grad_input, grad_output, output, dim, input_dtype);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::sqrt, aten_sqrt, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::sqrt(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::sqrt(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::sqrt_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
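+// Reductions such as aten::prod.dim_int carry an optional ScalarType. The
+// generated code unpacks it with IValue::toOptional<at::ScalarType>();
+// passing c10::nullopt defers to the kernel's default dtype promotion.
+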
+REGISTER_OPERATOR_FUNCTOR(aten::prod, aten_prod, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto dim = p_node->Input(1).toInt();
+      const auto keepdim = p_node->Input(2).toBool();
+      const auto dtype = p_node->Input(3).toOptional<at::ScalarType>();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::prod(self, dim, keepdim, dtype);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::prod_out(out, self, dim, keepdim, dtype);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::tan, aten_tan, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::tan(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::tan(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::tan_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::threshold, aten_threshold, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto threshold = p_node->Input(1).toScalar();
+      const auto value = p_node->Input(2).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::threshold(self, threshold, value);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::threshold_out(out, self, threshold, value);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::threshold_backward,
+    aten_threshold_backward,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& grad_output = p_node->Input(0).toTensor();
+          const auto& self = p_node->Input(1).toTensor();
+          const auto threshold = p_node->Input(2).toScalar();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) =
+                at::cpu::threshold_backward(grad_output, self, threshold);
+            return;
+          }
+          auto& grad_input = p_node->Output(0).toTensor();
+          fastResizeToZero(grad_input);
+          at::cpu::threshold_backward_out(
+              grad_input, grad_output, self, threshold);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::trunc, aten_trunc, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::trunc(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::trunc(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::trunc_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::heaviside,
+    aten_heaviside,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::heaviside(Tensor self, Tensor values) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& values = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::heaviside(self, values);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::heaviside_out(out, self, values);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::index_add, aten_index_add, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto dim = p_node->Input(1).toInt();
+      const auto& index = p_node->Input(2).toTensor();
+      const auto& source = p_node->Input(3).toTensor();
+      const auto alpha = p_node->Input(4).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::index_add(self, dim, index, source, alpha);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::index_add_out(out, self, dim, index, source, alpha);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::scatter, aten_scatter, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto dim = p_node->Input(1).toInt();
+      const auto& index = p_node->Input(2).toTensor();
+      const auto& src = p_node->Input(3).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::scatter(self, dim, index, src);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::scatter_out(out, self, dim, index, src);
+    };
+  }
+
+  if (n->matches(torch::schema(
+          "aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto dim = p_node->Input(1).toInt();
+      const auto& index = p_node->Input(2).toTensor();
+      const auto value = p_node->Input(3).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::scatter(self, dim, index, value);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::scatter_out(out, self, dim, index, value);
+    };
+  }
+
+  if (n->matches(torch::schema(
+          "aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto dim = p_node->Input(1).toInt();
+      const auto& index = p_node->Input(2).toTensor();
+      const auto& src = p_node->Input(3).toTensor();
+      const auto reduce = p_node->Input(4).toStringView();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::scatter(self, dim, index, src, reduce);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::scatter_out(out, self, dim, index, src, reduce);
+    };
+  }
+
+  if (n->matches(torch::schema(
+          "aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto dim = p_node->Input(1).toInt();
+      const auto& index = p_node->Input(2).toTensor();
+      const auto value = p_node->Input(3).toScalar();
+      const auto reduce = p_node->Input(4).toStringView();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::scatter(self, dim, index, value, reduce);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::scatter_out(out, self, dim, index, value, reduce);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::scatter_add, aten_scatter_add, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto dim = p_node->Input(1).toInt();
+      const auto& index = p_node->Input(2).toTensor();
+      const auto& src = p_node->Input(3).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::scatter_add(self, dim, index, src);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::scatter_add_out(out, self, dim, index, src);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
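+// Comparison ops register both a .Scalar and a .Tensor overload. The two
+// lambdas differ only in how Input(1) is unpacked: toScalar() copies the
+// scalar by value, while toTensor() binds a const reference to the tensor
+// owned by the IValue.
+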
+REGISTER_OPERATOR_FUNCTOR(aten::eq, aten_eq, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::eq.Scalar(Tensor self, Scalar other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto other = p_node->Input(1).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::eq(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::eq_out(out, self, other);
+    };
+  }
+
+  if (n->matches(torch::schema(
+          "aten::eq.Tensor(Tensor self, Tensor other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& other = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::eq(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::eq_out(out, self, other);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::bitwise_and,
+    aten_bitwise_and,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::bitwise_and(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::bitwise_and_out(out, self, other);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::bitwise_or,
+    aten_bitwise_or,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::bitwise_or(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::bitwise_or_out(out, self, other);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::bitwise_xor,
+    aten_bitwise_xor,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::bitwise_xor(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::bitwise_xor_out(out, self, other);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::bitwise_left_shift,
+    aten_bitwise_left_shift,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::bitwise_left_shift(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::bitwise_left_shift_out(out, self, other);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::bitwise_right_shift,
+    aten_bitwise_right_shift,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::bitwise_right_shift(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::bitwise_right_shift_out(out, self, other);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::tril, aten_tril, [](Node* n) -> SROperator {
+  if (n->matches(
+          torch::schema("aten::tril(Tensor self, int diagonal=0) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto diagonal = p_node->Input(1).toInt();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::tril(self, diagonal);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::tril_out(out, self, diagonal);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::triu, aten_triu, [](Node* n) -> SROperator {
+  if (n->matches(
+          torch::schema("aten::triu(Tensor self, int diagonal=0) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto diagonal = p_node->Input(1).toInt();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::triu(self, diagonal);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::triu_out(out, self, diagonal);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::digamma,
+    aten_digamma,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema("aten::digamma(Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::digamma(self);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::digamma_out(out, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::lerp, aten_lerp, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& end = p_node->Input(1).toTensor();
+      const auto weight = p_node->Input(2).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::lerp(self, end, weight);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::lerp_out(out, self, end, weight);
+    };
+  }
+
+  if (n->matches(torch::schema(
+          "aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& end = p_node->Input(1).toTensor();
+      const auto& weight = p_node->Input(2).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::lerp(self, end, weight);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::lerp_out(out, self, end, weight);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::ne, aten_ne, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::ne.Scalar(Tensor self, Scalar other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto other = p_node->Input(1).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::ne(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::ne_out(out, self, other);
+    };
+  }
+
+  if (n->matches(torch::schema(
+          "aten::ne.Tensor(Tensor self, Tensor other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& other = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::ne(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::ne_out(out, self, other);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::ge, aten_ge, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::ge.Scalar(Tensor self, Scalar other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto other = p_node->Input(1).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::ge(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::ge_out(out, self, other);
+    };
+  }
+
+  if (n->matches(torch::schema(
+          "aten::ge.Tensor(Tensor self, Tensor other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& other = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::ge(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::ge_out(out, self, other);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::le, aten_le, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::le.Scalar(Tensor self, Scalar other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto other = p_node->Input(1).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::le(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::le_out(out, self, other);
+    };
+  }
+
+  if (n->matches(torch::schema(
+          "aten::le.Tensor(Tensor self, Tensor other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& other = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::le(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::le_out(out, self, other);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::gt, aten_gt, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::gt.Scalar(Tensor self, Scalar other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto other = p_node->Input(1).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::gt(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::gt_out(out, self, other);
+    };
+  }
+
+  if (n->matches(torch::schema(
+          "aten::gt.Tensor(Tensor self, Tensor other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& other = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::gt(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::gt_out(out, self, other);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::lt, aten_lt, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::lt.Scalar(Tensor self, Scalar other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto other = p_node->Input(1).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::lt(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::lt_out(out, self, other);
+    };
+  }
+
+  if (n->matches(torch::schema(
+          "aten::lt.Tensor(Tensor self, Tensor other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& other = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::lt(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::lt_out(out, self, other);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::gather, aten_gather, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto dim = p_node->Input(1).toInt();
+      const auto& index = p_node->Input(2).toTensor();
+      const auto sparse_grad = p_node->Input(3).toBool();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::gather(self, dim, index, sparse_grad);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::gather_out(out, self, dim, index, sparse_grad);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::addcmul, aten_addcmul, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& tensor1 = p_node->Input(1).toTensor();
+      const auto& tensor2 = p_node->Input(2).toTensor();
+      const auto value = p_node->Input(3).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::addcmul(self, tensor1, tensor2, value);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::addcmul_out(out, self, tensor1, tensor2, value);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::addcdiv, aten_addcdiv, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& tensor1 = p_node->Input(1).toTensor();
+      const auto& tensor2 = p_node->Input(2).toTensor();
+      const auto value = p_node->Input(3).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::addcdiv(self, tensor1, tensor2, value);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::addcdiv_out(out, self, tensor1, tensor2, value);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::lgamma, aten_lgamma, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::lgamma(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::lgamma(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::lgamma_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::polygamma,
+    aten_polygamma,
+    [](Node* n) -> SROperator {
+      if (n->matches(
+              torch::schema("aten::polygamma(int n, Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto n = p_node->Input(0).toInt();
+          const auto& self = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::polygamma(n, self);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::polygamma_out(out, n, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::erfinv, aten_erfinv, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::erfinv(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::erfinv(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::erfinv_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::i0, aten_i0, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema("aten::i0(Tensor self) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::i0(self);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::i0_out(out, self);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::signbit,
+    aten_signbit,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema("aten::signbit(Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::signbit(self);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::signbit_out(out, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::atan2, aten_atan2, [](Node* n) -> SROperator {
+  if (n->matches(
+          torch::schema("aten::atan2(Tensor self, Tensor other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& other = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::atan2(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::atan2_out(out, self, other);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::hypot, aten_hypot, [](Node* n) -> SROperator {
+  if (n->matches(
+          torch::schema("aten::hypot(Tensor self, Tensor other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& other = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::hypot(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::hypot_out(out, self, other);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::igamma, aten_igamma, [](Node* n) -> SROperator {
+  if (n->matches(
+          torch::schema("aten::igamma(Tensor self, Tensor other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& other = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::igamma(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::igamma_out(out, self, other);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::igammac,
+    aten_igammac,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::igammac(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::igammac(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::igammac_out(out, self, other);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::nextafter,
+    aten_nextafter,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::nextafter(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::nextafter(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::nextafter_out(out, self, other);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::fmin, aten_fmin, [](Node* n) -> SROperator {
+  if (n->matches(
+          torch::schema("aten::fmin(Tensor self, Tensor other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& other = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::fmin(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::fmin_out(out, self, other);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(aten::fmax, aten_fmax, [](Node* n) -> SROperator {
+  if (n->matches(
+          torch::schema("aten::fmax(Tensor self, Tensor other) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto& other = p_node->Input(1).toTensor();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::fmax(self, other);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::fmax_out(out, self, other);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::maximum,
+    aten_maximum,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::maximum(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::maximum(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::maximum_out(out, self, other);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::minimum,
+    aten_minimum,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::minimum(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::minimum(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::minimum_out(out, self, other);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::renorm, aten_renorm, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto p = p_node->Input(1).toScalar();
+      const auto dim = p_node->Input(2).toInt();
+      const auto maxnorm = p_node->Input(3).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::renorm(self, p, dim, maxnorm);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::renorm_out(out, self, p, dim, maxnorm);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::_convert_indices_from_coo_to_csr,
+    aten__convert_indices_from_coo_to_csr,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto size = p_node->Input(1).toInt();
+          const auto out_int32 = p_node->Input(2).toBool();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::_convert_indices_from_coo_to_csr(
+                self, size, out_int32);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::_convert_indices_from_coo_to_csr_out(
+              out, self, size, out_int32);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::_convert_indices_from_csr_to_coo,
+    aten__convert_indices_from_csr_to_coo,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& crow_indices = p_node->Input(0).toTensor();
+          const auto& col_indices = p_node->Input(1).toTensor();
+          const auto out_int32 = p_node->Input(2).toBool();
+          const auto transpose = p_node->Input(3).toBool();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::_convert_indices_from_csr_to_coo(
+                crow_indices, col_indices, out_int32, transpose);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::_convert_indices_from_csr_to_coo_out(
+              out, crow_indices, col_indices, out_int32, transpose);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
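+// aten::nll_loss_backward below takes an optional tensor (Tensor? weight):
+// it is unpacked with toOptional<at::Tensor>(), producing a
+// c10::optional<at::Tensor> that the kernel treats as "no weight" when empty.
+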
+
+REGISTER_OPERATOR_FUNCTOR(aten::glu, aten_glu, [](Node* n) -> SROperator {
+  if (n->matches(
+          torch::schema("aten::glu(Tensor self, int dim=-1) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto dim = p_node->Input(1).toInt();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::glu(self, dim);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::glu_out(out, self, dim);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::hardsigmoid,
+    aten_hardsigmoid,
+    [](Node* n) -> SROperator {
+      if (n->matches(
+              torch::schema("aten::hardsigmoid(Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::hardsigmoid(self);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::hardsigmoid_out(out, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::hardsigmoid_backward,
+    aten_hardsigmoid_backward,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& grad_output = p_node->Input(0).toTensor();
+          const auto& self = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) =
+                at::cpu::hardsigmoid_backward(grad_output, self);
+            return;
+          }
+          auto& grad_input = p_node->Output(0).toTensor();
+          fastResizeToZero(grad_input);
+          at::cpu::hardsigmoid_backward_out(grad_input, grad_output, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::leaky_relu_backward,
+    aten_leaky_relu_backward,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& grad_output = p_node->Input(0).toTensor();
+          const auto& self = p_node->Input(1).toTensor();
+          const auto negative_slope = p_node->Input(2).toScalar();
+          const auto self_is_result = p_node->Input(3).toBool();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::leaky_relu_backward(
+                grad_output, self, negative_slope, self_is_result);
+            return;
+          }
+          auto& grad_input = p_node->Output(0).toTensor();
+          fastResizeToZero(grad_input);
+          at::cpu::leaky_relu_backward_out(
+              grad_input, grad_output, self, negative_slope, self_is_result);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(aten::softplus, aten_softplus, [](Node* n) -> SROperator {
+  if (n->matches(torch::schema(
+          "aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto beta = p_node->Input(1).toScalar();
+      const auto threshold = p_node->Input(2).toScalar();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::cpu::softplus(self, beta, threshold);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::cpu::softplus_out(out, self, beta, threshold);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
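+
+// When a node carries a schema other than the one matched above (for
+// example, a different overload of the same op), the functor logs the
+// schema via LogAndDumpSchema and returns nullptr; Static Runtime is then
+// expected to fall back to its default per-node execution path instead of
+// failing outright.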
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::softplus_backward,
+    aten_softplus_backward,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& grad_output = p_node->Input(0).toTensor();
+          const auto& self = p_node->Input(1).toTensor();
+          const auto beta = p_node->Input(2).toScalar();
+          const auto threshold = p_node->Input(3).toScalar();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) =
+                at::cpu::softplus_backward(grad_output, self, beta, threshold);
+            return;
+          }
+          auto& grad_input = p_node->Output(0).toTensor();
+          fastResizeToZero(grad_input);
+          at::cpu::softplus_backward_out(
+              grad_input, grad_output, self, beta, threshold);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::softshrink,
+    aten_softshrink,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto lambd = p_node->Input(1).toScalar();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::softshrink(self, lambd);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::softshrink_out(out, self, lambd);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::softshrink_backward,
+    aten_softshrink_backward,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& grad_output = p_node->Input(0).toTensor();
+          const auto& self = p_node->Input(1).toTensor();
+          const auto lambd = p_node->Input(2).toScalar();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) =
+                at::cpu::softshrink_backward(grad_output, self, lambd);
+            return;
+          }
+          auto& grad_input = p_node->Output(0).toTensor();
+          fastResizeToZero(grad_input);
+          at::cpu::softshrink_backward_out(
+              grad_input, grad_output, self, lambd);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::adaptive_max_pool2d_backward,
+    aten_adaptive_max_pool2d_backward,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& grad_output = p_node->Input(0).toTensor();
+          const auto& self = p_node->Input(1).toTensor();
+          const auto& indices = p_node->Input(2).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::adaptive_max_pool2d_backward(
+                grad_output, self, indices);
+            return;
+          }
+          auto& grad_input = p_node->Output(0).toTensor();
+          fastResizeToZero(grad_input);
+          at::cpu::adaptive_max_pool2d_backward_out(
+              grad_input, grad_output, self, indices);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::adaptive_max_pool3d_backward,
+    aten_adaptive_max_pool3d_backward,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& grad_output = p_node->Input(0).toTensor();
+          const auto& self = p_node->Input(1).toTensor();
+          const auto& indices = p_node->Input(2).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::adaptive_max_pool3d_backward(
+                grad_output, self, indices);
+            return;
+          }
+          auto& grad_input = p_node->Output(0).toTensor();
+          fastResizeToZero(grad_input);
+          at::cpu::adaptive_max_pool3d_backward_out(
+              grad_input, grad_output, self, indices);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
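+
+// fastResizeToZero shrinks a tensor to zero elements without releasing its
+// storage, so the *_out call that follows can resize it to the proper shape
+// without having to preserve (copy) any stale contents. Roughly, in plain
+// ATen terms (a sketch, not code from this file):
+//
+//   at::Tensor buf = at::empty({8, 8});
+//   buf.resize_({0});      // sizes become {0}; the 64-element storage stays
+//   buf.resize_({4, 4});   // grows back within existing storage, no copy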
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::sigmoid_backward,
+    aten_sigmoid_backward,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& grad_output = p_node->Input(0).toTensor();
+          const auto& output = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::sigmoid_backward(grad_output, output);
+            return;
+          }
+          auto& grad_input = p_node->Output(0).toTensor();
+          fastResizeToZero(grad_input);
+          at::cpu::sigmoid_backward_out(grad_input, grad_output, output);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::tanh_backward,
+    aten_tanh_backward,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& grad_output = p_node->Input(0).toTensor();
+          const auto& output = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::tanh_backward(grad_output, output);
+            return;
+          }
+          auto& grad_input = p_node->Output(0).toTensor();
+          fastResizeToZero(grad_input);
+          at::cpu::tanh_backward_out(grad_input, grad_output, output);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::isposinf,
+    aten_isposinf,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema("aten::isposinf(Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::isposinf(self);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::isposinf_out(out, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::isneginf,
+    aten_isneginf,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema("aten::isneginf(Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::isneginf(self);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::isneginf_out(out, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::special_entr,
+    aten_special_entr,
+    [](Node* n) -> SROperator {
+      if (n->matches(
+              torch::schema("aten::special_entr(Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::special_entr(self);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::special_entr_out(out, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::special_ndtri,
+    aten_special_ndtri,
+    [](Node* n) -> SROperator {
+      if (n->matches(
+              torch::schema("aten::special_ndtri(Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::special_ndtri(self);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::special_ndtri_out(out, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
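+
+// Dispatchers such as aten::isposinf above are exercised by the generated
+// tests in benchmarks/static_runtime/test_generated_ops.cc. An illustrative
+// graph of the kind those tests feed to testStaticRuntime looks like the
+// following; the trailing aten::clone keeps the tested op's output as an
+// intermediate value that Static Runtime can manage:
+//
+//   graph(%self: Tensor):
+//     %bias: None = prim::Constant()
+//     %ret = aten::isposinf(%self)
+//     %cloned = aten::clone(%ret, %bias)
+//     return (%cloned)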
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::special_erfcx,
+    aten_special_erfcx,
+    [](Node* n) -> SROperator {
+      if (n->matches(
+              torch::schema("aten::special_erfcx(Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::special_erfcx(self);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::special_erfcx_out(out, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::special_xlog1py,
+    aten_special_xlog1py,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::special_xlog1py(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::special_xlog1py(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::special_xlog1py_out(out, self, other);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::special_zeta,
+    aten_special_zeta,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::special_zeta(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::special_zeta(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::special_zeta_out(out, self, other);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::special_i0e,
+    aten_special_i0e,
+    [](Node* n) -> SROperator {
+      if (n->matches(
+              torch::schema("aten::special_i0e(Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::special_i0e(self);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::special_i0e_out(out, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::special_i1,
+    aten_special_i1,
+    [](Node* n) -> SROperator {
+      if (n->matches(
+              torch::schema("aten::special_i1(Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::special_i1(self);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::special_i1_out(out, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::special_i1e,
+    aten_special_i1e,
+    [](Node* n) -> SROperator {
+      if (n->matches(
+              torch::schema("aten::special_i1e(Tensor self) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::special_i1e(self);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::special_i1e_out(out, self);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
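+
+// Tensor arguments are bound by const reference (const auto& self = ...),
+// which should avoid touching the intrusive-pointer refcount on the hot
+// path; Scalar, int, and bool arguments are cheap to copy and are taken by
+// value.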
+
+REGISTER_OPERATOR_FUNCTOR(
+    aten::linalg_cross,
+    aten_linalg_cross,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          const auto dim = p_node->Input(2).toInt();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) = at::cpu::linalg_cross(self, other, dim);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::cpu::linalg_cross_out(out, self, other, dim);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
+} // namespace jit
+} // namespace torch
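+
+// REGISTER_OPERATOR_FUNCTOR expands to a static registration object, so
+// linking this translation unit into a binary should be enough to make the
+// generated out-variant dispatchers above visible to Static Runtime at
+// graph load time; no explicit initialization call is required.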