[AOTI] Add more ABI-compatibility unit tests (#123900)

Summary: Follow-up to https://github.com/pytorch/pytorch/pull/123848; tests more c10 util functions.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123900
Approved by: https://github.com/chenyang78
This commit is contained in:
Bin Bao 2024-04-22 12:07:02 -07:00 committed by PyTorch MergeBot
parent e558008a05
commit b2fd224f27
6 changed files with 172 additions and 1 deletions

View file

@ -3,7 +3,11 @@ set(AOTI_ABI_CHECK_TEST_ROOT ${TORCH_ROOT}/test/cpp/aoti_abi_check)
# Build the cpp gtest binary containing the cpp-only tests.
set(AOTI_ABI_CHECK_TEST_SRCS
${AOTI_ABI_CHECK_TEST_ROOT}/main.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_cast.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_dtype.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_math.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_rand.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_vec.cpp
)
add_executable(test_aoti_abi_check

View file

@ -0,0 +1,25 @@
#include <gtest/gtest.h>
#include <c10/util/TypeCast.h>
#include <c10/util/bit_cast.h>
namespace torch {
namespace aot_inductor {
TEST(TestCast, TestConvert) {
c10::BFloat16 a = 3.0f;
c10::Half b = 3.0f;
EXPECT_EQ(c10::convert<c10::Half>(a), b);
EXPECT_EQ(a, c10::convert<c10::BFloat16>(b));
}
TEST(TestCast, TestBitcast) {
c10::BFloat16 a = 3.0f;
c10::Half b = 3.0f;
EXPECT_EQ(c10::bit_cast<c10::BFloat16>(c10::bit_cast<c10::Half>(a)), a);
EXPECT_EQ(c10::bit_cast<c10::Half>(c10::bit_cast<c10::BFloat16>(b)), b);
}
} // namespace aot_inductor
} // namespace torch

View file

@ -0,0 +1,23 @@
#include <gtest/gtest.h>
#include <ATen/NumericUtils.h>
#include <c10/util/generic_math.h>
#include <cmath>
namespace torch {
namespace aot_inductor {
TEST(TestMath, TestDivFloor) {
EXPECT_EQ(c10::div_floor_floating(5., 0.), INFINITY);
EXPECT_DOUBLE_EQ(c10::div_floor_floating(5., 2.), 2.);
EXPECT_DOUBLE_EQ(c10::div_floor_floating(5., -2.), -3.);
EXPECT_EQ(c10::div_floor_integer(5, 2), 2);
EXPECT_EQ(c10::div_floor_integer(5, -2), -3);
}
TEST(TestMath, TestNan) {
EXPECT_FALSE(at::_isnan(1.0));
EXPECT_TRUE(at::_isnan(std::nan("")));
}
} // namespace aot_inductor
} // namespace torch

View file

@ -0,0 +1,39 @@
#include <gtest/gtest.h>
#include <ATen/core/PhiloxRNGEngine.h>
#include <cstdint>
#include <iostream>
namespace torch {
namespace aot_inductor {
int64_t randint64_cpu(
uint32_t seed,
uint32_t offset,
int64_t low,
int64_t high) {
auto gen = at::Philox4_32(seed, 0, offset);
uint64_t r0 = gen();
uint64_t r1 = gen();
uint64_t result = r0 | (r1 << 32);
return static_cast<int64_t>(result % (high - low)) + low;
}
TEST(TestRand, TestRandn) {
at::Philox4_32 engine_1(1, 0, 0);
float a = engine_1.randn(10);
at::Philox4_32 engine_2(1, 0, 0);
float b = engine_2.randn(10);
EXPECT_EQ(a, b);
}
TEST(TestRand, TestRandint64) {
int64_t a = randint64_cpu(0xffffffff, 100, 0, INT64_MAX);
int64_t b = randint64_cpu(0xffffffff, 100, 0, INT64_MAX);
EXPECT_EQ(a, b);
}
} // namespace aot_inductor
} // namespace torch

View file

@ -0,0 +1,81 @@
#include <gtest/gtest.h>
#include <ATen/cpu/vec/vec.h>
#include <iostream>
namespace torch {
namespace aot_inductor {
TEST(TestVec, TestAdd) {
using Vec = at::vec::Vectorized<int>;
std::vector<int> a(1024, 1);
std::vector<int> b(1024, 2);
Vec a_vec = Vec::loadu(a.data());
Vec b_vec = Vec::loadu(b.data());
Vec actual_vec = a_vec + b_vec;
std::vector<int> expected(1024, 3);
Vec expected_vec = Vec::loadu(expected.data());
for (int i = 0; i < Vec::size(); i++) {
EXPECT_EQ(expected_vec[i], actual_vec[i]);
}
}
TEST(TestVec, TestMax) {
using Vec = at::vec::Vectorized<int>;
std::vector<int> a(1024, -1);
std::vector<int> b(1024, 2);
Vec a_vec = Vec::loadu(a.data());
Vec b_vec = Vec::loadu(b.data());
Vec actual_vec = at::vec::maximum(a_vec, b_vec);
Vec expected_vec = b_vec;
for (int i = 0; i < Vec::size(); i++) {
EXPECT_EQ(expected_vec[i], actual_vec[i]);
}
}
TEST(TestVec, TestMin) {
using Vec = at::vec::Vectorized<int>;
std::vector<int> a(1024, -1);
std::vector<int> b(1024, 2);
Vec a_vec = Vec::loadu(a.data());
Vec b_vec = Vec::loadu(b.data());
Vec actual_vec = at::vec::minimum(a_vec, b_vec);
Vec expected_vec = a_vec;
for (int i = 0; i < Vec::size(); i++) {
EXPECT_EQ(expected_vec[i], actual_vec[i]);
}
}
TEST(TestVec, TestConvert) {
std::vector<int> a(1024, -1);
std::vector<float> b(1024, -1.0);
at::vec::Vectorized<int> a_vec = at::vec::Vectorized<int>::loadu(a.data());
at::vec::Vectorized<float> b_vec =
at::vec::Vectorized<float>::loadu(b.data());
auto actual_vec = at::vec::convert<float>(a_vec);
auto expected_vec = b_vec;
for (int i = 0; i < at::vec::Vectorized<int>::size(); i++) {
EXPECT_EQ(expected_vec[i], actual_vec[i]);
}
}
TEST(TestVec, TestClampMin) {
using Vec = at::vec::Vectorized<float>;
std::vector<float> a(1024, -2.0);
std::vector<float> min(1024, -1.0);
Vec a_vec = Vec::loadu(a.data());
Vec min_vec = Vec::loadu(min.data());
Vec actual_vec = at::vec::clamp_min(a_vec, min_vec);
Vec expected_vec = min_vec;
for (int i = 0; i < Vec::size(); i++) {
EXPECT_EQ(expected_vec[i], actual_vec[i]);
}
}
} // namespace aot_inductor
} // namespace torch

View file

@ -15,7 +15,6 @@
#include <ATen/NumericUtils.h>
#include <ATen/core/PhiloxRNGEngine.h>
#include <ATen/native/Math.h>
#include <c10/util/Float8_e4m3fn.h>
#include <c10/util/Float8_e5m2.h>