2021-06-23 17:48:55 +00:00
|
|
|
#include <gtest/gtest.h>
|
|
|
|
|
#include <torch/csrc/jit/tensorexpr/eval.h>
|
2022-05-04 19:55:52 +00:00
|
|
|
#include <torch/csrc/jit/tensorexpr/expr.h>
|
2021-06-23 17:48:55 +00:00
|
|
|
#include <torch/csrc/jit/tensorexpr/loopnest.h>
|
|
|
|
|
#include <torch/csrc/jit/tensorexpr/operators/operators.h>
|
|
|
|
|
#include <torch/torch.h>
|
|
|
|
|
|
|
|
|
|
using namespace torch::jit::tensorexpr;
|
|
|
|
|
|
2021-08-24 07:29:22 +00:00
|
|
|
using Tensors = std::vector<Tensor>;
|
2021-06-23 17:48:55 +00:00
|
|
|
using Args = std::vector<CodeGen::BufferArg>;
|
|
|
|
|
std::unique_ptr<SimpleIREvaluator> compile(
|
|
|
|
|
const Args& inputs,
|
|
|
|
|
const Tensors& outputs) {
|
|
|
|
|
LoopNest nest({outputs});
|
|
|
|
|
nest.prepareForCodegen();
|
|
|
|
|
nest.simplify();
|
|
|
|
|
auto join = inputs;
|
|
|
|
|
join.insert(join.end(), outputs.begin(), outputs.end());
|
|
|
|
|
return std::make_unique<SimpleIREvaluator>(nest.root_stmt(), join);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Verifies computeSum against eager at::sum on a contiguous 2-D input,
// reducing over dim 0, dim 1, and both dims.
TEST(Ops, Sum) {
  constexpr int M = 8;
  constexpr int N = 16;
  const std::vector<IntList> reduceDims = {{0}, {1}, {0, 1}};
  const std::vector<std::vector<ExprHandle>> resultShapes = {{N}, {M}, {}};
  for (unsigned i = 0; i < reduceDims.size(); ++i) {
    const auto& dims = reduceDims[i];
    const auto& outShape = resultShapes[i];

    BufHandle a("a", {M, N}, kFloat);
    std::vector<ExprHandle> outStrides =
        c10::fmap<ExprHandle>(make_contiguous_strides(outShape));
    // keepdim=false to match the reference at::sum call below.
    Tensor b = computeSum(
        {a, dims, false}, outShape, outStrides, c10::kFloat, at::kCPU);
    auto cg = compile({a}, {b});

    // Reference result computed eagerly via ATen.
    auto input = at::arange(M * N, at::kFloat).view({M, N});
    auto expected = at::sum(input, dims);
    auto actual = at::empty_like(expected);

    cg->call({input.data_ptr<float>(), actual.data_ptr<float>()});

    ASSERT_TRUE(at::allclose(actual, expected));
  }
}
|
2022-05-04 19:55:52 +00:00
|
|
|
|
|
|
|
|
// Same check as Ops.Sum but with a 5-D input and channels-last output
// strides, reducing over dim 0, dim 1, and both dims.
TEST(Ops, ChannelsLastSum) {
  constexpr int A = 2;
  constexpr int B = 3;
  constexpr int C = 4;
  constexpr int D = 5;
  constexpr int E = 6;
  const std::vector<IntList> reduceDims = {{0}, {1}, {0, 1}};
  const std::vector<std::vector<ExprHandle>> resultShapes = {
      {B, C, D, E}, {A, C, D, E}, {C, D, E}};
  for (unsigned i = 0; i < reduceDims.size(); ++i) {
    const auto& dims = reduceDims[i];
    const auto& outShape = resultShapes[i];

    BufHandle a("a", {A, B, C, D, E}, kFloat);
    // Exercise the non-contiguous (channels-last) output layout path.
    std::vector<ExprHandle> outStrides =
        c10::fmap<ExprHandle>(make_channels_last_strides(outShape));
    // keepdim=false to match the reference at::sum call below.
    Tensor b = computeSum(
        {a, dims, false}, outShape, outStrides, c10::kFloat, at::kCPU);
    auto cg = compile({a}, {b});

    // Reference result computed eagerly via ATen.
    auto input = at::arange(A * B * C * D * E, at::kFloat).view({A, B, C, D, E});
    auto expected = at::sum(input, dims);
    auto actual = at::empty_like(expected);

    cg->call({input.data_ptr<float>(), actual.data_ptr<float>()});

    ASSERT_TRUE(at::allclose(actual, expected));
  }
}
|