Support double for operators ReduceMean, ReduceLogSumExp (#6217)

* Support double for operators ReduceMean, ReduceLogSumExp
This commit is contained in:
Xavier Dupré 2020-12-31 11:24:54 +01:00 committed by GitHub
parent 5968a91ea6
commit 84addcd2cf
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 265 additions and 0 deletions

View file

@ -157,12 +157,15 @@ class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOn
ReduceLogSum);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10, float,
ReduceLogSumExp);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10, double,
ReduceLogSumExp);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10, int32_t,
ReduceLogSumExp);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10, float, ReduceMax);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10, int32_t, ReduceMax);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10, int64_t, ReduceMax);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10, float, ReduceMean);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10, double, ReduceMean);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10, int32_t, ReduceMean);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10, float, ReduceMin);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10, int32_t, ReduceMin);
@ -342,11 +345,13 @@ class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOn
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, float, ReduceLogSum);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, int32_t, ReduceLogSum);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, float, ReduceLogSumExp);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, double, ReduceLogSumExp);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, int32_t, ReduceLogSumExp);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 11, float, ReduceMax);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 11, int32_t, ReduceMax);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 11, int64_t, ReduceMax);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, float, ReduceMean);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, double, ReduceMean);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, int32_t, ReduceMean);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 11, float, ReduceMin);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 11, int32_t, ReduceMin);
@ -596,6 +601,7 @@ class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain,
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float, ReduceLogSum);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t, ReduceLogSum);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float, ReduceLogSumExp);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, double, ReduceLogSumExp);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t, ReduceLogSumExp);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float, ReduceMax);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t, ReduceMax);
@ -603,6 +609,7 @@ class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain,
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int8_t, ReduceMax);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, uint8_t, ReduceMax);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float, ReduceMean);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, double, ReduceMean);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t, ReduceMean);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float, ReduceMin);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t, ReduceMin);
@ -864,6 +871,8 @@ Status RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) {
int32_t, ReduceLogSum)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10,
float, ReduceLogSumExp)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10,
double, ReduceLogSumExp)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10,
int32_t, ReduceLogSumExp)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10,
@ -874,6 +883,8 @@ Status RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) {
int64_t, ReduceMax)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10,
float, ReduceMean)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10,
double, ReduceMean)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10,
int32_t, ReduceMean)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 10,
@ -1289,10 +1300,14 @@ Status RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) {
int32_t, ReduceLogSum)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12,
float, ReduceLogSumExp)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12,
double, ReduceLogSumExp)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12,
int32_t, ReduceLogSumExp)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12,
float, ReduceMean)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12,
double, ReduceMean)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12,
int32_t, ReduceMean)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12,
@ -1579,6 +1594,8 @@ Status RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) {
ReduceLogSum)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float,
ReduceLogSumExp)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, double,
ReduceLogSumExp)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t,
ReduceLogSumExp)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float,
@ -1593,6 +1610,8 @@ Status RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) {
ReduceMax)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float,
ReduceMean)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, double,
ReduceMean)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t,
ReduceMean)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float,

View file

@ -122,6 +122,9 @@ REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceLogSum, 13);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceLogSumExp, 1, 10);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceLogSumExp, 11, 12);
REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceLogSumExp, 13);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_DOUBLE_ONLY(ReduceLogSumExp, 1, 10);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_DOUBLE_ONLY(ReduceLogSumExp, 11, 12);
REGISTER_UNARY_ELEMENTWISE_KERNEL_DOUBLE_ONLY(ReduceLogSumExp, 13);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMax, 1, 10);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceMax, 1, 10);
@ -141,6 +144,9 @@ REGISTER_UNARY_ELEMENTWISE_KERNEL_UINT8_ONLY(ReduceMax, 13);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMean, 1, 10);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMean, 11, 12);
REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceMean, 13);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_DOUBLE_ONLY(ReduceMean, 1, 10);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_DOUBLE_ONLY(ReduceMean, 11, 12);
REGISTER_UNARY_ELEMENTWISE_KERNEL_DOUBLE_ONLY(ReduceMean, 13);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMin, 1, 10);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceMin, 1, 10);

View file

@ -14,6 +14,7 @@ namespace test {
const float FLOAT_INF = std::numeric_limits<float>::infinity();
const float FLOAT_NINF = -std::numeric_limits<float>::infinity();
const double DOUBLE_NINF = -std::numeric_limits<double>::infinity();
// Disable TensorRT on some of the tests because the limit in its parser: axis >=0 && axis < nbDims
template <typename OutT>
@ -384,6 +385,22 @@ TEST(ReductionOpTest, ReduceLogSumExp_default_axes_keepdims) {
test.Run();
}
// double variant of ReduceLogSumExp_default_axes_keepdims: no "axes" attribute,
// so the reduction covers all axes; keepdims=1 keeps a {1,1,1} output shape.
// Expected value is log(sum(exp(x))) over all 12 elements, dominated by 60.0.
TEST(ReductionOpTest, ReduceLogSumExp_default_axes_keepdims_double) {
OpTester test("ReduceLogSumExp");
test.AddAttribute("keepdims", (int64_t)1);
test.AddInput<double>("data", {3, 2, 2},
{5.0, 1.0,
20.0, 2.0,
30.0, 1.0,
40.0, 2.0,
55.0, 1.0,
60.0, 2.0});
test.AddOutput<double>("reduced", {1, 1, 1}, {60.00671387});
test.Run();
}
TEST(ReductionOpTest, ReduceLogSumExp_default_axes_do_not_keep_dims) {
OpTester test("ReduceLogSumExp");
test.AddAttribute("keepdims", static_cast<int64_t>(0));
@ -400,6 +417,22 @@ TEST(ReductionOpTest, ReduceLogSumExp_default_axes_do_not_keep_dims) {
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); //TensorRT: full reduce without keepDimensions is not supported with explicit batch
}
// double variant: full reduction (no "axes") with keepdims=0 collapses the
// result to a scalar (shape {}). TensorRT EP is excluded because it does not
// support full reduce without keepDimensions with explicit batch.
TEST(ReductionOpTest, ReduceLogSumExp_default_axes_do_not_keep_dims_double) {
OpTester test("ReduceLogSumExp");
test.AddAttribute("keepdims", static_cast<int64_t>(0));
test.AddInput<double>("data", {3, 2, 2},
{5.0, 1.0,
20.0, 2.0,
30.0, 1.0,
40.0, 2.0,
55.0, 1.0,
60.0, 2.0});
test.AddOutput<double>("reduced", {}, {60.00671387});
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); //TensorRT: full reduce without keepDimensions is not supported with explicit batch
}
TEST(ReductionOpTest, ReduceLogSumExp_do_not_keepdims) {
OpTester test("ReduceLogSumExp");
test.AddAttribute("axes", std::vector<int64_t>{1});
@ -417,6 +450,23 @@ TEST(ReductionOpTest, ReduceLogSumExp_do_not_keepdims) {
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); //TensorRT: full reduce without keepDimensions is not supported with explicit batch
}
// double variant: reduce over axis 1 only, keepdims=0, so {3,2,2} -> {3,2}.
// Each output element is log(exp(a)+exp(b)) over the axis-1 pair.
// NOTE(review): the TensorRT exclusion comment mentions "full reduce" but this
// test reduces a single axis — presumably copied from the sibling tests; the
// exclusion itself matches the float variant above. TODO confirm.
TEST(ReductionOpTest, ReduceLogSumExp_do_not_keepdims_double) {
OpTester test("ReduceLogSumExp");
test.AddAttribute("axes", std::vector<int64_t>{1});
test.AddAttribute("keepdims", (int64_t)0);
test.AddInput<double>("data", {3, 2, 2},
{5.0, 1.0,
20.0, 2.0,
30.0, 1.0,
40.0, 2.0,
55.0, 1.0,
60.0, 2.0});
test.AddOutput<double>("reduced", {3, 2}, {20.0, 2.31326175, 40.00004578, 2.31326175, 60.00671387, 2.31326175});
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); //TensorRT: full reduce without keepDimensions is not supported with explicit batch
}
TEST(ReductionOpTest, ReduceLogSumExp_do_not_keepdims_2) {
OpTester test("ReduceLogSumExp");
test.AddAttribute("axes", std::vector<int64_t>{0});
@ -427,6 +477,16 @@ TEST(ReductionOpTest, ReduceLogSumExp_do_not_keepdims_2) {
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); //TensorRT: full reduce without keepDimensions is not supported with explicit batch
}
// double variant: 1-D input reduced over its only axis with keepdims=0
// yields a scalar; expected value is log(e^1 + e^2 + e^3) ≈ 3.40760596.
TEST(ReductionOpTest, ReduceLogSumExp_do_not_keepdims_2_double) {
OpTester test("ReduceLogSumExp");
test.AddAttribute("axes", std::vector<int64_t>{0});
test.AddAttribute("keepdims", (int64_t)0);
test.AddInput<double>("data", {3},
{1.0, 2.0, 3.0});
test.AddOutput<double>("reduced", {}, {3.40760596});
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); //TensorRT: full reduce without keepDimensions is not supported with explicit batch
}
TEST(ReductionOpTest, ReduceLogSumExp_keepdims) {
OpTester test("ReduceLogSumExp");
test.AddAttribute("axes", std::vector<int64_t>{1});
@ -444,6 +504,23 @@ TEST(ReductionOpTest, ReduceLogSumExp_keepdims) {
test.Run();
}
// double variant: reduce over axis 1 with keepdims=1, so {3,2,2} -> {3,1,2};
// same expected values as the do_not_keepdims variant, only the shape differs.
TEST(ReductionOpTest, ReduceLogSumExp_keepdims_double) {
OpTester test("ReduceLogSumExp");
test.AddAttribute("axes", std::vector<int64_t>{1});
test.AddAttribute("keepdims", (int64_t)1);
test.AddInput<double>("data", {3, 2, 2},
{5.0, 1.0,
20.0, 2.0,
30.0, 1.0,
40.0, 2.0,
55.0, 1.0,
60.0, 2.0});
test.AddOutput<double>("reduced", {3, 1, 2}, {20.0, 2.31326175, 40.00004578, 2.31326175, 60.00671387, 2.31326175});
test.Run();
}
TEST(ReductionOpTest, ReduceLogSumExp) {
OpTester test("ReduceLogSumExp");
test.AddAttribute("axes", std::vector<int64_t>{0, 2});
@ -461,6 +538,23 @@ TEST(ReductionOpTest, ReduceLogSumExp) {
test.Run();
}
// double variant: reduce over axes {0,2} with keepdims=1, so {3,2,2} -> {1,2,1}.
// Each output is log-sum-exp over six elements; dominated by the largest inputs
// (9,11 and 10,12), giving ≈10.3317 and ≈12.3317.
TEST(ReductionOpTest, ReduceLogSumExp_double) {
OpTester test("ReduceLogSumExp");
test.AddAttribute("axes", std::vector<int64_t>{0, 2});
test.AddAttribute("keepdims", (int64_t)1);
test.AddInput<double>("data", {3, 2, 2},
{1.0, 2.0,
3.0, 4.0,
5.0, 6.0,
7.0, 8.0,
9.0, 10.0,
11.0, 12.0});
test.AddOutput<double>("reduced", {1, 2, 1}, {10.33174133, 12.33174133});
test.Run();
}
TEST(ReductionOpTest, ReduceLogSumExp_int32) {
OpTester test("ReduceLogSumExp");
test.AddAttribute("axes", std::vector<int64_t>{0, 2});
@ -485,6 +579,13 @@ TEST(ReductionOpTest, ReduceLogSumExp0DTensor) {
test.AddOutput<float>("reduced", {}, {2});
test.Run();
}
// double variant: 0-D (scalar) input — reducing a scalar is the identity,
// so log(exp(2)) == 2.
TEST(ReductionOpTest, ReduceLogSumExp0DTensor_double) {
OpTester test("ReduceLogSumExp");
test.AddInput<double>("data", {}, {2});
test.AddOutput<double>("reduced", {}, {2});
test.Run();
}
#endif // !(defined USE_TENSORRT) && !(defined USE_TVM)
TEST(ReductionOpTest, ReduceMax_default_axes_keepdims) {
@ -690,6 +791,22 @@ TEST(ReductionOpTest, ReduceMean_default_axes_keepdims) {
test.Run();
}
// double variant of ReduceMean_default_axes_keepdims: mean over all 12
// elements (no "axes"), keepdims=1 keeps the {1,1,1} shape. 219/12 = 18.25.
TEST(ReductionOpTest, ReduceMean_default_axes_keepdims_double) {
OpTester test("ReduceMean");
test.AddAttribute("keepdims", (int64_t)1);
test.AddInput<double>("data", {3, 2, 2},
{5.0, 1.0,
20.0, 2.0,
30.0, 1.0,
40.0, 2.0,
55.0, 1.0,
60.0, 2.0});
test.AddOutput<double>("reduced", {1, 1, 1}, {18.25});
test.Run();
}
TEST(ReductionOpTest, ReduceMean_default_axes_do_not_keep_dims) {
OpTester test("ReduceMean");
test.AddAttribute("keepdims", static_cast<int64_t>(0));
@ -706,6 +823,22 @@ TEST(ReductionOpTest, ReduceMean_default_axes_do_not_keep_dims) {
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); //TensorRT: full reduce without keepDimensions is not supported with explicit batch
}
// double variant: full-tensor mean with keepdims=0 collapses to a scalar.
// TensorRT EP is excluded: full reduce without keepDimensions is unsupported
// with explicit batch.
TEST(ReductionOpTest, ReduceMean_default_axes_do_not_keep_dims_double) {
OpTester test("ReduceMean");
test.AddAttribute("keepdims", static_cast<int64_t>(0));
test.AddInput<double>("data", {3, 2, 2},
{5.0, 1.0,
20.0, 2.0,
30.0, 1.0,
40.0, 2.0,
55.0, 1.0,
60.0, 2.0});
test.AddOutput<double>("reduced", {}, {18.25});
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); //TensorRT: full reduce without keepDimensions is not supported with explicit batch
}
TEST(ReductionOpTest, ReduceMean_do_not_keepdims) {
OpTester test("ReduceMean");
test.AddAttribute("axes", std::vector<int64_t>{1});
@ -731,6 +864,31 @@ TEST(ReductionOpTest, ReduceMean_do_not_keepdims) {
test.Run();
}
// double variant: mean over axis 1 with keepdims=0, so {3,2,2} -> {3,2}.
// Per-pair means: (5+20)/2=12.5, (1+2)/2=1.5, (30+40)/2=35.0, (55+60)/2=57.5.
// Fix: the input initializer previously mixed float literals (1.0f, 2.0f)
// into a double tensor; use plain double literals for consistency with every
// other double test (values are identical — 1.0f/2.0f convert exactly).
TEST(ReductionOpTest, ReduceMean_do_not_keepdims_double) {
OpTester test("ReduceMean");
test.AddAttribute("axes", std::vector<int64_t>{1});
test.AddAttribute("keepdims", (int64_t)0);
test.AddInput<double>("data", {3, 2, 2},
{5.0, 1.0,
20.0, 2.0,
30.0, 1.0,
40.0, 2.0,
55.0, 1.0,
60.0, 2.0});
test.AddOutput<double>("reduced", {3, 2}, {12.5, 1.5, 35.0, 1.5, 57.5, 1.5});
#if defined(__arm__)
// armv7 isn't as accurate so need to add a little tolerance for the diffs
// expected[i] evaluates to 35,
// output[i] evaluates to 34.999866485595703
test.SetOutputRelErr("reduced", 1e-5f);
#endif
test.Run();
}
TEST(ReductionOpTest, ReduceMean_do_not_keepdims_2) {
OpTester test("ReduceMean");
test.AddAttribute("axes", std::vector<int64_t>{0});
@ -741,6 +899,16 @@ TEST(ReductionOpTest, ReduceMean_do_not_keepdims_2) {
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); //TensorRT: full reduce without keepDimensions is not supported with explicit batch
}
// double variant: 1-D input reduced over its only axis with keepdims=0
// yields a scalar; mean of {1,2,3} is 2.0.
TEST(ReductionOpTest, ReduceMean_do_not_keepdims_2_double) {
OpTester test("ReduceMean");
test.AddAttribute("axes", std::vector<int64_t>{0});
test.AddAttribute("keepdims", (int64_t)0);
test.AddInput<double>("data", {3},
{1.0, 2.0, 3.0});
test.AddOutput<double>("reduced", {}, {2.0});
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); //TensorRT: full reduce without keepDimensions is not supported with explicit batch
}
TEST(ReductionOpTest, ReduceMean_keepdims) {
OpTester test("ReduceMean");
test.AddAttribute("axes", std::vector<int64_t>{1});
@ -766,6 +934,31 @@ TEST(ReductionOpTest, ReduceMean_keepdims) {
test.Run();
}
// double variant: mean over axis 1 with keepdims=1, so {3,2,2} -> {3,1,2};
// same expected values as the do_not_keepdims variant, only the shape differs.
TEST(ReductionOpTest, ReduceMean_keepdims_double) {
OpTester test("ReduceMean");
test.AddAttribute("axes", std::vector<int64_t>{1});
test.AddAttribute("keepdims", (int64_t)1);
test.AddInput<double>("data", {3, 2, 2},
{5.0, 1.0,
20.0, 2.0,
30.0, 1.0,
40.0, 2.0,
55.0, 1.0,
60.0, 2.0});
test.AddOutput<double>("reduced", {3, 1, 2}, {12.5, 1.5, 35.0, 1.5, 57.5, 1.5});
#if defined(__arm__)
// armv7 isn't as accurate so need to add a little tolerance for the diffs
// expected[i] evaluates to 35,
// output[i] evaluates to 34.999866485595703
test.SetOutputRelErr("reduced", 1e-5f);
#endif
test.Run();
}
TEST(ReductionOpTest, ReduceMean) {
OpTester test("ReduceMean");
test.AddAttribute("axes", std::vector<int64_t>{0, 2});
@ -784,6 +977,24 @@ TEST(ReductionOpTest, ReduceMean) {
test.Run();
}
// double variant: mean over axes {0,2} with keepdims=1, so {3,2,2} -> {1,2,1}.
// (1+2+5+6+9+10)/6 = 5.5 and (3+4+7+8+11+12)/6 = 7.5.
TEST(ReductionOpTest, ReduceMean_double) {
OpTester test("ReduceMean");
test.AddAttribute("axes", std::vector<int64_t>{0, 2});
test.AddAttribute("keepdims", (int64_t)1);
test.AddInput<double>("data", {3, 2, 2},
{1.0, 2.0,
3.0, 4.0,
5.0, 6.0,
7.0, 8.0,
9.0, 10.0,
11.0, 12.0});
test.AddOutput<double>("reduced", {1, 2, 1}, {5.5, 7.5});
test.Run();
}
TEST(ReductionOpTest, ReduceMean_int32) {
OpTester test("ReduceMean");
test.AddAttribute("axes", std::vector<int64_t>{0, 2});
@ -808,6 +1019,13 @@ TEST(ReductionOpTest, ReduceMean0DTensor) {
test.AddOutput<float>("reduced", {}, {2});
test.Run();
}
// double variant: 0-D (scalar) input — mean of a single value is the value
// itself.
TEST(ReductionOpTest, ReduceMean0DTensor_double) {
OpTester test("ReduceMean");
test.AddInput<double>("data", {}, {2});
test.AddOutput<double>("reduced", {}, {2});
test.Run();
}
#endif // !(defined USE_TENSORRT) && !(defined USE_TVM)
TEST(ReductionOpTest, ReduceMin_default_axes_keepdims) {
@ -2080,5 +2298,14 @@ TEST(ReductionOpTest, ReduceInfLogSumExp) {
test.Run();
}
// double variant of ReduceInfLogSumExp: -inf inputs must not poison the
// result — exp(-inf) contributes 0, so each row reduces to log(exp(1.0)) = 1.0.
TEST(ReductionOpTest, ReduceInfLogSumExp_double) {
OpTester test("ReduceLogSumExp");
test.AddAttribute("axes", std::vector<int64_t>{1});
test.AddAttribute("keepdims", (int64_t)0);
test.AddInput<double>("data", {2, 2}, {1.0, DOUBLE_NINF, DOUBLE_NINF, 1.0});
test.AddOutput<double>("reduced", {2}, {1.0, 1.0});
test.Run();
}
} // namespace test
} // namespace onnxruntime

View file

@ -42,11 +42,17 @@ ReductionOpTest.ReduceLogSum_do_not_keepdims_2
ReductionOpTest.ReduceLogSumAxes01
ReductionOpTest.ReduceLogSum0DTensor
ReductionOpTest.ReduceLogSumExp_default_axes_keepdims
ReductionOpTest.ReduceLogSumExp_default_axes_keepdims_double
ReductionOpTest.ReduceLogSumExp_default_axes_do_not_keep_dims
ReductionOpTest.ReduceLogSumExp_default_axes_do_not_keep_dims_double
ReductionOpTest.ReduceLogSumExp_do_not_keepdims
ReductionOpTest.ReduceLogSumExp_do_not_keepdims_double
ReductionOpTest.ReduceLogSumExp_do_not_keepdims_2
ReductionOpTest.ReduceLogSumExp_do_not_keepdims_2_double
ReductionOpTest.ReduceLogSumExp_keepdims
ReductionOpTest.ReduceLogSumExp_keepdims_double
ReductionOpTest.ReduceLogSumExp
ReductionOpTest.ReduceLogSumExp_double
ReductionOpTest.ReduceMax_default_axes_keepdims
ReductionOpTest.ReduceMax_default_axes_do_not_keep_dims
ReductionOpTest.ReduceMax_do_not_keepdims
@ -55,11 +61,17 @@ ReductionOpTest.ReduceMax_keepdims
ReductionOpTest.ReduceMax
ReductionOpTest.ReduceMax_int32
ReductionOpTest.ReduceMean_default_axes_keepdims
ReductionOpTest.ReduceMean_default_axes_keepdims_double
ReductionOpTest.ReduceMean_default_axes_do_not_keep_dims
ReductionOpTest.ReduceMean_default_axes_do_not_keep_dims_double
ReductionOpTest.ReduceMean_do_not_keepdims
ReductionOpTest.ReduceMean_do_not_keepdims_double
ReductionOpTest.ReduceMean_do_not_keepdims_2
ReductionOpTest.ReduceMean_do_not_keepdims_2_double
ReductionOpTest.ReduceMean_keepdims
ReductionOpTest.ReduceMean_keepdims_double
ReductionOpTest.ReduceMean
ReductionOpTest.ReduceMean_double
ReductionOpTest.ReduceMean_int32
ReductionOpTest.ReduceMin_default_axes_keepdims
ReductionOpTest.ReduceMin_default_axes_do_not_keep_dims
@ -107,6 +119,7 @@ ReductionOpTest.ReduceInfMin
ReductionOpTest.ReduceInfSum
ReductionOpTest.ReduceInfLogSum
ReductionOpTest.ReduceInfLogSumExp
ReductionOpTest.ReduceInfLogSumExp_double
GatherOpTest.Gather_invalid_index_cpu
Scatter.InvalidIndex
LogSoftmaxOperator.LargeNumber