Implement DepthToSpace uint8_t and Enable DropQDQNodesRules (#23352)

### Description
<!-- Describe your changes. -->

- Implemented the DepthToSpace uint8_t kernel.
- Enabled DropQDQNodesRules for DepthToSpace.
- Added unit tests for the DepthToSpace uint8_t kernel.

### Motivation and Context
<!-- - Why is this change required? What problem does it solve?
- If it fixes an open issue, please link to the issue here. -->

This commit aims to enhance the performance of the Image
Super-Resolution INT8 Model (RFDN). Specifically, it improves the
Inferences Per Second (IPS) by 25%, providing a significant boost in
efficiency and speed.
This commit is contained in:
Yi-Hong Lyu 2025-01-15 19:24:50 -08:00 committed by GitHub
parent 331fc36b6a
commit e51bcfb541
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 64 additions and 12 deletions

View file

@ -84,8 +84,8 @@ Do not modify directly.*
|||[11, 13]|**T** = tensor(double), tensor(float), tensor(int32), tensor(int64)<br/> **T2** = tensor(int32), tensor(int64)|
|DFT|*in* input:**T1**<br> *in* dft_length:**T2**<br> *in* axis:**tensor(int64)**<br> *out* output:**T1**<br><br>or<br><br>*in* input:**T1**<br> *in* dft_length:**T2**<br> *out* output:**T1**|20+|**T1** = tensor(double), tensor(float)<br/> **T2** = tensor(int32), tensor(int64)|
|||[17, 19]|**T1** = tensor(double), tensor(float)<br/> **T2** = tensor(int32), tensor(int64)|
|DepthToSpace|*in* input:**T**<br> *out* output:**T**|13+|**T** = tensor(double), tensor(float)|
|||[11, 12]|**T** = tensor(double), tensor(float)|
|DepthToSpace|*in* input:**T**<br> *out* output:**T**|13+|**T** = tensor(double), tensor(float), tensor(uint8)|
|||[11, 12]|**T** = tensor(double), tensor(float), tensor(uint8)|
|||[1, 10]|**T** = tensor(double), tensor(float)|
|DequantizeLinear|*in* x:**T**<br> *in* x_scale:**tensor(float)**<br> *in* x_zero_point:**T**<br> *out* y:**tensor(float)**<br><br>or<br><br>*in* x:**T1**<br> *in* x_scale:**T2**<br> *in* x_zero_point:**T1**<br> *out* y:**T2**|21+|**T1** = tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(int16), tensor(int32), tensor(int4), tensor(int8), tensor(uint16), tensor(uint4), tensor(uint8)<br/> **T2** = tensor(float), tensor(float16)|
|||[19, 20]|**T1** = tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(int32), tensor(int8), tensor(uint8)<br/> **T2** = tensor(float), tensor(float16)|

View file

@ -77,7 +77,8 @@ void DropQDQNodesRules(SelectorActionRegistry& qdq_selector_action_registry) {
true,
cpu_ep);
qdq_selector_action_registry.RegisterSelectorAndAction(drop_action_no_int16_name,
{{"Resize", {}}},
{{"DepthToSpace", {}},
{"Resize", {}}},
std::move(selector_no_16bit),
std::move(drop_action_no_int16));
@ -91,7 +92,7 @@ void DropQDQNodesRules(SelectorActionRegistry& qdq_selector_action_registry) {
std::move(drop_action_no_int16_and_positive_scale));
std::unique_ptr<NodeSelector> selector = std::make_unique<QDQ::DropQDQNodesSelector>(true, false, true, providers);
// DepthToSpace and SpaceToDepth not included because there are no integer implementations.
// SpaceToDepth not included because there are no integer implementations.
// https://github.com/microsoft/onnxruntime/issues/21287
qdq_selector_action_registry.RegisterSelectorAndAction(drop_action_name,
{{"Expand", {}},

View file

@ -43,7 +43,8 @@ ONNX_CPU_OPERATOR_VERSIONED_KERNEL(
12,
KernelDefBuilder()
.TypeConstraint("T", {DataTypeImpl::GetTensorType<float>(),
DataTypeImpl::GetTensorType<double>()}),
DataTypeImpl::GetTensorType<double>(),
DataTypeImpl::GetTensorType<uint8_t>()}),
DepthToSpace);
ONNX_CPU_OPERATOR_KERNEL(
@ -51,7 +52,8 @@ ONNX_CPU_OPERATOR_KERNEL(
13,
KernelDefBuilder()
.TypeConstraint("T", {DataTypeImpl::GetTensorType<float>(),
DataTypeImpl::GetTensorType<double>()}),
DataTypeImpl::GetTensorType<double>(),
DataTypeImpl::GetTensorType<uint8_t>()}),
DepthToSpace);
// intermediate tensor shapes are:
@ -196,6 +198,19 @@ Status DepthToSpace::Compute(OpKernelContext* context) const {
onnxruntime::narrow<std::ptrdiff_t>(blocksize_),
onnxruntime::narrow<std::ptrdiff_t>(input_width),
onnxruntime::narrow<std::ptrdiff_t>(blocksize_));
} else if (input.IsDataType<uint8_t>()) {
SpaceDepthOpCpuImpl<uint8_t>(input, output, permutation,
onnxruntime::narrow<std::ptrdiff_t>(batch),
onnxruntime::narrow<std::ptrdiff_t>(dim1),
onnxruntime::narrow<std::ptrdiff_t>(blocksize_),
onnxruntime::narrow<std::ptrdiff_t>(dim3),
onnxruntime::narrow<std::ptrdiff_t>(input_height),
onnxruntime::narrow<std::ptrdiff_t>(input_width),
onnxruntime::narrow<std::ptrdiff_t>(input_depth / blocksize_ / blocksize_),
onnxruntime::narrow<std::ptrdiff_t>(input_height),
onnxruntime::narrow<std::ptrdiff_t>(blocksize_),
onnxruntime::narrow<std::ptrdiff_t>(input_width),
onnxruntime::narrow<std::ptrdiff_t>(blocksize_));
} else {
// user will not see this as the kernel doesn't claim support for types other than float, double and uint8_t
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Unsupported input type in DepthToSpace op: ", input.DataType());

View file

@ -13,7 +13,7 @@ template <typename T>
class TensorOpTest : public ::testing::Test {
};
using TensorOpTestTypes = ::testing::Types<float, MLFloat16>;
using TensorOpTestTypes = ::testing::Types<float, MLFloat16, uint8_t>;
TYPED_TEST_SUITE(TensorOpTest, TensorOpTestTypes);
TEST(TensorOpTest, SpaceToDepthTest_1) {
@ -224,6 +224,7 @@ TEST(TensorOpTest, DepthToSpaceTest_1_double) {
test.AddOutput<double>("output", {N, C / (blocksize * blocksize), H * blocksize, W * blocksize}, result);
test.Run();
}
TEST(TensorOpTest, DepthToSpaceTest_2) {
OpTester test("DepthToSpace", 7); // create an opset 7 model
constexpr int64_t blocksize = 2;
@ -308,14 +309,24 @@ TYPED_TEST(TensorOpTest, DepthToSpaceTest_3) {
if constexpr (std::is_same<TypeParam, float>::value) {
test.AddInput<float>("input", {N, C, H, W}, X);
test.AddOutput<float>("output", {2, 3, 6, 4}, result);
} else {
} else if constexpr (std::is_same<TypeParam, MLFloat16>::value) {
std::vector<TypeParam> X_fp16(X.size());
std::vector<TypeParam> result_fp16(result.size());
ConvertFloatToMLFloat16(result.data(), result_fp16.data(), result.size());
ConvertFloatToMLFloat16(X.data(), X_fp16.data(), X.size());
test.AddOutput<TypeParam>("output", {2, 3, 6, 4}, result_fp16);
ConvertFloatToMLFloat16(result.data(), result_fp16.data(), result.size());
test.AddInput<TypeParam>("input", {N, C, H, W}, X_fp16);
test.AddOutput<TypeParam>("output", {2, 3, 6, 4}, result_fp16);
} else if constexpr (std::is_same<TypeParam, uint8_t>::value) {
std::vector<uint8_t> X_u8(X.size());
std::vector<uint8_t> result_u8(result.size());
ConvertFloatToUint8_t(X.data(), X_u8.data(), X.size());
ConvertFloatToUint8_t(result.data(), result_u8.data(), result.size());
test.AddInput<uint8_t>("input", {N, C, H, W}, X_u8);
test.AddOutput<uint8_t>("output", {2, 3, 6, 4}, result_u8);
} else {
ORT_THROW("Type not supported");
}
// TODO: Test is flaky on QNN EP (CPU backend).
// Re-enable when the QnnCPUBackendTests.DISABLED_SpaceToDepth_Flaky test is fixed.
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kQnnExecutionProvider});
@ -363,13 +374,22 @@ TYPED_TEST(TensorOpTest, DepthToSpaceTest_4) {
if constexpr (std::is_same<TypeParam, float>::value) {
test.AddInput<float>("input", {N, C, H, W}, X);
test.AddOutput<float>("output", {2, 3, 6, 4}, result);
} else {
} else if constexpr (std::is_same<TypeParam, MLFloat16>::value) {
std::vector<TypeParam> X_fp16(X.size());
std::vector<TypeParam> result_fp16(result.size());
ConvertFloatToMLFloat16(X.data(), X_fp16.data(), X.size());
ConvertFloatToMLFloat16(result.data(), result_fp16.data(), result.size());
test.AddInput<TypeParam>("input", {N, C, H, W}, X_fp16);
test.AddOutput<TypeParam>("output", {2, 3, 6, 4}, result_fp16);
} else if constexpr (std::is_same<TypeParam, uint8_t>::value) {
std::vector<uint8_t> X_u8(X.size());
std::vector<uint8_t> result_u8(result.size());
ConvertFloatToUint8_t(X.data(), X_u8.data(), X.size());
ConvertFloatToUint8_t(result.data(), result_u8.data(), result.size());
test.AddInput<uint8_t>("input", {N, C, H, W}, X_u8);
test.AddOutput<uint8_t>("output", {2, 3, 6, 4}, result_u8);
} else {
ORT_THROW("Type not supported");
}
// TODO: Test is flaky on QNN EP (CPU backend).
@ -401,14 +421,24 @@ TYPED_TEST(TensorOpTest, DepthToSpaceTest_5) {
if constexpr (std::is_same<TypeParam, float>::value) {
test.AddInput<float>("input", {N, C, H, W}, X);
test.AddOutput<float>("output", {1, 1, 4, 6}, result);
} else {
} else if constexpr (std::is_same<TypeParam, MLFloat16>::value) {
std::vector<TypeParam> X_fp16(X.size());
std::vector<TypeParam> result_fp16(result.size());
ConvertFloatToMLFloat16(X.data(), X_fp16.data(), X.size());
ConvertFloatToMLFloat16(result.data(), result_fp16.data(), result.size());
test.AddInput<TypeParam>("input", {N, C, H, W}, X_fp16);
test.AddOutput<TypeParam>("output", {1, 1, 4, 6}, result_fp16);
} else if constexpr (std::is_same<TypeParam, uint8_t>::value) {
std::vector<uint8_t> X_u8(X.size());
std::vector<uint8_t> result_u8(result.size());
ConvertFloatToUint8_t(X.data(), X_u8.data(), X.size());
ConvertFloatToUint8_t(result.data(), result_u8.data(), result.size());
test.AddInput<uint8_t>("input", {N, C, H, W}, X_u8);
test.AddOutput<uint8_t>("output", {1, 1, 4, 6}, result_u8);
} else {
ORT_THROW("Type not supported");
}
// TODO: Test is flaky on QNN EP (CPU backend).
// Re-enable when the QnnCPUBackendTests.DISABLED_SpaceToDepth_Flaky2 test is fixed.
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kQnnExecutionProvider});

View file

@ -15,6 +15,12 @@ inline void ConvertFloatToMLFloat16(const float* f_datat, MLFloat16* h_data, siz
output_vector = in_vector.template cast<Eigen::half>();
}
/// Converts a float buffer to uint8_t element-wise via Eigen.
/// @param f_datat     source buffer of `input_size` floats (test data is expected
///                    to hold small non-negative integral values, so the cast
///                    truncation is lossless for the intended inputs)
/// @param u8_data     destination buffer of `input_size` bytes
/// @param input_size  number of elements to convert
inline void ConvertFloatToUint8_t(const float* f_datat, uint8_t* u8_data, size_t input_size) {
  auto in_vector = ConstEigenVectorMap<float>(f_datat, input_size);
  // Unlike the MLFloat16 helpers, no void* round-trip cast is needed here:
  // u8_data is already a uint8_t*, so it can be mapped directly.
  auto output_vector = EigenVectorMap<uint8_t>(u8_data, input_size);
  output_vector = in_vector.template cast<uint8_t>();
}
inline void ConvertMLFloat16ToFloat(const MLFloat16* h_data, float* f_data, size_t input_size) {
auto in_vector =
ConstEigenVectorMap<Eigen::half>(static_cast<const Eigen::half*>(static_cast<const void*>(h_data)), input_size);