Add check that bias and scale sizes match norm_size in LayerNormalization (#13060)

### Description
Add check that bias and scale sizes match norm_size in LayerNormalization.

### Motivation and Context
#12917
Parent: 19c51376c4
Commit: b820256f34

3 changed files with 36 additions and 1 deletion
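For background on the check: LayerNormalization treats `X` as a `[norm_count, norm_size]` matrix split at `axis` and normalizes each row over its `norm_size` elements, with `scale` and `bias` indexed by position inside that row. A minimal reference loop (plain C++, illustrative only, not the kernel's actual code) showing why both inputs must have exactly `norm_size` elements:

```cpp
#include <cmath>
#include <cstdint>
#include <vector>

// Illustrative reference loop; names and layout are assumptions, not the
// ONNX Runtime kernel. X is viewed as [norm_count, norm_size]; scale and
// bias are indexed by j in [0, norm_size), which is why their sizes must
// equal norm_size.
void LayerNormRef(const std::vector<float>& X, std::vector<float>& Y,
                  const std::vector<float>& scale, const std::vector<float>& bias,
                  int64_t norm_count, int64_t norm_size, float epsilon) {
  for (int64_t i = 0; i < norm_count; ++i) {
    const float* x = X.data() + i * norm_size;
    float* y = Y.data() + i * norm_size;
    float mean = 0.f, var = 0.f;
    for (int64_t j = 0; j < norm_size; ++j) mean += x[j];
    mean /= norm_size;
    for (int64_t j = 0; j < norm_size; ++j) var += (x[j] - mean) * (x[j] - mean);
    var /= norm_size;
    const float inv_std = 1.f / std::sqrt(var + epsilon);
    for (int64_t j = 0; j < norm_size; ++j) {
      // scale[j] and bias[j] would read out of bounds if their size != norm_size.
      y[j] = (x[j] - mean) * inv_std * scale[j] + bias[j];
    }
  }
}
```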
CPU implementation (`ComputeImpl`):

```diff
@@ -33,6 +33,15 @@ Status ComputeImpl(OpKernelContext* p_ctx, int64_t orig_axis, float epsilon, boo
   auto norm_count = x_shape.SizeToDimension(axis);
   auto norm_size = x_shape.SizeFromDimension(axis);
 
+  const auto scale_size = scale->Shape().Size();
+  const auto bias_size = (bias_data) ? bias->Shape().Size() : 0;
+  if (scale_size != norm_size || (bias_data && bias_size != norm_size)) {
+    return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
+                           "Size of X.shape()[axis:] == ", norm_size,
+                           ". Size of scale and bias (if provided) must match this. Got scale size of ",
+                           scale_size, " and bias size of ", bias_size);
+  }
+
   Tensor* Y = p_ctx->Output(0, x_shape);
   auto Y_data = Y->MutableData<T>();
 
```
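`SizeToDimension(axis)` and `SizeFromDimension(axis)` split the shape at `axis` into the product of the leading dimensions and the product of the trailing ones. A quick standalone sketch of that arithmetic (hypothetical free functions that mirror the `TensorShape` helpers), using the `{1, 3, 2}` shape from the test further down:

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Hypothetical stand-ins for TensorShape::SizeToDimension/SizeFromDimension.
int64_t SizeToDimension(const std::vector<int64_t>& dims, size_t axis) {
  return std::accumulate(dims.begin(), dims.begin() + axis, int64_t{1},
                         std::multiplies<int64_t>());
}

int64_t SizeFromDimension(const std::vector<int64_t>& dims, size_t axis) {
  return std::accumulate(dims.begin() + axis, dims.end(), int64_t{1},
                         std::multiplies<int64_t>());
}

int main() {
  const std::vector<int64_t> dims{1, 3, 2};  // X shape from the test below
  std::cout << "norm_count = " << SizeToDimension(dims, 1)     // 1
            << ", norm_size = " << SizeFromDimension(dims, 1)  // 6
            << '\n';  // scale/bias must therefore have 6 elements
}
```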
CUDA implementation (`LayerNorm::ComputeInternal`):

```diff
@@ -49,7 +49,15 @@ Status LayerNorm<T, U, V, simplified>::ComputeInternal(OpKernelContext* ctx) con
   int n1 = gsl::narrow<int>(x_shape.SizeToDimension(axis));
   int n2 = gsl::narrow<int>(x_shape.SizeFromDimension(axis));
 
-  ORT_ENFORCE(n2 != 1, "n2 should not be 1");
+  const auto scale_size = scale->Shape().Size();
+  const auto bias_size = (bias_data) ? bias->Shape().Size() : 0;
+  if (n2 == 1 || scale_size != n2 || (bias_data && bias_size != n2)) {
+    return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
+                           "Size of X.shape()[axis:] == ", n2,
+                           ". Size of scale and bias (if provided) must match this "
+                           "and the size must not be 1. Got scale size of ",
+                           scale_size, " and bias size of ", bias_size);
+  }
 
   // Outputs
   Tensor* Y = ctx->Output(0, x_shape);
```
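On the CUDA side, the old `ORT_ENFORCE(n2 != 1, ...)`, which throws, is folded into the same status-returning check, so a `norm_size` of 1 now comes back as `INVALID_ARGUMENT` rather than an exception. A standalone mirror of the predicate (illustrative names, not ONNX Runtime API) to make the rejected cases concrete:

```cpp
#include <cstdint>
#include <iostream>
#include <optional>

// Illustrative mirror of the CUDA-side condition; not ONNX Runtime API.
bool ScaleBiasValid(int64_t n2, int64_t scale_size,
                    std::optional<int64_t> bias_size) {
  if (n2 == 1) return false;                        // norm_size of 1 is rejected outright
  if (scale_size != n2) return false;               // scale must cover the normalized span
  if (bias_size && *bias_size != n2) return false;  // bias, when present, must as well
  return true;
}

int main() {
  std::cout << std::boolalpha
            << ScaleBiasValid(6, 6, 6) << '\n'              // true: sizes agree
            << ScaleBiasValid(6, 2, 2) << '\n'              // false: the test case below
            << ScaleBiasValid(1, 1, std::nullopt) << '\n';  // false: n2 == 1
}
```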
Test (`LayerNormTest`):

```diff
@@ -124,5 +124,23 @@ TEST(LayerNormTest, LayerNorm17_double) {
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kDnnlExecutionProvider});
 }
 
+TEST(LayerNormTest, LayerNorm_InvalidScaleBias) {
+  OpTester test("LayerNormalization");
+  test.AddAttribute<float>("epsilon", 1e-05f);
+
+  // as axis is 1, the scale and bias should have size 6
+  std::vector<int64_t> dims{1, 3, 2};
+  test.AddInput<float>("x", dims, {1.2416f, 0.946123f, 13.1685f, 0.36423f, 21.145f, 0.03941f});
+  test.AddInput<float>("gamma", {2}, {-0.6953f, 5.1824f});
+  test.AddInput<float>("bias", {2}, {0.6435f, -0.3964f});
+  test.AddAttribute<int64_t>("axis", 1);
+  test.AddOutput<float>("output", dims, {-0.0516f, -5.5776f, -0.0518f, -5.5788f, -0.0518f, -5.5788f});
+  // CPU and CUDA EPs have check for unexpected scale or bias sizes. Exclude other EPs with a LayerNormalization
+  // implementation for which we don't control the check or error message.
+  test.Run(OpTester::ExpectResult::kExpectFailure,
+           "Size of X.shape()[axis:] == 6. Size of scale and bias (if provided) must match this",
+           {kDnnlExecutionProvider, kDmlExecutionProvider});
+}
+
 } // namespace test
 } // namespace onnxruntime
```
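Note that the expected failure string in `test.Run` stops right where the CPU and CUDA messages diverge: after "must match this" the CPU message continues with ". Got scale size of ...", while the CUDA one first adds "and the size must not be 1", so (assuming the test harness matches the message as a substring) asserting only the shared prefix lets one test cover both providers.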