Skip to content

Commit ec51a2a

Browse files
[ONNX] Add support for GroupNormalization v.21 (#32700)
### Details: Add support for the GroupNormalization v.21 ONNX operation. v21 vs v18 differences (https://onnx.ai/onnx/operators/onnx__GroupNormalization.html): the Scale & Bias inputs now have shape [C] (channels) instead of [num_groups], and a stash_type attribute is present which determines the precision used for the first part of the calculations. ### Tickets: - [CVS-139937](https://jira.devtools.intel.com/browse/CVS-139937) Signed-off-by: Andrii Staikov <andrii.staikov@intel.com>
1 parent aeac611 commit ec51a2a

File tree

5 files changed

+177
-6
lines changed

5 files changed

+177
-6
lines changed

src/frontends/onnx/docs/supported_ops.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,7 @@ OpenVINO provides support for operations of Default Opset (empty in table below)
7979
| |Greater |1 |13, 9, 7, 1 | |
8080
| |GreaterOrEqual |16, 12 |16, 12 | |
8181
| |GridSample |16 |22, 20, 16 | |
82-
| |GroupNormalization |18 |21, 18 | |
82+
| |GroupNormalization |21, 18 |21, 18 | |
8383
| |HammingWindow |17 |17 | |
8484
| |HannWindow |17 |17 | |
8585
| |HardSigmoid |1 |22, 6, 1 | |

src/frontends/onnx/frontend/src/op/group_normalization.cpp

Lines changed: 39 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,21 +5,25 @@
55
#include "openvino/op/group_normalization.hpp"
66

77
#include "core/operator_set.hpp"
8+
#include "exceptions.hpp"
89
#include "openvino/op/broadcast.hpp"
910
#include "openvino/op/constant.hpp"
11+
#include "openvino/op/convert.hpp"
1012
#include "openvino/op/divide.hpp"
1113
#include "openvino/op/gather.hpp"
1214
#include "openvino/op/reshape.hpp"
1315
#include "openvino/op/shape_of.hpp"
1416
#include "openvino/op/unsqueeze.hpp"
17+
#include "utils/common.hpp"
18+
using ::ONNX_NAMESPACE::TensorProto_DataType;
1519
using namespace ov::op;
1620
using ov::Shape;
1721

1822
namespace ov {
1923
namespace frontend {
2024
namespace onnx {
2125
namespace ai_onnx {
22-
namespace opset_1 {
26+
namespace opset_18 {
2327
ov::OutputVector group_normalization(const ov::frontend::onnx::Node& node) {
2428
const auto inputs = node.get_ov_inputs();
2529
OPENVINO_ASSERT(inputs.size() == 3);
@@ -51,8 +55,40 @@ ov::OutputVector group_normalization(const ov::frontend::onnx::Node& node) {
5155

5256
return {std::make_shared<v12::GroupNormalization>(data, c_scale, c_bias, num_groups, eps)};
5357
}
54-
ONNX_OP("GroupNormalization", OPSET_SINCE(1), ai_onnx::opset_1::group_normalization);
55-
} // namespace opset_1
58+
ONNX_OP("GroupNormalization", OPSET_RANGE(1, 20), ai_onnx::opset_18::group_normalization);
59+
} // namespace opset_18
60+
namespace opset_21 {
61+
ov::OutputVector group_normalization(const ov::frontend::onnx::Node& node) {
62+
const auto inputs = node.get_ov_inputs();
63+
OPENVINO_ASSERT(inputs.size() == 3);
64+
65+
auto default_stash_type_i = static_cast<int64_t>(TensorProto_DataType::TensorProto_DataType_FLOAT);
66+
int64_t stash_type_i = node.get_attribute_value<int64_t>("stash_type", default_stash_type_i);
67+
element::Type stash_type = common::get_ov_element_type(stash_type_i);
68+
69+
ov::Output<ov::Node> data = inputs[0]; // Shape [N, C, ...]
70+
ov::Output<ov::Node> scale = inputs[1]; // Shape [C]
71+
ov::Output<ov::Node> bias = inputs[2]; // Shape [C]
72+
73+
element::Type original_type = data.get_element_type();
74+
bool needs_type_casting = stash_type != original_type;
75+
if (needs_type_casting) {
76+
data = std::make_shared<ov::op::v0::Convert>(data, stash_type);
77+
scale = std::make_shared<ov::op::v0::Convert>(scale, stash_type);
78+
bias = std::make_shared<ov::op::v0::Convert>(bias, stash_type);
79+
}
80+
81+
const auto eps = node.get_attribute_value<float>("epsilon", 1e-05f);
82+
const auto num_groups = node.get_attribute_value<int64_t>("num_groups");
83+
84+
ov::Output<ov::Node> op = std::make_shared<v12::GroupNormalization>(data, scale, bias, num_groups, eps);
85+
if (needs_type_casting)
86+
op = std::make_shared<ov::op::v0::Convert>(op, original_type);
87+
88+
return {op};
89+
}
90+
ONNX_OP("GroupNormalization", OPSET_SINCE(21), ai_onnx::opset_21::group_normalization);
91+
} // namespace opset_21
5692
} // namespace ai_onnx
5793
} // namespace onnx
5894
} // namespace frontend
Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,96 @@
1+
ir_version: 10
2+
producer_name: "OpenVINO ONNX Frontend"
3+
graph {
4+
node {
5+
input: "x"
6+
input: "scale"
7+
input: "bias"
8+
output: "y"
9+
op_type: "GroupNormalization"
10+
attribute {
11+
name: "epsilon"
12+
f: 0.0099999997764825821
13+
type: FLOAT
14+
}
15+
attribute {
16+
name: "num_groups"
17+
i: 2
18+
type: INT
19+
}
20+
}
21+
name: "test_group_normalization_epsilon"
22+
input {
23+
name: "x"
24+
type {
25+
tensor_type {
26+
elem_type: 1
27+
shape {
28+
dim {
29+
dim_value: 3
30+
}
31+
dim {
32+
dim_value: 4
33+
}
34+
dim {
35+
dim_value: 2
36+
}
37+
dim {
38+
dim_value: 2
39+
}
40+
}
41+
}
42+
}
43+
}
44+
input {
45+
name: "scale"
46+
type {
47+
tensor_type {
48+
elem_type: 1
49+
shape {
50+
dim {
51+
dim_value: 4
52+
}
53+
}
54+
}
55+
}
56+
}
57+
input {
58+
name: "bias"
59+
type {
60+
tensor_type {
61+
elem_type: 1
62+
shape {
63+
dim {
64+
dim_value: 4
65+
}
66+
}
67+
}
68+
}
69+
}
70+
output {
71+
name: "y"
72+
type {
73+
tensor_type {
74+
elem_type: 1
75+
shape {
76+
dim {
77+
dim_value: 3
78+
}
79+
dim {
80+
dim_value: 4
81+
}
82+
dim {
83+
dim_value: 2
84+
}
85+
dim {
86+
dim_value: 2
87+
}
88+
}
89+
}
90+
}
91+
}
92+
}
93+
opset_import {
94+
domain: ""
95+
version: 21
96+
}

src/frontends/onnx/tests/onnx_import.in.cpp

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6829,6 +6829,47 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_group_normalization_2grp_custom_eps) {
68296829
test_case.run_with_tolerance_as_fp(0.000001f);
68306830
}
68316831

6832+
OPENVINO_TEST(${BACKEND_NAME}, onnx_group_normalization_21) {
    auto model = convert_model("group_normalization_21.onnx");

    auto test_case = ov::test::TestCase(model, s_device);
    // Input x: shape {3, 4, 2, 2}
    test_case.add_input<float>(
        {1.764052391052246f,   0.40015721321105957f, 0.978738009929657f,   2.2408931255340576f,
         1.8675580024719238f,  -0.9772778749465942f, 0.9500884413719177f,  -0.15135720372200012f,
         -0.10321885347366333f, 0.4105985164642334f, 0.14404356479644775f, 1.4542734622955322f,
         0.7610377073287964f,  0.12167501449584961f, 0.44386324286460876f, 0.3336743414402008f,
         1.4940791130065918f,  -0.2051582634449005f, 0.3130677044391632f,  -0.8540957570075989f,
         -2.5529897212982178f, 0.653618574142456f,   0.8644362092018127f,  -0.7421650290489197f,
         2.269754648208618f,   -1.4543657302856445f, 0.04575851559638977f, -0.18718385696411133f,
         1.5327792167663574f,  1.4693588018417358f,  0.154947429895401f,   0.37816253304481506f,
         -0.8877857327461243f, -1.980796456336975f,  -0.34791216254234314f, 0.15634897351264954f,
         1.2302906513214111f,  1.202379822731018f,   -0.38732680678367615f, -0.302302747964859f,
         -1.0485529899597168f, -1.420017957687378f,  -1.7062702178955078f, 1.950775384902954f,
         -0.5096521973609924f, -0.4380742907524109f, -1.2527953386306763f, 0.7774903774261475f});
    // Input scale: shape {4} — per-channel, per the v21 spec
    test_case.add_input<float>(
        {-1.6138978004455566f, -0.21274028718471527f, -0.8954665660858154f, 0.38690251111984253f});
    // Input bias: shape {4}. Fix: added the missing `f` suffix on the first
    // constant — a double literal inside a braced initializer_list<float> is a
    // narrowing conversion, which is ill-formed under C++11 list-initialization.
    test_case.add_input<float>({-0.5108051300048828f, -1.18063223361969f, -0.02818222902715206f, 0.4283318817615509f});

    test_case.add_expected_output<float>(
        Shape{3, 4, 2, 2},
        {
            -1.8928775787353516f, 0.24930185079574585f, -0.6594365239143372f, -2.641819477081299f,
            -1.384243369102478f,  -0.7952562570571899f, -1.1942929029464722f, -0.9662526845932007f,
            1.0357780456542969f,  0.03993310034275055f, 0.5565513372421265f,  -1.9828449487686157f,
            0.6923606395721436f,  0.15695568919181824f, 0.42675745487213135f, 0.3344848155975342f,
            -2.7151150703430176f, -0.4068778157234192f, -1.1108338832855225f, 0.4746362566947937f,
            -0.7465286254882812f, -1.3207058906555176f, -1.3584550619125366f, -1.0707759857177734f,
            -1.4356461763381958f, 1.570522427558899f,   0.35959845781326294f, 0.5476332902908325f,
            0.7794156074523926f,  0.7572963237762451f,  0.29886627197265625f, 0.3767174780368805f,
            0.6620793342590332f,  2.4348504543304443f,  -0.21355000138282776f, -1.0314189195632935f,
            -1.4788639545440674f, -1.4728968143463135f, -1.1330219507217407f, -1.1511999368667603f,
            0.4269883334636688f,  0.7122754454612732f,  0.9321187734603882f,  -1.876512050628662f,
            0.4104909896850586f,  0.43424272537231445f, 0.16389334201812744f, 0.837604284286499f,
        });

    test_case.run_with_tolerance_as_fp(0.000001f);
}
6872+
68326873
OPENVINO_TEST(${BACKEND_NAME}, onnx_model_mm_nms_rotated) {
68336874
auto model = convert_model("mm_nms_rotated.onnx");
68346875

src/frontends/onnx/tests/tests_python/test_backend.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -581,8 +581,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None
581581
"OnnxBackendNodeModelTest.test_gelu_default_2_expanded_cpu",
582582
"OnnxBackendNodeModelTest.test_reduce_log_sum_exp_empty_set_expanded_cpu",
583583
"OnnxBackendNodeModelTest.test_reduce_max_empty_set_cpu",
584-
"OnnxBackendNodeModelTest.test_group_normalization_epsilon_cpu",
585-
"OnnxBackendNodeModelTest.test_group_normalization_example_cpu",
586584
"OnnxBackendNodeModelTest.test_qlinearmatmul_3D_int8_float16_cpu",
587585
"OnnxBackendNodeModelTest.test_qlinearmatmul_3D_int8_float32_cpu",
588586
"OnnxBackendNodeModelTest.test_qlinearmatmul_3D_uint8_float16_cpu",

0 commit comments

Comments
 (0)