
Commit ccc76f0

Remove using namespace ov::op and ov::pass::pattern directives to fix UNITY build conflicts (#33134)
Add using declarations and namespace aliases to improve code readability after removing `using namespace` directives.

### Details:
Remove `using namespace` directives that cause name conflicts in UNITY builds and replace them with explicit using declarations and namespace aliases for improved readability.
- Remove `using namespace ov::op` and `using namespace ov::pass::pattern` directives
- Add specific using declarations (e.g., `using ov::pass::pattern::wrap_type;`) only when a construct is used 2+ times in a file
- Add namespace aliases (e.g., `namespace v0 = ov::op::v0;`) only when a version namespace is used 2+ times in a file
- Replace fully qualified names with short forms where declarations are added

### Tickets:
- 177276
1 parent edb059e commit ccc76f0
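The failure mode named in the title comes from how UNITY (jumbo) builds work: many .cpp files are concatenated into one translation unit, so a file-scope using-directive written for one file applies to every file concatenated after it, and two files that compile cleanly in isolation can make an unqualified name ambiguous. A minimal sketch of the collision, using hypothetical namespaces rather than code from this commit:

```cpp
// Hypothetical two-file unity chunk; lib_a/lib_b stand in for real namespaces.

// --- contents of the first source file in the chunk ---
namespace lib_a { struct Matcher { int id; }; }
using namespace lib_a;  // harmless when this file is compiled alone

// --- contents of the second source file, concatenated below the first ---
namespace lib_b { struct Matcher { int id; }; }
using namespace lib_b;  // also harmless alone

int main() {
    // In the combined translation unit both directives are active:
    // Matcher m{};  // error: reference to 'Matcher' is ambiguous
    lib_a::Matcher m{1};  // the commit's approach: qualify, alias, or add a
                          // targeted using-declaration instead of a directive
    return m.id;
}
```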

File tree: 309 files changed (+10674, -9740 lines)


src/common/transformations/src/transformations/common_optimizations/activations_scaling.cpp

Lines changed: 70 additions & 65 deletions
Large diffs are not rendered by default.

src/common/transformations/src/transformations/common_optimizations/adaptive_pool_to_reduce.cpp

Lines changed: 9 additions & 3 deletions
```diff
@@ -17,9 +17,13 @@
 #include "openvino/pass/pattern/op/wrap_type.hpp"
 #include "transformations/utils/utils.hpp"
 
-using namespace ov::op;
+namespace ov::pass {
 
-ov::pass::AdaptivePoolToReduce::AdaptivePoolToReduce() {
+namespace v0 = ov::op::v0;
+namespace v1 = ov::op::v1;
+namespace v8 = ov::op::v8;
+
+AdaptivePoolToReduce::AdaptivePoolToReduce() {
     MATCHER_SCOPE(AdaptivePoolToReduce);
     auto data_pattern = pattern::any_input();
     auto out_spatial_shape = pattern::wrap_type<v0::Constant>();
@@ -58,6 +62,8 @@ ov::pass::AdaptivePoolToReduce::AdaptivePoolToReduce() {
         return true;
     };
 
-    auto m = std::make_shared<ov::pass::pattern::Matcher>(a_pool, matcher_name);
+    auto m = std::make_shared<pattern::Matcher>(a_pool, matcher_name);
     this->register_matcher(m, callback);
 }
+
+}  // namespace ov::pass
```
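The diff above shows the replacement idiom used throughout the commit: a C++17 nested namespace definition `namespace ov::pass { ... }` lets member definitions drop the `ov::pass::` prefix, while file-local namespace aliases such as `namespace v0 = ov::op::v0;` shorten repeated version-namespace mentions without exporting any names into other files of a unity chunk. A stripped-down sketch with stand-in types (not the real OpenVINO headers):

```cpp
#include <memory>

namespace ov::op::v0 { struct Constant {}; }  // stand-in for the real op header

namespace v0 = ov::op::v0;  // alias: purely file-local shorthand, leaks no names

namespace ov::pass {  // C++17 nested namespace definition

struct ExamplePass {  // hypothetical pass, mirroring AdaptivePoolToReduce's shape
    ExamplePass();
};

// Inside the namespace block the ov::pass:: prefix is no longer needed:
ExamplePass::ExamplePass() {
    auto c = std::make_shared<v0::Constant>();
    (void)c;
}

}  // namespace ov::pass
```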

src/common/transformations/src/transformations/common_optimizations/add_fake_quantize_fusion.cpp

Lines changed: 28 additions & 27 deletions
```diff
@@ -23,34 +23,35 @@
 #include "openvino/pass/pattern/op/wrap_type.hpp"
 #include "transformations/utils/utils.hpp"
 
-ov::pass::AddFakeQuantizeFusion::AddFakeQuantizeFusion() {
+namespace v0 = ov::op::v0;
+namespace v1 = ov::op::v1;
+namespace op_util = ov::op::util;
+
+namespace ov::pass {
+
+AddFakeQuantizeFusion::AddFakeQuantizeFusion() {
     MATCHER_SCOPE(AddFakeQuantizeFusion);
-    auto input_pattern = pass::pattern::any_input();
-    auto const_pattern = ov::pass::pattern::wrap_type<ov::op::v0::Constant>();
-    auto add_pattern =
-        ov::pass::pattern::wrap_type<ov::op::v1::Add>({input_pattern, const_pattern}, pattern::consumers_count(1));
-    auto fq_pattern = ov::pass::pattern::wrap_type<ov::op::v0::FakeQuantize>({add_pattern,
-                                                                              pass::pattern::any_input(),
-                                                                              pass::pattern::any_input(),
-                                                                              pass::pattern::any_input(),
-                                                                              pass::pattern::any_input()});
+    auto input_pattern = pattern::any_input();
+    auto const_pattern = pattern::wrap_type<v0::Constant>();
+    auto add_pattern = pattern::wrap_type<v1::Add>({input_pattern, const_pattern}, pattern::consumers_count(1));
+    auto fq_pattern = pattern::wrap_type<v0::FakeQuantize>(
+        {add_pattern, pattern::any_input(), pattern::any_input(), pattern::any_input(), pattern::any_input()});
     matcher_pass_callback callback = [OV_CAPTURE_CPY_AND_THIS](pattern::Matcher& m) {
         const auto& pattern_value_map = m.get_pattern_value_map();
         const auto& input = pattern_value_map.at(input_pattern);
         const auto& type = input.get_element_type();
         if (type.bitwidth() < element::f32.bitwidth())
             return false;
-        auto fq = ov::as_type_ptr<ov::op::v0::FakeQuantize>(pattern_value_map.at(fq_pattern).get_node_shared_ptr());
+        auto fq = ov::as_type_ptr<v0::FakeQuantize>(pattern_value_map.at(fq_pattern).get_node_shared_ptr());
         if (!fq)
             return false;
         const auto& add_node = pattern_value_map.at(add_pattern).get_node_shared_ptr();
-        auto add_const =
-            ov::as_type_ptr<ov::op::v0::Constant>(pattern_value_map.at(const_pattern).get_node_shared_ptr());
+        auto add_const = ov::as_type_ptr<v0::Constant>(pattern_value_map.at(const_pattern).get_node_shared_ptr());
         if (!add_const)
             return false;
 
         auto const_shape = add_const->get_shape();
-        if (!ov::op::util::check_for_broadcast(input.get_partial_shape(), const_shape)) {
+        if (!op_util::check_for_broadcast(input.get_partial_shape(), const_shape)) {
             // We can't eliminate Add if Constant input broadcasts another input shape because
             // when we reconnect input from Add to FQ won't broadcast given input, so it will result
             // in shape collision.
@@ -63,9 +64,9 @@ ov::pass::AddFakeQuantizeFusion::AddFakeQuantizeFusion() {
 
         if (!is_single_value) {
             float v;
-            is_single_value = ov::op::util::get_single_value(add_const, v);
+            is_single_value = op_util::get_single_value(add_const, v);
             if (is_single_value) {
-                new_const = std::make_shared<ov::op::v0::Constant>(add_const->get_element_type(), Shape{1}, v);
+                new_const = std::make_shared<v0::Constant>(add_const->get_element_type(), Shape{1}, v);
             }
         }
 
@@ -78,9 +79,9 @@ ov::pass::AddFakeQuantizeFusion::AddFakeQuantizeFusion() {
         if (diff > 0) {
             // Reshape constants like (C, 1, 1) to (1, C, 1, 1)
             const_shape.insert(const_shape.begin(), diff, 1);
-            new_const = std::make_shared<ov::op::v1::Reshape>(
+            new_const = std::make_shared<v1::Reshape>(
                 new_const,
-                ov::op::v0::Constant::create(element::u64, Shape{const_shape.size()}, const_shape),
+                v0::Constant::create(element::u64, Shape{const_shape.size()}, const_shape),
                 false);
         }
 
@@ -96,29 +97,27 @@ ov::pass::AddFakeQuantizeFusion::AddFakeQuantizeFusion() {
         bool add_parent_is_conv_or_mm =
             std::any_of(add_inputs.begin(), add_inputs.end(), [](const Output<Node>& node) -> bool {
                 auto node_ptr = node.get_node();
-                return is_type<ov::op::v1::Convolution>(node_ptr) ||
-                       is_type<ov::op::v1::GroupConvolution>(node_ptr) ||
-                       is_type<ov::op::v1::ConvolutionBackpropData>(node_ptr) ||
-                       is_type<ov::op::v1::GroupConvolutionBackpropData>(node_ptr) ||
-                       is_type<ov::op::v0::MatMul>(node_ptr);
+                return is_type<v1::Convolution>(node_ptr) || is_type<v1::GroupConvolution>(node_ptr) ||
+                       is_type<v1::ConvolutionBackpropData>(node_ptr) ||
+                       is_type<v1::GroupConvolutionBackpropData>(node_ptr) || is_type<v0::MatMul>(node_ptr);
             });
         if (add_parent_is_conv_or_mm)
             return false;
         auto fq_users = fq->get_users();
         // Concat LPT transformation supports per tensor quantization only
         bool fq_user_is_concat =
             std::any_of(fq_users.begin(), fq_users.end(), [](const std::shared_ptr<Node> node_ptr) -> bool {
-                return is_type<ov::op::v0::Concat>(node_ptr);
+                return is_type<v0::Concat>(node_ptr);
             });
         if (fq_user_is_concat)
             return false;
         }
 
-        auto input_low_sub = std::make_shared<ov::op::v1::Subtract>(fq->input_value(1), new_const);
+        auto input_low_sub = std::make_shared<v1::Subtract>(fq->input_value(1), new_const);
         std::shared_ptr<Node> new_input_low = ov::util::get_constant_from_source(input_low_sub);
         if (!new_input_low)
             new_input_low = input_low_sub;
-        auto input_high_sub = std::make_shared<ov::op::v1::Subtract>(fq->input_value(2), new_const);
+        auto input_high_sub = std::make_shared<v1::Subtract>(fq->input_value(2), new_const);
         std::shared_ptr<Node> new_input_high = ov::util::get_constant_from_source(input_high_sub);
         if (!new_input_high)
             new_input_high = input_high_sub;
@@ -133,6 +132,8 @@ ov::pass::AddFakeQuantizeFusion::AddFakeQuantizeFusion() {
         return true;
     };
 
-    auto m = std::make_shared<ov::pass::pattern::Matcher>(fq_pattern, matcher_name);
+    auto m = std::make_shared<pattern::Matcher>(fq_pattern, matcher_name);
    this->register_matcher(m, callback);
 }
+
+}  // namespace ov::pass
```
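Where a single name recurs, the commit prefers a targeted using-declaration over a directive (e.g., `using ov::pass::pattern::wrap_type;` per the commit message). Unlike a using-directive, a using-declaration imports exactly one identifier, so two files in a unity chunk can only collide if they import the same name from different namespaces. A sketch with a hypothetical namespace, not OpenVINO code:

```cpp
// Hypothetical namespace; illustrates the rule, not OpenVINO code.
namespace toolkit {
inline int wrap_type() { return 1; }
inline int other_helper() { return 2; }
}  // namespace toolkit

using toolkit::wrap_type;  // imports exactly one name

int use_twice() {
    int a = wrap_type();  // short form pays off on repeated use
    int b = wrap_type();  // per the commit: declared only when used 2+ times
    return a + b + toolkit::other_helper();  // everything else stays qualified
}
```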

src/common/transformations/src/transformations/common_optimizations/align_eltwise_input_ranks.cpp

Lines changed: 17 additions & 10 deletions
```diff
@@ -14,17 +14,22 @@
 #include "openvino/op/util/binary_elementwise_logical.hpp"
 #include "openvino/pass/pattern/op/wrap_type.hpp"
 
-ov::pass::AlignEltwiseInputRanks::AlignEltwiseInputRanks() {
-    auto eltwise_pattern = pattern::wrap_type<ov::op::v0::SquaredDifference,
-                                              ov::op::util::BinaryElementwiseComparison,
-                                              ov::op::util::BinaryElementwiseLogical,
-                                              ov::op::util::BinaryElementwiseArithmetic,
-                                              ov::op::v0::FakeQuantize>(pattern::has_static_rank());
+namespace ov::pass {
+
+namespace v0 = ov::op::v0;
+namespace op_util = ov::op::util;
+
+AlignEltwiseInputRanks::AlignEltwiseInputRanks() {
+    auto eltwise_pattern = pattern::wrap_type<v0::SquaredDifference,
+                                              op_util::BinaryElementwiseComparison,
+                                              op_util::BinaryElementwiseLogical,
+                                              op_util::BinaryElementwiseArithmetic,
+                                              v0::FakeQuantize>(pattern::has_static_rank());
 
     matcher_pass_callback callback = [=](pattern::Matcher& m) {
         auto node = m.get_match_root();
 
-        auto fq = as_type<ov::op::v0::FakeQuantize>(node.get());
+        auto fq = as_type<v0::FakeQuantize>(node.get());
         if (fq) {
             if (fq->get_auto_broadcast() != ov::op::AutoBroadcastType::NUMPY) {
                 return false;
@@ -40,23 +45,23 @@ ov::pass::AlignEltwiseInputRanks::AlignEltwiseInputRanks() {
         if (ov::is_type<ov::op::v1::Multiply>(node)) {
             auto inputs = node->input_values();
             if (std::any_of(inputs.begin(), inputs.end(), [](const Output<Node>& input) -> bool {
-                    return ov::is_type<ov::op::v0::NormalizeL2>(input.get_node());
+                    return ov::is_type<v0::NormalizeL2>(input.get_node());
                 }))
                 return false;
         }
 
         const auto rank = static_cast<int64_t>(node->get_output_partial_shape(0).size());
 
         for (size_t i = 0; i < node->get_input_size(); i++) {
-            auto const_node = as_type<ov::op::v0::Constant>(node->get_input_node_ptr(i));
+            auto const_node = as_type<v0::Constant>(node->get_input_node_ptr(i));
             if (const_node == nullptr)
                 continue;
             const auto& const_shape = const_node->get_shape();
             auto diff = rank - static_cast<int64_t>(const_shape.size());
             if (diff > 0) {
                 Shape new_shape = const_shape;
                 new_shape.insert(new_shape.begin(), diff, 1);
-                auto new_const = std::make_shared<ov::op::v0::Constant>(*const_node, new_shape);
+                auto new_const = std::make_shared<v0::Constant>(*const_node, new_shape);
                 copy_runtime_info(node->get_input_node_shared_ptr(i), new_const);
                 node->input(i).replace_source_output(new_const);
             }
@@ -68,3 +73,5 @@ ov::pass::AlignEltwiseInputRanks::AlignEltwiseInputRanks() {
     auto m = std::make_shared<pattern::Matcher>(eltwise_pattern, "AlignEltwiseInputRanks");
     this->register_matcher(m, callback);
 }
+
+}  // namespace ov::pass
```

src/common/transformations/src/transformations/common_optimizations/augru_cell_fusion.cpp

Lines changed: 49 additions & 44 deletions
```diff
@@ -25,9 +25,12 @@
 #include "openvino/pass/pattern/op/wrap_type.hpp"
 #include "ov_ops/augru_cell.hpp"
 
-using namespace std;
-using namespace ov::element;
-using namespace ov::pass::pattern;
+namespace ov::pass {
+
+namespace v0 = ov::op::v0;
+namespace v1 = ov::op::v1;
+
+namespace {
 
 // The 1st input to the Add op is automatically broadcasted
 // from 1d to 2d tensor, but to be compatible with what
@@ -37,8 +40,7 @@ static std::shared_ptr<ov::Node> get_bias_add(const std::shared_ptr<ov::Node>& b
     auto input_source_1_ps = bias_add->input_value(1).get_partial_shape();
     if (input_source_1_ps.is_static() && input_source_1_ps.rank().get_length() == 1) {
         auto unsqueeze =
-            rg.make<ov::op::v0::Unsqueeze>(bias_add->input_value(1),
-                                           ov::op::v0::Constant::create(ov::element::i32, ov::Shape{}, {0}));
+            rg.make<v0::Unsqueeze>(bias_add->input_value(1), v0::Constant::create(ov::element::i32, ov::Shape{}, {0}));
         bias_add->input(1).replace_source_output(unsqueeze);
     }
 
@@ -54,19 +56,20 @@ static std::shared_ptr<ov::Node> get_bias_add(const std::shared_ptr<ov::Node>& b
 // compatible with the code of the transformation.
 static std::shared_ptr<ov::Node> get_weights_matmul(const std::shared_ptr<ov::Node>& mat_mul,
                                                     ov::pass::NodeRegistry& rg) {
-    if (auto matmul = ov::as_type_ptr<ov::op::v0::MatMul>(mat_mul)) {
+    if (auto matmul = ov::as_type_ptr<v0::MatMul>(mat_mul)) {
         if (!matmul->get_transpose_b()) {
-            auto transpose =
-                rg.make<ov::op::v1::Transpose>(matmul->input_value(1),
-                                               ov::op::v0::Constant::create(ov::element::i32, ov::Shape{2}, {1, 0}));
+            auto transpose = rg.make<v1::Transpose>(matmul->input_value(1),
+                                                    v0::Constant::create(ov::element::i32, ov::Shape{2}, {1, 0}));
             matmul->input(1).replace_source_output(transpose);
         }
     }
 
     return mat_mul;
 }
 
-ov::pass::AUGRUCellFusion::AUGRUCellFusion() {
+}  // namespace
+
+AUGRUCellFusion::AUGRUCellFusion() {
     MATCHER_SCOPE(AUGRUCellFusion);
 
     // we can't determine hidden_size or input_size in this case
@@ -75,27 +78,28 @@ ov::pass::AUGRUCellFusion::AUGRUCellFusion() {
         return !(p_shape.rank().is_dynamic() || p_shape[1].is_dynamic());
     };
 
-    auto concat_1 = wrap_type<ov::op::v0::Concat>({any_input(is_first_dim_static), any_input(is_first_dim_static)});
-    auto matmul_1 = wrap_type<ov::op::v0::MatMul>({concat_1, any_input(is_first_dim_static)});
-    auto add_1 = wrap_type<ov::op::v1::Add>({matmul_1, any_input()});
+    auto concat_1 = pattern::wrap_type<v0::Concat>(
+        {pattern::any_input(is_first_dim_static), pattern::any_input(is_first_dim_static)});
+    auto matmul_1 = pattern::wrap_type<v0::MatMul>({concat_1, pattern::any_input(is_first_dim_static)});
+    auto add_1 = pattern::wrap_type<v1::Add>({matmul_1, pattern::any_input()});
     // only Sigmoid is supported in the current version of AUGRUCell
-    auto sigmoid = wrap_type<ov::op::v0::Sigmoid>({add_1});
-    auto split = wrap_type<ov::op::v1::Split>({sigmoid, any_input()});
-    auto multiply = wrap_type<ov::op::v1::Multiply>({split, any_input()});
+    auto sigmoid = pattern::wrap_type<v0::Sigmoid>({add_1});
+    auto split = pattern::wrap_type<v1::Split>({sigmoid, pattern::any_input()});
+    auto multiply = pattern::wrap_type<v1::Multiply>({split, pattern::any_input()});
 
-    auto concat_2 = wrap_type<ov::op::v0::Concat>({any_input(), multiply});
-    auto matmul_2 = wrap_type<ov::op::v0::MatMul>({concat_2, any_input(is_first_dim_static)});
-    auto add_2 = wrap_type<ov::op::v1::Add>({matmul_2, any_input()});
+    auto concat_2 = pattern::wrap_type<v0::Concat>({pattern::any_input(), multiply});
+    auto matmul_2 = pattern::wrap_type<v0::MatMul>({concat_2, pattern::any_input(is_first_dim_static)});
+    auto add_2 = pattern::wrap_type<v1::Add>({matmul_2, pattern::any_input()});
     // only Tanh is supported in the current version of AUGRUCell
-    auto tanh = wrap_type<ov::op::v0::Tanh>({add_2});
+    auto tanh = pattern::wrap_type<v0::Tanh>({add_2});
 
-    auto subtract_1 = wrap_type<ov::op::v1::Subtract>({any_input(), any_input()});
-    auto multiply_2 = wrap_type<ov::op::v1::Multiply>({subtract_1, split});
-    auto subtract_2 = wrap_type<ov::op::v1::Subtract>({any_input(), multiply_2});
-    auto multiply_3 = wrap_type<ov::op::v1::Multiply>({subtract_2, tanh});
+    auto subtract_1 = pattern::wrap_type<v1::Subtract>({pattern::any_input(), pattern::any_input()});
+    auto multiply_2 = pattern::wrap_type<v1::Multiply>({subtract_1, split});
+    auto subtract_2 = pattern::wrap_type<v1::Subtract>({pattern::any_input(), multiply_2});
+    auto multiply_3 = pattern::wrap_type<v1::Multiply>({subtract_2, tanh});
 
-    auto multiply_4 = wrap_type<ov::op::v1::Multiply>({multiply_2, any_input()});
-    auto add_3 = wrap_type<ov::op::v1::Add>({multiply_4, multiply_3});
+    auto multiply_4 = pattern::wrap_type<v1::Multiply>({multiply_2, pattern::any_input()});
+    auto add_3 = pattern::wrap_type<v1::Add>({multiply_4, multiply_3});
 
     matcher_pass_callback callback = [=](pattern::Matcher& m) {
         NodeRegistry rg;
@@ -110,35 +114,34 @@ ov::pass::AUGRUCellFusion::AUGRUCellFusion() {
         auto hidden_size = h_pshape[1].get_length();
         auto input_size = x_pshape[1].get_length();
 
-        auto axis_0 = rg.make<ov::op::v0::Constant>(i64, Shape{}, 0);
-        auto axis_1 = rg.make<ov::op::v0::Constant>(i64, Shape{}, 1);
+        auto axis_0 = rg.make<v0::Constant>(element::i64, Shape{}, 0);
+        auto axis_1 = rg.make<v0::Constant>(element::i64, Shape{}, 1);
 
         auto A = pattern_map.at(subtract_1)->input_value(1);
         // biases are required
         auto bias_add_1 = get_bias_add(pattern_map.at(add_1), rg);
-        auto split_bias_r_z = rg.make<ov::op::v1::Split>(bias_add_1->input_value(1), axis_1, 2);
+        auto split_bias_r_z = rg.make<v1::Split>(bias_add_1->input_value(1), axis_1, 2);
         auto bias_add_2 = get_bias_add(pattern_map.at(add_2), rg);
 
-        auto B = rg.make<ov::op::v0::Concat>(
+        auto B = rg.make<v0::Concat>(
             OutputVector{split_bias_r_z->output(1), split_bias_r_z->output(0), bias_add_2->input_value(1)},
             1);
 
         auto WRrz = get_weights_matmul(pattern_map.at(matmul_1), rg)->input_value(1);
         auto WRh = get_weights_matmul(pattern_map.at(matmul_2), rg)->input_value(1);
 
-        auto split_lenghts = rg.make<ov::op::v0::Constant>(i64, Shape{2}, vector<int64_t>{input_size, hidden_size});
-        auto split_WRrz = rg.make<ov::op::v1::VariadicSplit>(WRrz, axis_1, split_lenghts);
-        auto split_W_r_z = rg.make<ov::op::v1::Split>(split_WRrz->output(0), axis_0, 2);
-        auto split_R_r_z = rg.make<ov::op::v1::Split>(split_WRrz->output(1), axis_0, 2);
-        auto split_WRh = rg.make<ov::op::v1::VariadicSplit>(WRh, axis_1, split_lenghts);
-        auto Wzrh = rg.make<ov::op::v0::Concat>(
-            OutputVector{split_W_r_z->output(1), split_W_r_z->output(0), split_WRh->output(0)},
-            0);
-        auto Rzrh = rg.make<ov::op::v0::Concat>(
-            OutputVector{split_R_r_z->output(1), split_R_r_z->output(0), split_WRh->output(1)},
-            0);
-
-        auto squeeze_B = rg.make<ov::op::v0::Squeeze>(B, axis_0);
+        auto split_lenghts =
+            rg.make<v0::Constant>(element::i64, Shape{2}, std::vector<int64_t>{input_size, hidden_size});
+        auto split_WRrz = rg.make<v1::VariadicSplit>(WRrz, axis_1, split_lenghts);
+        auto split_W_r_z = rg.make<v1::Split>(split_WRrz->output(0), axis_0, 2);
+        auto split_R_r_z = rg.make<v1::Split>(split_WRrz->output(1), axis_0, 2);
+        auto split_WRh = rg.make<v1::VariadicSplit>(WRh, axis_1, split_lenghts);
+        auto Wzrh =
+            rg.make<v0::Concat>(OutputVector{split_W_r_z->output(1), split_W_r_z->output(0), split_WRh->output(0)}, 0);
+        auto Rzrh =
+            rg.make<v0::Concat>(OutputVector{split_R_r_z->output(1), split_R_r_z->output(0), split_WRh->output(1)}, 0);
+
+        auto squeeze_B = rg.make<v0::Squeeze>(B, axis_0);
         auto cell =
             rg.make<ov::op::internal::AUGRUCell>(X, H, Wzrh, Rzrh, squeeze_B, A, H.get_partial_shape()[1].get_length());
 
@@ -148,6 +151,8 @@ ov::pass::AUGRUCellFusion::AUGRUCellFusion() {
         return true;
     };
 
-    auto m = make_shared<Matcher>(add_3, matcher_name);
+    auto m = std::make_shared<pattern::Matcher>(add_3, matcher_name);
     this->register_matcher(m, callback);
 }
+
+}  // namespace ov::pass
```
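Taken together, the files above converge on the same post-commit shape for a matcher pass: file-scope aliases, an `ov::pass` namespace block, and `pattern::` short forms. A schematic sketch in that style follows; the pass itself is hypothetical, the header paths and the `MatcherPass`/`pattern` APIs are assumed from the diffs above, and RTTI macros are omitted for brevity:

```cpp
#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/pass/matcher_pass.hpp"  // header path assumed
#include "openvino/pass/pattern/op/wrap_type.hpp"

namespace v0 = ov::op::v0;  // alias: v0 is used more than once below

namespace ov::pass {

// Hypothetical pass: matches any Constant and accepts the match unchanged.
class MatchConstant : public MatcherPass {
public:
    MatchConstant() {
        auto const_pattern = pattern::wrap_type<v0::Constant>();
        matcher_pass_callback callback = [=](pattern::Matcher& m) {
            auto c = ov::as_type_ptr<v0::Constant>(m.get_match_root());
            return c != nullptr;  // a real pass would rewrite the graph here
        };
        auto matcher = std::make_shared<pattern::Matcher>(const_pattern, "MatchConstant");
        register_matcher(matcher, callback);
    }
};

}  // namespace ov::pass
```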
