Skip to content

Commit e5ca8ae

Browse files
authored
[Codegen] Use GPUPadLayoutAttr to resolve layouts. (#20565)
Instead of using the nop layout resolver, the revision explicitly uses the GPUPadLayoutAttr to resolve all the layouts. If the resolver is present in an executable target, it is prioritized. The revision exposes a "hidden" dependency between the padding layout resolver and the nop layout resolver, because the current type converter is not decoupled enough from data-tiling usage. It is a step towards #20160 --------- Signed-off-by: hanhanW <hanhan0912@gmail.com>
1 parent 05ab7f4 commit e5ca8ae

File tree

5 files changed

+198
-31
lines changed

5 files changed

+198
-31
lines changed

compiler/src/iree/compiler/Codegen/Common/BUILD.bazel

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -189,6 +189,7 @@ iree_compiler_cc_library(
189189
"//compiler/src/iree/compiler/Codegen/Dialect/Codegen/IR:IREECodegenDialect",
190190
"//compiler/src/iree/compiler/Codegen/Dialect/Codegen/Utils",
191191
"//compiler/src/iree/compiler/Codegen/Dialect/GPU/IR:IREEGPUDialect",
192+
"//compiler/src/iree/compiler/Codegen/Dialect/GPU/TargetUtils:KnownTargets",
192193
"//compiler/src/iree/compiler/Codegen/Dialect/VectorExt/IR:IREEVectorExtDialect",
193194
"//compiler/src/iree/compiler/Codegen/Interfaces:BufferizationInterfaces",
194195
"//compiler/src/iree/compiler/Codegen/Interfaces:PartitionableLoopsInterface",

compiler/src/iree/compiler/Codegen/Common/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -224,6 +224,7 @@ iree_cc_library(
224224
iree::compiler::Codegen::Dialect::Codegen::IR::IREECodegenDialect
225225
iree::compiler::Codegen::Dialect::Codegen::Utils
226226
iree::compiler::Codegen::Dialect::GPU::IR::IREEGPUDialect
227+
iree::compiler::Codegen::Dialect::GPU::TargetUtils::KnownTargets
227228
iree::compiler::Codegen::Dialect::VectorExt::IR::IREEVectorExtDialect
228229
iree::compiler::Codegen::Interfaces::BufferizationInterfaces
229230
iree::compiler::Codegen::Interfaces::PartitionableLoopsInterface

compiler/src/iree/compiler/Codegen/Common/MaterializeEncodingIntoPadding.cpp

Lines changed: 85 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -10,11 +10,15 @@
1010
#include "iree/compiler/Codegen/Common/Passes.h"
1111
#include "iree/compiler/Codegen/Dialect/Codegen/IR/IREECodegenAttrs.h"
1212
#include "iree/compiler/Codegen/Dialect/Codegen/IR/IREECodegenDialect.h"
13+
#include "iree/compiler/Codegen/Dialect/GPU/IR/IREEGPUAttrs.h"
14+
#include "iree/compiler/Codegen/Dialect/GPU/TargetUtils/KnownTargets.h"
15+
#include "iree/compiler/Codegen/Utils/GPUUtils.h"
1316
#include "iree/compiler/Dialect/Encoding/IR/EncodingTypes.h"
1417
#include "iree/compiler/Dialect/Flow/IR/FlowOps.h"
1518
#include "iree/compiler/Dialect/Flow/IR/FlowTypes.h"
1619
#include "llvm/ADT/STLExtras.h"
1720
#include "llvm/ADT/SmallVector.h"
21+
#include "llvm/Support/Debug.h"
1822
#include "mlir/Dialect/MemRef/Transforms/Transforms.h"
1923
#include "mlir/Dialect/Tensor/IR/Tensor.h"
2024
#include "mlir/IR/BuiltinTypes.h"
@@ -23,6 +27,8 @@
2327
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
2428
#include "mlir/Transforms/Passes.h"
2529

30+
#define DEBUG_TYPE "iree-codegen-materialize-encoding-into-padding"
31+
2632
namespace mlir::iree_compiler {
2733

2834
#define GEN_PASS_DEF_MATERIALIZEENCODINGINTOPADDINGPASS
@@ -34,24 +40,36 @@ namespace {
3440

3541
// Returns the pad encoding layout, or nullptr if this is not the only layout or
3642
// if there's no encoding at all.
37-
static PadEncodingLayoutAttr getPadLayout(RankedTensorType type) {
38-
auto encoding =
39-
dyn_cast_or_null<IREE::Encoding::LayoutAttr>(type.getEncoding());
40-
if (!encoding) {
43+
static PadEncodingLayoutAttr getPadLayout(Attribute layoutAttr,
44+
RankedTensorType type) {
45+
if (!type.getEncoding()) {
4146
return nullptr;
4247
}
43-
ArrayAttr layouts = encoding.getLayouts();
44-
if (!layouts || layouts.size() != 1) {
45-
return nullptr;
48+
auto encoding =
49+
dyn_cast_or_null<IREE::Encoding::LayoutAttr>(type.getEncoding());
50+
if (encoding) {
51+
ArrayAttr layouts = encoding.getLayouts();
52+
if (layouts.size() != 1) {
53+
return nullptr;
54+
}
55+
return dyn_cast<PadEncodingLayoutAttr>(*layouts.begin());
4656
}
47-
48-
return dyn_cast<PadEncodingLayoutAttr>(*layouts.begin());
57+
Attribute resolvedEncoding =
58+
cast<IREE::Encoding::EncodingLayoutResolverAttrInterface>(layoutAttr)
59+
.getLayout(type);
60+
LLVM_DEBUG({
61+
llvm::dbgs() << "Unresolved type: " << type << "\n";
62+
llvm::dbgs() << "layoutAttr: " << layoutAttr << "\n";
63+
llvm::dbgs() << "Resolved into: " << resolvedEncoding << "\n";
64+
});
65+
return dyn_cast<PadEncodingLayoutAttr>(resolvedEncoding);
4966
}
5067

5168
// Returns a padded tensor type (without encoding) for tensor types with the pad
5269
// encoding layout, or the same type for all other tensors.
53-
static RankedTensorType getPaddedType(RankedTensorType type) {
54-
PadEncodingLayoutAttr layout = getPadLayout(type);
70+
static RankedTensorType getPaddedType(Attribute layoutAttr,
71+
RankedTensorType type) {
72+
PadEncodingLayoutAttr layout = getPadLayout(layoutAttr, type);
5573
if (!isNonZeroPadding(layout)) {
5674
return type.dropEncoding();
5775
}
@@ -67,15 +85,11 @@ static RankedTensorType getPaddedType(RankedTensorType type) {
6785
return RankedTensorType::get(newShape, type.getElementType());
6886
}
6987

70-
static bool hasNonZeroPadding(RankedTensorType type) {
71-
return isNonZeroPadding(getPadLayout(type));
72-
}
73-
7488
struct MaterializePadEncodingTypeConverter final
7589
: MaterializeEncodingTypeConverter {
76-
MaterializePadEncodingTypeConverter(MLIRContext *ctx)
77-
: MaterializeEncodingTypeConverter(
78-
IREE::Codegen::EncodingNopLayoutAttr::get(ctx)) {
90+
MaterializePadEncodingTypeConverter(
91+
IREE::Codegen::LayoutAttrInterface layoutAttr)
92+
: MaterializeEncodingTypeConverter(layoutAttr) {
7993
addConversion([](RankedTensorType type) -> std::optional<RankedTensorType> {
8094
// The type converter is designed for `pad_encoding_layout` encoding
8195
// attribute. By the definition, the final converted type is the same
@@ -85,18 +99,23 @@ struct MaterializePadEncodingTypeConverter final
8599
addConversion([&](IREE::Flow::DispatchTensorType dispatchTensorType)
86100
-> IREE::Flow::DispatchTensorType {
87101
auto type = dyn_cast<RankedTensorType>(dispatchTensorType.getBoundType());
88-
if (!type) {
102+
if (!type || !type.getEncoding()) {
89103
return dispatchTensorType;
90104
}
91105
// The incoming bindings have the padded type, if `pad_encoding_layout` is
92106
// present.
93-
if (getPadLayout(type)) {
94-
type = getPaddedType(type);
107+
if (getPadLayout(getLayoutAttr(), type)) {
108+
type = getPaddedType(getLayoutAttr(), type);
95109
}
96110
return IREE::Flow::DispatchTensorType::get(dispatchTensorType.getAccess(),
97111
type);
98112
});
99113
}
114+
115+
bool hasNonZeroPadding(RankedTensorType type) const {
116+
PadEncodingLayoutAttr layout = getPadLayout(getLayoutAttr(), type);
117+
return layout && !layout.isIdentityLayout();
118+
}
100119
};
101120

102121
/// Pattern to convert `flow.dispatch.tensor.load` operation when
@@ -116,15 +135,15 @@ struct MaterializeFlowDispatchTensorLoadOp final
116135
return rewriter.notifyMatchFailure(loadOp, "unhandled partial loads");
117136
}
118137

138+
auto &typeConverter =
139+
*getTypeConverter<MaterializePadEncodingTypeConverter>();
119140
IREE::Flow::DispatchTensorType sourceType = loadOp.getSourceType();
120141
auto boundTensorType = cast<RankedTensorType>(sourceType.getBoundType());
121-
if (!hasNonZeroPadding(boundTensorType)) {
142+
if (!typeConverter.hasNonZeroPadding(boundTensorType)) {
122143
// Let the Nop pattern handle this.
123144
return rewriter.notifyMatchFailure(loadOp, "no padding applied");
124145
}
125146

126-
auto &typeConverter =
127-
*getTypeConverter<MaterializePadEncodingTypeConverter>();
128147
auto paddedType =
129148
typeConverter.convertType<RankedTensorType>(boundTensorType);
130149
assert(paddedType != boundTensorType && "Expected conversion with padding");
@@ -171,15 +190,15 @@ struct MaterializeFlowDispatchTensorStoreOp final
171190
return rewriter.notifyMatchFailure(storeOp, "unhandled partial stores");
172191
}
173192

193+
auto &typeConverter =
194+
*getTypeConverter<MaterializePadEncodingTypeConverter>();
174195
IREE::Flow::DispatchTensorType targetType = storeOp.getTargetType();
175196
auto boundTensorType = cast<RankedTensorType>(targetType.getBoundType());
176-
if (!hasNonZeroPadding(boundTensorType)) {
197+
if (!typeConverter.hasNonZeroPadding(boundTensorType)) {
177198
// Let the Nop pattern handle this.
178199
return rewriter.notifyMatchFailure(storeOp, "no padding applied");
179200
}
180201

181-
auto &typeConverter =
182-
*getTypeConverter<MaterializePadEncodingTypeConverter>();
183202
IREE::Flow::DispatchTensorType newTargetType =
184203
typeConverter.convertType<IREE::Flow::DispatchTensorType>(targetType);
185204
RankedTensorType paddedType = newTargetType.asRankedTensorType();
@@ -245,8 +264,9 @@ struct MaterializeEncodingIntoPaddingPass final
245264
: impl::MaterializeEncodingIntoPaddingPassBase<
246265
MaterializeEncodingIntoPaddingPass> {
247266
void getDependentDialects(DialectRegistry &registry) const override {
248-
registry.insert<linalg::LinalgDialect, tensor::TensorDialect,
249-
IREE::Codegen::IREECodegenDialect>();
267+
registry.insert<arith::ArithDialect, linalg::LinalgDialect,
268+
tensor::TensorDialect, IREE::Codegen::IREECodegenDialect,
269+
IREE::GPU::IREEGPUDialect>();
250270
}
251271

252272
void runOnOperation() override {
@@ -259,8 +279,43 @@ struct MaterializeEncodingIntoPaddingPass final
259279
return failure();
260280
};
261281

282+
// Retrieve the config from executable target attribute, if any. Otherwise,
283+
// retrieve the config from CLI GPU target and construct a virtual
284+
// configuration.
285+
auto targetAttr = IREE::HAL::ExecutableTargetAttr::lookup(operation);
286+
DictionaryAttr targetConfig;
287+
if (targetAttr) {
288+
targetConfig = targetAttr.getConfiguration();
289+
} else {
290+
IREE::GPU::TargetAttr gpuTargetAttr = getCLGPUTarget(context);
291+
SmallVector<NamedAttribute> items;
292+
items.emplace_back(
293+
IREE::Encoding::kEncodingResolverAttrName,
294+
IREE::GPU::getHIPTargetEncodingLayoutAttr(gpuTargetAttr, "pad"));
295+
targetConfig = DictionaryAttr::get(context, items);
296+
}
297+
298+
// The layoutAttr should come in without any target info attached to it,
299+
// so we need to clone the layout attrs with the configuration so it can
300+
// access the target info during materialization.
301+
//
302+
// Otherwise, fall back to the nop layout.
303+
IREE::Codegen::LayoutAttrInterface layoutAttr;
304+
if (targetConfig &&
305+
targetConfig.contains(IREE::Encoding::kEncodingResolverAttrName)) {
306+
layoutAttr = targetConfig.getAs<IREE::Codegen::LayoutAttrInterface>(
307+
IREE::Encoding::kEncodingResolverAttrName);
308+
auto resolverAttr =
309+
cast<IREE::Encoding::EncodingLayoutResolverAttrInterface>(layoutAttr);
310+
layoutAttr = cast<IREE::Codegen::LayoutAttrInterface>(
311+
resolverAttr.cloneWithSimplifiedConfig(targetConfig));
312+
} else {
313+
layoutAttr = cast<IREE::Codegen::LayoutAttrInterface>(
314+
IREE::Codegen::EncodingNopLayoutAttr::get(context));
315+
}
316+
262317
RewritePatternSet materializeEncodingPattern(context);
263-
MaterializePadEncodingTypeConverter typeConverter(context);
318+
MaterializePadEncodingTypeConverter typeConverter(layoutAttr);
264319
MaterializeEncodingConversionTarget target(*context);
265320
populateMaterializeEncodingPatterns(materializeEncodingPattern, target,
266321
typeConverter,

compiler/src/iree/compiler/Codegen/Common/test/materialize_encoding_into_padding.mlir

Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,94 @@
11
// RUN: iree-opt --pass-pipeline="builtin.module(func.func(iree-codegen-materialize-encoding-into-padding))" \
2+
// RUN: --iree-gpu-test-target=gfx942 \
23
// RUN: --split-input-file %s | FileCheck %s
34

5+
#binding_ro = #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">
6+
#binding = #hal.pipeline.binding<storage_buffer, Indirect>
7+
#encoding = #iree_encoding.matmul_k<k_dims = [1]>
8+
func.func @set_encoding_and_store_with_unresolved_encodings() {
9+
%c0 = arith.constant 0 : index
10+
%0 = hal.interface.constant.load layout(<constants = 1, bindings = [#binding_ro, #binding], flags = Indirect>) ordinal(0) : i32
11+
%1 = arith.index_castui %0 : i32 to index
12+
%3 = hal.interface.binding.subspan layout(<constants = 1, bindings = [#binding_ro, #binding], flags = Indirect>) binding(0) alignment(64) offset(%1) flags("ReadOnly|Indirect")
13+
: !flow.dispatch.tensor<readonly:tensor<2048x2048xf16>>
14+
%4 = hal.interface.binding.subspan layout(<constants = 1, bindings = [#binding_ro, #binding], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags(Indirect)
15+
: !flow.dispatch.tensor<writeonly:tensor<2048x2048xf16, #encoding>>
16+
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0], sizes = [2048, 2048], strides = [1, 1]
17+
: !flow.dispatch.tensor<readonly:tensor<2048x2048xf16>> -> tensor<2048x2048xf16>
18+
%6 = iree_encoding.set_encoding %5 : tensor<2048x2048xf16> -> tensor<2048x2048xf16, #encoding>
19+
flow.dispatch.tensor.store %6, %4, offsets = [0, 0], sizes = [2048, 2048], strides = [1, 1]
20+
: tensor<2048x2048xf16, #encoding> -> !flow.dispatch.tensor<writeonly:tensor<2048x2048xf16, #encoding>>
21+
return
22+
}
23+
// CHECK-LABEL: @set_encoding_and_store_with_unresolved_encodings
24+
// CHECK: %[[A:.+]] = hal.interface.binding.subspan layout({{.+}}) binding(0)
25+
// CHECK-SAME: !flow.dispatch.tensor<readonly:tensor<2048x2048xf16>>
26+
// CHECK: %[[B:.+]] = hal.interface.binding.subspan layout({{.+}}) binding(1)
27+
// CHECK-SAME: !flow.dispatch.tensor<writeonly:tensor<2048x2112xf16>>
28+
// CHECK: %[[LD:.+]] = flow.dispatch.tensor.load %[[A]], offsets = [0, 0], sizes = [2048, 2048], strides = [1, 1]
29+
// CHECK-SAME: !flow.dispatch.tensor<readonly:tensor<2048x2048xf16>> -> tensor<2048x2048xf16>
30+
// CHECK: flow.dispatch.tensor.store %[[LD]], %[[B]], offsets = [0, 0], sizes = [2048, 2048], strides = [1, 1]
31+
// CHECK-SAME: tensor<2048x2048xf16> -> !flow.dispatch.tensor<writeonly:tensor<2048x2112xf16>>
32+
33+
// -----
34+
35+
// The test is the same as the
36+
// set_encoding_and_store_with_unresolved_encodings test, but it gets the
37+
// encoding resolver from executable target.
38+
39+
#binding_ro = #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">
40+
#binding = #hal.pipeline.binding<storage_buffer, Indirect>
41+
#encoding = #iree_encoding.matmul_k<k_dims = [1]>
42+
#executable_target = #hal.executable.target<"rocm", "rocm-hsaco-fb",
43+
{
44+
abi = "hip",
45+
iree.encoding.resolver = #iree_gpu.gpu_pad_layout<>,
46+
iree.gpu.target = #iree_gpu.target<arch = "gfx942",
47+
features = "",
48+
wgp = <compute = fp32,
49+
storage = b32,
50+
subgroup = none,
51+
dot = none,
52+
mma = [<MFMA_F32_16x16x4_F32>],
53+
subgroup_size_choices = [64],
54+
max_workgroup_sizes = [1024, 1024, 1024],
55+
max_thread_count_per_workgroup = 1024,
56+
max_workgroup_memory_bytes = 65536,
57+
max_workgroup_counts = [2147483647, 2147483647, 2147483647],
58+
max_load_instruction_bits = 128,
59+
simds_per_wgp = 4,
60+
vgpr_space_bits = 16384>>
61+
}>
62+
func.func @set_encoding_and_store_with_unresolved_encodings_from_executable() attributes {
63+
hal.executable.target = #executable_target
64+
} {
65+
%c0 = arith.constant 0 : index
66+
%0 = hal.interface.constant.load layout(<constants = 1, bindings = [#binding_ro, #binding], flags = Indirect>) ordinal(0) : i32
67+
%1 = arith.index_castui %0 : i32 to index
68+
%3 = hal.interface.binding.subspan layout(<constants = 1, bindings = [#binding_ro, #binding], flags = Indirect>) binding(0) alignment(64) offset(%1) flags("ReadOnly|Indirect")
69+
: !flow.dispatch.tensor<readonly:tensor<2048x2048xf16>>
70+
%4 = hal.interface.binding.subspan layout(<constants = 1, bindings = [#binding_ro, #binding], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags(Indirect)
71+
: !flow.dispatch.tensor<writeonly:tensor<2048x2048xf16, #encoding>>
72+
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0], sizes = [2048, 2048], strides = [1, 1]
73+
: !flow.dispatch.tensor<readonly:tensor<2048x2048xf16>> -> tensor<2048x2048xf16>
74+
%6 = iree_encoding.set_encoding %5 : tensor<2048x2048xf16> -> tensor<2048x2048xf16, #encoding>
75+
flow.dispatch.tensor.store %6, %4, offsets = [0, 0], sizes = [2048, 2048], strides = [1, 1]
76+
: tensor<2048x2048xf16, #encoding> -> !flow.dispatch.tensor<writeonly:tensor<2048x2048xf16, #encoding>>
77+
return
78+
}
79+
// CHECK-LABEL: @set_encoding_and_store_with_unresolved_encodings_from_executable
80+
// CHECK: %[[A:.+]] = hal.interface.binding.subspan layout({{.+}}) binding(0)
81+
// CHECK-SAME: !flow.dispatch.tensor<readonly:tensor<2048x2048xf16>>
82+
// CHECK: %[[B:.+]] = hal.interface.binding.subspan layout({{.+}}) binding(1)
83+
// CHECK-SAME: !flow.dispatch.tensor<writeonly:tensor<2048x2112xf16>>
84+
// CHECK: %[[LD:.+]] = flow.dispatch.tensor.load %[[A]], offsets = [0, 0], sizes = [2048, 2048], strides = [1, 1]
85+
// CHECK-SAME: !flow.dispatch.tensor<readonly:tensor<2048x2048xf16>> -> tensor<2048x2048xf16>
86+
// CHECK: flow.dispatch.tensor.store %[[LD]], %[[B]], offsets = [0, 0], sizes = [2048, 2048], strides = [1, 1]
87+
// CHECK-SAME: tensor<2048x2048xf16> -> !flow.dispatch.tensor<writeonly:tensor<2048x2112xf16>>
88+
89+
90+
// -----
91+
492
#binding_ro = #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">
593
#binding = #hal.pipeline.binding<storage_buffer, Indirect>
694
#encoding_mmt = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f16, f16, f16]>

compiler/src/iree/compiler/Codegen/ExternalInterfaces/GPUEncodingExternalModels.cpp

Lines changed: 23 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -406,6 +406,27 @@ struct GPUHostEncodingLayoutResolverAttrInterface final
406406
}
407407
};
408408

409+
struct GPUPadDeviceEncodingLayoutAttrInterface final
410+
: Codegen::LayoutAttrInterface::ExternalModel<
411+
GPUPadDeviceEncodingLayoutAttrInterface, GPUPadLayoutAttr> {
412+
413+
// TODO(#20160): Do not implement the interface method because it is
414+
// data-tiling specific. It is a workaround to reuse encoding materialization
415+
// patterns, because we query types from the method in the conversion. We
416+
// should really move them to interface methods, then we can delete the
417+
// workaround.
418+
MaterializeEncodingInfo getEncodingInfo(Attribute attr,
419+
RankedTensorType type) const {
420+
return MaterializeEncodingInfo{};
421+
}
422+
423+
Operation *lowerOp(Attribute attr, OpBuilder &b, Operation *op,
424+
TypeRange convertedResTypes,
425+
ValueRange convertedOperands) const {
426+
return clone(b, op, convertedResTypes, convertedOperands);
427+
}
428+
};
429+
409430
struct GPUPadEncodingLayoutResolverAttrInterface final
410431
: Encoding::EncodingLayoutResolverAttrInterface::ExternalModel<
411432
GPUPadEncodingLayoutResolverAttrInterface, GPUPadLayoutAttr> {
@@ -416,7 +437,7 @@ struct GPUPadEncodingLayoutResolverAttrInterface final
416437
std::optional<IREE::GPU::L1CacheInfo> cache =
417438
IREE::GPU::getL1CacheInfo(gpuTarget);
418439
if (!cache) {
419-
return IREE::Encoding::IdentityEncodingAttr::get(ctx);
440+
return IREE::Codegen::EncodingNopLayoutAttr::get(ctx);
420441
}
421442
return GPUPadLayoutAttr::get(ctx, cache->cacheLineBytes, cache->cacheSets);
422443
}
@@ -535,6 +556,7 @@ void registerGPUEncodingExternalModels(DialectRegistry &registry) {
535556
GPUHostEncodingLayoutResolverAttrInterface,
536557
GPUHostSerializableEncodingAttrInterface>(*ctx);
537558
IREE::GPU::GPUPadLayoutAttr::attachInterface<
559+
GPUPadDeviceEncodingLayoutAttrInterface,
538560
GPUPadEncodingLayoutResolverAttrInterface>(*ctx);
539561
});
540562
}

0 commit comments

Comments
 (0)