Skip to content

Commit e096ffd

Browse files
committed
qaq
1 parent 3be03dd commit e096ffd

File tree

2 files changed

+46
-0
lines changed

2 files changed

+46
-0
lines changed

src/layer/loongarch/convolution_loongarch.cpp

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -950,6 +950,29 @@ int Convolution_loongarch::forward_int8_loongarch(const Mat& bottom_blob, Mat& t
950950
}
951951
}
952952

953+
#if __loongarch_sx
954+
if (opt.use_packing_layout)
955+
{
956+
// NCNN_LOGE("top_blob_int32 %d %d", top_blob_int32.c, top_blob_int32.elempack);
957+
if (use_int8_requantize)
958+
{
959+
// TODO implement winograd sgemm packed int8 pack1 output
960+
if (top_blob_int32.elempack == 4 && top_blob_int32.c % 2 == 1)
961+
{
962+
Mat tmp;
963+
convert_packing(top_blob_int32, tmp, 1, opt);
964+
top_blob_int32 = tmp;
965+
}
966+
if (top_blob_int32.elempack == 4 && top_blob_int32.c % 2 == 0)
967+
{
968+
Mat tmp;
969+
convert_packing(top_blob_int32, tmp, 8, opt);
970+
top_blob_int32 = tmp;
971+
}
972+
}
973+
}
974+
#endif
975+
953976
if (use_int8_requantize)
954977
{
955978
requantize_from_int32_to_int8(top_blob_int32, top_blob, scale_in_data, top_blob_int8_scales, bias_data, activation_type, activation_params, opt);

src/layer/mips/convolution_mips.cpp

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -950,6 +950,29 @@ int Convolution_mips::forward_int8_mips(const Mat& bottom_blob, Mat& top_blob, c
950950
}
951951
}
952952

953+
#if __mips_msa
954+
if (opt.use_packing_layout)
955+
{
956+
// NCNN_LOGE("top_blob_int32 %d %d", top_blob_int32.c, top_blob_int32.elempack);
957+
if (use_int8_requantize)
958+
{
959+
// TODO implement winograd sgemm packed int8 pack1 output
960+
if (top_blob_int32.elempack == 4 && top_blob_int32.c % 2 == 1)
961+
{
962+
Mat tmp;
963+
convert_packing(top_blob_int32, tmp, 1, opt);
964+
top_blob_int32 = tmp;
965+
}
966+
if (top_blob_int32.elempack == 4 && top_blob_int32.c % 2 == 0)
967+
{
968+
Mat tmp;
969+
convert_packing(top_blob_int32, tmp, 8, opt);
970+
top_blob_int32 = tmp;
971+
}
972+
}
973+
}
974+
#endif
975+
953976
if (use_int8_requantize)
954977
{
955978
requantize_from_int32_to_int8(top_blob_int32, top_blob, scale_in_data, top_blob_int8_scales, bias_data, activation_type, activation_params, opt);

0 commit comments

Comments (0)