// Copyright 2026 Tencent
// SPDX-License-Identifier: BSD-3-Clause

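// forward declaration of the AVX512 BF16 kernel, compiled separately and selected at
// runtime when this translation unit itself is not built with AVX512 BF16 enabled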
#if NCNN_RUNTIME_CPU && NCNN_AVX512BF16 && __AVX512F__ && !__AVX512BF16__
void bnll_bf16s_avx512bf16(Mat& a, const Option& opt);
#endif

static void bnll_bf16s(Mat& a, const Option& opt)
{
#if NCNN_RUNTIME_CPU && NCNN_AVX512BF16 && __AVX512F__ && !__AVX512BF16__
    if (ncnn::cpu_support_x86_avx512_bf16())
    {
        bnll_bf16s_avx512bf16(a, opt);
        return;
    }
#endif

    int w = a.w;
    int h = a.h;
    int d = a.d;
    int channels = a.c;
    int elempack = a.elempack;
    int size = w * h * d * elempack;

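    // apply BNLL to each channel in parallel; elements are stored as bf16 (unsigned short)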
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        unsigned short* ptr = a.channel(q);

        int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
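        // AVX512: 16 elements per iteration
        // bnll(x) = max(x, 0) + log(1 + exp(-|x|))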
        __m512 _one_avx512 = _mm512_set1_ps(1.f);
        __m512 _zero_avx512 = _mm512_setzero_ps();
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = bfloat2float_avx512(_mm256_loadu_si256((const __m256i*)ptr));
            __mmask16 mask = _mm512_cmp_ps_mask(_p, _zero_avx512, _CMP_GT_OQ);
            __m512 _abs_p = _mm512_castsi512_ps(_mm512_and_epi32(_mm512_castps_si512(_p), _mm512_set1_epi32(0x7fffffff)));
            __m512 _tmp = log512_ps(_mm512_add_ps(_one_avx512, exp512_ps(_mm512_sub_ps(_zero_avx512, _abs_p))));
            _p = _mm512_mask_add_ps(_tmp, mask, _tmp, _p);
            _mm256_storeu_si256((__m256i*)ptr, float2bfloat_avx512(_p));
            ptr += 16;
        }
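        // tail of fewer than 16 elements, handled with masked load/store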
        if (i < size)
        {
            const unsigned int remain = size - i;
            __mmask16 _mask = (__mmask16)((1u << remain) - 1);
            __m512 _p = bfloat2float_avx512(_mm256_maskz_loadu_epi16(_mask, ptr));
            __mmask16 mask = _mm512_cmp_ps_mask(_p, _zero_avx512, _CMP_GT_OQ);
            __m512 _abs_p = _mm512_castsi512_ps(_mm512_and_epi32(_mm512_castps_si512(_p), _mm512_set1_epi32(0x7fffffff)));
            __m512 _tmp = log512_ps(_mm512_add_ps(_one_avx512, exp512_ps(_mm512_sub_ps(_zero_avx512, _abs_p))));
            _p = _mm512_mask_add_ps(_tmp, mask, _tmp, _p);
            _mm256_mask_storeu_epi16(ptr, _mask, float2bfloat_avx512(_p));
            i += remain;
        }
#else // __AVX512F__
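        // AVX: 8 elements per iteration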
        __m256 _one_avx = _mm256_set1_ps(1.f);
        __m256 _zero_avx = _mm256_setzero_ps();
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = bfloat2float_avx(_mm_loadu_si128((const __m128i*)ptr));
            __m256 mask = _mm256_cmp_ps(_p, _mm256_setzero_ps(), _CMP_GT_OQ);
            __m256 _abs_p = _mm256_and_ps(_p, *(__m256*)_ps256_inv_sign_mask);
            __m256 _tmp = log256_ps(_mm256_add_ps(_one_avx, exp256_ps(_mm256_sub_ps(_zero_avx, _abs_p))));
            __m256 _x = _mm256_and_ps(_p, mask);
            _p = _mm256_add_ps(_x, _tmp);
            _mm_storeu_si128((__m128i*)ptr, float2bfloat_avx(_p));
            ptr += 8;
        }
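        // remaining groups of 4 with SSE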
        __m128 _one = _mm_set1_ps(1.f);
        __m128 _zero = _mm_setzero_ps();
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = bfloat2float_sse(_mm_loadl_epi64((const __m128i*)ptr));
            __m128 mask = _mm_cmpgt_ps(_p, _zero);
            __m128 _abs_p = _mm_and_ps(_p, *(__m128*)_ps_inv_sign_mask);
            __m128 _tmp = log_ps(_mm_add_ps(_one, exp_ps(_mm_sub_ps(_zero, _abs_p))));
            __m128 _x = _mm_and_ps(_p, mask);
            _p = _mm_add_ps(_x, _tmp);
            _mm_storel_epi64((__m128i*)ptr, float2bfloat_sse(_p, _p));
            ptr += 4;
        }
#endif // __AVX512F__
#else // __AVX__
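        // SSE2 only: 4 elements per iteration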
        __m128 _one = _mm_set1_ps(1.f);
        __m128 _zero = _mm_setzero_ps();
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = bfloat2float_sse(_mm_loadl_epi64((const __m128i*)ptr));
            __m128 mask = _mm_cmpgt_ps(_p, _zero);
            __m128 _abs_p = _mm_and_ps(_p, *(__m128*)_ps_inv_sign_mask);
            __m128 _tmp = log_ps(_mm_add_ps(_one, exp_ps(_mm_sub_ps(_zero, _abs_p))));
            __m128 _x = _mm_and_ps(_p, mask);
            _p = _mm_add_ps(_x, _tmp);
            _mm_storel_epi64((__m128i*)ptr, float2bfloat_sse(_p, _p));
            ptr += 4;
        }
#endif // __AVX__
#endif // __SSE2__
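        // scalar tail for any leftover elements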
        for (; i < size; i++)
        {
            float v = bfloat16_to_float32(*ptr);
            if (v > 0)
                v = v + logf(1.f + expf(-v));
            else
                v = logf(1.f + expf(v));
            *ptr = float32_to_bfloat16(v);
            ptr++;
        }
    }
}