| author    | Scott LaVarnway <slavarnway@google.com>  | 2022-07-12 13:22:35 -0700 |
| --------- | ---------------------------------------- | ------------------------- |
| committer | Scott LaVarnway <slavarnway@google.com>  | 2022-07-19 06:13:34 -0700 |
| commit    | 414b4f05124b27c512a63c78c3057934850bc941 | (patch)                   |
| tree      | 34ed0961917696b607239fba7657a329a69d71a2 | /vpx_dsp/x86              |
| parent    | 168b312774166958897f727196a59ee8ad423e78 | (diff)                    |
VPX: Add vpx_quantize_b_32x32_avx2().
Up to 1.36x faster than vpx_quantize_b_32x32_avx() for full
calculations. Up to 1.29x faster for VP9_HIGHBITDEPTH builds.
Bug: b/237714063
Change-Id: I97aa6a18d4dc2f3187b76800f91bbba7be447ef1
Diffstat (limited to 'vpx_dsp/x86')
-rw-r--r-- | vpx_dsp/x86/quantize_avx2.c | 117 |
1 file changed, 114 insertions(+), 3 deletions(-)
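
For reference, the per-coefficient math that the new 16-lane AVX2 kernel vectorizes can be sketched in scalar C. This is an illustrative model only, not the committed code: it processes coefficients in scan order, uses `int32_t` in place of `tran_low_t`, and the function name is made up here; the project's actual reference implementation is vpx_quantize_b_32x32_c().

```c
#include <stdint.h>
#include <stdlib.h>

/* Illustrative scalar model of the 32x32 (log_scale == 1) quantizer the AVX2
 * kernel below implements 16 lanes at a time. Simplified: assumes scan-order
 * input and int32_t outputs instead of tran_low_t. */
static void quantize_b_32x32_model(const int16_t *coeff, int n,
                                   const int16_t *zbin, const int16_t *round,
                                   const int16_t *quant,
                                   const int16_t *quant_shift,
                                   const int16_t *dequant, int32_t *qcoeff,
                                   int32_t *dqcoeff, uint16_t *eob) {
  int i;
  *eob = 0;
  for (i = 0; i < n; ++i) {
    const int band = (i != 0); /* index 0 of each table is DC, index 1 is AC */
    const int abs_coeff = abs(coeff[i]);
    /* For 32x32 blocks, zbin and round are halved with rounding; the patch
     * does this once up front via the new log_scale argument. */
    const int zbin_sc = (zbin[band] + 1) >> 1;
    const int round_sc = (round[band] + 1) >> 1;
    int q = 0;
    if (abs_coeff >= zbin_sc) {
      int tmp = abs_coeff + round_sc;
      if (tmp > INT16_MAX) tmp = INT16_MAX; /* mirrors _mm256_adds_epi16() */
      q = ((((tmp * quant[band]) >> 16) + tmp) * quant_shift[band]) >> 15;
    }
    qcoeff[i] = coeff[i] < 0 ? -q : q;
    /* The dequantized value is also halved for 32x32 (the >> 1 after the
     * dequant multiply in the SIMD code). */
    dqcoeff[i] = qcoeff[i] * dequant[band] / 2;
    if (q) *eob = (uint16_t)(i + 1);
  }
}
```

The halving of zbin/round above corresponds to the new log_scale handling added to load_b_values_avx2() in the first hunk below, and the `/ 2` on the dequantized value corresponds to the `_mm256_srli_epi32(..., 1)` in the new 32x32 kernel.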
```diff
diff --git a/vpx_dsp/x86/quantize_avx2.c b/vpx_dsp/x86/quantize_avx2.c
index e1c6e944c..6fd517487 100644
--- a/vpx_dsp/x86/quantize_avx2.c
+++ b/vpx_dsp/x86/quantize_avx2.c
@@ -18,15 +18,25 @@ static VPX_FORCE_INLINE void load_b_values_avx2(
     const int16_t *zbin_ptr, __m256i *zbin, const int16_t *round_ptr,
     __m256i *round, const int16_t *quant_ptr, __m256i *quant,
     const int16_t *dequant_ptr, __m256i *dequant, const int16_t *shift_ptr,
-    __m256i *shift) {
+    __m256i *shift, int log_scale) {
   *zbin = _mm256_castsi128_si256(_mm_load_si128((const __m128i *)zbin_ptr));
   *zbin = _mm256_permute4x64_epi64(*zbin, 0x54);
+  if (log_scale > 0) {
+    const __m256i rnd = _mm256_set1_epi16((int16_t)(1 << (log_scale - 1)));
+    *zbin = _mm256_add_epi16(*zbin, rnd);
+    *zbin = _mm256_srai_epi16(*zbin, log_scale);
+  }
   // Subtracting 1 here eliminates a _mm256_cmpeq_epi16() instruction when
   // calculating the zbin mask. (See quantize_b_logscale{0,1,2}_16)
   *zbin = _mm256_sub_epi16(*zbin, _mm256_set1_epi16(1));
 
   *round = _mm256_castsi128_si256(_mm_load_si128((const __m128i *)round_ptr));
   *round = _mm256_permute4x64_epi64(*round, 0x54);
+  if (log_scale > 0) {
+    const __m256i rnd = _mm256_set1_epi16((int16_t)(1 << (log_scale - 1)));
+    *round = _mm256_add_epi16(*round, rnd);
+    *round = _mm256_srai_epi16(*round, log_scale);
+  }
 
   *quant = _mm256_castsi128_si256(_mm_load_si128((const __m128i *)quant_ptr));
   *quant = _mm256_permute4x64_epi64(*quant, 0x54);
@@ -151,13 +161,13 @@ void vpx_quantize_b_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                          uint16_t *eob_ptr, const int16_t *scan,
                          const int16_t *iscan) {
   __m256i v_zbin, v_round, v_quant, v_dequant, v_quant_shift, v_nz_mask;
-  __m256i v_eobmax = _mm256_set1_epi16(0);
+  __m256i v_eobmax = _mm256_setzero_si256();
   intptr_t count;
   (void)scan;
 
   load_b_values_avx2(zbin_ptr, &v_zbin, round_ptr, &v_round, quant_ptr,
                      &v_quant, dequant_ptr, &v_dequant, quant_shift_ptr,
-                     &v_quant_shift);
+                     &v_quant_shift, 0);
   // Do DC and first 15 AC.
   v_nz_mask = quantize_b_16(coeff_ptr, qcoeff_ptr, dqcoeff_ptr, &v_quant,
                             &v_dequant, &v_round, &v_zbin, &v_quant_shift);
@@ -183,3 +193,104 @@ void vpx_quantize_b_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
 
   *eob_ptr = accumulate_eob256(v_eobmax);
 }
+
+static VPX_FORCE_INLINE __m256i quantize_b_32x32_16(
+    const tran_low_t *coeff_ptr, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, const int16_t *iscan, __m256i *v_quant,
+    __m256i *v_dequant, __m256i *v_round, __m256i *v_zbin,
+    __m256i *v_quant_shift, __m256i *v_eobmax) {
+  const __m256i v_coeff = load_coefficients_avx2(coeff_ptr);
+  const __m256i v_abs_coeff = _mm256_abs_epi16(v_coeff);
+  const __m256i v_zbin_mask = _mm256_cmpgt_epi16(v_abs_coeff, *v_zbin);
+
+  if (_mm256_movemask_epi8(v_zbin_mask) == 0) {
+    _mm256_store_si256((__m256i *)qcoeff_ptr, _mm256_setzero_si256());
+    _mm256_store_si256((__m256i *)dqcoeff_ptr, _mm256_setzero_si256());
+#if CONFIG_VP9_HIGHBITDEPTH
+    _mm256_store_si256((__m256i *)(qcoeff_ptr + 8), _mm256_setzero_si256());
+    _mm256_store_si256((__m256i *)(dqcoeff_ptr + 8), _mm256_setzero_si256());
+#endif
+    return *v_eobmax;
+  }
+  {
+    // tmp = v_zbin_mask ? (int64_t)abs_coeff + round : 0
+    const __m256i v_tmp_rnd =
+        _mm256_and_si256(_mm256_adds_epi16(v_abs_coeff, *v_round), v_zbin_mask);
+    // tmp32 = (int)(((((tmp * quant_ptr[rc != 0]) >> 16) + tmp) *
+    //                quant_shift_ptr[rc != 0]) >> 15);
+    const __m256i v_tmp32_a = _mm256_mulhi_epi16(v_tmp_rnd, *v_quant);
+    const __m256i v_tmp32_b = _mm256_add_epi16(v_tmp32_a, v_tmp_rnd);
+    const __m256i v_tmp32_hi =
+        _mm256_slli_epi16(_mm256_mulhi_epi16(v_tmp32_b, *v_quant_shift), 1);
+    const __m256i v_tmp32_lo =
+        _mm256_srli_epi16(_mm256_mullo_epi16(v_tmp32_b, *v_quant_shift), 15);
+    const __m256i v_tmp32 = _mm256_or_si256(v_tmp32_hi, v_tmp32_lo);
+    const __m256i v_qcoeff = _mm256_sign_epi16(v_tmp32, v_coeff);
+    const __m256i v_sign_lo =
+        _mm256_unpacklo_epi16(_mm256_setzero_si256(), v_coeff);
+    const __m256i v_sign_hi =
+        _mm256_unpackhi_epi16(_mm256_setzero_si256(), v_coeff);
+    const __m256i low = _mm256_mullo_epi16(v_tmp32, *v_dequant);
+    const __m256i high = _mm256_mulhi_epi16(v_tmp32, *v_dequant);
+    const __m256i v_dqcoeff_lo = _mm256_sign_epi32(
+        _mm256_srli_epi32(_mm256_unpacklo_epi16(low, high), 1), v_sign_lo);
+    const __m256i v_dqcoeff_hi = _mm256_sign_epi32(
+        _mm256_srli_epi32(_mm256_unpackhi_epi16(low, high), 1), v_sign_hi);
+    const __m256i v_nz_mask =
+        _mm256_cmpgt_epi16(v_tmp32, _mm256_setzero_si256());
+
+    store_coefficients_avx2(v_qcoeff, qcoeff_ptr);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+    _mm256_storeu_si256((__m256i *)(dqcoeff_ptr), v_dqcoeff_lo);
+    _mm256_storeu_si256((__m256i *)(dqcoeff_ptr + 8), v_dqcoeff_hi);
+#else
+    store_coefficients_avx2(_mm256_packs_epi32(v_dqcoeff_lo, v_dqcoeff_hi),
+                            dqcoeff_ptr);
+#endif
+
+    return get_max_lane_eob(iscan, *v_eobmax, v_nz_mask);
+  }
+}
+
+void vpx_quantize_b_32x32_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                               const int16_t *zbin_ptr,
+                               const int16_t *round_ptr,
+                               const int16_t *quant_ptr,
+                               const int16_t *quant_shift_ptr,
+                               tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                               const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                               const int16_t *scan, const int16_t *iscan) {
+  __m256i v_zbin, v_round, v_quant, v_dequant, v_quant_shift;
+  __m256i v_eobmax = _mm256_setzero_si256();
+  intptr_t count;
+  (void)n_coeffs;
+  (void)scan;
+
+  load_b_values_avx2(zbin_ptr, &v_zbin, round_ptr, &v_round, quant_ptr,
+                     &v_quant, dequant_ptr, &v_dequant, quant_shift_ptr,
+                     &v_quant_shift, 1);
+
+  // Do DC and first 15 AC.
+  v_eobmax = quantize_b_32x32_16(coeff_ptr, qcoeff_ptr, dqcoeff_ptr, iscan,
+                                 &v_quant, &v_dequant, &v_round, &v_zbin,
+                                 &v_quant_shift, &v_eobmax);
+
+  v_round = _mm256_unpackhi_epi64(v_round, v_round);
+  v_quant = _mm256_unpackhi_epi64(v_quant, v_quant);
+  v_dequant = _mm256_unpackhi_epi64(v_dequant, v_dequant);
+  v_quant_shift = _mm256_unpackhi_epi64(v_quant_shift, v_quant_shift);
+  v_zbin = _mm256_unpackhi_epi64(v_zbin, v_zbin);
+
+  for (count = (32 * 32) - 16; count > 0; count -= 16) {
+    coeff_ptr += 16;
+    qcoeff_ptr += 16;
+    dqcoeff_ptr += 16;
+    iscan += 16;
+    v_eobmax = quantize_b_32x32_16(coeff_ptr, qcoeff_ptr, dqcoeff_ptr, iscan,
+                                   &v_quant, &v_dequant, &v_round, &v_zbin,
+                                   &v_quant_shift, &v_eobmax);
+  }
+
+  *eob_ptr = accumulate_eob256(v_eobmax);
+}
```
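
A straightforward way to sanity-check the new kernel is to run it against the C reference on identical inputs and require bit-exact output, along the lines of libvpx's quantize tests (test/vp9_quantize_test.cc). The sketch below is illustrative and not part of this commit: the helper name is hypothetical, and it assumes an AVX2-capable build with quantizer tables and scan/iscan supplied by the caller.

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

#include "./vpx_dsp_rtcd.h"          /* vpx_quantize_b_32x32_{c,avx2}() */
#include "vpx_dsp/vpx_dsp_common.h"  /* tran_low_t */
#include "vpx_ports/mem.h"           /* DECLARE_ALIGNED */

/* Hypothetical cross-check helper: quantize one 32x32 block with both paths
 * and require identical qcoeff, dqcoeff, and eob. Output buffers are 32-byte
 * aligned because the zero-block branch uses aligned stores. */
static void cross_check_quantize_b_32x32(
    const tran_low_t *coeff, const int16_t *zbin, const int16_t *round,
    const int16_t *quant, const int16_t *quant_shift, const int16_t *dequant,
    const int16_t *scan, const int16_t *iscan) {
  DECLARE_ALIGNED(32, tran_low_t, qcoeff_c[32 * 32]);
  DECLARE_ALIGNED(32, tran_low_t, dqcoeff_c[32 * 32]);
  DECLARE_ALIGNED(32, tran_low_t, qcoeff_simd[32 * 32]);
  DECLARE_ALIGNED(32, tran_low_t, dqcoeff_simd[32 * 32]);
  uint16_t eob_c = 0, eob_simd = 0;

  vpx_quantize_b_32x32_c(coeff, 32 * 32, zbin, round, quant, quant_shift,
                         qcoeff_c, dqcoeff_c, dequant, &eob_c, scan, iscan);
  vpx_quantize_b_32x32_avx2(coeff, 32 * 32, zbin, round, quant, quant_shift,
                            qcoeff_simd, dqcoeff_simd, dequant, &eob_simd,
                            scan, iscan);

  /* The SIMD path must match the C reference exactly. */
  assert(eob_c == eob_simd);
  assert(!memcmp(qcoeff_c, qcoeff_simd, sizeof(qcoeff_c)));
  assert(!memcmp(dqcoeff_c, dqcoeff_simd, sizeof(dqcoeff_c)));
}
```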