From 93166c5e519ecbb8477e80abb4acb1420e5cc2c4 Mon Sep 17 00:00:00 2001
From: Johann
Date: Tue, 8 Aug 2017 14:05:16 -0700
Subject: neon: vpx_quantize_b_32x32

With skip_block set, the NEON version is about twice as fast as C.

The NEON version has no shortcut for coeff < zbin, so it always takes
the same amount of time. Even when the C version can take that
shortcut, NEON is over twice as fast; when it cannot, the gap grows to
over 10x.

BUG=webm:1426

Change-Id: I400722146c1b5a5f6289f67d85fd642463d2bfc6
---
 test/vp9_quantize_test.cc    |  19 +++--
 vpx_dsp/arm/quantize_neon.c  | 175 ++++++++++++++++++++++++++++++++++++++++++-
 vpx_dsp/vpx_dsp_rtcd_defs.pl |   2 +-
 3 files changed, 186 insertions(+), 10 deletions(-)

diff --git a/test/vp9_quantize_test.cc b/test/vp9_quantize_test.cc
index 7ec8f585b..e3985357c 100644
--- a/test/vp9_quantize_test.cc
+++ b/test/vp9_quantize_test.cc
@@ -358,15 +358,18 @@ INSTANTIATE_TEST_CASE_P(DISABLED_AVX, VP9QuantizeTest,
 
 // TODO(webm:1448): dqcoeff is not handled correctly in HBD builds.
 #if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH
-INSTANTIATE_TEST_CASE_P(NEON, VP9QuantizeTest,
-                        ::testing::Values(make_tuple(&vpx_quantize_b_neon,
-                                                     &vpx_quantize_b_c,
-                                                     VPX_BITS_8, 16)));
+INSTANTIATE_TEST_CASE_P(
+    NEON, VP9QuantizeTest,
+    ::testing::Values(make_tuple(&vpx_quantize_b_neon, &vpx_quantize_b_c,
+                                 VPX_BITS_8, 16),
+                      make_tuple(&vpx_quantize_b_32x32_neon,
+                                 &vpx_quantize_b_32x32_c, VPX_BITS_8, 32)));
 #endif  // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH
 
 // Only useful to compare "Speed" test results.
-INSTANTIATE_TEST_CASE_P(DISABLED_C, VP9QuantizeTest,
-                        ::testing::Values(make_tuple(&vpx_quantize_b_c,
-                                                     &vpx_quantize_b_c,
-                                                     VPX_BITS_8, 16)));
+INSTANTIATE_TEST_CASE_P(
+    DISABLED_C, VP9QuantizeTest,
+    ::testing::Values(
+        make_tuple(&vpx_quantize_b_c, &vpx_quantize_b_c, VPX_BITS_8, 16),
+        make_tuple(&vpx_quantize_b_c, &vpx_quantize_b_c, VPX_BITS_8, 32)));
 }  // namespace
diff --git a/vpx_dsp/arm/quantize_neon.c b/vpx_dsp/arm/quantize_neon.c
index 8bed05703..e62933aaa 100644
--- a/vpx_dsp/arm/quantize_neon.c
+++ b/vpx_dsp/arm/quantize_neon.c
@@ -99,7 +99,7 @@ void vpx_quantize_b_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
   const int16x8_t dequant = vdupq_n_s16(dequant_ptr[1]);
 
   do {
-    // Add one because the eob is not it's index.
+    // Add one because the eob is not its index.
     const uint16x8_t iscan =
         vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan_ptr), one));
 
@@ -153,3 +153,176 @@ void vpx_quantize_b_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
     vst1_lane_u16(eob_ptr, eob_max_2, 0);
   }
 }
+
+// Main difference is that zbin values are halved before comparison and dqcoeff
+// values are divided by 2. zbin is rounded but dqcoeff is not.
+void vpx_quantize_b_32x32_neon(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
+    const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
+    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+    const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+  const int16x8_t zero = vdupq_n_s16(0);
+  const int16x8_t one = vdupq_n_s16(1);
+  const int16x8_t neg_one = vdupq_n_s16(-1);
+  uint16x8_t eob_max;
+  int i;
+  (void)scan_ptr;
+  (void)n_coeffs;  // Because we will always calculate 32*32.
+
+  if (skip_block) {
+    for (i = 0; i < 32 * 32 / 8; ++i) {
+      store_s16q_to_tran_low(qcoeff_ptr, zero);
+      store_s16q_to_tran_low(dqcoeff_ptr, zero);
+      qcoeff_ptr += 8;
+      dqcoeff_ptr += 8;
+    }
+    *eob_ptr = 0;
+    return;
+  }
+
+  // Process first 8 values which include a dc component.
+  {
+    // Only the first element of each vector is DC.
+    const int16x8_t zbin = vrshrq_n_s16(vld1q_s16(zbin_ptr), 1);
+    const int16x8_t round = vrshrq_n_s16(vld1q_s16(round_ptr), 1);
+    const int16x8_t quant = vld1q_s16(quant_ptr);
+    const int16x8_t quant_shift = vld1q_s16(quant_shift_ptr);
+    const int16x8_t dequant = vld1q_s16(dequant_ptr);
+    // Add one because the eob does not index from 0.
+    const uint16x8_t iscan =
+        vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan_ptr), one));
+
+    const int16x8_t coeff = load_tran_low_to_s16q(coeff_ptr);
+    const int16x8_t coeff_sign = vshrq_n_s16(coeff, 15);
+    const int16x8_t coeff_abs = vabsq_s16(coeff);
+
+    const int16x8_t zbin_mask =
+        vreinterpretq_s16_u16(vcgeq_s16(coeff_abs, zbin));
+
+    const int16x8_t rounded = vqaddq_s16(coeff_abs, round);
+
+    // (round * quant * 2) >> 16 >> 1 == (round * quant) >> 16
+    int16x8_t qcoeff = vshrq_n_s16(vqdmulhq_s16(rounded, quant), 1);
+    int16x8_t dqcoeff;
+    int32x4_t dqcoeff_0, dqcoeff_1, dqcoeff_0_sign, dqcoeff_1_sign;
+
+    qcoeff = vaddq_s16(qcoeff, rounded);
+
+    // (qcoeff * quant_shift * 2) >> 16 == (qcoeff * quant_shift) >> 15
+    qcoeff = vqdmulhq_s16(qcoeff, quant_shift);
+
+    // Restore the sign bit.
+    qcoeff = veorq_s16(qcoeff, coeff_sign);
+    qcoeff = vsubq_s16(qcoeff, coeff_sign);
+
+    qcoeff = vandq_s16(qcoeff, zbin_mask);
+
+    // Set non-zero elements to -1 and use that to extract values for eob.
+    eob_max = vandq_u16(vtstq_s16(qcoeff, neg_one), iscan);
+
+    coeff_ptr += 8;
+    iscan_ptr += 8;
+
+    store_s16q_to_tran_low(qcoeff_ptr, qcoeff);
+    qcoeff_ptr += 8;
+
+    dqcoeff_0 = vmull_s16(vget_low_s16(qcoeff), vget_low_s16(dequant));
+    dqcoeff_1 = vmull_s16(vget_high_s16(qcoeff), vget_high_s16(dequant));
+
+    // The way the C shifts the values requires us to convert to positive before
+    // shifting or even narrowing, then put the sign back.
+    dqcoeff_0_sign = vshrq_n_s32(dqcoeff_0, 31);
+    dqcoeff_1_sign = vshrq_n_s32(dqcoeff_1, 31);
+    dqcoeff_0 = vabsq_s32(dqcoeff_0);
+    dqcoeff_1 = vabsq_s32(dqcoeff_1);
+    dqcoeff_0 = vshrq_n_s32(dqcoeff_0, 1);
+    dqcoeff_1 = vshrq_n_s32(dqcoeff_1, 1);
+    dqcoeff_0 = veorq_s32(dqcoeff_0, dqcoeff_0_sign);
+    dqcoeff_1 = veorq_s32(dqcoeff_1, dqcoeff_1_sign);
+    dqcoeff_0 = vsubq_s32(dqcoeff_0, dqcoeff_0_sign);
+    dqcoeff_1 = vsubq_s32(dqcoeff_1, dqcoeff_1_sign);
+
+    // Narrow *without saturation* because that's what the C does.
+    dqcoeff = vcombine_s16(vmovn_s32(dqcoeff_0), vmovn_s32(dqcoeff_1));
+
+    store_s16q_to_tran_low(dqcoeff_ptr, dqcoeff);
+    dqcoeff_ptr += 8;
+  }
+
+  {
+    const int16x8_t zbin = vrshrq_n_s16(vdupq_n_s16(zbin_ptr[1]), 1);
+    const int16x8_t round = vrshrq_n_s16(vdupq_n_s16(round_ptr[1]), 1);
+    const int16x8_t quant = vdupq_n_s16(quant_ptr[1]);
+    const int16x8_t quant_shift = vdupq_n_s16(quant_shift_ptr[1]);
+    const int16x8_t dequant = vdupq_n_s16(dequant_ptr[1]);
+
+    for (i = 1; i < 32 * 32 / 8; ++i) {
+      // Add one because the eob is not its index.
+      const uint16x8_t iscan =
+          vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan_ptr), one));
+
+      const int16x8_t coeff = load_tran_low_to_s16q(coeff_ptr);
+      const int16x8_t coeff_sign = vshrq_n_s16(coeff, 15);
+      const int16x8_t coeff_abs = vabsq_s16(coeff);
+
+      const int16x8_t zbin_mask =
+          vreinterpretq_s16_u16(vcgeq_s16(coeff_abs, zbin));
+
+      const int16x8_t rounded = vqaddq_s16(coeff_abs, round);
+
+      // (round * quant * 2) >> 16 >> 1 == (round * quant) >> 16
+      int16x8_t qcoeff = vshrq_n_s16(vqdmulhq_s16(rounded, quant), 1);
+      int16x8_t dqcoeff;
+      int32x4_t dqcoeff_0, dqcoeff_1, dqcoeff_0_sign, dqcoeff_1_sign;
+
+      qcoeff = vaddq_s16(qcoeff, rounded);
+
+      // (qcoeff * quant_shift * 2) >> 16 == (qcoeff * quant_shift) >> 15
+      qcoeff = vqdmulhq_s16(qcoeff, quant_shift);
+
+      // Restore the sign bit.
+      qcoeff = veorq_s16(qcoeff, coeff_sign);
+      qcoeff = vsubq_s16(qcoeff, coeff_sign);
+
+      qcoeff = vandq_s16(qcoeff, zbin_mask);
+
+      // Set non-zero elements to -1 and use that to extract values for eob.
+      eob_max =
+          vmaxq_u16(eob_max, vandq_u16(vtstq_s16(qcoeff, neg_one), iscan));
+
+      coeff_ptr += 8;
+      iscan_ptr += 8;
+
+      store_s16q_to_tran_low(qcoeff_ptr, qcoeff);
+      qcoeff_ptr += 8;
+
+      dqcoeff_0 = vmull_s16(vget_low_s16(qcoeff), vget_low_s16(dequant));
+      dqcoeff_1 = vmull_s16(vget_high_s16(qcoeff), vget_high_s16(dequant));
+
+      dqcoeff_0_sign = vshrq_n_s32(dqcoeff_0, 31);
+      dqcoeff_1_sign = vshrq_n_s32(dqcoeff_1, 31);
+      dqcoeff_0 = vabsq_s32(dqcoeff_0);
+      dqcoeff_1 = vabsq_s32(dqcoeff_1);
+      dqcoeff_0 = vshrq_n_s32(dqcoeff_0, 1);
+      dqcoeff_1 = vshrq_n_s32(dqcoeff_1, 1);
+      dqcoeff_0 = veorq_s32(dqcoeff_0, dqcoeff_0_sign);
+      dqcoeff_1 = veorq_s32(dqcoeff_1, dqcoeff_1_sign);
+      dqcoeff_0 = vsubq_s32(dqcoeff_0, dqcoeff_0_sign);
+      dqcoeff_1 = vsubq_s32(dqcoeff_1, dqcoeff_1_sign);
+
+      dqcoeff = vcombine_s16(vmovn_s32(dqcoeff_0), vmovn_s32(dqcoeff_1));
+
+      store_s16q_to_tran_low(dqcoeff_ptr, dqcoeff);
+      dqcoeff_ptr += 8;
+    }
+  }
+
+  {
+    const uint16x4_t eob_max_0 =
+        vmax_u16(vget_low_u16(eob_max), vget_high_u16(eob_max));
+    const uint16x4_t eob_max_1 = vpmax_u16(eob_max_0, eob_max_0);
+    const uint16x4_t eob_max_2 = vpmax_u16(eob_max_1, eob_max_1);
+    vst1_lane_u16(eob_ptr, eob_max_2, 0);
+  }
+}
diff --git a/vpx_dsp/vpx_dsp_rtcd_defs.pl b/vpx_dsp/vpx_dsp_rtcd_defs.pl
index 498e93797..c5424741d 100644
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -673,7 +673,7 @@ if (vpx_config("CONFIG_VP9_ENCODER") eq "yes") {
   specialize qw/vpx_quantize_b neon sse2/, "$ssse3_x86_64", "$avx_x86_64";
 
   add_proto qw/void vpx_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-  specialize qw/vpx_quantize_b_32x32/, "$ssse3_x86_64", "$avx_x86_64";
+  specialize qw/vpx_quantize_b_32x32 neon/, "$ssse3_x86_64", "$avx_x86_64";
 
   if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
     add_proto qw/void vpx_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-- 
cgit v1.2.3
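For readers comparing the NEON above against its reference, the per-coefficient
arithmetic of the 32x32 quantizer can be sketched in scalar C as below. This is
an illustrative reconstruction, not part of the patch: the function and helper
names (quantize_one_32x32, clamp16) are invented here, and the authoritative
reference is vpx_quantize_b_32x32_c() in vpx_dsp/quantize.c.

#include <stdint.h>

/* Clamp a 32-bit value to the int16_t range, analogous to the saturating
 * vqaddq_s16 in the NEON code. */
static int32_t clamp16(int32_t x) {
  if (x < INT16_MIN) return INT16_MIN;
  if (x > INT16_MAX) return INT16_MAX;
  return x;
}

/* Quantize a single coefficient. k selects the DC (0) or AC (1) parameter. */
static void quantize_one_32x32(int16_t coeff, int k, const int16_t *zbin,
                               const int16_t *round, const int16_t *quant,
                               const int16_t *quant_shift,
                               const int16_t *dequant, int16_t *qcoeff,
                               int16_t *dqcoeff) {
  const int32_t sign = coeff < 0 ? -1 : 0;
  const int32_t abs_coeff = (coeff ^ sign) - sign;

  /* zbin and round are halved *with* rounding, matching the
   * vrshrq_n_s16(x, 1) calls above. */
  const int32_t zbin_half = (zbin[k] + 1) >> 1;
  const int32_t round_half = (round[k] + 1) >> 1;

  *qcoeff = 0;
  *dqcoeff = 0;
  if (abs_coeff >= zbin_half) { /* the vcgeq_s16 mask in the NEON code */
    int32_t tmp = clamp16(abs_coeff + round_half);
    /* vqdmulhq_s16(a, b) computes (a * b * 2) >> 16, hence the extra >> 1
     * after the first multiply and the >> 15 on the second. */
    tmp = ((((tmp * quant[k]) >> 16) + tmp) * quant_shift[k]) >> 15;
    *qcoeff = (int16_t)((tmp ^ sign) - sign);
    /* dqcoeff is halved *without* rounding; C's "/ 2" truncates toward
     * zero, while a plain >> 1 on a negative value rounds toward negative
     * infinity. That mismatch is why the NEON strips the sign with
     * vabsq_s32 before vshrq_n_s32 and restores it afterward. */
    *dqcoeff = (int16_t)(*qcoeff * dequant[k] / 2);
  }
}

The final narrowing to int16_t deliberately does not saturate, mirroring the
vmovn_s32 (rather than vqmovn_s32) used in the patch.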