author    Johann Koenig <johannkoenig@google.com>    2018-12-05 18:20:29 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com>    2018-12-05 18:20:29 +0000
commit    08f281ef0e70c662bbaf67bb7f5300103fa00e27 (patch)
tree      48f38f9b750e2e0e43a1fe7a665209c5da1844a4 /vpx_dsp
parent    fc5d16678294a405de44da418474b119965dfb0f (diff)
parent    26dbf9eba8cbbd0d57a87525ff28103dac229325 (diff)
Merge "quantize neon: fix hbd builds"
Diffstat (limited to 'vpx_dsp')
-rw-r--r--  vpx_dsp/arm/quantize_neon.c | 73
1 file changed, 42 insertions, 31 deletions
diff --git a/vpx_dsp/arm/quantize_neon.c b/vpx_dsp/arm/quantize_neon.c
index b5d1e7ecb..adef5f6e1 100644
--- a/vpx_dsp/arm/quantize_neon.c
+++ b/vpx_dsp/arm/quantize_neon.c
@@ -15,6 +15,22 @@
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/arm/mem_neon.h"
+static INLINE void calculate_dqcoeff_and_store(const int16x8_t qcoeff,
+ const int16x8_t dequant,
+ tran_low_t *dqcoeff) {
+ const int32x4_t dqcoeff_0 =
+ vmull_s16(vget_low_s16(qcoeff), vget_low_s16(dequant));
+ const int32x4_t dqcoeff_1 =
+ vmull_s16(vget_high_s16(qcoeff), vget_high_s16(dequant));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+ vst1q_s32(dqcoeff, dqcoeff_0);
+ vst1q_s32(dqcoeff + 4, dqcoeff_1);
+#else
+ vst1q_s16(dqcoeff, vcombine_s16(vmovn_s32(dqcoeff_0), vmovn_s32(dqcoeff_1)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+}
+
void vpx_quantize_b_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
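
The reason the new helper needs two store paths: in libvpx, tran_low_t is 32 bits
wide when CONFIG_VP9_HIGHBITDEPTH is enabled and 16 bits otherwise. The old code
multiplied with vmulq_s16, which keeps only the low 16 bits of each product; that
is not enough once tran_low_t is 32 bits. Below is a minimal scalar sketch of what
the helper computes. The function name and loop are illustrative only, not part of
the patch.

#include <stdint.h>

#if CONFIG_VP9_HIGHBITDEPTH
typedef int32_t tran_low_t; /* matches the libvpx definition */
#else
typedef int16_t tran_low_t;
#endif

/* Hypothetical scalar reference for one 8-lane block. */
static void dqcoeff_and_store_scalar(const int16_t *qcoeff,
                                     const int16_t *dequant,
                                     tran_low_t *dqcoeff) {
  int i;
  for (i = 0; i < 8; ++i) {
    /* Widening multiply, as vmull_s16 does per lane. */
    const int32_t product = (int32_t)qcoeff[i] * dequant[i];
#if CONFIG_VP9_HIGHBITDEPTH
    dqcoeff[i] = product;          /* vst1q_s32: keep all 32 bits */
#else
    dqcoeff[i] = (int16_t)product; /* vmovn_s32: narrow to 16 bits */
#endif
  }
}
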
@@ -73,9 +89,7 @@ void vpx_quantize_b_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
store_s16q_to_tran_low(qcoeff_ptr, qcoeff);
qcoeff_ptr += 8;
- qcoeff = vmulq_s16(qcoeff, dequant);
-
- store_s16q_to_tran_low(dqcoeff_ptr, qcoeff);
+ calculate_dqcoeff_and_store(qcoeff, dequant, dqcoeff_ptr);
dqcoeff_ptr += 8;
}
@@ -126,9 +140,7 @@ void vpx_quantize_b_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
store_s16q_to_tran_low(qcoeff_ptr, qcoeff);
qcoeff_ptr += 8;
- qcoeff = vmulq_s16(qcoeff, dequant);
-
- store_s16q_to_tran_low(dqcoeff_ptr, qcoeff);
+ calculate_dqcoeff_and_store(qcoeff, dequant, dqcoeff_ptr);
dqcoeff_ptr += 8;
n_coeffs -= 8;
@@ -152,6 +164,28 @@ static INLINE int32x4_t extract_sign_bit(int32x4_t a) {
return vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_s32(a), 31));
}
+static INLINE void calculate_dqcoeff_and_store_32x32(const int16x8_t qcoeff,
+ const int16x8_t dequant,
+ tran_low_t *dqcoeff) {
+ int32x4_t dqcoeff_0 = vmull_s16(vget_low_s16(qcoeff), vget_low_s16(dequant));
+ int32x4_t dqcoeff_1 =
+ vmull_s16(vget_high_s16(qcoeff), vget_high_s16(dequant));
+
+ // Add 1 if negative to round towards zero because the C uses division.
+ dqcoeff_0 = vaddq_s32(dqcoeff_0, extract_sign_bit(dqcoeff_0));
+ dqcoeff_1 = vaddq_s32(dqcoeff_1, extract_sign_bit(dqcoeff_1));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+ dqcoeff_0 = vshrq_n_s32(dqcoeff_0, 1);
+ dqcoeff_1 = vshrq_n_s32(dqcoeff_1, 1);
+ vst1q_s32(dqcoeff, dqcoeff_0);
+ vst1q_s32(dqcoeff + 4, dqcoeff_1);
+#else
+ vst1q_s16(dqcoeff,
+ vcombine_s16(vshrn_n_s32(dqcoeff_0, 1), vshrn_n_s32(dqcoeff_1, 1)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+}
+
// Main difference is that zbin values are halved before comparison and dqcoeff
// values are divided by 2. zbin is rounded but dqcoeff is not.
void vpx_quantize_b_32x32_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
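
The 32x32 helper has to halve qcoeff * dequant the way the C reference does, with
integer division that truncates toward zero, while a plain arithmetic right shift
rounds toward negative infinity. Adding the extracted sign bit before the shift
closes that gap; per the source comment above, zbin is halved with rounding but
dqcoeff is not. A standalone scalar check of the trick, with hypothetical names
(the shift of a negative value is implementation-defined in ISO C but arithmetic
on the targets libvpx supports):

#include <assert.h>
#include <stdint.h>

/* Scalar model of extract_sign_bit + vaddq_s32 + shift: divide by 2,
 * truncating toward zero like C's x / 2. */
static int32_t div2_trunc(int32_t x) {
  const int32_t sign = (int32_t)((uint32_t)x >> 31); /* 1 iff x < 0 */
  return (x + sign) >> 1;
}

int main(void) {
  assert((-7) / 2 == -3);       /* C division truncates toward zero */
  assert((-7) >> 1 == -4);      /* a bare arithmetic shift floors */
  assert(div2_trunc(-7) == -3); /* sign-bit fixup matches division */
  assert(div2_trunc(6) == 3 && div2_trunc(-6) == -3);
  return 0;
}
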
@@ -194,8 +228,6 @@ void vpx_quantize_b_32x32_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
// (round * quant * 2) >> 16 >> 1 == (round * quant) >> 16
int16x8_t qcoeff = vshrq_n_s16(vqdmulhq_s16(rounded, quant), 1);
- int16x8_t dqcoeff;
- int32x4_t dqcoeff_0, dqcoeff_1;
qcoeff = vaddq_s16(qcoeff, rounded);
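
The comment above relies on a fixed-point identity: vqdmulhq_s16 returns the
saturating doubling high half, (a * b * 2) >> 16, so following it with >> 1 gives
(a * b) >> 16 exactly (the doubling can saturate only when both inputs are
INT16_MIN). A scalar spot check with arbitrary illustrative values:

#include <assert.h>
#include <stdint.h>

/* Scalar model of one lane of vqdmulhq_s16, ignoring the
 * INT16_MIN * INT16_MIN saturation corner. */
static int16_t sqdmulh(int16_t a, int16_t b) {
  return (int16_t)((2 * (int32_t)a * b) >> 16);
}

int main(void) {
  const int16_t round = 1234, quant = 5678;
  const int16_t direct = (int16_t)(((int32_t)round * quant) >> 16);
  /* (round * quant * 2) >> 16 >> 1 == (round * quant) >> 16 */
  assert((sqdmulh(round, quant) >> 1) == direct);
  assert((sqdmulh(-1234, 5678) >> 1) ==
         (int16_t)(((int32_t)-1234 * 5678) >> 16));
  return 0;
}
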
@@ -217,17 +249,7 @@ void vpx_quantize_b_32x32_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
store_s16q_to_tran_low(qcoeff_ptr, qcoeff);
qcoeff_ptr += 8;
- dqcoeff_0 = vmull_s16(vget_low_s16(qcoeff), vget_low_s16(dequant));
- dqcoeff_1 = vmull_s16(vget_high_s16(qcoeff), vget_high_s16(dequant));
-
- // Add 1 if negative to round towards zero because the C uses division.
- dqcoeff_0 = vaddq_s32(dqcoeff_0, extract_sign_bit(dqcoeff_0));
- dqcoeff_1 = vaddq_s32(dqcoeff_1, extract_sign_bit(dqcoeff_1));
-
- dqcoeff =
- vcombine_s16(vshrn_n_s32(dqcoeff_0, 1), vshrn_n_s32(dqcoeff_1, 1));
-
- store_s16q_to_tran_low(dqcoeff_ptr, dqcoeff);
+ calculate_dqcoeff_and_store_32x32(qcoeff, dequant, dqcoeff_ptr);
dqcoeff_ptr += 8;
}
@@ -254,8 +276,6 @@ void vpx_quantize_b_32x32_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
// (round * quant * 2) >> 16 >> 1 == (round * quant) >> 16
int16x8_t qcoeff = vshrq_n_s16(vqdmulhq_s16(rounded, quant), 1);
- int16x8_t dqcoeff;
- int32x4_t dqcoeff_0, dqcoeff_1;
qcoeff = vaddq_s16(qcoeff, rounded);
@@ -278,16 +298,7 @@ void vpx_quantize_b_32x32_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
store_s16q_to_tran_low(qcoeff_ptr, qcoeff);
qcoeff_ptr += 8;
- dqcoeff_0 = vmull_s16(vget_low_s16(qcoeff), vget_low_s16(dequant));
- dqcoeff_1 = vmull_s16(vget_high_s16(qcoeff), vget_high_s16(dequant));
-
- dqcoeff_0 = vaddq_s32(dqcoeff_0, extract_sign_bit(dqcoeff_0));
- dqcoeff_1 = vaddq_s32(dqcoeff_1, extract_sign_bit(dqcoeff_1));
-
- dqcoeff =
- vcombine_s16(vshrn_n_s32(dqcoeff_0, 1), vshrn_n_s32(dqcoeff_1, 1));
-
- store_s16q_to_tran_low(dqcoeff_ptr, dqcoeff);
+ calculate_dqcoeff_and_store_32x32(qcoeff, dequant, dqcoeff_ptr);
dqcoeff_ptr += 8;
}
}