path: root/vp10/encoder/arm
author    Jingning Han <jingning@google.com>  2015-08-05 19:00:31 -0700
committer Jingning Han <jingning@google.com>  2015-08-11 17:05:28 -0700
commit    3ee6db6c8110680c051fe7a4dca97bb27474ca00 (patch)
tree      e05e37dfc10ed26a4fdaee5551b13e2d85a7a1b2 /vp10/encoder/arm
parent    b04dad328c33874ae1eda72c73079519935a3feb (diff)
Fork VP9 and VP10 codebase
This commit forks the VP9 and VP10 codebase and makes libvpx support
VP8, VP9, and VP10.

Change-Id: I81782e0b809acb3c9844bee8c8ec8f4d5e8fa356
Diffstat (limited to 'vp10/encoder/arm')
-rw-r--r--  vp10/encoder/arm/neon/vp9_avg_neon.c       | 160
-rw-r--r--  vp10/encoder/arm/neon/vp9_dct_neon.c       |  36
-rw-r--r--  vp10/encoder/arm/neon/vp9_error_neon.c     |  41
-rw-r--r--  vp10/encoder/arm/neon/vp9_quantize_neon.c  | 118
4 files changed, 355 insertions(+), 0 deletions(-)
diff --git a/vp10/encoder/arm/neon/vp9_avg_neon.c b/vp10/encoder/arm/neon/vp9_avg_neon.c
new file mode 100644
index 000000000..c4ec5c4a7
--- /dev/null
+++ b/vp10/encoder/arm/neon/vp9_avg_neon.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "./vp10_rtcd.h"
+#include "./vpx_config.h"
+
+#include "vpx/vpx_integer.h"
+
+static INLINE unsigned int horizontal_add_u16x8(const uint16x8_t v_16x8) {
+ const uint32x4_t a = vpaddlq_u16(v_16x8);
+ const uint64x2_t b = vpaddlq_u32(a);
+ const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
+ vreinterpret_u32_u64(vget_high_u64(b)));
+ return vget_lane_u32(c, 0);
+}
+
+unsigned int vp10_avg_8x8_neon(const uint8_t *s, int p) {
+ uint8x8_t v_s0 = vld1_u8(s);
+ const uint8x8_t v_s1 = vld1_u8(s + p);
+ uint16x8_t v_sum = vaddl_u8(v_s0, v_s1);
+
+ v_s0 = vld1_u8(s + 2 * p);
+ v_sum = vaddw_u8(v_sum, v_s0);
+
+ v_s0 = vld1_u8(s + 3 * p);
+ v_sum = vaddw_u8(v_sum, v_s0);
+
+ v_s0 = vld1_u8(s + 4 * p);
+ v_sum = vaddw_u8(v_sum, v_s0);
+
+ v_s0 = vld1_u8(s + 5 * p);
+ v_sum = vaddw_u8(v_sum, v_s0);
+
+ v_s0 = vld1_u8(s + 6 * p);
+ v_sum = vaddw_u8(v_sum, v_s0);
+
+ v_s0 = vld1_u8(s + 7 * p);
+ v_sum = vaddw_u8(v_sum, v_s0);
+
+ return (horizontal_add_u16x8(v_sum) + 32) >> 6;
+}
+
+void vp10_int_pro_row_neon(int16_t hbuf[16], uint8_t const *ref,
+ const int ref_stride, const int height) {
+ int i;
+ uint16x8_t vec_sum_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_hi = vdupq_n_u16(0);
+ const int shift_factor = ((height >> 5) + 3) * -1;
+ const int16x8_t vec_shift = vdupq_n_s16(shift_factor);
+
+ for (i = 0; i < height; i += 8) {
+ const uint8x16_t vec_row1 = vld1q_u8(ref);
+ const uint8x16_t vec_row2 = vld1q_u8(ref + ref_stride);
+ const uint8x16_t vec_row3 = vld1q_u8(ref + ref_stride * 2);
+ const uint8x16_t vec_row4 = vld1q_u8(ref + ref_stride * 3);
+ const uint8x16_t vec_row5 = vld1q_u8(ref + ref_stride * 4);
+ const uint8x16_t vec_row6 = vld1q_u8(ref + ref_stride * 5);
+ const uint8x16_t vec_row7 = vld1q_u8(ref + ref_stride * 6);
+ const uint8x16_t vec_row8 = vld1q_u8(ref + ref_stride * 7);
+
+ vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row1));
+ vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row1));
+
+ vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row2));
+ vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row2));
+
+ vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row3));
+ vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row3));
+
+ vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row4));
+ vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row4));
+
+ vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row5));
+ vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row5));
+
+ vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row6));
+ vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row6));
+
+ vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row7));
+ vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row7));
+
+ vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row8));
+ vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row8));
+
+ ref += ref_stride * 8;
+ }
+
+ vec_sum_lo = vshlq_u16(vec_sum_lo, vec_shift);
+ vec_sum_hi = vshlq_u16(vec_sum_hi, vec_shift);
+
+ vst1q_s16(hbuf, vreinterpretq_s16_u16(vec_sum_lo));
+ hbuf += 8;
+ vst1q_s16(hbuf, vreinterpretq_s16_u16(vec_sum_hi));
+}
+
+int16_t vp10_int_pro_col_neon(uint8_t const *ref, const int width) {
+ int i;
+ uint16x8_t vec_sum = vdupq_n_u16(0);
+
+ for (i = 0; i < width; i += 16) {
+ const uint8x16_t vec_row = vld1q_u8(ref);
+ vec_sum = vaddw_u8(vec_sum, vget_low_u8(vec_row));
+ vec_sum = vaddw_u8(vec_sum, vget_high_u8(vec_row));
+ ref += 16;
+ }
+
+ return horizontal_add_u16x8(vec_sum);
+}
+
+// ref, src = [0, 510]; each diff fits in 10 bits, per-lane sums in 16 bits.
+// bwl = {2, 3, 4}, width = {16, 32, 64}
+int vp10_vector_var_neon(int16_t const *ref, int16_t const *src, const int bwl) {
+ int width = 4 << bwl;
+ int32x4_t sse = vdupq_n_s32(0);
+ int16x8_t total = vdupq_n_s16(0);
+
+ assert(width >= 8);
+ assert((width % 8) == 0);
+
+ do {
+ const int16x8_t r = vld1q_s16(ref);
+ const int16x8_t s = vld1q_s16(src);
+ const int16x8_t diff = vsubq_s16(r, s); // [-510, 510], 10 bits.
+ const int16x4_t diff_lo = vget_low_s16(diff);
+ const int16x4_t diff_hi = vget_high_s16(diff);
+ sse = vmlal_s16(sse, diff_lo, diff_lo); // dynamic range 26 bits.
+ sse = vmlal_s16(sse, diff_hi, diff_hi);
+ total = vaddq_s16(total, diff); // dynamic range 16 bits.
+
+ ref += 8;
+ src += 8;
+ width -= 8;
+ } while (width != 0);
+
+ {
+ // Note: the pairwise addition of 'total' could be implemented like
+ // horizontal_add_u16x8(), but using one fewer vpaddl on 'total' and pairing
+ // it with the reduction of 'sse' performed better on a Cortex-A15.
+ const int32x4_t t0 = vpaddlq_s16(total); // cascading summation of 'total'
+ const int32x2_t t1 = vadd_s32(vget_low_s32(t0), vget_high_s32(t0));
+ const int32x2_t t2 = vpadd_s32(t1, t1);
+ const int t = vget_lane_s32(t2, 0);
+ const int64x2_t s0 = vpaddlq_s32(sse); // cascading summation of 'sse'.
+ const int32x2_t s1 = vadd_s32(vreinterpret_s32_s64(vget_low_s64(s0)),
+ vreinterpret_s32_s64(vget_high_s64(s0)));
+ const int s = vget_lane_s32(s1, 0);
+ const int shift_factor = bwl + 2;
+ return s - ((t * t) >> shift_factor);
+ }
+}
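
For reference, here is a minimal scalar sketch (not part of this patch; the
name vector_var_ref is hypothetical) of what vp10_vector_var_neon computes.
Since width = 4 << bwl = 1 << (bwl + 2), the final s - ((t * t) >> shift_factor)
is the usual variance identity sum(d^2) - (sum(d))^2 / width.

// Hypothetical scalar equivalent of vp10_vector_var_neon(); illustrative only.
static int vector_var_ref(const int16_t *ref, const int16_t *src,
                          const int bwl) {
  const int width = 4 << bwl;  // 16, 32 or 64
  int i, s = 0, t = 0;
  for (i = 0; i < width; ++i) {
    const int diff = ref[i] - src[i];  // [-510, 510]
    s += diff * diff;                  // sum of squared differences
    t += diff;                         // sum of differences
  }
  // width is a power of two (1 << (bwl + 2)), so the shift divides exactly,
  // matching the (t * t) >> shift_factor step in the NEON path above.
  return s - ((t * t) >> (bwl + 2));
}
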
diff --git a/vp10/encoder/arm/neon/vp9_dct_neon.c b/vp10/encoder/arm/neon/vp9_dct_neon.c
new file mode 100644
index 000000000..c91178250
--- /dev/null
+++ b/vp10/encoder/arm/neon/vp9_dct_neon.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vp10_rtcd.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+
+#include "vp10/common/vp9_blockd.h"
+#include "vpx_dsp/txfm_common.h"
+
+void vp10_fdct8x8_quant_neon(const int16_t *input, int stride,
+ int16_t* coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t* zbin_ptr,
+ const int16_t* round_ptr, const int16_t* quant_ptr,
+ const int16_t* quant_shift_ptr,
+ int16_t* qcoeff_ptr, int16_t* dqcoeff_ptr,
+ const int16_t* dequant_ptr, uint16_t* eob_ptr,
+ const int16_t* scan_ptr,
+ const int16_t* iscan_ptr) {
+ int16_t temp_buffer[64];
+ (void)coeff_ptr;
+
+ vpx_fdct8x8_neon(input, temp_buffer, stride);
+ vp10_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, zbin_ptr, round_ptr,
+ quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr,
+ dequant_ptr, eob_ptr, scan_ptr, iscan_ptr);
+}
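
A hypothetical call-site sketch (buffer and table names are illustrative, not
from this patch): the fused helper is equivalent to running the forward 8x8
DCT into a scratch buffer and then applying the fp quantizer to its output.

// Illustrative only: how the fused transform+quantize path might be invoked.
int16_t coeff[64], qcoeff[64], dqcoeff[64];
uint16_t eob;
// coeff is ignored by this NEON version (the DCT output stays in an internal
// temp buffer) but is kept for signature compatibility.
vp10_fdct8x8_quant_neon(src_diff, diff_stride, coeff, 64, 0 /* skip_block */,
                        zbin, round_fp, quant_fp, quant_shift,
                        qcoeff, dqcoeff, dequant, &eob, scan, iscan);
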
diff --git a/vp10/encoder/arm/neon/vp9_error_neon.c b/vp10/encoder/arm/neon/vp9_error_neon.c
new file mode 100644
index 000000000..009520aee
--- /dev/null
+++ b/vp10/encoder/arm/neon/vp9_error_neon.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "./vp10_rtcd.h"
+
+int64_t vp10_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff,
+ int block_size) {
+ int64x2_t error = vdupq_n_s64(0);
+
+ assert(block_size >= 8);
+ assert((block_size % 8) == 0);
+
+ do {
+ const int16x8_t c = vld1q_s16(coeff);
+ const int16x8_t d = vld1q_s16(dqcoeff);
+ const int16x8_t diff = vsubq_s16(c, d);
+ const int16x4_t diff_lo = vget_low_s16(diff);
+ const int16x4_t diff_hi = vget_high_s16(diff);
+ // diff is 15-bits, the squares 30, so we can store 2 in 31-bits before
+ // accumulating them in 64-bits.
+ const int32x4_t err0 = vmull_s16(diff_lo, diff_lo);
+ const int32x4_t err1 = vmlal_s16(err0, diff_hi, diff_hi);
+ const int64x2_t err2 = vaddl_s32(vget_low_s32(err1), vget_high_s32(err1));
+ error = vaddq_s64(error, err2);
+ coeff += 8;
+ dqcoeff += 8;
+ block_size -= 8;
+ } while (block_size != 0);
+
+ return vgetq_lane_s64(error, 0) + vgetq_lane_s64(error, 1);
+}
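
For comparison, a scalar sketch of the same computation (the name
block_error_fp_ref is hypothetical, not part of this patch): the sum of
squared coefficient/dequant differences, accumulated in 64 bits.

// Hypothetical scalar equivalent of vp10_block_error_fp_neon(); illustrative.
static int64_t block_error_fp_ref(const int16_t *coeff, const int16_t *dqcoeff,
                                  int block_size) {
  int64_t error = 0;
  int i;
  for (i = 0; i < block_size; ++i) {
    const int diff = coeff[i] - dqcoeff[i];  // fits in 16 bits
    error += (int64_t)diff * diff;           // square fits in ~30 bits
  }
  return error;
}
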
diff --git a/vp10/encoder/arm/neon/vp9_quantize_neon.c b/vp10/encoder/arm/neon/vp9_quantize_neon.c
new file mode 100644
index 000000000..a7dce3742
--- /dev/null
+++ b/vp10/encoder/arm/neon/vp9_quantize_neon.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include <math.h>
+
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp10/common/vp9_quant_common.h"
+#include "vp10/common/vp9_seg_common.h"
+
+#include "vp10/encoder/vp9_encoder.h"
+#include "vp10/encoder/vp9_quantize.h"
+#include "vp10/encoder/vp9_rd.h"
+
+void vp10_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+ int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
+ // TODO(jingning): decide whether these arguments are still needed once
+ // the quantization process is finalized.
+ (void)zbin_ptr;
+ (void)quant_shift_ptr;
+ (void)scan;
+
+ if (!skip_block) {
+ // Quantization pass: All coefficients with index >= zero_flag are
+ // skippable. Note: zero_flag can be zero.
+ int i;
+ const int16x8_t v_zero = vdupq_n_s16(0);
+ const int16x8_t v_one = vdupq_n_s16(1);
+ int16x8_t v_eobmax_76543210 = vdupq_n_s16(-1);
+ int16x8_t v_round = vmovq_n_s16(round_ptr[1]);
+ int16x8_t v_quant = vmovq_n_s16(quant_ptr[1]);
+ int16x8_t v_dequant = vmovq_n_s16(dequant_ptr[1]);
+ // adjust for dc
+ v_round = vsetq_lane_s16(round_ptr[0], v_round, 0);
+ v_quant = vsetq_lane_s16(quant_ptr[0], v_quant, 0);
+ v_dequant = vsetq_lane_s16(dequant_ptr[0], v_dequant, 0);
+ // process dc and the first seven ac coeffs
+ {
+ const int16x8_t v_iscan = vld1q_s16(&iscan[0]);
+ const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[0]);
+ const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
+ const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
+ const int32x4_t v_tmp_lo = vmull_s16(vget_low_s16(v_tmp),
+ vget_low_s16(v_quant));
+ const int32x4_t v_tmp_hi = vmull_s16(vget_high_s16(v_tmp),
+ vget_high_s16(v_quant));
+ const int16x8_t v_tmp2 = vcombine_s16(vshrn_n_s32(v_tmp_lo, 16),
+ vshrn_n_s32(v_tmp_hi, 16));
+ const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
+ const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
+ const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
+ const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign);
+ const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign);
+ const int16x8_t v_dqcoeff = vmulq_s16(v_qcoeff, v_dequant);
+ v_eobmax_76543210 = vmaxq_s16(v_eobmax_76543210, v_nz_iscan);
+ vst1q_s16(&qcoeff_ptr[0], v_qcoeff);
+ vst1q_s16(&dqcoeff_ptr[0], v_dqcoeff);
+ v_round = vmovq_n_s16(round_ptr[1]);
+ v_quant = vmovq_n_s16(quant_ptr[1]);
+ v_dequant = vmovq_n_s16(dequant_ptr[1]);
+ }
+ // now process the rest of the ac coeffs
+ for (i = 8; i < count; i += 8) {
+ const int16x8_t v_iscan = vld1q_s16(&iscan[i]);
+ const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[i]);
+ const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
+ const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
+ const int32x4_t v_tmp_lo = vmull_s16(vget_low_s16(v_tmp),
+ vget_low_s16(v_quant));
+ const int32x4_t v_tmp_hi = vmull_s16(vget_high_s16(v_tmp),
+ vget_high_s16(v_quant));
+ const int16x8_t v_tmp2 = vcombine_s16(vshrn_n_s32(v_tmp_lo, 16),
+ vshrn_n_s32(v_tmp_hi, 16));
+ const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
+ const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
+ const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
+ const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign);
+ const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign);
+ const int16x8_t v_dqcoeff = vmulq_s16(v_qcoeff, v_dequant);
+ v_eobmax_76543210 = vmaxq_s16(v_eobmax_76543210, v_nz_iscan);
+ vst1q_s16(&qcoeff_ptr[i], v_qcoeff);
+ vst1q_s16(&dqcoeff_ptr[i], v_dqcoeff);
+ }
+ {
+ const int16x4_t v_eobmax_3210 =
+ vmax_s16(vget_low_s16(v_eobmax_76543210),
+ vget_high_s16(v_eobmax_76543210));
+ const int64x1_t v_eobmax_xx32 =
+ vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
+ const int16x4_t v_eobmax_tmp =
+ vmax_s16(v_eobmax_3210, vreinterpret_s16_s64(v_eobmax_xx32));
+ const int64x1_t v_eobmax_xxx3 =
+ vshr_n_s64(vreinterpret_s64_s16(v_eobmax_tmp), 16);
+ const int16x4_t v_eobmax_final =
+ vmax_s16(v_eobmax_tmp, vreinterpret_s16_s64(v_eobmax_xxx3));
+
+ *eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0);
+ }
+ } else {
+ memset(qcoeff_ptr, 0, count * sizeof(int16_t));
+ memset(dqcoeff_ptr, 0, count * sizeof(int16_t));
+ *eob_ptr = 0;
+ }
+}
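
A scalar sketch of the fp quantizer for orientation (the name quantize_fp_ref
is hypothetical, not part of this patch): per coefficient, add the rounding
term to |coeff|, multiply by the quantizer and keep the top 16 bits, restore
the sign, dequantize, and track the last nonzero scan position as the eob.
Lane 0 of the vector tables above plays the role of the rc != 0 selection here.

// Hypothetical scalar equivalent of vp10_quantize_fp_neon(); illustrative.
static void quantize_fp_ref(const int16_t *coeff_ptr, intptr_t count,
                            const int16_t *round_ptr, const int16_t *quant_ptr,
                            int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr,
                            const int16_t *dequant_ptr, uint16_t *eob_ptr,
                            const int16_t *scan) {
  int i, eob = -1;
  for (i = 0; i < count; ++i) {
    const int rc = scan[i];        // raster position of the i-th scanned coeff
    const int coeff = coeff_ptr[rc];
    const int sign = coeff >> 31;  // 0 or -1
    const int abs_coeff = (coeff ^ sign) - sign;
    // DC (rc == 0) uses entry 0 of the round/quant/dequant tables.
    const int tmp = ((abs_coeff + round_ptr[rc != 0]) *
                     quant_ptr[rc != 0]) >> 16;
    qcoeff_ptr[rc] = (int16_t)((tmp ^ sign) - sign);
    dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
    if (tmp) eob = i;              // last nonzero in scan order
  }
  *eob_ptr = (uint16_t)(eob + 1);
}
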