path: root/vp8/common/arm
author     Johann <johannkoenig@google.com>  2014-05-06 12:26:21 -0700
committer  Johann <johannkoenig@google.com>  2014-05-06 14:28:00 -0700
commit     677fb5123e0ece1d6c30be9d0282e1e1f594a56d (patch)
tree       23e9943f0fa39a61420c83f778f1a2d8d6c842cc /vp8/common/arm
parent     928ff03889dadc3f63883553b443c08e625b4885 (diff)
Revert "VP8 for ARMv8 by using NEON intrinsics 10"
This reverts commit c500fc22c1bb2a3ae5c318bfb806f7e9bd57ce25.

There is an issue with gcc 4.6 in the Android NDK:

loopfiltersimpleverticaledge_neon.c: In function 'vp8_loop_filter_bvs_neon':
loopfiltersimpleverticaledge_neon.c:176:1: error: insn does not satisfy its constraints:

Change-Id: I95b6509d12f075890308914cc691b813d2e5cd9f
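The failing translation unit is the NEON-intrinsics file deleted below; gcc 4.6 in the Android NDK appears unable to satisfy the register constraints generated for its vld4_lane_u8()/vst2_lane_u8() sequences and aborts with the error quoted above. This commit simply reverts to the hand-written assembly. As a purely illustrative alternative (the macro name below is invented for this sketch and is not part of libvpx), a build could instead gate the intrinsics path on the compiler version:

    /* Hypothetical guard, for illustration only: keep the NEON intrinsics
     * for compilers known to handle them and fall back to another path on
     * gcc <= 4.6, which rejects this file with "insn does not satisfy its
     * constraints".  __GNUC__, __GNUC_MINOR__ and __clang__ are standard
     * predefined macros. */
    #if defined(__GNUC__) && !defined(__clang__) && \
        (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ <= 6))
    #define VP8_NEON_LOOPFILTER_INTRINSICS 0
    #else
    #define VP8_NEON_LOOPFILTER_INTRINSICS 1
    #endif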
Diffstat (limited to 'vp8/common/arm')
-rw-r--r--  vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.asm  156
-rw-r--r--  vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c    184
2 files changed, 156 insertions, 184 deletions
diff --git a/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.asm b/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.asm
new file mode 100644
index 000000000..78d13c895
--- /dev/null
+++ b/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.asm
@@ -0,0 +1,156 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+ EXPORT |vp8_loop_filter_bvs_neon|
+ EXPORT |vp8_loop_filter_mbvs_neon|
+ ARM
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+; r0 unsigned char *s, PRESERVE
+; r1 int p, PRESERVE
+; q1 limit, PRESERVE
+
+|vp8_loop_filter_simple_vertical_edge_neon| PROC
+ vpush {d8-d15}
+
+ sub r0, r0, #2 ; move src pointer down by 2 columns
+ add r12, r1, r1
+ add r3, r0, r1
+
+ vld4.8 {d6[0], d7[0], d8[0], d9[0]}, [r0], r12
+ vld4.8 {d6[1], d7[1], d8[1], d9[1]}, [r3], r12
+ vld4.8 {d6[2], d7[2], d8[2], d9[2]}, [r0], r12
+ vld4.8 {d6[3], d7[3], d8[3], d9[3]}, [r3], r12
+ vld4.8 {d6[4], d7[4], d8[4], d9[4]}, [r0], r12
+ vld4.8 {d6[5], d7[5], d8[5], d9[5]}, [r3], r12
+ vld4.8 {d6[6], d7[6], d8[6], d9[6]}, [r0], r12
+ vld4.8 {d6[7], d7[7], d8[7], d9[7]}, [r3], r12
+
+ vld4.8 {d10[0], d11[0], d12[0], d13[0]}, [r0], r12
+ vld4.8 {d10[1], d11[1], d12[1], d13[1]}, [r3], r12
+ vld4.8 {d10[2], d11[2], d12[2], d13[2]}, [r0], r12
+ vld4.8 {d10[3], d11[3], d12[3], d13[3]}, [r3], r12
+ vld4.8 {d10[4], d11[4], d12[4], d13[4]}, [r0], r12
+ vld4.8 {d10[5], d11[5], d12[5], d13[5]}, [r3], r12
+ vld4.8 {d10[6], d11[6], d12[6], d13[6]}, [r0], r12
+ vld4.8 {d10[7], d11[7], d12[7], d13[7]}, [r3]
+
+ vswp d7, d10
+ vswp d12, d9
+
+ ;vp8_filter_mask() function
+ ;vp8_hevmask() function
+ sub r0, r0, r1, lsl #4
+ vabd.u8 q15, q5, q4 ; abs(p0 - q0)
+ vabd.u8 q14, q3, q6 ; abs(p1 - q1)
+
+ vqadd.u8 q15, q15, q15 ; abs(p0 - q0) * 2
+ vshr.u8 q14, q14, #1 ; abs(p1 - q1) / 2
+ vmov.u8 q0, #0x80 ; 0x80
+ vmov.s16 q11, #3
+ vqadd.u8 q15, q15, q14 ; abs(p0 - q0) * 2 + abs(p1 - q1) / 2
+
+ veor q4, q4, q0 ; qs0: q0 offset to convert to a signed value
+ veor q5, q5, q0 ; ps0: p0 offset to convert to a signed value
+ veor q3, q3, q0 ; ps1: p1 offset to convert to a signed value
+ veor q6, q6, q0 ; qs1: q1 offset to convert to a signed value
+
+ vcge.u8 q15, q1, q15 ; mask = (blimit >= abs(p0 - q0)*2 + abs(p1 - q1)/2)
+
+ vsubl.s8 q2, d8, d10 ; ( qs0 - ps0)
+ vsubl.s8 q13, d9, d11
+
+ vqsub.s8 q14, q3, q6 ; vp8_filter = vp8_signed_char_clamp(ps1-qs1)
+
+ vmul.s16 q2, q2, q11 ; 3 * ( qs0 - ps0)
+ vmul.s16 q13, q13, q11
+
+ vmov.u8 q11, #0x03 ; 0x03
+ vmov.u8 q12, #0x04 ; 0x04
+
+ vaddw.s8 q2, q2, d28 ; vp8_filter + 3 * ( qs0 - ps0)
+ vaddw.s8 q13, q13, d29
+
+ vqmovn.s16 d28, q2 ; vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
+ vqmovn.s16 d29, q13
+
+ add r0, r0, #1
+ add r3, r0, r1
+
+ vand q14, q14, q15 ; vp8_filter &= mask
+
+ vqadd.s8 q2, q14, q11 ; Filter2 = vp8_signed_char_clamp(vp8_filter+3)
+ vqadd.s8 q3, q14, q12 ; Filter1 = vp8_signed_char_clamp(vp8_filter+4)
+ vshr.s8 q2, q2, #3 ; Filter2 >>= 3
+ vshr.s8 q14, q3, #3 ; Filter1 >>= 3
+
+ ;calculate output
+ vqadd.s8 q11, q5, q2 ; u = vp8_signed_char_clamp(ps0 + Filter2)
+ vqsub.s8 q10, q4, q14 ; u = vp8_signed_char_clamp(qs0 - Filter1)
+
+ veor q6, q11, q0 ; *op0 = u^0x80
+ veor q7, q10, q0 ; *oq0 = u^0x80
+ add r12, r1, r1
+ vswp d13, d14
+
+ ;store op1, op0, oq0, oq1
+ vst2.8 {d12[0], d13[0]}, [r0], r12
+ vst2.8 {d12[1], d13[1]}, [r3], r12
+ vst2.8 {d12[2], d13[2]}, [r0], r12
+ vst2.8 {d12[3], d13[3]}, [r3], r12
+ vst2.8 {d12[4], d13[4]}, [r0], r12
+ vst2.8 {d12[5], d13[5]}, [r3], r12
+ vst2.8 {d12[6], d13[6]}, [r0], r12
+ vst2.8 {d12[7], d13[7]}, [r3], r12
+ vst2.8 {d14[0], d15[0]}, [r0], r12
+ vst2.8 {d14[1], d15[1]}, [r3], r12
+ vst2.8 {d14[2], d15[2]}, [r0], r12
+ vst2.8 {d14[3], d15[3]}, [r3], r12
+ vst2.8 {d14[4], d15[4]}, [r0], r12
+ vst2.8 {d14[5], d15[5]}, [r3], r12
+ vst2.8 {d14[6], d15[6]}, [r0], r12
+ vst2.8 {d14[7], d15[7]}, [r3]
+
+ vpop {d8-d15}
+ bx lr
+ ENDP ; |vp8_loop_filter_simple_vertical_edge_neon|
+
+; r0 unsigned char *y
+; r1 int ystride
+; r2 const unsigned char *blimit
+
+|vp8_loop_filter_bvs_neon| PROC
+ push {r4, lr}
+ ldrb r3, [r2] ; load blim from mem
+ mov r4, r0
+ add r0, r0, #4
+ vdup.s8 q1, r3 ; duplicate blim
+ bl vp8_loop_filter_simple_vertical_edge_neon
+ ; vp8_loop_filter_simple_vertical_edge_neon preserves r1 and q1
+ add r0, r4, #8
+ bl vp8_loop_filter_simple_vertical_edge_neon
+ add r0, r4, #12
+ pop {r4, lr}
+ b vp8_loop_filter_simple_vertical_edge_neon
+ ENDP ;|vp8_loop_filter_bvs_neon|
+
+; r0 unsigned char *y
+; r1 int ystride
+; r2 const unsigned char *blimit
+
+|vp8_loop_filter_mbvs_neon| PROC
+ ldrb r3, [r2] ; load mblim from mem
+ vdup.s8 q1, r3 ; duplicate mblim
+ b vp8_loop_filter_simple_vertical_edge_neon
+ ENDP ;|vp8_loop_filter_mbvs_neon|
+ END
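Both the assembly above and the intrinsics it replaces implement the VP8 "simple" loop filter across a vertical edge: a row is filtered only when abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit, and the new p0/q0 come from a clamped adjustment built from (p1 - q1) + 3 * (q0 - p0). A minimal scalar sketch of that arithmetic follows; the helper names are invented for illustration, and it processes one row per iteration rather than the 16 rows the NEON routine handles at once:

    #include <stdlib.h>

    /* Illustrative scalar model of the simple vertical-edge filter.
     * clamp_s8() mimics signed-char saturation; the name and signature
     * are made up for this sketch. */
    static signed char clamp_s8(int v) {
      return (signed char)(v < -128 ? -128 : (v > 127 ? 127 : v));
    }

    static void simple_vertical_edge_scalar(unsigned char *s, int pitch,
                                            unsigned char blimit, int rows) {
      int r;
      for (r = 0; r < rows; ++r, s += pitch) {
        unsigned char p1 = s[-2], p0 = s[-1], q0 = s[0], q1 = s[1];
        /* filter only weak edges */
        int mask = (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit);

        /* bias into the signed range, as the vector code does with 0x80 */
        signed char ps1 = (signed char)(p1 ^ 0x80), ps0 = (signed char)(p0 ^ 0x80);
        signed char qs0 = (signed char)(q0 ^ 0x80), qs1 = (signed char)(q1 ^ 0x80);
        signed char filter, filter1, filter2;

        filter = clamp_s8(ps1 - qs1);
        filter = clamp_s8(filter + 3 * (qs0 - ps0));
        if (!mask) filter = 0;

        /* >> of a negative value is assumed arithmetic, as on gcc/clang */
        filter1 = clamp_s8(filter + 4) >> 3;
        filter2 = clamp_s8(filter + 3) >> 3;

        s[0]  = (unsigned char)(clamp_s8(qs0 - filter1) ^ 0x80);  /* new q0 */
        s[-1] = (unsigned char)(clamp_s8(ps0 + filter2) ^ 0x80);  /* new p0 */
      }
    }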
diff --git a/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c b/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c
deleted file mode 100644
index b0952b582..000000000
--- a/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include "./vpx_config.h"
-
-static INLINE void vp8_loop_filter_simple_vertical_edge_neon(
- unsigned char *s,
- int p,
- const unsigned char *blimit) {
- int pitch;
- unsigned char *src1, *src2;
- uint8x16_t qblimit, q0u8;
- uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q11u8, q12u8, q14u8, q15u8;
- int16x8_t q2s16, q13s16, q11s16;
- int8x8_t d28s8, d29s8;
- int8x16_t q2s8, q3s8, q10s8, q11s8, q14s8;
- uint8x8x4_t d0u8x4; // d6, d7, d8, d9
- uint8x8x4_t d1u8x4; // d10, d11, d12, d13
- uint8x8x2_t d2u8x2; // d12, d13
- uint8x8x2_t d3u8x2; // d14, d15
-
- pitch = p << 1;
- qblimit = vdupq_n_u8(*blimit);
-
- src1 = s - 2;
- d0u8x4 = vld4_lane_u8(src1, d0u8x4, 0);
- src1 += pitch;
- d0u8x4 = vld4_lane_u8(src1, d0u8x4, 2);
- src1 += pitch;
- d0u8x4 = vld4_lane_u8(src1, d0u8x4, 4);
- src1 += pitch;
- d0u8x4 = vld4_lane_u8(src1, d0u8x4, 6);
- src1 += pitch;
- d1u8x4 = vld4_lane_u8(src1, d1u8x4, 0);
- src1 += pitch;
- d1u8x4 = vld4_lane_u8(src1, d1u8x4, 2);
- src1 += pitch;
- d1u8x4 = vld4_lane_u8(src1, d1u8x4, 4);
- src1 += pitch;
- d1u8x4 = vld4_lane_u8(src1, d1u8x4, 6);
-
- src2 = s - 2 + p;
- d0u8x4 = vld4_lane_u8(src2, d0u8x4, 1);
- src2 += pitch;
- d0u8x4 = vld4_lane_u8(src2, d0u8x4, 3);
- src2 += pitch;
- d0u8x4 = vld4_lane_u8(src2, d0u8x4, 5);
- src2 += pitch;
- d0u8x4 = vld4_lane_u8(src2, d0u8x4, 7);
- src2 += pitch;
- d1u8x4 = vld4_lane_u8(src2, d1u8x4, 1);
- src2 += pitch;
- d1u8x4 = vld4_lane_u8(src2, d1u8x4, 3);
- src2 += pitch;
- d1u8x4 = vld4_lane_u8(src2, d1u8x4, 5);
- src2 += pitch;
- d1u8x4 = vld4_lane_u8(src2, d1u8x4, 7);
-
- // vswp d7, d10
- // vswp d12, d9
- q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]); // d6 d10
- q4u8 = vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]); // d8 d12
- q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]); // d7 d11
- q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]); // d9 d13
-
- q15u8 = vabdq_u8(q5u8, q4u8);
- q14u8 = vabdq_u8(q3u8, q6u8);
-
- q15u8 = vqaddq_u8(q15u8, q15u8);
- q14u8 = vshrq_n_u8(q14u8, 1);
- q0u8 = vdupq_n_u8(0x80);
- q11s16 = vdupq_n_s16(3);
- q15u8 = vqaddq_u8(q15u8, q14u8);
-
- q3u8 = veorq_u8(q3u8, q0u8);
- q4u8 = veorq_u8(q4u8, q0u8);
- q5u8 = veorq_u8(q5u8, q0u8);
- q6u8 = veorq_u8(q6u8, q0u8);
-
- q15u8 = vcgeq_u8(qblimit, q15u8);
-
- q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q4u8)),
- vget_low_s8(vreinterpretq_s8_u8(q5u8)));
- q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q4u8)),
- vget_high_s8(vreinterpretq_s8_u8(q5u8)));
-
- q14s8 = vqsubq_s8(vreinterpretq_s8_u8(q3u8),
- vreinterpretq_s8_u8(q6u8));
-
- q2s16 = vmulq_s16(q2s16, q11s16);
- q13s16 = vmulq_s16(q13s16, q11s16);
-
- q11u8 = vdupq_n_u8(3);
- q12u8 = vdupq_n_u8(4);
-
- q2s16 = vaddw_s8(q2s16, vget_low_s8(q14s8));
- q13s16 = vaddw_s8(q13s16, vget_high_s8(q14s8));
-
- d28s8 = vqmovn_s16(q2s16);
- d29s8 = vqmovn_s16(q13s16);
- q14s8 = vcombine_s8(d28s8, d29s8);
-
- q14s8 = vandq_s8(q14s8, vreinterpretq_s8_u8(q15u8));
-
- q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q11u8));
- q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q12u8));
- q2s8 = vshrq_n_s8(q2s8, 3);
- q14s8 = vshrq_n_s8(q3s8, 3);
-
- q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q5u8), q2s8);
- q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q4u8), q14s8);
-
- q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
- q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
-
- d2u8x2.val[0] = vget_low_u8(q6u8); // d12
- d2u8x2.val[1] = vget_low_u8(q7u8); // d14
- d3u8x2.val[0] = vget_high_u8(q6u8); // d13
- d3u8x2.val[1] = vget_high_u8(q7u8); // d15
-
- src1 = s - 1;
- vst2_lane_u8(src1, d2u8x2, 0);
- src1 += pitch;
- vst2_lane_u8(src1, d2u8x2, 2);
- src1 += pitch;
- vst2_lane_u8(src1, d2u8x2, 4);
- src1 += pitch;
- vst2_lane_u8(src1, d2u8x2, 6);
- src1 += pitch;
- vst2_lane_u8(src1, d3u8x2, 0);
- src1 += pitch;
- vst2_lane_u8(src1, d3u8x2, 2);
- src1 += pitch;
- vst2_lane_u8(src1, d3u8x2, 4);
- src1 += pitch;
- vst2_lane_u8(src1, d3u8x2, 6);
-
- src2 = s - 1 + p;
- vst2_lane_u8(src2, d2u8x2, 1);
- src2 += pitch;
- vst2_lane_u8(src2, d2u8x2, 3);
- src2 += pitch;
- vst2_lane_u8(src2, d2u8x2, 5);
- src2 += pitch;
- vst2_lane_u8(src2, d2u8x2, 7);
- src2 += pitch;
- vst2_lane_u8(src2, d3u8x2, 1);
- src2 += pitch;
- vst2_lane_u8(src2, d3u8x2, 3);
- src2 += pitch;
- vst2_lane_u8(src2, d3u8x2, 5);
- src2 += pitch;
- vst2_lane_u8(src2, d3u8x2, 7);
- return;
-}
-
-void vp8_loop_filter_bvs_neon(
- unsigned char *y_ptr,
- int y_stride,
- const unsigned char *blimit) {
- y_ptr += 4;
- vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
- y_ptr += 4;
- vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
- y_ptr += 4;
- vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
- return;
-}
-
-void vp8_loop_filter_mbvs_neon(
- unsigned char *y_ptr,
- int y_stride,
- const unsigned char *blimit) {
- vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
- return;
-}
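The two exported wrappers differ only in which vertical edges of a 16-pixel-wide luma macroblock they touch: vp8_loop_filter_mbvs_neon filters the macroblock boundary at y_ptr, while vp8_loop_filter_bvs_neon filters the three internal block edges at columns 4, 8 and 12, as the deleted C dispatchers show. A hedged sketch of how a caller might walk one macroblock row (the driver function and its parameters are invented for illustration; libvpx's real frame-level filter loop lives elsewhere in vp8/common):

    void vp8_loop_filter_bvs_neon(unsigned char *y_ptr, int y_stride,
                                  const unsigned char *blimit);
    void vp8_loop_filter_mbvs_neon(unsigned char *y_ptr, int y_stride,
                                   const unsigned char *blimit);

    /* Hypothetical driver, for illustration only: apply the simple vertical
     * loop filter across one row of 16x16 macroblocks. */
    void filter_mb_row_vertical(unsigned char *y_row, int y_stride,
                                const unsigned char *mblimit,
                                const unsigned char *blimit, int mb_cols) {
      int mb_col;
      for (mb_col = 0; mb_col < mb_cols; ++mb_col) {
        unsigned char *y_ptr = y_row + 16 * mb_col;
        if (mb_col > 0)  /* the left frame edge has no neighbour to filter against */
          vp8_loop_filter_mbvs_neon(y_ptr, y_stride, mblimit);
        vp8_loop_filter_bvs_neon(y_ptr, y_stride, blimit);  /* columns 4, 8, 12 */
      }
    }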