author    Yunqing Wang <yunqingwang@google.com>   2012-11-20 16:28:08 -0800
committer Yunqing Wang <yunqingwang@google.com>   2012-11-26 09:53:50 -0800
commit    e7cd80718b04c03d5ce21f13981712704b36fc66 (patch)
tree      492da7cfccaa2669a8656f9939237465430d96b6 /vp9/common
parent    f42e41f2eff366338f8f7b36d5b6f8c9c5a26573 (diff)
Improve sad3x16 SSE2 function
vp9_sad3x16_sse2() is called heavily in the decoder, and its unaligned reads consume a lot of CPU cycles. When CONFIG_SUBPELREFMV is off, the unaligned offset is 1. In that case we can adjust src_ptr to be 4-byte aligned and do aligned reads instead, which reduces the read time significantly. Tests on a 1080p clip showed over a 2% decoder performance gain with CONFIG_SUBPELREFMV off.

Change-Id: I953afe3ac5406107933ef49d0b695eafba9a6507
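The trick the message describes can be shown in a few lines of SSE2. Below is a minimal, self-contained sketch, not the patch itself; sad3x16_ref, sad3x16_sse2_sketch, and the main() harness are illustrative names, and it assumes strides that are multiples of 4, as in the decoder use case the patch targets. The idea: detect the source pointer's misalignment, back it up one byte when the offset is 1 so each per-row movd load becomes 4-byte aligned, and mask away the extra byte instead of relying on the unaligned load.

/* Standalone sketch of the alignment trick; illustration only, not libvpx code. */
#include <emmintrin.h>  /* SSE2 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Scalar reference: SAD over a 3-wide, 16-tall block. */
static unsigned int sad3x16_ref(const unsigned char *src, int src_stride,
                                const unsigned char *ref, int ref_stride) {
  unsigned int sad = 0;
  int r, c;
  for (r = 0; r < 16; r++)
    for (c = 0; c < 3; c++)
      sad += abs(src[r * src_stride + c] - ref[r * ref_stride + c]);
  return sad;
}

/* SSE2 sketch of the aligned-read-plus-mask idea, one row per iteration for
 * clarity (the real kernel packs four rows per iteration). */
static unsigned int sad3x16_sse2_sketch(const unsigned char *src, int src_stride,
                                        const unsigned char *ref, int ref_stride) {
  const int offset = (int)((uintptr_t)src & 3);
  __m128i sum = _mm_setzero_si128();
  /* keep_low3 keeps bytes 0..2 of the loaded dword; keep_high3 keeps bytes 1..3. */
  const __m128i keep_low3  = _mm_set_epi32(0, 0, 0, 0x00ffffff);
  const __m128i keep_high3 = _mm_set_epi32(0, 0, 0, (int)0xffffff00);
  int r;

  if (offset == 1)
    src -= 1;  /* loads become 4-byte aligned; byte 0 of each load is the extra byte */

  for (r = 0; r < 16; r++) {
    __m128i s = _mm_cvtsi32_si128(*(const int *)(src + r * src_stride));
    __m128i t = _mm_cvtsi32_si128(*(const int *)(ref + r * ref_stride));
    if (offset == 1) {
      s = _mm_and_si128(s, keep_high3);                    /* drop the extra lead byte */
      t = _mm_slli_epi32(_mm_and_si128(t, keep_low3), 8);  /* line ref bytes up with src */
    } else {
      s = _mm_and_si128(s, keep_low3);                     /* drop the unused 4th byte */
      t = _mm_and_si128(t, keep_low3);
    }
    sum = _mm_add_epi16(sum, _mm_sad_epu8(s, t));
  }
  return (unsigned int)_mm_cvtsi128_si32(sum);
}

int main(void) {
  /* 4-byte-aligned backing stores; src is taken at offset 1 to mimic the
   * CONFIG_SUBPELREFMV-off case, ref stays aligned. Stride 64 keeps the
   * adjusted loads aligned. */
  static unsigned int backing[2][(16 * 64 + 16) / 4];
  unsigned char *p0 = (unsigned char *)backing[0];
  unsigned char *p1 = (unsigned char *)backing[1];
  unsigned int a, b, i;
  for (i = 0; i < sizeof(backing[0]); i++) {
    p0[i] = (unsigned char)(i * 7 + 3);
    p1[i] = (unsigned char)(i * 13 + 1);
  }
  a = sad3x16_ref(p0 + 1, 64, p1, 64);
  b = sad3x16_sse2_sketch(p0 + 1, 64, p1, 64);
  printf("scalar=%u sse2=%u %s\n", a, b, a == b ? "(match)" : "(MISMATCH)");
  return 0;
}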
Diffstat (limited to 'vp9/common')
-rw-r--r--  vp9/common/findnearmv.c      | 14
-rw-r--r--  vp9/common/rtcd_defs.sh      |  4
-rw-r--r--  vp9/common/x86/sadmxn_x86.c  | 35
3 files changed, 31 insertions(+), 22 deletions(-)
diff --git a/vp9/common/findnearmv.c b/vp9/common/findnearmv.c
index 768abf23e..a551db810 100644
--- a/vp9/common/findnearmv.c
+++ b/vp9/common/findnearmv.c
@@ -45,15 +45,13 @@ vp9_prob *vp9_mv_ref_probs(VP9_COMMON *pc,
unsigned int vp9_sad3x16_c(const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
- int ref_stride,
- int max_sad) {
+ int ref_stride) {
return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 3, 16);
}
unsigned int vp9_sad16x3_c(const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
- int ref_stride,
- int max_sad) {
+ int ref_stride) {
return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 16, 3);
}
@@ -230,23 +228,23 @@ void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
score = 0;
if (xd->up_available) {
score += vp9_sad16x3(above_src, xd->dst.y_stride,
- above_ref + offset, ref_y_stride, INT_MAX);
+ above_ref + offset, ref_y_stride);
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
score += vp9_sad16x3(above_src + 16, xd->dst.y_stride,
- above_ref + offset + 16, ref_y_stride, INT_MAX);
+ above_ref + offset + 16, ref_y_stride);
}
#endif
}
if (xd->left_available) {
score += vp9_sad3x16(left_src, xd->dst.y_stride,
- left_ref + offset, ref_y_stride, INT_MAX);
+ left_ref + offset, ref_y_stride);
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
score += vp9_sad3x16(left_src + xd->dst.y_stride * 16,
xd->dst.y_stride,
left_ref + offset + ref_y_stride * 16,
- ref_y_stride, INT_MAX);
+ ref_y_stride);
}
#endif
}
diff --git a/vp9/common/rtcd_defs.sh b/vp9/common/rtcd_defs.sh
index bbef1ec49..ea134a854 100644
--- a/vp9/common/rtcd_defs.sh
+++ b/vp9/common/rtcd_defs.sh
@@ -221,10 +221,10 @@ vp9_loop_filter_simple_bh_neon=vp9_loop_filter_bhs_neon
#
# sad 16x3, 3x16
#
-prototype unsigned int vp9_sad16x3 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, int max_sad"
+prototype unsigned int vp9_sad16x3 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride"
specialize vp9_sad16x3 sse2
-prototype unsigned int vp9_sad3x16 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, int max_sad"
+prototype unsigned int vp9_sad3x16 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride"
specialize vp9_sad3x16 sse2
#
diff --git a/vp9/common/x86/sadmxn_x86.c b/vp9/common/x86/sadmxn_x86.c
index 77cd372ac..0b783ccea 100644
--- a/vp9/common/x86/sadmxn_x86.c
+++ b/vp9/common/x86/sadmxn_x86.c
@@ -11,21 +11,18 @@
#include <emmintrin.h> // SSE2
#include "./vpx_config.h"
#include "./vp9_rtcd.h"
-
+#include "vpx/vpx_integer.h"
#if HAVE_SSE2
unsigned int vp9_sad16x3_sse2(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
- int ref_stride,
- int max_sad) {
+ int ref_stride) {
__m128i s0, s1, s2;
__m128i r0, r1, r2;
__m128i sad;
- (void)max_sad;
-
s0 = _mm_loadu_si128((const __m128i *)(src_ptr + 0 * src_stride));
s1 = _mm_loadu_si128((const __m128i *)(src_ptr + 1 * src_stride));
s2 = _mm_loadu_si128((const __m128i *)(src_ptr + 2 * src_stride));
@@ -46,12 +43,25 @@ unsigned int vp9_sad3x16_sse2(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
- int ref_stride,
- int max_sad) {
+ int ref_stride) {
int r;
__m128i s0, s1, s2, s3;
__m128i r0, r1, r2, r3;
- __m128i sad = _mm_set1_epi16(0);
+ __m128i sad = _mm_setzero_si128();
+ __m128i mask;
+ const int offset = (uintptr_t)src_ptr & 3;
+
+ /* In the current use case, the offset is 1 when CONFIG_SUBPELREFMV is off.
+ * Here, for offset = 1, we adjust src_ptr to be 4-byte aligned; the movd
+ * then takes much less time.
+ */
+ if (offset == 1)
+ src_ptr -= 1;
+
+ /* mask = 0xffffffffffff0000ffffffffffff0000 */
+ mask = _mm_cmpeq_epi32(sad, sad);
+ mask = _mm_slli_epi64(mask, 16);
+
for (r = 0; r < 16; r += 4) {
s0 = _mm_cvtsi32_si128 (*(const int *)(src_ptr + 0 * src_stride));
s1 = _mm_cvtsi32_si128 (*(const int *)(src_ptr + 1 * src_stride));
@@ -69,8 +79,11 @@ unsigned int vp9_sad3x16_sse2(
s0 = _mm_unpacklo_epi64(s0, s2);
r0 = _mm_unpacklo_epi64(r0, r2);
- // throw out byte 3
- s0 = _mm_slli_epi64(s0, 16);
+ // throw out extra byte
+ if (offset == 1)
+ s0 = _mm_and_si128(s0, mask);
+ else
+ s0 = _mm_slli_epi64(s0, 16);
r0 = _mm_slli_epi64(r0, 16);
sad = _mm_add_epi16(sad, _mm_sad_epu8(s0, r0));
@@ -84,5 +97,3 @@ unsigned int vp9_sad3x16_sse2(
}
#endif
-
-