From c90a8a1a43929d160567d37eab8df4b63f501d50 Mon Sep 17 00:00:00 2001
From: Jian Zhou
Date: Wed, 9 Dec 2015 18:12:52 -0800
Subject: SSE2 based h_predictor_32x32

Relocate the function from SSSE3 to SSE2, unroll the loop from 16 to 8,
and reduce memory accesses to left.
Single-digit speedup in ./test_intra_pred_speed on big-core machines.

Change-Id: I2b7fc95ffc0c42145be2baca4dc77116dff1c960
---
 vpx_dsp/x86/intrapred_sse2.asm | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/vpx_dsp/x86/intrapred_sse2.asm b/vpx_dsp/x86/intrapred_sse2.asm
index edbf05e33..4aca3ec16 100644
--- a/vpx_dsp/x86/intrapred_sse2.asm
+++ b/vpx_dsp/x86/intrapred_sse2.asm
@@ -581,6 +581,34 @@ cglobal h_predictor_16x16, 2, 5, 3, dst, stride, line, left
   jnz .loop
   REP_RET
 
+INIT_XMM sse2
+cglobal h_predictor_32x32, 2, 5, 3, dst, stride, line, left
+  movifnidn          leftq, leftmp
+  mov                lineq, -8
+  DEFINE_ARGS dst, stride, line, left, stride3
+  lea             stride3q, [strideq*3]
+.loop:
+  movd                  m0, [leftq]
+  punpcklbw             m0, m0
+  punpcklbw             m0, m0              ; l1 to l4 each repeated 4 times
+  pshufd                m1, m0, 0x0         ; l1 repeated 16 times
+  pshufd                m2, m0, 0x55        ; l2 repeated 16 times
+  mova   [dstq           ], m1
+  mova   [dstq+16        ], m1
+  mova   [dstq+strideq   ], m2
+  mova   [dstq+strideq+16], m2
+  pshufd                m1, m0, 0xaa
+  pshufd                m2, m0, 0xff
+  mova [dstq+strideq*2   ], m1
+  mova [dstq+strideq*2+16], m1
+  mova [dstq+stride3q    ], m2
+  mova [dstq+stride3q+16 ], m2
+  inc                lineq
+  lea                leftq, [leftq+4 ]
+  lea                 dstq, [dstq+strideq*4]
+  jnz .loop
+  REP_RET
+
 INIT_XMM sse2
 cglobal tm_predictor_4x4, 4, 4, 5, dst, stride, above, left
   pxor                  m1, m1
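
For reference, the added routine is plain horizontal intra prediction: each of the
32 rows of the block is filled with that row's left-neighbor pixel, four rows per
loop iteration from a single 4-byte load of left. The C/SSE2-intrinsics sketch
below mirrors that structure; it is an illustration only, not libvpx source, and
the function name h_predictor_32x32_sse2_sketch is hypothetical. It takes the
same arguments as the asm routine (dst, stride, left) and uses unaligned stores
where the asm can use aligned mova, since libvpx guarantees aligned destinations.

/* Illustrative only: a C/SSE2-intrinsics equivalent of the asm above.
 * Hypothetical name; not the libvpx C reference code. */
#include <emmintrin.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void h_predictor_32x32_sse2_sketch(uint8_t *dst, ptrdiff_t stride,
                                          const uint8_t *left) {
  int i;
  for (i = 0; i < 8; ++i) {        /* 8 iterations x 4 rows = 32 rows */
    int32_t four;                  /* one 4-byte load of left per 4 rows */
    memcpy(&four, left, 4);        /* movd m0, [leftq] */
    __m128i l = _mm_cvtsi32_si128(four);
    l = _mm_unpacklo_epi8(l, l);   /* l0 l0 l1 l1 l2 l2 l3 l3 ...        */
    l = _mm_unpacklo_epi8(l, l);   /* each left pixel repeated 4 times   */
    const __m128i r0 = _mm_shuffle_epi32(l, 0x00);  /* l0 x16 (pshufd 0x0)  */
    const __m128i r1 = _mm_shuffle_epi32(l, 0x55);  /* l1 x16 (pshufd 0x55) */
    const __m128i r2 = _mm_shuffle_epi32(l, 0xaa);  /* l2 x16 (pshufd 0xaa) */
    const __m128i r3 = _mm_shuffle_epi32(l, 0xff);  /* l3 x16 (pshufd 0xff) */
    /* Each 32-pixel row is two 16-byte stores of the same register. */
    _mm_storeu_si128((__m128i *)(dst + 0 * stride +  0), r0);
    _mm_storeu_si128((__m128i *)(dst + 0 * stride + 16), r0);
    _mm_storeu_si128((__m128i *)(dst + 1 * stride +  0), r1);
    _mm_storeu_si128((__m128i *)(dst + 1 * stride + 16), r1);
    _mm_storeu_si128((__m128i *)(dst + 2 * stride +  0), r2);
    _mm_storeu_si128((__m128i *)(dst + 2 * stride + 16), r2);
    _mm_storeu_si128((__m128i *)(dst + 3 * stride +  0), r3);
    _mm_storeu_si128((__m128i *)(dst + 3 * stride + 16), r3);
    left += 4;                     /* lea leftq, [leftq+4]        */
    dst += 4 * stride;             /* lea dstq, [dstq+strideq*4]  */
  }
}

The two shuffle broadcasts per pair of rows are what let one 4-byte load of left
cover four output rows, which is the "reduce memory accesses to left" part of the
commit message.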