summary | refs | log | tree | commit | diff
path: root/vpx_dsp/x86/sad4d_sse2.asm
diff options
context:
space:
mode:
author    Jian Zhou <zhoujian@google.com>  2015-12-17 11:08:17 -0800
committer Jian Zhou <zhoujian@google.com>  2015-12-17 17:43:46 -0800
commit    789dbb3131380f95fc1507de61fe180a12db44f3 (patch)
tree      3cdc24fa2ccaa6ae1b15587f480891e1deff1270 /vpx_dsp/x86/sad4d_sse2.asm
parent    8f8a3b6a7842e82d1bdf61538a0f05c7044060ab (diff)
download  libvpx-789dbb3131380f95fc1507de61fe180a12db44f3.tar
libvpx-789dbb3131380f95fc1507de61fe180a12db44f3.tar.gz
libvpx-789dbb3131380f95fc1507de61fe180a12db44f3.tar.bz2
libvpx-789dbb3131380f95fc1507de61fe180a12db44f3.zip
Code clean of sad4xNx4D_sse
Replace MMX with SSE2. Change-Id: I948ca1be6ed9b8e67f16555e226f1203726b7da6
Diffstat (limited to 'vpx_dsp/x86/sad4d_sse2.asm')
-rw-r--r--  vpx_dsp/x86/sad4d_sse2.asm | 56
1 file changed, 32 insertions(+), 24 deletions(-)
diff --git a/vpx_dsp/x86/sad4d_sse2.asm b/vpx_dsp/x86/sad4d_sse2.asm
index a2f0ae79e..3f6e55ce9 100644
--- a/vpx_dsp/x86/sad4d_sse2.asm
+++ b/vpx_dsp/x86/sad4d_sse2.asm
@@ -20,33 +20,41 @@ SECTION .text
movd m4, [ref2q+%3]
movd m7, [ref3q+%3]
movd m5, [ref4q+%3]
- punpckldq m0, [srcq +%4]
- punpckldq m6, [ref1q+%5]
- punpckldq m4, [ref2q+%5]
- punpckldq m7, [ref3q+%5]
- punpckldq m5, [ref4q+%5]
+ movd m1, [srcq +%4]
+ movd m2, [ref1q+%5]
+ punpckldq m0, m1
+ punpckldq m6, m2
+ movd m1, [ref2q+%5]
+ movd m2, [ref3q+%5]
+ movd m3, [ref4q+%5]
+ punpckldq m4, m1
+ punpckldq m7, m2
+ punpckldq m5, m3
+ movlhps m0, m0
+ movlhps m6, m4
+ movlhps m7, m5
psadbw m6, m0
- psadbw m4, m0
psadbw m7, m0
- psadbw m5, m0
- punpckldq m6, m4
- punpckldq m7, m5
%else
movd m1, [ref1q+%3]
+ movd m5, [ref1q+%5]
movd m2, [ref2q+%3]
+ movd m4, [ref2q+%5]
+ punpckldq m1, m5
+ punpckldq m2, m4
movd m3, [ref3q+%3]
+ movd m5, [ref3q+%5]
+ punpckldq m3, m5
movd m4, [ref4q+%3]
- punpckldq m0, [srcq +%4]
- punpckldq m1, [ref1q+%5]
- punpckldq m2, [ref2q+%5]
- punpckldq m3, [ref3q+%5]
- punpckldq m4, [ref4q+%5]
+ movd m5, [ref4q+%5]
+ punpckldq m4, m5
+ movd m5, [srcq +%4]
+ punpckldq m0, m5
+ movlhps m0, m0
+ movlhps m1, m2
+ movlhps m3, m4
psadbw m1, m0
- psadbw m2, m0
psadbw m3, m0
- psadbw m4, m0
- punpckldq m1, m2
- punpckldq m3, m4
paddd m6, m1
paddd m7, m3
%endif
@@ -170,7 +178,7 @@ SECTION .text
; void vpx_sadNxNx4d_sse2(uint8_t *src, int src_stride,
; uint8_t *ref[4], int ref_stride,
; uint32_t res[4]);
-; where NxN = 64x64, 32x32, 16x16, 16x8, 8x16 or 8x8
+; where NxN = 64x64, 32x32, 16x16, 16x8, 8x16, 8x8, 8x4, 4x8 and 4x4
%macro SADNXN4D 2
%if UNIX64
cglobal sad%1x%2x4d, 5, 8, 8, src, src_stride, ref1, ref_stride, \
@@ -192,7 +200,7 @@ cglobal sad%1x%2x4d, 4, 7, 8, src, src_stride, ref1, ref_stride, \
%endrep
PROCESS_%1x2x4 0, 0, 0, src_strideq, ref_strideq, 0
-%if mmsize == 16
+%if %1 > 4
pslldq m5, 4
pslldq m7, 4
por m4, m5
@@ -207,8 +215,10 @@ cglobal sad%1x%2x4d, 4, 7, 8, src, src_stride, ref1, ref_stride, \
RET
%else
movifnidn r4, r4mp
- movq [r4+0], m6
- movq [r4+8], m7
+ pshufd m6, m6, 0x08
+ pshufd m7, m7, 0x08
+ movq [r4+0], m6
+ movq [r4+8], m7
RET
%endif
%endmacro
@@ -225,7 +235,5 @@ SADNXN4D 16, 8
SADNXN4D 8, 16
SADNXN4D 8, 8
SADNXN4D 8, 4
-
-INIT_MMX sse
SADNXN4D 4, 8
SADNXN4D 4, 4