author    Johann Koenig <johannkoenig@google.com>  2011-04-07 16:40:05 -0400
committer Johann Koenig <johannkoenig@google.com>  2011-04-07 16:40:05 -0400
commit    08702002e889f861ad43ba58e4ce2effcbc35e3d (patch)
tree      267fe531b4cdcbb62b1410994931884598f9cda5 /vp8/encoder
parent    aec5487cddeb92095d3260a79383034e09d75c50 (diff)
use asm_offsets with vp8_fast_quantize_b_ssse3
On the same order as the SSE2 fast-quantize change: ~2%, except for 32-bit, where there is only a slight improvement.

Change-Id: Iff80e5f1ce7e646eebfdc8871405458ff911986b
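For context, the asm_offsets technique avoids hard-coding struct layouts in assembly: the field offsets are generated at build time with offsetof, so the .asm code can address BLOCK/BLOCKD members by name. In libvpx the constants in asm_enc_offsets.asm are extracted from a compiled C file; the sketch below shows the same idea with trimmed stand-in structs and plain printf output, both assumptions for illustration rather than the project's actual build step.

#include <stddef.h>
#include <stdio.h>

/* Trimmed stand-ins for the real BLOCK/BLOCKD (reduced to the fields
   this patch touches; the real layouts live in the vp8 headers). */
typedef struct
{
    short *coeff;
    short *round;
    short *quant_fast;
} BLOCK;

typedef struct
{
    short *qcoeff;
    short *dqcoeff;
    short *dequant;
    int    eob;
} BLOCKD;

/* Emit one NASM-style equate per field the .asm code dereferences. */
#define EMIT(name, type, field) \
    printf("%s equ %u\n", name, (unsigned)offsetof(type, field))

int main(void)
{
    EMIT("vp8_block_coeff",      BLOCK,  coeff);
    EMIT("vp8_block_round",      BLOCK,  round);
    EMIT("vp8_block_quant_fast", BLOCK,  quant_fast);
    EMIT("vp8_blockd_qcoeff",    BLOCKD, qcoeff);
    EMIT("vp8_blockd_dqcoeff",   BLOCKD, dqcoeff);
    EMIT("vp8_blockd_dequant",   BLOCKD, dequant);
    EMIT("vp8_blockd_eob",       BLOCKD, eob);
    return 0;
}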
Diffstat (limited to 'vp8/encoder')
-rw-r--r--  vp8/encoder/x86/quantize_ssse3.asm      | 106
-rw-r--r--  vp8/encoder/x86/quantize_x86.h          |  20
-rw-r--r--  vp8/encoder/x86/x86_csystemdependent.c  |  17
3 files changed, 82 insertions(+), 61 deletions(-)
diff --git a/vp8/encoder/x86/quantize_ssse3.asm b/vp8/encoder/x86/quantize_ssse3.asm
index 2f33199e5..912007e02 100644
--- a/vp8/encoder/x86/quantize_ssse3.asm
+++ b/vp8/encoder/x86/quantize_ssse3.asm
@@ -9,38 +9,62 @@
%include "vpx_ports/x86_abi_support.asm"
+%include "asm_enc_offsets.asm"
-;int vp8_fast_quantize_b_impl_ssse3(short *coeff_ptr
-; short *qcoeff_ptr,short *dequant_ptr,
-; short *round_ptr,
-; short *quant_ptr, short *dqcoeff_ptr);
+; void vp8_fast_quantize_b_ssse3 | arg
+; (BLOCK *b, | 0
+; BLOCKD *d) | 1
;
-global sym(vp8_fast_quantize_b_impl_ssse3)
-sym(vp8_fast_quantize_b_impl_ssse3):
+
+global sym(vp8_fast_quantize_b_ssse3)
+sym(vp8_fast_quantize_b_ssse3):
push rbp
mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
GET_GOT rbx
+
+%if ABI_IS_32BIT
+ push rdi
push rsi
+%else
+ %ifidn __OUTPUT_FORMAT__,x64
push rdi
+ push rsi
+ %endif
+%endif
; end prolog
- mov rdx, arg(0) ;coeff_ptr
- mov rdi, arg(3) ;round_ptr
- mov rsi, arg(4) ;quant_ptr
-
- movdqa xmm0, [rdx]
- movdqa xmm4, [rdx + 16]
-
- movdqa xmm2, [rdi] ;round lo
- movdqa xmm3, [rdi + 16] ;round hi
+%if ABI_IS_32BIT
+ mov rdi, arg(0) ; BLOCK *b
+ mov rsi, arg(1) ; BLOCKD *d
+%else
+ %ifidn __OUTPUT_FORMAT__,x64
+ mov rdi, rcx ; BLOCK *b
+ mov rsi, rdx ; BLOCKD *d
+ %else
+ ;mov rdi, rdi ; BLOCK *b
+ ;mov rsi, rsi ; BLOCKD *d
+ %endif
+%endif
+
+ mov rax, [rdi + vp8_block_coeff]
+ mov rcx, [rdi + vp8_block_round]
+ mov rdx, [rdi + vp8_block_quant_fast]
+
+ ; coeff
+ movdqa xmm0, [rax]
+ movdqa xmm4, [rax + 16]
+
+ ; round
+ movdqa xmm2, [rcx]
+ movdqa xmm3, [rcx + 16]
movdqa xmm1, xmm0
movdqa xmm5, xmm4
- psraw xmm0, 15 ;sign of z (aka sz)
- psraw xmm4, 15 ;sign of z (aka sz)
+ ; sz = z >> 15
+ psraw xmm0, 15
+ psraw xmm4, 15
pabsw xmm1, xmm1
pabsw xmm5, xmm5
@@ -48,23 +72,24 @@ sym(vp8_fast_quantize_b_impl_ssse3):
paddw xmm1, xmm2
paddw xmm5, xmm3
- pmulhw xmm1, [rsi]
- pmulhw xmm5, [rsi + 16]
+ ; quant_fast
+ pmulhw xmm1, [rdx]
+ pmulhw xmm5, [rdx + 16]
- mov rdi, arg(1) ;qcoeff_ptr
- mov rcx, arg(2) ;dequant_ptr
- mov rsi, arg(5) ;dqcoeff_ptr
+ mov rax, [rsi + vp8_blockd_qcoeff]
+ mov rdi, [rsi + vp8_blockd_dequant]
+ mov rcx, [rsi + vp8_blockd_dqcoeff]
pxor xmm1, xmm0
pxor xmm5, xmm4
psubw xmm1, xmm0
psubw xmm5, xmm4
- movdqa [rdi], xmm1
- movdqa [rdi + 16], xmm5
+ movdqa [rax], xmm1
+ movdqa [rax + 16], xmm5
- movdqa xmm2, [rcx]
- movdqa xmm3, [rcx + 16]
+ movdqa xmm2, [rdi]
+ movdqa xmm3, [rdi + 16]
pxor xmm4, xmm4
pmullw xmm2, xmm1
@@ -73,38 +98,37 @@ sym(vp8_fast_quantize_b_impl_ssse3):
pcmpeqw xmm1, xmm4 ;non zero mask
pcmpeqw xmm5, xmm4 ;non zero mask
packsswb xmm1, xmm5
- pshufb xmm1, [ GLOBAL(zz_shuf)]
+ pshufb xmm1, [GLOBAL(zz_shuf)]
pmovmskb edx, xmm1
-; xor ecx, ecx
-; mov eax, -1
-;find_eob_loop:
-; shr edx, 1
-; jc fq_skip
-; mov eax, ecx
-;fq_skip:
-; inc ecx
-; cmp ecx, 16
-; jne find_eob_loop
xor rdi, rdi
mov eax, -1
xor dx, ax ;flip the bits for bsr
bsr eax, edx
- movdqa [rsi], xmm2 ;store dqcoeff
- movdqa [rsi + 16], xmm3 ;store dqcoeff
+ movdqa [rcx], xmm2 ;store dqcoeff
+ movdqa [rcx + 16], xmm3 ;store dqcoeff
sub edi, edx ;check for all zeros in bit mask
sar edi, 31 ;0 or -1
add eax, 1
and eax, edi ;if the bit mask was all zero,
;then eob = 0
+ mov [rsi + vp8_blockd_eob], eax
+
; begin epilog
+%if ABI_IS_32BIT
+ pop rsi
pop rdi
+%else
+ %ifidn __OUTPUT_FORMAT__,x64
pop rsi
+ pop rdi
+ %endif
+%endif
+
RESTORE_GOT
- UNSHADOW_ARGS
pop rbp
ret
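For reference, the arithmetic the SIMD above implements is sign-magnitude fast quantization: take |z|, add the per-coefficient rounding term, multiply by the Q16 quantizer (pmulhw keeps the high 16 bits of the product), restore the sign, dequantize, and record the last nonzero coefficient in zigzag scan order. The asm finds that position branch-free with the pcmpeqw/pshufb/pmovmskb mask plus bsr instead of a loop. A scalar sketch with an assumed raw-pointer signature and a caller-supplied zigzag table (the real routine reads these from BLOCK and BLOCKD):

int fast_quantize_b_scalar(const short *coeff, const short *round,
                           const short *quant, const short *dequant,
                           short *qcoeff, short *dqcoeff,
                           const int *zigzag)   /* 16-entry scan order */
{
    int i, eob = -1;

    for (i = 0; i < 16; i++)
    {
        int rc = zigzag[i];           /* visit coefficients in scan order */
        int z  = coeff[rc];
        int sz = z >> 31;             /* 0 or -1: sign of z */
        int x  = (z ^ sz) - sz;       /* |z|, same xor/sub trick as the asm */
        int y  = ((x + round[rc]) * quant[rc]) >> 16;  /* pmulhw equivalent */

        x = (y ^ sz) - sz;            /* reapply the sign */
        qcoeff[rc]  = (short)x;
        dqcoeff[rc] = (short)(x * dequant[rc]);
        if (y)
            eob = i;                  /* remember the last nonzero position */
    }
    return eob + 1;                   /* 0 when everything quantized to zero */
}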
diff --git a/vp8/encoder/x86/quantize_x86.h b/vp8/encoder/x86/quantize_x86.h
index df2e0bc39..f09358061 100644
--- a/vp8/encoder/x86/quantize_x86.h
+++ b/vp8/encoder/x86/quantize_x86.h
@@ -19,7 +19,7 @@
*/
#if HAVE_MMX
-#endif
+#endif /* HAVE_MMX */
#if HAVE_SSE2
@@ -34,9 +34,21 @@ extern prototype_quantize_block(vp8_fast_quantize_b_sse2);
#undef vp8_quantize_fastquantb
#define vp8_quantize_fastquantb vp8_fast_quantize_b_sse2
-#endif
+#endif /* !CONFIG_RUNTIME_CPU_DETECT */
-#endif
+#endif /* HAVE_SSE2 */
-#endif
+#if HAVE_SSSE3
+extern prototype_quantize_block(vp8_fast_quantize_b_ssse3);
+
+#if !CONFIG_RUNTIME_CPU_DETECT
+
+#undef vp8_quantize_fastquantb
+#define vp8_quantize_fastquantb vp8_fast_quantize_b_ssse3
+
+#endif /* !CONFIG_RUNTIME_CPU_DETECT */
+
+#endif /* HAVE_SSSE3 */
+
+#endif /* QUANTIZE_X86_H */
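The #undef/#define pairs above are libvpx's compile-time dispatch pattern: when runtime CPU detection is disabled, the best routine the build supports becomes the binding the encoder calls, with later (more capable) ISA blocks overriding earlier ones. A minimal sketch of the pattern; my_quantize and the quantize_* names are illustrative, not the real RTCD hooks:

void quantize_c(short *in, short *out);      /* generic fallback */
void quantize_sse2(short *in, short *out);
void quantize_ssse3(short *in, short *out);

#define my_quantize quantize_c               /* default binding */

#if HAVE_SSE2 && !CONFIG_RUNTIME_CPU_DETECT
#undef  my_quantize
#define my_quantize quantize_sse2            /* SSE2 build rebinds */
#endif

#if HAVE_SSSE3 && !CONFIG_RUNTIME_CPU_DETECT
#undef  my_quantize
#define my_quantize quantize_ssse3           /* more capable ISA wins */
#endif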
diff --git a/vp8/encoder/x86/x86_csystemdependent.c b/vp8/encoder/x86/x86_csystemdependent.c
index 8bceecec4..8f2774b7a 100644
--- a/vp8/encoder/x86/x86_csystemdependent.c
+++ b/vp8/encoder/x86/x86_csystemdependent.c
@@ -112,21 +112,6 @@ static void subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch)
#endif
#if HAVE_SSSE3
-int vp8_fast_quantize_b_impl_ssse3(short *coeff_ptr,
- short *qcoeff_ptr, short *dequant_ptr,
- short *round_ptr,
- short *quant_ptr, short *dqcoeff_ptr);
-static void fast_quantize_b_ssse3(BLOCK *b, BLOCKD *d)
-{
- d->eob = vp8_fast_quantize_b_impl_ssse3(
- b->coeff,
- d->qcoeff,
- d->dequant,
- b->round,
- b->quant_fast,
- d->dqcoeff
- );
-}
#if CONFIG_PSNR
#if ARCH_X86_64
typedef void ssimpf
@@ -307,7 +292,7 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_ssse3;
cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_ssse3;
- cpi->rtcd.quantize.fastquantb = fast_quantize_b_ssse3;
+ cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_ssse3;
#if CONFIG_PSNR
#if ARCH_X86_64
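When runtime CPU detection is enabled instead, vp8_arch_x86_encoder_init fills the RTCD table from the CPUID capability flags, and with the C trampoline deleted the asm symbol is stored directly. A sketch of that branch with trimmed stand-in types; the struct shape and the flag value are assumptions, with the real definitions in vpx_ports/x86.h and the encoder headers:

typedef struct BLOCK  BLOCK;    /* opaque stand-ins for the real types */
typedef struct BLOCKD BLOCKD;

void vp8_fast_quantize_b_ssse3(BLOCK *b, BLOCKD *d);  /* the asm entry point */
int  x86_simd_caps(void);                             /* CPUID query (vpx_ports) */

#define HAS_SSSE3 0x10   /* illustrative flag value */

struct quantize_rtcd
{
    void (*fastquantb)(BLOCK *b, BLOCKD *d);
};

static void init_fastquantb(struct quantize_rtcd *q)
{
    int flags = x86_simd_caps();

    if (flags & HAS_SSSE3)
        q->fastquantb = vp8_fast_quantize_b_ssse3;  /* no C wrapper is needed
                                                       now that the asm matches
                                                       the (BLOCK *, BLOCKD *)
                                                       signature */
}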