author     Jingning Han <jingning@google.com>                     2014-04-30 13:41:36 -0700
committer  Gerrit Code Review <gerrit@gerrit.golo.chromium.org>   2014-04-30 13:41:36 -0700
commit     39761eb5d62b6fb257c9000b717549812deaccd9 (patch)
tree       fad13fbe706b5ebf4e00c5fdd79bfcf6411107d8
parent     bc81107533ce6f76573f6782c22398ed2bc44355 (diff)
parent     1eaa3a76dc50fc8451cedbf4797d7e3b7a2d77d7 (diff)
Merge "Enable SSSE3 implementation of 8x8 forward 2D-DCT"
-rw-r--r--  vp9/common/vp9_rtcd_defs.pl        |   2
-rw-r--r--  vp9/encoder/x86/vp9_dct_ssse3.asm  | 174
-rw-r--r--  vp9/vp9cx.mk                       |   1
3 files changed, 176 insertions, 1 deletion
diff --git a/vp9/common/vp9_rtcd_defs.pl b/vp9/common/vp9_rtcd_defs.pl
index 99fd6ca10..d4c306507 100644
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -707,7 +707,7 @@ add_proto qw/void vp9_fdct4x4/, "const int16_t *input, int16_t *output, int stri
specialize qw/vp9_fdct4x4 sse2 avx2/;
add_proto qw/void vp9_fdct8x8/, "const int16_t *input, int16_t *output, int stride";
-specialize qw/vp9_fdct8x8 sse2 avx2/;
+specialize qw/vp9_fdct8x8 sse2 avx2/, "$ssse3_x86_64";
add_proto qw/void vp9_fdct16x16/, "const int16_t *input, int16_t *output, int stride";
specialize qw/vp9_fdct16x16 sse2 avx2/;
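
The added "$ssse3_x86_64" entry lets the generated run-time CPU detection (RTCD) code select the new routine, but only on 64-bit x86 targets. The sketch below is only an illustration of what the generated vp9_rtcd() hookup amounts to; the real code is emitted by rtcd.pl from this specialize list, and pick_fdct8x8() is a name made up for this note, not a libvpx function.

    #include <stdint.h>
    #include "vpx_ports/x86.h"  /* x86_simd_caps(), HAS_SSE2 / HAS_AVX2 / HAS_SSSE3 */

    /* Prototypes as declared by the add_proto line above; the _<isa> suffixes
     * follow the usual libvpx naming convention, and vp9_fdct8x8_ssse3 is the
     * entry point provided by the new assembly file below. */
    void vp9_fdct8x8_c(const int16_t *input, int16_t *output, int stride);
    void vp9_fdct8x8_sse2(const int16_t *input, int16_t *output, int stride);
    void vp9_fdct8x8_avx2(const int16_t *input, int16_t *output, int stride);
    void vp9_fdct8x8_ssse3(const int16_t *input, int16_t *output, int stride);

    void (*vp9_fdct8x8)(const int16_t *input, int16_t *output, int stride);

    static void pick_fdct8x8(void) {
      const int flags = x86_simd_caps();
      vp9_fdct8x8 = vp9_fdct8x8_c;
      if (flags & HAS_SSE2) vp9_fdct8x8 = vp9_fdct8x8_sse2;
      if (flags & HAS_AVX2) vp9_fdct8x8 = vp9_fdct8x8_avx2;
      /* "$ssse3_x86_64" keeps this assignment out of 32-bit builds entirely. */
      if (flags & HAS_SSSE3) vp9_fdct8x8 = vp9_fdct8x8_ssse3;
    }
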
diff --git a/vp9/encoder/x86/vp9_dct_ssse3.asm b/vp9/encoder/x86/vp9_dct_ssse3.asm
new file mode 100644
index 000000000..140007164
--- /dev/null
+++ b/vp9/encoder/x86/vp9_dct_ssse3.asm
@@ -0,0 +1,174 @@
+;
+; Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+%include "third_party/x86inc/x86inc.asm"
+
+; This file provides an SSSE3 version of the forward transform. Some of
+; the macro definitions are originally derived from the ffmpeg project.
+; The current version applies to x86 64-bit only.
+
+SECTION_RODATA
+
+pw_11585x2: times 8 dw 23170
+pd_8192: times 4 dd 8192
+
+%macro TRANSFORM_COEFFS 2
+pw_%1_%2: dw %1, %2, %1, %2, %1, %2, %1, %2
+pw_%2_m%1: dw %2, -%1, %2, -%1, %2, -%1, %2, -%1
+%endmacro
+
+TRANSFORM_COEFFS 15137, 6270
+TRANSFORM_COEFFS 16069, 3196
+TRANSFORM_COEFFS 9102, 13623
+
+SECTION .text
+
+%if ARCH_X86_64
+%macro SUM_SUB 3
+ psubw m%3, m%1, m%2
+ paddw m%1, m%2
+ SWAP %2, %3
+%endmacro
+
+; butterfly operation
+%macro MUL_ADD_2X 6 ; dst1, dst2, src, round, coefs1, coefs2
+ pmaddwd m%1, m%3, %5
+ pmaddwd m%2, m%3, %6
+ paddd m%1, %4
+ paddd m%2, %4
+ psrad m%1, 14
+ psrad m%2, 14
+%endmacro
+
+%macro BUTTERFLY_4X 7 ; dst1, dst2, coef1, coef2, round, tmp1, tmp2
+ punpckhwd m%6, m%2, m%1
+ MUL_ADD_2X %7, %6, %6, %5, [pw_%4_%3], [pw_%3_m%4]
+ punpcklwd m%2, m%1
+ MUL_ADD_2X %1, %2, %2, %5, [pw_%4_%3], [pw_%3_m%4]
+ packssdw m%1, m%7
+ packssdw m%2, m%6
+%endmacro
+
+; matrix transpose
+%macro INTERLEAVE_2X 4
+ punpckh%1 m%4, m%2, m%3
+ punpckl%1 m%2, m%3
+ SWAP %3, %4
+%endmacro
+
+%macro TRANSPOSE8X8 9
+ INTERLEAVE_2X wd, %1, %2, %9
+ INTERLEAVE_2X wd, %3, %4, %9
+ INTERLEAVE_2X wd, %5, %6, %9
+ INTERLEAVE_2X wd, %7, %8, %9
+
+ INTERLEAVE_2X dq, %1, %3, %9
+ INTERLEAVE_2X dq, %2, %4, %9
+ INTERLEAVE_2X dq, %5, %7, %9
+ INTERLEAVE_2X dq, %6, %8, %9
+
+ INTERLEAVE_2X qdq, %1, %5, %9
+ INTERLEAVE_2X qdq, %3, %7, %9
+ INTERLEAVE_2X qdq, %2, %6, %9
+ INTERLEAVE_2X qdq, %4, %8, %9
+
+ SWAP %2, %5
+ SWAP %4, %7
+%endmacro
+
+; 1D forward 8x8 DCT transform
+%macro FDCT8_1D 0
+ SUM_SUB 0, 7, 9
+ SUM_SUB 1, 6, 9
+ SUM_SUB 2, 5, 9
+ SUM_SUB 3, 4, 9
+
+ SUM_SUB 0, 3, 9
+ SUM_SUB 1, 2, 9
+ SUM_SUB 6, 5, 9
+ SUM_SUB 0, 1, 9
+
+ BUTTERFLY_4X 2, 3, 6270, 15137, m8, 9, 10
+
+ pmulhrsw m6, m12
+ pmulhrsw m5, m12
+ pmulhrsw m0, m12
+ pmulhrsw m1, m12
+
+ SUM_SUB 4, 5, 9
+ SUM_SUB 7, 6, 9
+ BUTTERFLY_4X 4, 7, 3196, 16069, m8, 9, 10
+ BUTTERFLY_4X 5, 6, 13623, 9102, m8, 9, 10
+ SWAP 1, 4
+ SWAP 3, 6
+%endmacro
+
+%macro DIVIDE_ROUND_2X 4 ; dst1, dst2, tmp1, tmp2
+ psraw m%3, m%1, 15
+ psraw m%4, m%2, 15
+ psubw m%1, m%3
+ psubw m%2, m%4
+ psraw m%1, 1
+ psraw m%2, 1
+%endmacro
+
+INIT_XMM ssse3
+cglobal fdct8x8, 3, 5, 13, input, output, stride
+
+ mova m8, [pd_8192]
+ mova m12, [pw_11585x2]
+ pxor m11, m11
+
+ lea r3, [2 * strideq]
+ lea r4, [4 * strideq]
+ mova m0, [inputq]
+ mova m1, [inputq + r3]
+ lea inputq, [inputq + r4]
+ mova m2, [inputq]
+ mova m3, [inputq + r3]
+ lea inputq, [inputq + r4]
+ mova m4, [inputq]
+ mova m5, [inputq + r3]
+ lea inputq, [inputq + r4]
+ mova m6, [inputq]
+ mova m7, [inputq + r3]
+
+ ; left shift by 2 to increase forward transformation precision
+ psllw m0, 2
+ psllw m1, 2
+ psllw m2, 2
+ psllw m3, 2
+ psllw m4, 2
+ psllw m5, 2
+ psllw m6, 2
+ psllw m7, 2
+
+ ; column transform
+ FDCT8_1D
+ TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9
+
+ FDCT8_1D
+ TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9
+
+ DIVIDE_ROUND_2X 0, 1, 9, 10
+ DIVIDE_ROUND_2X 2, 3, 9, 10
+ DIVIDE_ROUND_2X 4, 5, 9, 10
+ DIVIDE_ROUND_2X 6, 7, 9, 10
+
+ mova [outputq + 0], m0
+ mova [outputq + 16], m1
+ mova [outputq + 32], m2
+ mova [outputq + 48], m3
+ mova [outputq + 64], m4
+ mova [outputq + 80], m5
+ mova [outputq + 96], m6
+ mova [outputq + 112], m7
+
+ RET
+%endif
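
For readers comparing the assembly against the C reference (vp9_fdct8x8_c in vp9/encoder/vp9_dct.c), the three helper macros reduce to small scalar operations in vp9's 14-bit fixed-point convention (DCT_CONST_BITS == 14, rounding constant 8192). A minimal scalar sketch; the function names here are invented for illustration only:

    #include <stdint.h>

    /* MUL_ADD_2X / BUTTERFLY_4X: pmaddwd with interleaved coefficients, then
     * paddd pd_8192 and psrad 14, i.e. fdct_round_shift(x * c1 + y * c2). */
    static int16_t butterfly(int x, int y, int c1, int c2) {
      return (int16_t)((x * c1 + y * c2 + 8192) >> 14);
    }

    /* pmulhrsw against pw_11585x2 (23170): ((x * 23170) + 16384) >> 15, which
     * equals (x * 11585 + 8192) >> 14, i.e. fdct_round_shift(x * cospi_16_64);
     * doubling the coefficient cancels the extra bit of shift in pmulhrsw. */
    static int16_t mul_cospi_16(int x) {
      return (int16_t)((x * 23170 + 16384) >> 15);
    }

    /* DIVIDE_ROUND_2X: psraw 15 / psubw / psraw 1 computes (x + (x < 0)) >> 1,
     * a divide by two that truncates toward zero, matching the final
     * divide-by-two scaling of the C reference output. */
    static int16_t divide_round_2(int16_t x) {
      return (int16_t)((x + (x < 0)) >> 1);
    }
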
diff --git a/vp9/vp9cx.mk b/vp9/vp9cx.mk
index c444fe424..fab7f18ee 100644
--- a/vp9/vp9cx.mk
+++ b/vp9/vp9cx.mk
@@ -112,6 +112,7 @@ endif
ifeq ($(ARCH_X86_64),yes)
VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_quantize_ssse3.asm
+VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_dct_ssse3.asm
endif
VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_sad_ssse3.asm
VP9_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/vp9_sad_sse4.asm
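
On the build side, the new assembly file is only pulled in for 64-bit x86 targets with SSSE3 enabled, matching the %if ARCH_X86_64 guard inside the file itself. Below is a hypothetical call site; the buffer names and wrapper function are invented for this note. The routine loads and stores with mova, so the buffers are declared 16-byte aligned, and the stride argument is in int16_t elements.

    #include <stdint.h>
    #include "./vp9_rtcd.h"      /* generated dispatch declarations and vp9_rtcd() */
    #include "vpx_ports/mem.h"   /* DECLARE_ALIGNED */

    DECLARE_ALIGNED(16, int16_t, src[8 * 8]);    /* 8x8 residual block */
    DECLARE_ALIGNED(16, int16_t, coeff[8 * 8]);  /* forward transform output */

    void fdct8x8_example(void) {
      vp9_rtcd();                  /* fill in the dispatch table once at startup */
      vp9_fdct8x8(src, coeff, 8);  /* resolves to vp9_fdct8x8_ssse3 on SSSE3 x86-64 */
    }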