 vp9/common/vp9_rtcd_defs.pl       |   2 +-
 vp9/decoder/vp9_decodeframe.c     |   3 ++-
 vp9/decoder/vp9_decoder.c         |   4 ----
 vp9/encoder/x86/vp9_dct_ssse3.asm | 174 ++++++++++++++++++++++++++++++++++++++
 vp9/vp9_iface_common.h            |  13 ++++---
 vp9/vp9cx.mk                      |   1 +
 vpx/vpx_image.h                   |   7 ++--
 7 files changed, 191 insertions(+), 13 deletions(-)
diff --git a/vp9/common/vp9_rtcd_defs.pl b/vp9/common/vp9_rtcd_defs.pl
index 99fd6ca10..d4c306507 100644
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -707,7 +707,7 @@ add_proto qw/void vp9_fdct4x4/, "const int16_t *input, int16_t *output, int stri
 specialize qw/vp9_fdct4x4 sse2 avx2/;
 
 add_proto qw/void vp9_fdct8x8/, "const int16_t *input, int16_t *output, int stride";
-specialize qw/vp9_fdct8x8 sse2 avx2/;
+specialize qw/vp9_fdct8x8 sse2 avx2/, "$ssse3_x86_64";
 
 add_proto qw/void vp9_fdct16x16/, "const int16_t *input, int16_t *output, int stride";
 specialize qw/vp9_fdct16x16 sse2 avx2/;
diff --git a/vp9/decoder/vp9_decodeframe.c b/vp9/decoder/vp9_decodeframe.c
index 1cc7ab727..45ebb2fed 100644
--- a/vp9/decoder/vp9_decodeframe.c
+++ b/vp9/decoder/vp9_decodeframe.c
@@ -1370,7 +1370,8 @@ int vp9_decode_frame(VP9Decoder *pbi,
                        "A stream must start with a complete key frame");
   }
 
-  if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
+  if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode &&
+      !new_fb->corrupted) {
     vp9_adapt_coef_probs(cm);
 
     if (!frame_is_intra_only(cm)) {
diff --git a/vp9/decoder/vp9_decoder.c b/vp9/decoder/vp9_decoder.c
index 385b2ebe5..abcff9fb4 100644
--- a/vp9/decoder/vp9_decoder.c
+++ b/vp9/decoder/vp9_decoder.c
@@ -331,10 +331,6 @@ int vp9_get_raw_frame(VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd,
   ret = vp9_post_proc_frame(&pbi->common, sd, flags);
 #else
   *sd = *pbi->common.frame_to_show;
-  sd->y_width = pbi->common.width;
-  sd->y_height = pbi->common.height;
-  sd->uv_width = sd->y_width >> pbi->common.subsampling_x;
-  sd->uv_height = sd->y_height >> pbi->common.subsampling_y;
   ret = 0;
 #endif /*!CONFIG_POSTPROC*/
   vp9_clear_system_state();
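The decoder change above makes the backward probability update conditional on the new frame buffer being intact, so a corrupted frame no longer pollutes the adapted coefficient probabilities. For context, callers can already detect such frames through the existing VP8D_GET_FRAME_CORRUPTED control, which the VP9 decoder also services; a minimal sketch (the helper name is ours, error handling trimmed):

#include "vpx/vpx_decoder.h"
#include "vpx/vp8dx.h"

/* Query whether the most recently decoded frame was flagged corrupted. */
static int frame_corrupted(vpx_codec_ctx_t *decoder) {
  int corrupted = 0;
  /* Treat a failed control call as corrupted, conservatively. */
  if (vpx_codec_control(decoder, VP8D_GET_FRAME_CORRUPTED, &corrupted))
    return 1;
  return corrupted;
}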
diff --git a/vp9/encoder/x86/vp9_dct_ssse3.asm b/vp9/encoder/x86/vp9_dct_ssse3.asm
new file mode 100644
index 000000000..140007164
--- /dev/null
+++ b/vp9/encoder/x86/vp9_dct_ssse3.asm
@@ -0,0 +1,174 @@
+;
+;  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS. All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+%include "third_party/x86inc/x86inc.asm"
+
+; This file provides SSSE3 version of the forward transformation. Part
+; of the macro definitions are originally derived from ffmpeg project.
+; The current version applies to x86 64-bit only.
+
+SECTION_RODATA
+
+pw_11585x2: times 8 dw 23170
+pd_8192:    times 4 dd 8192
+
+%macro TRANSFORM_COEFFS 2
+pw_%1_%2:   dw  %1,  %2,  %1,  %2,  %1,  %2,  %1,  %2
+pw_%2_m%1:  dw  %2, -%1,  %2, -%1,  %2, -%1,  %2, -%1
+%endmacro
+
+TRANSFORM_COEFFS 15137,  6270
+TRANSFORM_COEFFS 16069,  3196
+TRANSFORM_COEFFS  9102, 13623
+
+SECTION .text
+
+%if ARCH_X86_64
+%macro SUM_SUB 3
+  psubw  m%3, m%1, m%2
+  paddw  m%1, m%2
+  SWAP    %2, %3
+%endmacro
+
+; butterfly operation
+%macro MUL_ADD_2X 6 ; dst1, dst2, src, round, coefs1, coefs2
+  pmaddwd            m%1, m%3, %5
+  pmaddwd            m%2, m%3, %6
+  paddd              m%1,  %4
+  paddd              m%2,  %4
+  psrad              m%1,  14
+  psrad              m%2,  14
+%endmacro
+
+%macro BUTTERFLY_4X 7 ; dst1, dst2, coef1, coef2, round, tmp1, tmp2
+  punpckhwd          m%6, m%2, m%1
+  MUL_ADD_2X         %7,  %6,  %6,  %5, [pw_%4_%3], [pw_%3_m%4]
+  punpcklwd          m%2, m%1
+  MUL_ADD_2X         %1,  %2,  %2,  %5, [pw_%4_%3], [pw_%3_m%4]
+  packssdw           m%1, m%7
+  packssdw           m%2, m%6
+%endmacro
+
+; matrix transpose
+%macro INTERLEAVE_2X 4
+  punpckh%1          m%4, m%2, m%3
+  punpckl%1          m%2, m%3
+  SWAP               %3,  %4
+%endmacro
+
+%macro TRANSPOSE8X8 9
+  INTERLEAVE_2X  wd, %1, %2, %9
+  INTERLEAVE_2X  wd, %3, %4, %9
+  INTERLEAVE_2X  wd, %5, %6, %9
+  INTERLEAVE_2X  wd, %7, %8, %9
+
+  INTERLEAVE_2X  dq, %1, %3, %9
+  INTERLEAVE_2X  dq, %2, %4, %9
+  INTERLEAVE_2X  dq, %5, %7, %9
+  INTERLEAVE_2X  dq, %6, %8, %9
+
+  INTERLEAVE_2X qdq, %1, %5, %9
+  INTERLEAVE_2X qdq, %3, %7, %9
+  INTERLEAVE_2X qdq, %2, %6, %9
+  INTERLEAVE_2X qdq, %4, %8, %9
+
+  SWAP %2, %5
+  SWAP %4, %7
+%endmacro
+
+; 1D forward 8x8 DCT transform
+%macro FDCT8_1D 0
+  SUM_SUB            0,  7,  9
+  SUM_SUB            1,  6,  9
+  SUM_SUB            2,  5,  9
+  SUM_SUB            3,  4,  9
+
+  SUM_SUB            0,  3,  9
+  SUM_SUB            1,  2,  9
+  SUM_SUB            6,  5,  9
+  SUM_SUB            0,  1,  9
+
+  BUTTERFLY_4X       2,  3,  6270, 15137,  m8,  9,  10
+
+  pmulhrsw           m6, m12
+  pmulhrsw           m5, m12
+  pmulhrsw           m0, m12
+  pmulhrsw           m1, m12
+
+  SUM_SUB            4,  5,  9
+  SUM_SUB            7,  6,  9
+  BUTTERFLY_4X       4,  7,  3196, 16069,  m8,  9,  10
+  BUTTERFLY_4X       5,  6, 13623,  9102,  m8,  9,  10
+  SWAP               1,  4
+  SWAP               3,  6
+%endmacro
+
+%macro DIVIDE_ROUND_2X 4 ; dst1, dst2, tmp1, tmp2
+  psraw              m%3, m%1, 15
+  psraw              m%4, m%2, 15
+  psubw              m%1, m%3
+  psubw              m%2, m%4
+  psraw              m%1, 1
+  psraw              m%2, 1
+%endmacro
+
+INIT_XMM ssse3
+cglobal fdct8x8, 3, 5, 13, input, output, stride
+
+  mova               m8, [pd_8192]
+  mova              m12, [pw_11585x2]
+  pxor              m11, m11
+
+  lea                r3, [2 * strideq]
+  lea                r4, [4 * strideq]
+  mova               m0, [inputq]
+  mova               m1, [inputq + r3]
+  lea            inputq, [inputq + r4]
+  mova               m2, [inputq]
+  mova               m3, [inputq + r3]
+  lea            inputq, [inputq + r4]
+  mova               m4, [inputq]
+  mova               m5, [inputq + r3]
+  lea            inputq, [inputq + r4]
+  mova               m6, [inputq]
+  mova               m7, [inputq + r3]
+
+  ; left shift by 2 to increase forward transformation precision
+  psllw              m0, 2
+  psllw              m1, 2
+  psllw              m2, 2
+  psllw              m3, 2
+  psllw              m4, 2
+  psllw              m5, 2
+  psllw              m6, 2
+  psllw              m7, 2
+
+  ; column transform
+  FDCT8_1D
+  TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9
+
+  FDCT8_1D
+  TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9
+
+  DIVIDE_ROUND_2X    0, 1, 9, 10
+  DIVIDE_ROUND_2X    2, 3, 9, 10
+  DIVIDE_ROUND_2X    4, 5, 9, 10
+  DIVIDE_ROUND_2X    6, 7, 9, 10
+
+  mova  [outputq +   0], m0
+  mova  [outputq +  16], m1
+  mova  [outputq +  32], m2
+  mova  [outputq +  48], m3
+  mova  [outputq +  64], m4
+  mova  [outputq +  80], m5
+  mova  [outputq +  96], m6
+  mova  [outputq + 112], m7
+
+  RET
+%endif
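Two fixed-point idioms in the new file deserve a gloss. pmulhrsw against pw_11585x2 (23170 = 2 * 11585) performs a rounded multiply by cospi_16_64 / 2^14, i.e. a scale by sqrt(1/2), and DIVIDE_ROUND_2X halves a value rounding toward zero. Scalar equivalents, as an illustrative sketch rather than code from this commit:

#include <stdint.h>

/* pmulhrsw(x, 23170): round(x * 23170 / 2^15) == round(x * 11585 / 2^14). */
static int16_t mul_cospi_16(int16_t x) {
  return (int16_t)(((int32_t)x * 23170 + (1 << 14)) >> 15);
}

/* DIVIDE_ROUND_2X: (x >> 15) is -1 for negative x and 0 otherwise, so this
 * computes (x + (x < 0)) >> 1, a divide-by-two that rounds toward zero. */
static int16_t div2_round_to_zero(int16_t x) {
  return (int16_t)((x - (x >> 15)) >> 1);
}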
diff --git a/vp9/vp9_iface_common.h b/vp9/vp9_iface_common.h
index 58256b22b..d60883cc2 100644
--- a/vp9/vp9_iface_common.h
+++ b/vp9/vp9_iface_common.h
@@ -16,9 +16,11 @@ static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
    * the Y, U, and V planes, nor other alignment adjustments that
    * might be representable by a YV12_BUFFER_CONFIG, so we just
    * initialize all the fields.*/
-  int bps = 12;
-  if (yv12->uv_height == yv12->y_height) {
-    if (yv12->uv_width == yv12->y_width) {
+  const int ss_x = yv12->uv_crop_width < yv12->y_crop_width;
+  const int ss_y = yv12->uv_crop_height < yv12->y_crop_height;
+  int bps;
+  if (!ss_y) {
+    if (!ss_x) {
       img->fmt = VPX_IMG_FMT_I444;
       bps = 24;
     } else {
@@ -27,13 +29,14 @@ static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
     }
   } else {
     img->fmt = VPX_IMG_FMT_I420;
+    bps = 12;
   }
   img->w = yv12->y_stride;
   img->h = ALIGN_POWER_OF_TWO(yv12->y_height + 2 * VP9_ENC_BORDER_IN_PIXELS, 3);
   img->d_w = yv12->y_crop_width;
   img->d_h = yv12->y_crop_height;
-  img->x_chroma_shift = yv12->uv_width < yv12->y_width;
-  img->y_chroma_shift = yv12->uv_height < yv12->y_height;
+  img->x_chroma_shift = ss_x;
+  img->y_chroma_shift = ss_y;
   img->planes[VPX_PLANE_Y] = yv12->y_buffer;
   img->planes[VPX_PLANE_U] = yv12->u_buffer;
   img->planes[VPX_PLANE_V] = yv12->v_buffer;
diff --git a/vp9/vp9cx.mk b/vp9/vp9cx.mk
index c444fe424..fab7f18ee 100644
--- a/vp9/vp9cx.mk
+++ b/vp9/vp9cx.mk
@@ -112,6 +112,7 @@ endif
 
 ifeq ($(ARCH_X86_64),yes)
 VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_quantize_ssse3.asm
+VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_dct_ssse3.asm
 endif
 VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_sad_ssse3.asm
 VP9_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/vp9_sad_sse4.asm
diff --git a/vpx/vpx_image.h b/vpx/vpx_image.h
index 8d0f4ec8c..d45b003c6 100644
--- a/vpx/vpx_image.h
+++ b/vpx/vpx_image.h
@@ -34,7 +34,7 @@ extern "C" {
 #define VPX_IMG_FMT_PLANAR     0x100  /**< Image is a planar format */
 #define VPX_IMG_FMT_UV_FLIP    0x200  /**< V plane precedes U plane in memory */
 #define VPX_IMG_FMT_HAS_ALPHA  0x400  /**< Image has an alpha channel component */
-
+#define VPX_IMG_FMT_HIGH       0x800  /**< Image uses 16bit framebuffer */
 
 /*!\brief List of supported image formats */
   typedef enum vpx_img_fmt {
@@ -58,7 +58,10 @@ extern "C" {
     VPX_IMG_FMT_VPXI420 = VPX_IMG_FMT_PLANAR | 4,
     VPX_IMG_FMT_I422    = VPX_IMG_FMT_PLANAR | 5,
     VPX_IMG_FMT_I444    = VPX_IMG_FMT_PLANAR | 6,
-    VPX_IMG_FMT_444A    = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_HAS_ALPHA | 7
+    VPX_IMG_FMT_444A    = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_HAS_ALPHA | 7,
+    VPX_IMG_FMT_I42016  = VPX_IMG_FMT_I420 | VPX_IMG_FMT_HIGH,
+    VPX_IMG_FMT_I42216  = VPX_IMG_FMT_I422 | VPX_IMG_FMT_HIGH,
+    VPX_IMG_FMT_I44416  = VPX_IMG_FMT_I444 | VPX_IMG_FMT_HIGH
   } vpx_img_fmt_t; /**< alias for enum vpx_img_fmt */
 
 #if !defined(VPX_CODEC_DISABLE_COMPAT) || !VPX_CODEC_DISABLE_COMPAT
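The new VPX_IMG_FMT_HIGH bit composes with the existing planar formats rather than defining new layouts: VPX_IMG_FMT_I42016 is simply VPX_IMG_FMT_I420 with the high-bit-depth flag set, so callers can branch on a mask test. A minimal sketch (the helper names are ours, not part of the library):

#include "vpx/vpx_image.h"

/* Nonzero if the image stores 16-bit samples. */
static int img_is_16bit(const vpx_image_t *img) {
  return (img->fmt & VPX_IMG_FMT_HIGH) != 0;
}

/* Bytes per sample follows directly from the flag. */
static int img_bytes_per_sample(const vpx_image_t *img) {
  return img_is_16bit(img) ? 2 : 1;
}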