Diffstat (limited to 'vp9')
-rw-r--r--  vp9/common/arm/neon/vp9_save_reg_neon.asm  36
-rw-r--r--  vp9/common/mips/dspr2/vp9_common_dspr2.c  32
-rw-r--r--  vp9/common/mips/dspr2/vp9_common_dspr2.h  62
-rw-r--r--  vp9/common/mips/dspr2/vp9_itrans16_dspr2.c  1211
-rw-r--r--  vp9/common/mips/dspr2/vp9_itrans32_cols_dspr2.c  1074
-rw-r--r--  vp9/common/mips/dspr2/vp9_itrans32_dspr2.c  1076
-rw-r--r--  vp9/common/mips/dspr2/vp9_itrans4_dspr2.c  353
-rw-r--r--  vp9/common/mips/dspr2/vp9_itrans8_dspr2.c  655
-rw-r--r--  vp9/common/mips/msa/vp9_idct16x16_msa.c  16
-rw-r--r--  vp9/common/vp9_alloccommon.c  1
-rw-r--r--  vp9/common/vp9_common.h  2
-rw-r--r--  vp9/common/vp9_idct.c  77
-rw-r--r--  vp9/common/vp9_idct.h  4
-rw-r--r--  vp9/common/vp9_mfqe.c  4
-rw-r--r--  vp9/common/vp9_postproc.c  12
-rw-r--r--  vp9/common/vp9_reconinter.c  2
-rw-r--r--  vp9/common/vp9_reconinter.h  5
-rw-r--r--  vp9/common/vp9_rtcd_defs.pl  21
-rw-r--r--  vp9/common/vp9_systemdependent.h  84
-rw-r--r--  vp9/decoder/vp9_decodeframe.c  1
-rw-r--r--  vp9/decoder/vp9_decodemv.c  2
-rw-r--r--  vp9/decoder/vp9_decoder.c  8
-rw-r--r--  vp9/encoder/arm/neon/vp9_avg_neon.c  44
-rw-r--r--  vp9/encoder/vp9_aq_complexity.c  5
-rw-r--r--  vp9/encoder/vp9_aq_cyclicrefresh.c  9
-rw-r--r--  vp9/encoder/vp9_aq_variance.c  8
-rw-r--r--  vp9/encoder/vp9_bitstream.c  4
-rw-r--r--  vp9/encoder/vp9_blockiness.c  3
-rw-r--r--  vp9/encoder/vp9_dct.c  1
-rw-r--r--  vp9/encoder/vp9_denoiser.c  4
-rw-r--r--  vp9/encoder/vp9_denoiser.h  7
-rw-r--r--  vp9/encoder/vp9_encodeframe.c  10
-rw-r--r--  vp9/encoder/vp9_encodemb.c  51
-rw-r--r--  vp9/encoder/vp9_encodemv.c  1
-rw-r--r--  vp9/encoder/vp9_encoder.c  59
-rw-r--r--  vp9/encoder/vp9_encoder.h  8
-rw-r--r--  vp9/encoder/vp9_fastssim.c  465
-rw-r--r--  vp9/encoder/vp9_firstpass.c  19
-rw-r--r--  vp9/encoder/vp9_mbgraph.c  4
-rw-r--r--  vp9/encoder/vp9_mcomp.c  36
-rw-r--r--  vp9/encoder/vp9_pickmode.c  39
-rw-r--r--  vp9/encoder/vp9_psnrhvs.c  224
-rw-r--r--  vp9/encoder/vp9_quantize.c  4
-rw-r--r--  vp9/encoder/vp9_ratectrl.c  42
-rw-r--r--  vp9/encoder/vp9_rd.c  5
-rw-r--r--  vp9/encoder/vp9_rdopt.c  8
-rw-r--r--  vp9/encoder/vp9_ssim.c  500
-rw-r--r--  vp9/encoder/vp9_ssim.h  96
-rw-r--r--  vp9/encoder/vp9_temporal_filter.c  1
-rw-r--r--  vp9/encoder/x86/vp9_dct_mmx.asm  3
-rw-r--r--  vp9/encoder/x86/vp9_dct_ssse3_x86_64.asm  3
-rw-r--r--  vp9/encoder/x86/vp9_error_sse2.asm  2
-rw-r--r--  vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm  2
-rw-r--r--  vp9/encoder/x86/vp9_ssim_opt_x86_64.asm  216
-rw-r--r--  vp9/vp9_common.mk  9
-rw-r--r--  vp9/vp9cx.mk  5
56 files changed, 325 insertions, 6310 deletions
diff --git a/vp9/common/arm/neon/vp9_save_reg_neon.asm b/vp9/common/arm/neon/vp9_save_reg_neon.asm
deleted file mode 100644
index 71c3e7077..000000000
--- a/vp9/common/arm/neon/vp9_save_reg_neon.asm
+++ /dev/null
@@ -1,36 +0,0 @@
-;
-; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-; Use of this source code is governed by a BSD-style license
-; that can be found in the LICENSE file in the root of the source
-; tree. An additional intellectual property rights grant can be found
-; in the file PATENTS. All contributing project authors may
-; be found in the AUTHORS file in the root of the source tree.
-;
-
-
- EXPORT |vp9_push_neon|
- EXPORT |vp9_pop_neon|
-
- ARM
- REQUIRE8
- PRESERVE8
-
- AREA ||.text||, CODE, READONLY, ALIGN=2
-
-|vp9_push_neon| PROC
- vst1.i64 {d8, d9, d10, d11}, [r0]!
- vst1.i64 {d12, d13, d14, d15}, [r0]!
- bx lr
-
- ENDP
-
-|vp9_pop_neon| PROC
- vld1.i64 {d8, d9, d10, d11}, [r0]!
- vld1.i64 {d12, d13, d14, d15}, [r0]!
- bx lr
-
- ENDP
-
- END
-
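The two routines deleted above spill and restore d8-d15, the NEON registers that are callee-saved under the ARM AAPCS (eight 64-bit registers, 64 bytes total). A hypothetical usage sketch in C — the prototypes and the caller shown here are illustrative, not taken from this diff:

    #include <stdint.h>

    extern void vp9_push_neon(int64_t *store);  /* vst1 d8..d15, 64 bytes */
    extern void vp9_pop_neon(int64_t *store);   /* vld1 d8..d15 */

    static void neon_heavy_work(void) {
      int64_t store_reg[8];        /* one slot per 64-bit d register */
      vp9_push_neon(store_reg);
      /* ... assembly that is free to clobber d8..d15 ... */
      vp9_pop_neon(store_reg);
    }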
diff --git a/vp9/common/mips/dspr2/vp9_common_dspr2.c b/vp9/common/mips/dspr2/vp9_common_dspr2.c
deleted file mode 100644
index 6498a7e9e..000000000
--- a/vp9/common/mips/dspr2/vp9_common_dspr2.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "vp9/common/mips/dspr2/vp9_common_dspr2.h"
-#include "vpx_dsp/vpx_dsp_common.h"
-#include "vpx_ports/mem.h"
-
-#if HAVE_DSPR2
-uint8_t vp9_ff_cropTbl_a[256 + 2 * CROP_WIDTH];
-uint8_t *vp9_ff_cropTbl;
-
-void vp9_dsputil_static_init(void) {
- int i;
-
- for (i = 0; i < 256; i++) vp9_ff_cropTbl_a[i + CROP_WIDTH] = i;
-
- for (i = 0; i < CROP_WIDTH; i++) {
- vp9_ff_cropTbl_a[i] = 0;
- vp9_ff_cropTbl_a[i + CROP_WIDTH + 256] = 255;
- }
-
- vp9_ff_cropTbl = &vp9_ff_cropTbl_a[CROP_WIDTH];
-}
-
-#endif
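The deleted initializer above builds a clamping lookup table: after vp9_dsputil_static_init() runs, vp9_ff_cropTbl[x] equals clamp(x, 0, 255) for any x in [-CROP_WIDTH, 255 + CROP_WIDTH), which lets reconstruction code clip a pixel with a single lbux lookup instead of two compares. A minimal plain-C sketch of the clamp the table encodes (illustrative only):

    /* Equivalent of a crop-table lookup, assuming the index stays within
     * the CROP_WIDTH slack on either side of [0, 255]. */
    static uint8_t clamp_u8(int x) {
      return (uint8_t)(x < 0 ? 0 : (x > 255 ? 255 : x));
    }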
diff --git a/vp9/common/mips/dspr2/vp9_common_dspr2.h b/vp9/common/mips/dspr2/vp9_common_dspr2.h
deleted file mode 100644
index 4e6b3552e..000000000
--- a/vp9/common/mips/dspr2/vp9_common_dspr2.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VP9_COMMON_MIPS_DSPR2_VP9_COMMON_DSPR2_H_
-#define VP9_COMMON_MIPS_DSPR2_VP9_COMMON_DSPR2_H_
-
-#include <assert.h>
-
-#include "./vpx_config.h"
-#include "vpx/vpx_integer.h"
-#include "vpx_dsp/mips/common_dspr2.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if HAVE_DSPR2
-
-extern uint8_t *vpx_ff_cropTbl;
-
-#define DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input) ({ \
- \
- int32_t tmp, out; \
- int dct_cost_rounding = DCT_CONST_ROUNDING; \
- int in = input; \
- \
- __asm__ __volatile__ ( \
- /* out = dct_const_round_shift(input_dc * cospi_16_64); */ \
- "mtlo %[dct_cost_rounding], $ac1 \n\t"\
- "mthi $zero, $ac1 \n\t"\
- "madd $ac1, %[in], %[cospi_16_64] \n\t"\
- "extp %[tmp], $ac1, 31 \n\t"\
- \
- /* out = dct_const_round_shift(out * cospi_16_64); */ \
- "mtlo %[dct_cost_rounding], $ac2 \n\t"\
- "mthi $zero, $ac2 \n\t"\
- "madd $ac2, %[tmp], %[cospi_16_64] \n\t"\
- "extp %[out], $ac2, 31 \n\t"\
- \
- : [tmp] "=&r" (tmp), [out] "=r" (out) \
- : [in] "r" (in), \
- [dct_cost_rounding] "r" (dct_cost_rounding), \
- [cospi_16_64] "r" (cospi_16_64) \
- ); \
- out; })
-
-void vp9_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
- int dest_stride);
-
-#endif // #if HAVE_DSPR2
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif // VP9_COMMON_MIPS_DSPR2_VP9_COMMON_DSPR2_H_
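The macro in the deleted header computes dct_const_round_shift(x * cospi_16_64) twice on the DSP accumulators: mtlo seeds the rounding constant (8192 == 1 << 13), madd accumulates the product, and extp pulls the result out shifted down by DCT_CONST_BITS. A plain-C restatement of the arithmetic, not the DSPr2 code path, assuming DCT_CONST_BITS == 14 and the cospi_16_64 constant from vpx_dsp/txfm_common.h in scope:

    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))  /* 8192 */

    /* out = dct_const_round_shift(in * cospi_16_64), applied twice,
     * as used by the DC-only inverse transforms. */
    static int32_t round_shift_twice_cospi_16_64(int32_t in) {
      int64_t t =
          ((int64_t)in * cospi_16_64 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
      t = (t * cospi_16_64 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
      return (int32_t)t;
    }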
diff --git a/vp9/common/mips/dspr2/vp9_itrans16_dspr2.c b/vp9/common/mips/dspr2/vp9_itrans16_dspr2.c
index aca6550c8..6ca83a00c 100644
--- a/vp9/common/mips/dspr2/vp9_itrans16_dspr2.c
+++ b/vp9/common/mips/dspr2/vp9_itrans16_dspr2.c
@@ -16,1074 +16,11 @@
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_idct.h"
-#include "vp9/common/mips/dspr2/vp9_common_dspr2.h"
+#include "vpx_dsp/mips/inv_txfm_dspr2.h"
#include "vpx_dsp/txfm_common.h"
#include "vpx_ports/mem.h"
#if HAVE_DSPR2
-static void idct16_rows_dspr2(const int16_t *input, int16_t *output,
- uint32_t no_rows) {
- int i;
- int step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6, step1_7;
- int step1_10, step1_11, step1_12, step1_13;
- int step2_0, step2_1, step2_2, step2_3;
- int step2_8, step2_9, step2_10, step2_11;
- int step2_12, step2_13, step2_14, step2_15;
- int load1, load2, load3, load4, load5, load6, load7, load8;
- int result1, result2, result3, result4;
- const int const_2_power_13 = 8192;
-
- for (i = no_rows; i--; ) {
- /* prefetch row */
- prefetch_load((const uint8_t *)(input + 16));
-
- __asm__ __volatile__ (
- "lh %[load1], 0(%[input]) \n\t"
- "lh %[load2], 16(%[input]) \n\t"
- "lh %[load3], 8(%[input]) \n\t"
- "lh %[load4], 24(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
- "add %[result1], %[load1], %[load2] \n\t"
- "sub %[result2], %[load1], %[load2] \n\t"
- "madd $ac1, %[result1], %[cospi_16_64] \n\t"
- "madd $ac2, %[result2], %[cospi_16_64] \n\t"
- "extp %[step2_0], $ac1, 31 \n\t"
- "extp %[step2_1], $ac2, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
- "madd $ac3, %[load3], %[cospi_24_64] \n\t"
- "msub $ac3, %[load4], %[cospi_8_64] \n\t"
- "extp %[step2_2], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "madd $ac1, %[load3], %[cospi_8_64] \n\t"
- "madd $ac1, %[load4], %[cospi_24_64] \n\t"
- "extp %[step2_3], $ac1, 31 \n\t"
-
- "add %[step1_0], %[step2_0], %[step2_3] \n\t"
- "add %[step1_1], %[step2_1], %[step2_2] \n\t"
- "sub %[step1_2], %[step2_1], %[step2_2] \n\t"
- "sub %[step1_3], %[step2_0], %[step2_3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2),
- [load3] "=&r" (load3), [load4] "=&r" (load4),
- [result1] "=&r" (result1), [result2] "=&r" (result2),
- [step2_0] "=&r" (step2_0), [step2_1] "=&r" (step2_1),
- [step2_2] "=&r" (step2_2), [step2_3] "=&r" (step2_3),
- [step1_0] "=r" (step1_0), [step1_1] "=r" (step1_1),
- [step1_2] "=r" (step1_2), [step1_3] "=r" (step1_3)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64),
- [cospi_16_64] "r" (cospi_16_64)
- );
-
- __asm__ __volatile__ (
- "lh %[load5], 2(%[input]) \n\t"
- "lh %[load6], 30(%[input]) \n\t"
- "lh %[load7], 18(%[input]) \n\t"
- "lh %[load8], 14(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load5], %[cospi_30_64] \n\t"
- "msub $ac1, %[load6], %[cospi_2_64] \n\t"
- "extp %[result1], $ac1, 31 \n\t"
-
- "madd $ac3, %[load7], %[cospi_14_64] \n\t"
- "msub $ac3, %[load8], %[cospi_18_64] \n\t"
- "extp %[result2], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac1, %[load7], %[cospi_18_64] \n\t"
- "madd $ac1, %[load8], %[cospi_14_64] \n\t"
- "extp %[result3], $ac1, 31 \n\t"
-
- "madd $ac2, %[load5], %[cospi_2_64] \n\t"
- "madd $ac2, %[load6], %[cospi_30_64] \n\t"
- "extp %[result4], $ac2, 31 \n\t"
-
- "sub %[load5], %[result1], %[result2] \n\t"
- "sub %[load6], %[result4], %[result3] \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load6], %[cospi_24_64] \n\t"
- "msub $ac1, %[load5], %[cospi_8_64] \n\t"
- "madd $ac3, %[load5], %[cospi_24_64] \n\t"
- "madd $ac3, %[load6], %[cospi_8_64] \n\t"
-
- "extp %[step2_9], $ac1, 31 \n\t"
- "extp %[step2_14], $ac3, 31 \n\t"
- "add %[step2_8], %[result1], %[result2] \n\t"
- "add %[step2_15], %[result4], %[result3] \n\t"
-
- : [load5] "=&r" (load5), [load6] "=&r" (load6),
- [load7] "=&r" (load7), [load8] "=&r" (load8),
- [result1] "=&r" (result1), [result2] "=&r" (result2),
- [result3] "=&r" (result3), [result4] "=&r" (result4),
- [step2_8] "=r" (step2_8), [step2_15] "=r" (step2_15),
- [step2_9] "=r" (step2_9), [step2_14] "=r" (step2_14)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_30_64] "r" (cospi_30_64), [cospi_2_64] "r" (cospi_2_64),
- [cospi_14_64] "r" (cospi_14_64), [cospi_18_64] "r" (cospi_18_64),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
- );
-
- __asm__ __volatile__ (
- "lh %[load1], 10(%[input]) \n\t"
- "lh %[load2], 22(%[input]) \n\t"
- "lh %[load3], 26(%[input]) \n\t"
- "lh %[load4], 6(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_22_64] \n\t"
- "msub $ac1, %[load2], %[cospi_10_64] \n\t"
- "extp %[result1], $ac1, 31 \n\t"
-
- "madd $ac3, %[load3], %[cospi_6_64] \n\t"
- "msub $ac3, %[load4], %[cospi_26_64] \n\t"
- "extp %[result2], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac1, %[load1], %[cospi_10_64] \n\t"
- "madd $ac1, %[load2], %[cospi_22_64] \n\t"
- "extp %[result3], $ac1, 31 \n\t"
-
- "madd $ac2, %[load3], %[cospi_26_64] \n\t"
- "madd $ac2, %[load4], %[cospi_6_64] \n\t"
- "extp %[result4], $ac2, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[result2], %[result1] \n\t"
- "sub %[load2], %[result4], %[result3] \n\t"
-
- "msub $ac1, %[load1], %[cospi_24_64] \n\t"
- "msub $ac1, %[load2], %[cospi_8_64] \n\t"
- "madd $ac3, %[load2], %[cospi_24_64] \n\t"
- "msub $ac3, %[load1], %[cospi_8_64] \n\t"
-
- "extp %[step2_10], $ac1, 31 \n\t"
- "extp %[step2_13], $ac3, 31 \n\t"
- "add %[step2_11], %[result1], %[result2] \n\t"
- "add %[step2_12], %[result4], %[result3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2),
- [load3] "=&r" (load3), [load4] "=&r" (load4),
- [result1] "=&r" (result1), [result2] "=&r" (result2),
- [result3] "=&r" (result3), [result4] "=&r" (result4),
- [step2_10] "=r" (step2_10), [step2_11] "=r" (step2_11),
- [step2_12] "=r" (step2_12), [step2_13] "=r" (step2_13)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_22_64] "r" (cospi_22_64), [cospi_10_64] "r" (cospi_10_64),
- [cospi_6_64] "r" (cospi_6_64), [cospi_26_64] "r" (cospi_26_64),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
- );
-
- __asm__ __volatile__ (
- "lh %[load5], 4(%[input]) \n\t"
- "lh %[load6], 28(%[input]) \n\t"
- "lh %[load7], 20(%[input]) \n\t"
- "lh %[load8], 12(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load5], %[cospi_28_64] \n\t"
- "msub $ac1, %[load6], %[cospi_4_64] \n\t"
- "extp %[result1], $ac1, 31 \n\t"
-
- "madd $ac3, %[load7], %[cospi_12_64] \n\t"
- "msub $ac3, %[load8], %[cospi_20_64] \n\t"
- "extp %[result2], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac1, %[load7], %[cospi_20_64] \n\t"
- "madd $ac1, %[load8], %[cospi_12_64] \n\t"
- "extp %[result3], $ac1, 31 \n\t"
-
- "madd $ac2, %[load5], %[cospi_4_64] \n\t"
- "madd $ac2, %[load6], %[cospi_28_64] \n\t"
- "extp %[result4], $ac2, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load5], %[result4], %[result3] \n\t"
- "sub %[load5], %[load5], %[result1] \n\t"
- "add %[load5], %[load5], %[result2] \n\t"
-
- "sub %[load6], %[result1], %[result2] \n\t"
- "sub %[load6], %[load6], %[result3] \n\t"
- "add %[load6], %[load6], %[result4] \n\t"
-
- "madd $ac1, %[load5], %[cospi_16_64] \n\t"
- "madd $ac3, %[load6], %[cospi_16_64] \n\t"
-
- "extp %[step1_5], $ac1, 31 \n\t"
- "extp %[step1_6], $ac3, 31 \n\t"
- "add %[step1_4], %[result1], %[result2] \n\t"
- "add %[step1_7], %[result4], %[result3] \n\t"
-
- : [load5] "=&r" (load5), [load6] "=&r" (load6),
- [load7] "=&r" (load7), [load8] "=&r" (load8),
- [result1] "=&r" (result1), [result2] "=&r" (result2),
- [result3] "=&r" (result3), [result4] "=&r" (result4),
- [step1_4] "=r" (step1_4), [step1_5] "=r" (step1_5),
- [step1_6] "=r" (step1_6), [step1_7] "=r" (step1_7)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64),
- [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64),
- [cospi_16_64] "r" (cospi_16_64)
- );
-
- __asm__ __volatile__ (
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
-
- "sub %[load5], %[step2_14], %[step2_13] \n\t"
- "sub %[load5], %[load5], %[step2_9] \n\t"
- "add %[load5], %[load5], %[step2_10] \n\t"
-
- "madd $ac0, %[load5], %[cospi_16_64] \n\t"
-
- "sub %[load6], %[step2_14], %[step2_13] \n\t"
- "sub %[load6], %[load6], %[step2_10] \n\t"
- "add %[load6], %[load6], %[step2_9] \n\t"
-
- "madd $ac1, %[load6], %[cospi_16_64] \n\t"
-
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load5], %[step2_15], %[step2_12] \n\t"
- "sub %[load5], %[load5], %[step2_8] \n\t"
- "add %[load5], %[load5], %[step2_11] \n\t"
-
- "madd $ac2, %[load5], %[cospi_16_64] \n\t"
-
- "sub %[load6], %[step2_15], %[step2_12] \n\t"
- "sub %[load6], %[load6], %[step2_11] \n\t"
- "add %[load6], %[load6], %[step2_8] \n\t"
-
- "madd $ac3, %[load6], %[cospi_16_64] \n\t"
-
- "extp %[step1_10], $ac0, 31 \n\t"
- "extp %[step1_13], $ac1, 31 \n\t"
- "extp %[step1_11], $ac2, 31 \n\t"
- "extp %[step1_12], $ac3, 31 \n\t"
-
- : [load5] "=&r" (load5), [load6] "=&r" (load6),
- [step1_10] "=r" (step1_10), [step1_11] "=r" (step1_11),
- [step1_12] "=r" (step1_12), [step1_13] "=r" (step1_13)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_14] "r" (step2_14), [step2_13] "r" (step2_13),
- [step2_9] "r" (step2_9), [step2_10] "r" (step2_10),
- [step2_15] "r" (step2_15), [step2_12] "r" (step2_12),
- [step2_8] "r" (step2_8), [step2_11] "r" (step2_11),
- [cospi_16_64] "r" (cospi_16_64)
- );
-
- __asm__ __volatile__ (
- "add %[load5], %[step1_0], %[step1_7] \n\t"
- "add %[load5], %[load5], %[step2_12] \n\t"
- "add %[load5], %[load5], %[step2_15] \n\t"
- "add %[load6], %[step1_1], %[step1_6] \n\t"
- "add %[load6], %[load6], %[step2_13] \n\t"
- "add %[load6], %[load6], %[step2_14] \n\t"
- "sh %[load5], 0(%[output]) \n\t"
- "sh %[load6], 32(%[output]) \n\t"
- "sub %[load5], %[step1_1], %[step1_6] \n\t"
- "add %[load5], %[load5], %[step2_9] \n\t"
- "add %[load5], %[load5], %[step2_10] \n\t"
- "sub %[load6], %[step1_0], %[step1_7] \n\t"
- "add %[load6], %[load6], %[step2_8] \n\t"
- "add %[load6], %[load6], %[step2_11] \n\t"
- "sh %[load5], 192(%[output]) \n\t"
- "sh %[load6], 224(%[output]) \n\t"
- "sub %[load5], %[step1_0], %[step1_7] \n\t"
- "sub %[load5], %[load5], %[step2_8] \n\t"
- "sub %[load5], %[load5], %[step2_11] \n\t"
- "sub %[load6], %[step1_1], %[step1_6] \n\t"
- "sub %[load6], %[load6], %[step2_9] \n\t"
- "sub %[load6], %[load6], %[step2_10] \n\t"
- "sh %[load5], 256(%[output]) \n\t"
- "sh %[load6], 288(%[output]) \n\t"
- "add %[load5], %[step1_1], %[step1_6] \n\t"
- "sub %[load5], %[load5], %[step2_13] \n\t"
- "sub %[load5], %[load5], %[step2_14] \n\t"
- "add %[load6], %[step1_0], %[step1_7] \n\t"
- "sub %[load6], %[load6], %[step2_12] \n\t"
- "sub %[load6], %[load6], %[step2_15] \n\t"
- "sh %[load5], 448(%[output]) \n\t"
- "sh %[load6], 480(%[output]) \n\t"
-
- : [load5] "=&r" (load5), [load6] "=&r" (load6)
- : [output] "r" (output),
- [step1_0] "r" (step1_0), [step1_1] "r" (step1_1),
- [step1_6] "r" (step1_6), [step1_7] "r" (step1_7),
- [step2_8] "r" (step2_8), [step2_9] "r" (step2_9),
- [step2_10] "r" (step2_10), [step2_11] "r" (step2_11),
- [step2_12] "r" (step2_12), [step2_13] "r" (step2_13),
- [step2_14] "r" (step2_14), [step2_15] "r" (step2_15)
- );
-
- __asm__ __volatile__ (
- "add %[load5], %[step1_2], %[step1_5] \n\t"
- "add %[load5], %[load5], %[step1_13] \n\t"
- "add %[load6], %[step1_3], %[step1_4] \n\t"
- "add %[load6], %[load6], %[step1_12] \n\t"
- "sh %[load5], 64(%[output]) \n\t"
- "sh %[load6], 96(%[output]) \n\t"
- "sub %[load5], %[step1_3], %[step1_4] \n\t"
- "add %[load5], %[load5], %[step1_11] \n\t"
- "sub %[load6], %[step1_2], %[step1_5] \n\t"
- "add %[load6], %[load6], %[step1_10] \n\t"
- "sh %[load5], 128(%[output]) \n\t"
- "sh %[load6], 160(%[output]) \n\t"
- "sub %[load5], %[step1_2], %[step1_5] \n\t"
- "sub %[load5], %[load5], %[step1_10] \n\t"
- "sub %[load6], %[step1_3], %[step1_4] \n\t"
- "sub %[load6], %[load6], %[step1_11] \n\t"
- "sh %[load5], 320(%[output]) \n\t"
- "sh %[load6], 352(%[output]) \n\t"
- "add %[load5], %[step1_3], %[step1_4] \n\t"
- "sub %[load5], %[load5], %[step1_12] \n\t"
- "add %[load6], %[step1_2], %[step1_5] \n\t"
- "sub %[load6], %[load6], %[step1_13] \n\t"
- "sh %[load5], 384(%[output]) \n\t"
- "sh %[load6], 416(%[output]) \n\t"
-
- : [load5] "=&r" (load5), [load6] "=&r" (load6)
- : [output] "r" (output),
- [step1_2] "r" (step1_2), [step1_3] "r" (step1_3),
- [step1_4] "r" (step1_4), [step1_5] "r" (step1_5),
- [step1_10] "r" (step1_10), [step1_11] "r" (step1_11),
- [step1_12] "r" (step1_12), [step1_13] "r" (step1_13)
- );
-
- input += 16;
- output += 1;
- }
-}
-
-static void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
- int dest_stride) {
- int i;
- int step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6, step1_7;
- int step1_8, step1_9, step1_10, step1_11;
- int step1_12, step1_13, step1_14, step1_15;
- int step2_0, step2_1, step2_2, step2_3;
- int step2_8, step2_9, step2_10, step2_11;
- int step2_12, step2_13, step2_14, step2_15;
- int load1, load2, load3, load4, load5, load6, load7, load8;
- int result1, result2, result3, result4;
- const int const_2_power_13 = 8192;
- uint8_t *dest_pix;
- uint8_t *cm = vpx_ff_cropTbl;
-
- /* prefetch vpx_ff_cropTbl */
- prefetch_load(vpx_ff_cropTbl);
- prefetch_load(vpx_ff_cropTbl + 32);
- prefetch_load(vpx_ff_cropTbl + 64);
- prefetch_load(vpx_ff_cropTbl + 96);
- prefetch_load(vpx_ff_cropTbl + 128);
- prefetch_load(vpx_ff_cropTbl + 160);
- prefetch_load(vpx_ff_cropTbl + 192);
- prefetch_load(vpx_ff_cropTbl + 224);
-
- for (i = 0; i < 16; ++i) {
- dest_pix = (dest + i);
- __asm__ __volatile__ (
- "lh %[load1], 0(%[input]) \n\t"
- "lh %[load2], 16(%[input]) \n\t"
- "lh %[load3], 8(%[input]) \n\t"
- "lh %[load4], 24(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
- "add %[result1], %[load1], %[load2] \n\t"
- "sub %[result2], %[load1], %[load2] \n\t"
- "madd $ac1, %[result1], %[cospi_16_64] \n\t"
- "madd $ac2, %[result2], %[cospi_16_64] \n\t"
- "extp %[step2_0], $ac1, 31 \n\t"
- "extp %[step2_1], $ac2, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
- "madd $ac3, %[load3], %[cospi_24_64] \n\t"
- "msub $ac3, %[load4], %[cospi_8_64] \n\t"
- "extp %[step2_2], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "madd $ac1, %[load3], %[cospi_8_64] \n\t"
- "madd $ac1, %[load4], %[cospi_24_64] \n\t"
- "extp %[step2_3], $ac1, 31 \n\t"
-
- "add %[step1_0], %[step2_0], %[step2_3] \n\t"
- "add %[step1_1], %[step2_1], %[step2_2] \n\t"
- "sub %[step1_2], %[step2_1], %[step2_2] \n\t"
- "sub %[step1_3], %[step2_0], %[step2_3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2),
- [load3] "=&r" (load3), [load4] "=&r" (load4),
- [result1] "=&r" (result1), [result2] "=&r" (result2),
- [step2_0] "=&r" (step2_0), [step2_1] "=&r" (step2_1),
- [step2_2] "=&r" (step2_2), [step2_3] "=&r" (step2_3),
- [step1_0] "=r" (step1_0), [step1_1] "=r" (step1_1),
- [step1_2] "=r" (step1_2), [step1_3] "=r" (step1_3)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64),
- [cospi_16_64] "r" (cospi_16_64)
- );
-
- __asm__ __volatile__ (
- "lh %[load5], 2(%[input]) \n\t"
- "lh %[load6], 30(%[input]) \n\t"
- "lh %[load7], 18(%[input]) \n\t"
- "lh %[load8], 14(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load5], %[cospi_30_64] \n\t"
- "msub $ac1, %[load6], %[cospi_2_64] \n\t"
- "extp %[result1], $ac1, 31 \n\t"
-
- "madd $ac3, %[load7], %[cospi_14_64] \n\t"
- "msub $ac3, %[load8], %[cospi_18_64] \n\t"
- "extp %[result2], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac1, %[load7], %[cospi_18_64] \n\t"
- "madd $ac1, %[load8], %[cospi_14_64] \n\t"
- "extp %[result3], $ac1, 31 \n\t"
-
- "madd $ac2, %[load5], %[cospi_2_64] \n\t"
- "madd $ac2, %[load6], %[cospi_30_64] \n\t"
- "extp %[result4], $ac2, 31 \n\t"
-
- "sub %[load5], %[result1], %[result2] \n\t"
- "sub %[load6], %[result4], %[result3] \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load6], %[cospi_24_64] \n\t"
- "msub $ac1, %[load5], %[cospi_8_64] \n\t"
- "madd $ac3, %[load5], %[cospi_24_64] \n\t"
- "madd $ac3, %[load6], %[cospi_8_64] \n\t"
-
- "extp %[step2_9], $ac1, 31 \n\t"
- "extp %[step2_14], $ac3, 31 \n\t"
- "add %[step2_8], %[result1], %[result2] \n\t"
- "add %[step2_15], %[result4], %[result3] \n\t"
-
- : [load5] "=&r" (load5), [load6] "=&r" (load6),
- [load7] "=&r" (load7), [load8] "=&r" (load8),
- [result1] "=&r" (result1), [result2] "=&r" (result2),
- [result3] "=&r" (result3), [result4] "=&r" (result4),
- [step2_8] "=r" (step2_8), [step2_15] "=r" (step2_15),
- [step2_9] "=r" (step2_9), [step2_14] "=r" (step2_14)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_30_64] "r" (cospi_30_64), [cospi_2_64] "r" (cospi_2_64),
- [cospi_14_64] "r" (cospi_14_64), [cospi_18_64] "r" (cospi_18_64),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
- );
-
- __asm__ __volatile__ (
- "lh %[load1], 10(%[input]) \n\t"
- "lh %[load2], 22(%[input]) \n\t"
- "lh %[load3], 26(%[input]) \n\t"
- "lh %[load4], 6(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_22_64] \n\t"
- "msub $ac1, %[load2], %[cospi_10_64] \n\t"
- "extp %[result1], $ac1, 31 \n\t"
-
- "madd $ac3, %[load3], %[cospi_6_64] \n\t"
- "msub $ac3, %[load4], %[cospi_26_64] \n\t"
- "extp %[result2], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac1, %[load1], %[cospi_10_64] \n\t"
- "madd $ac1, %[load2], %[cospi_22_64] \n\t"
- "extp %[result3], $ac1, 31 \n\t"
-
- "madd $ac2, %[load3], %[cospi_26_64] \n\t"
- "madd $ac2, %[load4], %[cospi_6_64] \n\t"
- "extp %[result4], $ac2, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[result2], %[result1] \n\t"
- "sub %[load2], %[result4], %[result3] \n\t"
-
- "msub $ac1, %[load1], %[cospi_24_64] \n\t"
- "msub $ac1, %[load2], %[cospi_8_64] \n\t"
- "madd $ac3, %[load2], %[cospi_24_64] \n\t"
- "msub $ac3, %[load1], %[cospi_8_64] \n\t"
-
- "extp %[step2_10], $ac1, 31 \n\t"
- "extp %[step2_13], $ac3, 31 \n\t"
- "add %[step2_11], %[result1], %[result2] \n\t"
- "add %[step2_12], %[result4], %[result3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2),
- [load3] "=&r" (load3), [load4] "=&r" (load4),
- [result1] "=&r" (result1), [result2] "=&r" (result2),
- [result3] "=&r" (result3), [result4] "=&r" (result4),
- [step2_10] "=r" (step2_10), [step2_11] "=r" (step2_11),
- [step2_12] "=r" (step2_12), [step2_13] "=r" (step2_13)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_22_64] "r" (cospi_22_64), [cospi_10_64] "r" (cospi_10_64),
- [cospi_6_64] "r" (cospi_6_64), [cospi_26_64] "r" (cospi_26_64),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
- );
-
- __asm__ __volatile__ (
- "lh %[load5], 4(%[input]) \n\t"
- "lh %[load6], 28(%[input]) \n\t"
- "lh %[load7], 20(%[input]) \n\t"
- "lh %[load8], 12(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load5], %[cospi_28_64] \n\t"
- "msub $ac1, %[load6], %[cospi_4_64] \n\t"
- "extp %[result1], $ac1, 31 \n\t"
-
- "madd $ac3, %[load7], %[cospi_12_64] \n\t"
- "msub $ac3, %[load8], %[cospi_20_64] \n\t"
- "extp %[result2], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac1, %[load7], %[cospi_20_64] \n\t"
- "madd $ac1, %[load8], %[cospi_12_64] \n\t"
- "extp %[result3], $ac1, 31 \n\t"
-
- "madd $ac2, %[load5], %[cospi_4_64] \n\t"
- "madd $ac2, %[load6], %[cospi_28_64] \n\t"
- "extp %[result4], $ac2, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load5], %[result4], %[result3] \n\t"
- "sub %[load5], %[load5], %[result1] \n\t"
- "add %[load5], %[load5], %[result2] \n\t"
-
- "sub %[load6], %[result1], %[result2] \n\t"
- "sub %[load6], %[load6], %[result3] \n\t"
- "add %[load6], %[load6], %[result4] \n\t"
-
- "madd $ac1, %[load5], %[cospi_16_64] \n\t"
- "madd $ac3, %[load6], %[cospi_16_64] \n\t"
-
- "extp %[step1_5], $ac1, 31 \n\t"
- "extp %[step1_6], $ac3, 31 \n\t"
-
- "add %[step1_4], %[result1], %[result2] \n\t"
- "add %[step1_7], %[result4], %[result3] \n\t"
-
- : [load5] "=&r" (load5), [load6] "=&r" (load6),
- [load7] "=&r" (load7), [load8] "=&r" (load8),
- [result1] "=&r" (result1), [result2] "=&r" (result2),
- [result3] "=&r" (result3), [result4] "=&r" (result4),
- [step1_4] "=r" (step1_4), [step1_5] "=r" (step1_5),
- [step1_6] "=r" (step1_6), [step1_7] "=r" (step1_7)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64),
- [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64),
- [cospi_16_64] "r" (cospi_16_64)
- );
-
- __asm__ __volatile__ (
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
-
- "sub %[load5], %[step2_14], %[step2_13] \n\t"
- "sub %[load5], %[load5], %[step2_9] \n\t"
- "add %[load5], %[load5], %[step2_10] \n\t"
-
- "madd $ac0, %[load5], %[cospi_16_64] \n\t"
-
- "sub %[load6], %[step2_14], %[step2_13] \n\t"
- "sub %[load6], %[load6], %[step2_10] \n\t"
- "add %[load6], %[load6], %[step2_9] \n\t"
-
- "madd $ac1, %[load6], %[cospi_16_64] \n\t"
-
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load5], %[step2_15], %[step2_12] \n\t"
- "sub %[load5], %[load5], %[step2_8] \n\t"
- "add %[load5], %[load5], %[step2_11] \n\t"
-
- "madd $ac2, %[load5], %[cospi_16_64] \n\t"
-
- "sub %[load6], %[step2_15], %[step2_12] \n\t"
- "sub %[load6], %[load6], %[step2_11] \n\t"
- "add %[load6], %[load6], %[step2_8] \n\t"
-
- "madd $ac3, %[load6], %[cospi_16_64] \n\t"
-
- "extp %[step1_10], $ac0, 31 \n\t"
- "extp %[step1_13], $ac1, 31 \n\t"
- "extp %[step1_11], $ac2, 31 \n\t"
- "extp %[step1_12], $ac3, 31 \n\t"
-
- : [load5] "=&r" (load5), [load6] "=&r" (load6),
- [step1_10] "=r" (step1_10), [step1_11] "=r" (step1_11),
- [step1_12] "=r" (step1_12), [step1_13] "=r" (step1_13)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_14] "r" (step2_14), [step2_13] "r" (step2_13),
- [step2_9] "r" (step2_9), [step2_10] "r" (step2_10),
- [step2_15] "r" (step2_15), [step2_12] "r" (step2_12),
- [step2_8] "r" (step2_8), [step2_11] "r" (step2_11),
- [cospi_16_64] "r" (cospi_16_64)
- );
-
- step1_8 = step2_8 + step2_11;
- step1_9 = step2_9 + step2_10;
- step1_14 = step2_13 + step2_14;
- step1_15 = step2_12 + step2_15;
-
- __asm__ __volatile__ (
- "lbu %[load7], 0(%[dest_pix]) \n\t"
- "add %[load5], %[step1_0], %[step1_7] \n\t"
- "add %[load5], %[load5], %[step1_15] \n\t"
- "addi %[load5], %[load5], 32 \n\t"
- "sra %[load5], %[load5], 6 \n\t"
- "add %[load7], %[load7], %[load5] \n\t"
- "lbux %[load5], %[load7](%[cm]) \n\t"
- "add %[load6], %[step1_1], %[step1_6] \n\t"
- "add %[load6], %[load6], %[step1_14] \n\t"
- "sb %[load5], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[load8], 0(%[dest_pix]) \n\t"
- "addi %[load6], %[load6], 32 \n\t"
- "sra %[load6], %[load6], 6 \n\t"
- "add %[load8], %[load8], %[load6] \n\t"
- "lbux %[load6], %[load8](%[cm]) \n\t"
- "sb %[load6], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[load7], 0(%[dest_pix]) \n\t"
- "add %[load5], %[step1_2], %[step1_5] \n\t"
- "add %[load5], %[load5], %[step1_13] \n\t"
- "addi %[load5], %[load5], 32 \n\t"
- "sra %[load5], %[load5], 6 \n\t"
- "add %[load7], %[load7], %[load5] \n\t"
- "lbux %[load5], %[load7](%[cm]) \n\t"
- "add %[load6], %[step1_3], %[step1_4] \n\t"
- "add %[load6], %[load6], %[step1_12] \n\t"
- "sb %[load5], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[load8], 0(%[dest_pix]) \n\t"
- "addi %[load6], %[load6], 32 \n\t"
- "sra %[load6], %[load6], 6 \n\t"
- "add %[load8], %[load8], %[load6] \n\t"
- "lbux %[load6], %[load8](%[cm]) \n\t"
- "sb %[load6], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[load7], 0(%[dest_pix]) \n\t"
- "sub %[load5], %[step1_3], %[step1_4] \n\t"
- "add %[load5], %[load5], %[step1_11] \n\t"
- "addi %[load5], %[load5], 32 \n\t"
- "sra %[load5], %[load5], 6 \n\t"
- "add %[load7], %[load7], %[load5] \n\t"
- "lbux %[load5], %[load7](%[cm]) \n\t"
- "sub %[load6], %[step1_2], %[step1_5] \n\t"
- "add %[load6], %[load6], %[step1_10] \n\t"
- "sb %[load5], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[load8], 0(%[dest_pix]) \n\t"
- "addi %[load6], %[load6], 32 \n\t"
- "sra %[load6], %[load6], 6 \n\t"
- "add %[load8], %[load8], %[load6] \n\t"
- "lbux %[load6], %[load8](%[cm]) \n\t"
- "sb %[load6], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "sub %[load5], %[step1_1], %[step1_6] \n\t"
- "lbu %[load7], 0(%[dest_pix]) \n\t"
- "add %[load5], %[load5], %[step1_9] \n\t"
- "addi %[load5], %[load5], 32 \n\t"
- "sra %[load5], %[load5], 6 \n\t"
- "add %[load7], %[load7], %[load5] \n\t"
- "lbux %[load5], %[load7](%[cm]) \n\t"
- "sub %[load6], %[step1_0], %[step1_7] \n\t"
- "add %[load6], %[load6], %[step1_8] \n\t"
- "sb %[load5], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[load8], 0(%[dest_pix]) \n\t"
- "addi %[load6], %[load6], 32 \n\t"
- "sra %[load6], %[load6], 6 \n\t"
- "add %[load8], %[load8], %[load6] \n\t"
- "lbux %[load6], %[load8](%[cm]) \n\t"
- "sb %[load6], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[load7], 0(%[dest_pix]) \n\t"
- "sub %[load5], %[step1_0], %[step1_7] \n\t"
- "sub %[load5], %[load5], %[step1_8] \n\t"
- "addi %[load5], %[load5], 32 \n\t"
- "sra %[load5], %[load5], 6 \n\t"
- "add %[load7], %[load7], %[load5] \n\t"
- "lbux %[load5], %[load7](%[cm]) \n\t"
- "sub %[load6], %[step1_1], %[step1_6] \n\t"
- "sub %[load6], %[load6], %[step1_9] \n\t"
- "sb %[load5], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[load8], 0(%[dest_pix]) \n\t"
- "addi %[load6], %[load6], 32 \n\t"
- "sra %[load6], %[load6], 6 \n\t"
- "add %[load8], %[load8], %[load6] \n\t"
- "lbux %[load6], %[load8](%[cm]) \n\t"
- "sb %[load6], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[load7], 0(%[dest_pix]) \n\t"
- "sub %[load5], %[step1_2], %[step1_5] \n\t"
- "sub %[load5], %[load5], %[step1_10] \n\t"
- "addi %[load5], %[load5], 32 \n\t"
- "sra %[load5], %[load5], 6 \n\t"
- "add %[load7], %[load7], %[load5] \n\t"
- "lbux %[load5], %[load7](%[cm]) \n\t"
- "sub %[load6], %[step1_3], %[step1_4] \n\t"
- "sub %[load6], %[load6], %[step1_11] \n\t"
- "sb %[load5], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[load8], 0(%[dest_pix]) \n\t"
- "addi %[load6], %[load6], 32 \n\t"
- "sra %[load6], %[load6], 6 \n\t"
- "add %[load8], %[load8], %[load6] \n\t"
- "lbux %[load6], %[load8](%[cm]) \n\t"
- "sb %[load6], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[load7], 0(%[dest_pix]) \n\t"
- "add %[load5], %[step1_3], %[step1_4] \n\t"
- "sub %[load5], %[load5], %[step1_12] \n\t"
- "addi %[load5], %[load5], 32 \n\t"
- "sra %[load5], %[load5], 6 \n\t"
- "add %[load7], %[load7], %[load5] \n\t"
- "lbux %[load5], %[load7](%[cm]) \n\t"
- "add %[load6], %[step1_2], %[step1_5] \n\t"
- "sub %[load6], %[load6], %[step1_13] \n\t"
- "sb %[load5], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[load8], 0(%[dest_pix]) \n\t"
- "addi %[load6], %[load6], 32 \n\t"
- "sra %[load6], %[load6], 6 \n\t"
- "add %[load8], %[load8], %[load6] \n\t"
- "lbux %[load6], %[load8](%[cm]) \n\t"
- "sb %[load6], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[load7], 0(%[dest_pix]) \n\t"
- "add %[load5], %[step1_1], %[step1_6] \n\t"
- "sub %[load5], %[load5], %[step1_14] \n\t"
- "addi %[load5], %[load5], 32 \n\t"
- "sra %[load5], %[load5], 6 \n\t"
- "add %[load7], %[load7], %[load5] \n\t"
- "lbux %[load5], %[load7](%[cm]) \n\t"
- "add %[load6], %[step1_0], %[step1_7] \n\t"
- "sub %[load6], %[load6], %[step1_15] \n\t"
- "sb %[load5], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[load8], 0(%[dest_pix]) \n\t"
- "addi %[load6], %[load6], 32 \n\t"
- "sra %[load6], %[load6], 6 \n\t"
- "add %[load8], %[load8], %[load6] \n\t"
- "lbux %[load6], %[load8](%[cm]) \n\t"
- "sb %[load6], 0(%[dest_pix]) \n\t"
-
- : [load5] "=&r" (load5), [load6] "=&r" (load6), [load7] "=&r" (load7),
- [load8] "=&r" (load8), [dest_pix] "+r" (dest_pix)
- : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
- [step1_0] "r" (step1_0), [step1_1] "r" (step1_1),
- [step1_2] "r" (step1_2), [step1_3] "r" (step1_3),
- [step1_4] "r" (step1_4), [step1_5] "r" (step1_5),
- [step1_6] "r" (step1_6), [step1_7] "r" (step1_7),
- [step1_8] "r" (step1_8), [step1_9] "r" (step1_9),
- [step1_10] "r" (step1_10), [step1_11] "r" (step1_11),
- [step1_12] "r" (step1_12), [step1_13] "r" (step1_13),
- [step1_14] "r" (step1_14), [step1_15] "r" (step1_15)
- );
-
- input += 16;
- }
-}
-
-void vp9_idct16x16_256_add_dspr2(const int16_t *input, uint8_t *dest,
- int dest_stride) {
- DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
- uint32_t pos = 45;
-
- /* bit positon for extract from acc */
- __asm__ __volatile__ (
- "wrdsp %[pos], 1 \n\t"
- :
- : [pos] "r" (pos)
- );
-
- // First transform rows
- idct16_rows_dspr2(input, out, 16);
-
- // Then transform columns and add to dest
- idct16_cols_add_blk_dspr2(out, dest, dest_stride);
-}
-
-static void iadst16_dspr2(const int16_t *input, int16_t *output) {
- int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15;
-
- int x0 = input[15];
- int x1 = input[0];
- int x2 = input[13];
- int x3 = input[2];
- int x4 = input[11];
- int x5 = input[4];
- int x6 = input[9];
- int x7 = input[6];
- int x8 = input[7];
- int x9 = input[8];
- int x10 = input[5];
- int x11 = input[10];
- int x12 = input[3];
- int x13 = input[12];
- int x14 = input[1];
- int x15 = input[14];
-
- if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8
- | x9 | x10 | x11 | x12 | x13 | x14 | x15)) {
- output[0] = output[1] = output[2] = output[3] = output[4]
- = output[5] = output[6] = output[7] = output[8]
- = output[9] = output[10] = output[11] = output[12]
- = output[13] = output[14] = output[15] = 0;
- return;
- }
-
- // stage 1
- s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
- s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
- s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
- s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
- s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
- s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
- s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
- s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
- s8 = x8 * cospi_17_64 + x9 * cospi_15_64;
- s9 = x8 * cospi_15_64 - x9 * cospi_17_64;
- s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
- s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
- s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
- s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
- s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
- s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
-
- x0 = dct_const_round_shift(s0 + s8);
- x1 = dct_const_round_shift(s1 + s9);
- x2 = dct_const_round_shift(s2 + s10);
- x3 = dct_const_round_shift(s3 + s11);
- x4 = dct_const_round_shift(s4 + s12);
- x5 = dct_const_round_shift(s5 + s13);
- x6 = dct_const_round_shift(s6 + s14);
- x7 = dct_const_round_shift(s7 + s15);
- x8 = dct_const_round_shift(s0 - s8);
- x9 = dct_const_round_shift(s1 - s9);
- x10 = dct_const_round_shift(s2 - s10);
- x11 = dct_const_round_shift(s3 - s11);
- x12 = dct_const_round_shift(s4 - s12);
- x13 = dct_const_round_shift(s5 - s13);
- x14 = dct_const_round_shift(s6 - s14);
- x15 = dct_const_round_shift(s7 - s15);
-
- // stage 2
- s0 = x0;
- s1 = x1;
- s2 = x2;
- s3 = x3;
- s4 = x4;
- s5 = x5;
- s6 = x6;
- s7 = x7;
- s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
- s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
- s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
- s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
- s12 = - x12 * cospi_28_64 + x13 * cospi_4_64;
- s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
- s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;
- s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
-
- x0 = s0 + s4;
- x1 = s1 + s5;
- x2 = s2 + s6;
- x3 = s3 + s7;
- x4 = s0 - s4;
- x5 = s1 - s5;
- x6 = s2 - s6;
- x7 = s3 - s7;
- x8 = dct_const_round_shift(s8 + s12);
- x9 = dct_const_round_shift(s9 + s13);
- x10 = dct_const_round_shift(s10 + s14);
- x11 = dct_const_round_shift(s11 + s15);
- x12 = dct_const_round_shift(s8 - s12);
- x13 = dct_const_round_shift(s9 - s13);
- x14 = dct_const_round_shift(s10 - s14);
- x15 = dct_const_round_shift(s11 - s15);
-
- // stage 3
- s0 = x0;
- s1 = x1;
- s2 = x2;
- s3 = x3;
- s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
- s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
- s6 = - x6 * cospi_24_64 + x7 * cospi_8_64;
- s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
- s8 = x8;
- s9 = x9;
- s10 = x10;
- s11 = x11;
- s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
- s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
- s14 = - x14 * cospi_24_64 + x15 * cospi_8_64;
- s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
-
- x0 = s0 + s2;
- x1 = s1 + s3;
- x2 = s0 - s2;
- x3 = s1 - s3;
- x4 = dct_const_round_shift(s4 + s6);
- x5 = dct_const_round_shift(s5 + s7);
- x6 = dct_const_round_shift(s4 - s6);
- x7 = dct_const_round_shift(s5 - s7);
- x8 = s8 + s10;
- x9 = s9 + s11;
- x10 = s8 - s10;
- x11 = s9 - s11;
- x12 = dct_const_round_shift(s12 + s14);
- x13 = dct_const_round_shift(s13 + s15);
- x14 = dct_const_round_shift(s12 - s14);
- x15 = dct_const_round_shift(s13 - s15);
-
- // stage 4
- s2 = (- cospi_16_64) * (x2 + x3);
- s3 = cospi_16_64 * (x2 - x3);
- s6 = cospi_16_64 * (x6 + x7);
- s7 = cospi_16_64 * (- x6 + x7);
- s10 = cospi_16_64 * (x10 + x11);
- s11 = cospi_16_64 * (- x10 + x11);
- s14 = (- cospi_16_64) * (x14 + x15);
- s15 = cospi_16_64 * (x14 - x15);
-
- x2 = dct_const_round_shift(s2);
- x3 = dct_const_round_shift(s3);
- x6 = dct_const_round_shift(s6);
- x7 = dct_const_round_shift(s7);
- x10 = dct_const_round_shift(s10);
- x11 = dct_const_round_shift(s11);
- x14 = dct_const_round_shift(s14);
- x15 = dct_const_round_shift(s15);
-
- output[0] = x0;
- output[1] = -x8;
- output[2] = x12;
- output[3] = -x4;
- output[4] = x6;
- output[5] = x14;
- output[6] = x10;
- output[7] = x2;
- output[8] = x3;
- output[9] = x11;
- output[10] = x15;
- output[11] = x7;
- output[12] = x5;
- output[13] = -x13;
- output[14] = x9;
- output[15] = -x1;
-}
-
void vp9_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest,
int pitch, int tx_type) {
int i, j;
@@ -1168,150 +105,4 @@ void vp9_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest,
break;
}
}
-
-void vp9_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest,
- int dest_stride) {
- DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
- int16_t *outptr = out;
- uint32_t i;
- uint32_t pos = 45;
-
- /* bit positon for extract from acc */
- __asm__ __volatile__ (
- "wrdsp %[pos], 1 \n\t"
- :
- : [pos] "r" (pos)
- );
-
- // First transform rows. Since all non-zero dct coefficients are in
- // upper-left 4x4 area, we only need to calculate first 4 rows here.
- idct16_rows_dspr2(input, outptr, 4);
-
- outptr += 4;
- for (i = 0; i < 6; ++i) {
- __asm__ __volatile__ (
- "sw $zero, 0(%[outptr]) \n\t"
- "sw $zero, 32(%[outptr]) \n\t"
- "sw $zero, 64(%[outptr]) \n\t"
- "sw $zero, 96(%[outptr]) \n\t"
- "sw $zero, 128(%[outptr]) \n\t"
- "sw $zero, 160(%[outptr]) \n\t"
- "sw $zero, 192(%[outptr]) \n\t"
- "sw $zero, 224(%[outptr]) \n\t"
- "sw $zero, 256(%[outptr]) \n\t"
- "sw $zero, 288(%[outptr]) \n\t"
- "sw $zero, 320(%[outptr]) \n\t"
- "sw $zero, 352(%[outptr]) \n\t"
- "sw $zero, 384(%[outptr]) \n\t"
- "sw $zero, 416(%[outptr]) \n\t"
- "sw $zero, 448(%[outptr]) \n\t"
- "sw $zero, 480(%[outptr]) \n\t"
-
- :
- : [outptr] "r" (outptr)
- );
-
- outptr += 2;
- }
-
- // Then transform columns
- idct16_cols_add_blk_dspr2(out, dest, dest_stride);
-}
-
-void vp9_idct16x16_1_add_dspr2(const int16_t *input, uint8_t *dest,
- int dest_stride) {
- uint32_t pos = 45;
- int32_t out;
- int32_t r;
- int32_t a1, absa1;
- int32_t vector_a1;
- int32_t t1, t2, t3, t4;
- int32_t vector_1, vector_2, vector_3, vector_4;
-
- /* bit positon for extract from acc */
- __asm__ __volatile__ (
- "wrdsp %[pos], 1 \n\t"
-
- :
- : [pos] "r" (pos)
- );
-
- out = DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input[0]);
- __asm__ __volatile__ (
- "addi %[out], %[out], 32 \n\t"
- "sra %[a1], %[out], 6 \n\t"
-
- : [out] "+r" (out), [a1] "=r" (a1)
- :
- );
-
- if (a1 < 0) {
- /* use quad-byte
- * input and output memory are four byte aligned */
- __asm__ __volatile__ (
- "abs %[absa1], %[a1] \n\t"
- "replv.qb %[vector_a1], %[absa1] \n\t"
-
- : [absa1] "=r" (absa1), [vector_a1] "=r" (vector_a1)
- : [a1] "r" (a1)
- );
-
- for (r = 16; r--;) {
- __asm__ __volatile__ (
- "lw %[t1], 0(%[dest]) \n\t"
- "lw %[t2], 4(%[dest]) \n\t"
- "lw %[t3], 8(%[dest]) \n\t"
- "lw %[t4], 12(%[dest]) \n\t"
- "subu_s.qb %[vector_1], %[t1], %[vector_a1] \n\t"
- "subu_s.qb %[vector_2], %[t2], %[vector_a1] \n\t"
- "subu_s.qb %[vector_3], %[t3], %[vector_a1] \n\t"
- "subu_s.qb %[vector_4], %[t4], %[vector_a1] \n\t"
- "sw %[vector_1], 0(%[dest]) \n\t"
- "sw %[vector_2], 4(%[dest]) \n\t"
- "sw %[vector_3], 8(%[dest]) \n\t"
- "sw %[vector_4], 12(%[dest]) \n\t"
- "add %[dest], %[dest], %[dest_stride] \n\t"
-
- : [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3), [t4] "=&r" (t4),
- [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2),
- [vector_3] "=&r" (vector_3), [vector_4] "=&r" (vector_4),
- [dest] "+&r" (dest)
- : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1)
- );
- }
- } else {
- /* use quad-byte
- * input and output memory are four byte aligned */
- __asm__ __volatile__ (
- "replv.qb %[vector_a1], %[a1] \n\t"
-
- : [vector_a1] "=r" (vector_a1)
- : [a1] "r" (a1)
- );
-
- for (r = 16; r--;) {
- __asm__ __volatile__ (
- "lw %[t1], 0(%[dest]) \n\t"
- "lw %[t2], 4(%[dest]) \n\t"
- "lw %[t3], 8(%[dest]) \n\t"
- "lw %[t4], 12(%[dest]) \n\t"
- "addu_s.qb %[vector_1], %[t1], %[vector_a1] \n\t"
- "addu_s.qb %[vector_2], %[t2], %[vector_a1] \n\t"
- "addu_s.qb %[vector_3], %[t3], %[vector_a1] \n\t"
- "addu_s.qb %[vector_4], %[t4], %[vector_a1] \n\t"
- "sw %[vector_1], 0(%[dest]) \n\t"
- "sw %[vector_2], 4(%[dest]) \n\t"
- "sw %[vector_3], 8(%[dest]) \n\t"
- "sw %[vector_4], 12(%[dest]) \n\t"
- "add %[dest], %[dest], %[dest_stride] \n\t"
-
- : [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3), [t4] "=&r" (t4),
- [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2),
- [vector_3] "=&r" (vector_3), [vector_4] "=&r" (vector_4),
- [dest] "+&r" (dest)
- : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1)
- );
- }
- }
-}
#endif // #if HAVE_DSPR2
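Of the functions removed above, the DC-only path (vp9_idct16x16_1_add_dspr2) is the simplest to restate: it double round-shifts input[0] by cospi_16_64, computes a1 = (out + 32) >> 6, then adds that constant to every pixel with byte saturation (replv.qb broadcasts a1 and addu_s.qb/subu_s.qb handle four pixels per word). A hedged plain-C sketch, reusing round_shift_twice_cospi_16_64() from the earlier sketch:

    static void idct16x16_1_add_sketch(const int16_t *input, uint8_t *dest,
                                       int dest_stride) {
      int r, c;
      const int out = round_shift_twice_cospi_16_64(input[0]);
      const int a1 = (out + 32) >> 6;    /* ROUND_POWER_OF_TWO(out, 6) */

      for (r = 0; r < 16; ++r) {
        for (c = 0; c < 16; ++c) {
          const int v = dest[c] + a1;    /* saturating byte add/sub */
          dest[c] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
        }
        dest += dest_stride;
      }
    }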
diff --git a/vp9/common/mips/dspr2/vp9_itrans32_cols_dspr2.c b/vp9/common/mips/dspr2/vp9_itrans32_cols_dspr2.c
deleted file mode 100644
index 48da85cbc..000000000
--- a/vp9/common/mips/dspr2/vp9_itrans32_cols_dspr2.c
+++ /dev/null
@@ -1,1074 +0,0 @@
-/*
- * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <assert.h>
-
-#include "./vpx_config.h"
-#include "./vp9_rtcd.h"
-#include "vp9/common/vp9_common.h"
-#include "vp9/common/vp9_blockd.h"
-#include "vp9/common/mips/dspr2/vp9_common_dspr2.h"
-#include "vpx_dsp/txfm_common.h"
-#include "vpx_ports/mem.h"
-
-#if HAVE_DSPR2
-void vp9_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
- int dest_stride) {
- int16_t step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6;
- int16_t step1_7, step1_8, step1_9, step1_10, step1_11, step1_12, step1_13;
- int16_t step1_14, step1_15, step1_16, step1_17, step1_18, step1_19;
- int16_t step1_20, step1_21, step1_22, step1_23, step1_24, step1_25, step1_26;
- int16_t step1_27, step1_28, step1_29, step1_30, step1_31;
- int16_t step2_0, step2_1, step2_2, step2_3, step2_4, step2_5, step2_6;
- int16_t step2_7, step2_8, step2_9, step2_10, step2_11, step2_12, step2_13;
- int16_t step2_14, step2_15, step2_16, step2_17, step2_18, step2_19, step2_20;
- int16_t step2_21, step2_22, step2_23, step2_24, step2_25, step2_26, step2_27;
- int16_t step2_28, step2_29, step2_30, step2_31;
- int16_t step3_8, step3_9, step3_10, step3_11, step3_12, step3_13, step3_14;
- int16_t step3_15, step3_16, step3_17, step3_18, step3_19, step3_20, step3_21;
- int16_t step3_22, step3_23, step3_24, step3_25, step3_26, step3_27;
- int16_t step3_28, step3_29, step3_30, step3_31;
- int temp0, temp1, temp2, temp3;
- int load1, load2, load3, load4;
- int result1, result2;
- int i, temp21;
- uint8_t *dest_pix, *dest_pix1;
- const int const_2_power_13 = 8192;
- uint8_t *cm = vpx_ff_cropTbl;
-
- /* prefetch vpx_ff_cropTbl */
- prefetch_load(vpx_ff_cropTbl);
- prefetch_load(vpx_ff_cropTbl + 32);
- prefetch_load(vpx_ff_cropTbl + 64);
- prefetch_load(vpx_ff_cropTbl + 96);
- prefetch_load(vpx_ff_cropTbl + 128);
- prefetch_load(vpx_ff_cropTbl + 160);
- prefetch_load(vpx_ff_cropTbl + 192);
- prefetch_load(vpx_ff_cropTbl + 224);
-
- for (i = 0; i < 32; ++i) {
- dest_pix = dest + i;
- dest_pix1 = dest + i + 31 * dest_stride;
-
- __asm__ __volatile__ (
- "lh %[load1], 2(%[input]) \n\t"
- "lh %[load2], 62(%[input]) \n\t"
- "lh %[load3], 34(%[input]) \n\t"
- "lh %[load4], 30(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_31_64] \n\t"
- "msub $ac1, %[load2], %[cospi_1_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
-
- "madd $ac3, %[load1], %[cospi_1_64] \n\t"
- "madd $ac3, %[load2], %[cospi_31_64] \n\t"
- "extp %[temp3], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac2, %[load3], %[cospi_15_64] \n\t"
- "msub $ac2, %[load4], %[cospi_17_64] \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
-
- "madd $ac1, %[load3], %[cospi_17_64] \n\t"
- "madd $ac1, %[load4], %[cospi_15_64] \n\t"
- "extp %[temp2], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[temp3], %[temp2] \n\t"
- "sub %[load2], %[temp0], %[temp1] \n\t"
-
- "madd $ac1, %[load1], %[cospi_28_64] \n\t"
- "msub $ac1, %[load2], %[cospi_4_64] \n\t"
- "madd $ac3, %[load1], %[cospi_4_64] \n\t"
- "madd $ac3, %[load2], %[cospi_28_64] \n\t"
-
- "extp %[step1_17], $ac1, 31 \n\t"
- "extp %[step1_30], $ac3, 31 \n\t"
- "add %[step1_16], %[temp0], %[temp1] \n\t"
- "add %[step1_31], %[temp2], %[temp3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3),
- [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step1_16] "=r" (step1_16), [step1_17] "=r" (step1_17),
- [step1_30] "=r" (step1_30), [step1_31] "=r" (step1_31)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_31_64] "r" (cospi_31_64), [cospi_1_64] "r" (cospi_1_64),
- [cospi_4_64] "r" (cospi_4_64), [cospi_17_64] "r" (cospi_17_64),
- [cospi_15_64] "r" (cospi_15_64), [cospi_28_64] "r" (cospi_28_64)
- );
-
- __asm__ __volatile__ (
- "lh %[load1], 18(%[input]) \n\t"
- "lh %[load2], 46(%[input]) \n\t"
- "lh %[load3], 50(%[input]) \n\t"
- "lh %[load4], 14(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_23_64] \n\t"
- "msub $ac1, %[load2], %[cospi_9_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
-
- "madd $ac3, %[load1], %[cospi_9_64] \n\t"
- "madd $ac3, %[load2], %[cospi_23_64] \n\t"
- "extp %[temp3], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac2, %[load3], %[cospi_7_64] \n\t"
- "msub $ac2, %[load4], %[cospi_25_64] \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
-
- "madd $ac1, %[load3], %[cospi_25_64] \n\t"
- "madd $ac1, %[load4], %[cospi_7_64] \n\t"
- "extp %[temp2], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[temp1], %[temp0] \n\t"
- "sub %[load2], %[temp2], %[temp3] \n\t"
-
- "msub $ac1, %[load1], %[cospi_28_64] \n\t"
- "msub $ac1, %[load2], %[cospi_4_64] \n\t"
- "msub $ac3, %[load1], %[cospi_4_64] \n\t"
- "madd $ac3, %[load2], %[cospi_28_64] \n\t"
-
- "extp %[step1_18], $ac1, 31 \n\t"
- "extp %[step1_29], $ac3, 31 \n\t"
- "add %[step1_19], %[temp0], %[temp1] \n\t"
- "add %[step1_28], %[temp2], %[temp3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3),
- [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step1_18] "=r" (step1_18), [step1_19] "=r" (step1_19),
- [step1_28] "=r" (step1_28), [step1_29] "=r" (step1_29)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_23_64] "r" (cospi_23_64), [cospi_9_64] "r" (cospi_9_64),
- [cospi_4_64] "r" (cospi_4_64), [cospi_7_64] "r" (cospi_7_64),
- [cospi_25_64] "r" (cospi_25_64), [cospi_28_64] "r" (cospi_28_64)
- );
-
- __asm__ __volatile__ (
- "lh %[load1], 10(%[input]) \n\t"
- "lh %[load2], 54(%[input]) \n\t"
- "lh %[load3], 42(%[input]) \n\t"
- "lh %[load4], 22(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_27_64] \n\t"
- "msub $ac1, %[load2], %[cospi_5_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
-
- "madd $ac3, %[load1], %[cospi_5_64] \n\t"
- "madd $ac3, %[load2], %[cospi_27_64] \n\t"
- "extp %[temp3], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac2, %[load3], %[cospi_11_64] \n\t"
- "msub $ac2, %[load4], %[cospi_21_64] \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
-
- "madd $ac1, %[load3], %[cospi_21_64] \n\t"
- "madd $ac1, %[load4], %[cospi_11_64] \n\t"
- "extp %[temp2], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[temp0], %[temp1] \n\t"
- "sub %[load2], %[temp3], %[temp2] \n\t"
-
- "madd $ac1, %[load2], %[cospi_12_64] \n\t"
- "msub $ac1, %[load1], %[cospi_20_64] \n\t"
- "madd $ac3, %[load1], %[cospi_12_64] \n\t"
- "madd $ac3, %[load2], %[cospi_20_64] \n\t"
-
- "extp %[step1_21], $ac1, 31 \n\t"
- "extp %[step1_26], $ac3, 31 \n\t"
- "add %[step1_20], %[temp0], %[temp1] \n\t"
- "add %[step1_27], %[temp2], %[temp3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3),
- [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step1_20] "=r" (step1_20), [step1_21] "=r" (step1_21),
- [step1_26] "=r" (step1_26), [step1_27] "=r" (step1_27)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_27_64] "r" (cospi_27_64), [cospi_5_64] "r" (cospi_5_64),
- [cospi_11_64] "r" (cospi_11_64), [cospi_21_64] "r" (cospi_21_64),
- [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64)
- );
-
- __asm__ __volatile__ (
- "lh %[load1], 26(%[input]) \n\t"
- "lh %[load2], 38(%[input]) \n\t"
- "lh %[load3], 58(%[input]) \n\t"
- "lh %[load4], 6(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_19_64] \n\t"
- "msub $ac1, %[load2], %[cospi_13_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
- "madd $ac3, %[load1], %[cospi_13_64] \n\t"
- "madd $ac3, %[load2], %[cospi_19_64] \n\t"
- "extp %[temp3], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac2, %[load3], %[cospi_3_64] \n\t"
- "msub $ac2, %[load4], %[cospi_29_64] \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
- "madd $ac1, %[load3], %[cospi_29_64] \n\t"
- "madd $ac1, %[load4], %[cospi_3_64] \n\t"
- "extp %[temp2], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[temp1], %[temp0] \n\t"
- "sub %[load2], %[temp2], %[temp3] \n\t"
- "msub $ac1, %[load1], %[cospi_12_64] \n\t"
- "msub $ac1, %[load2], %[cospi_20_64] \n\t"
- "msub $ac3, %[load1], %[cospi_20_64] \n\t"
- "madd $ac3, %[load2], %[cospi_12_64] \n\t"
- "extp %[step1_22], $ac1, 31 \n\t"
- "extp %[step1_25], $ac3, 31 \n\t"
- "add %[step1_23], %[temp0], %[temp1] \n\t"
- "add %[step1_24], %[temp2], %[temp3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3),
- [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step1_22] "=r" (step1_22), [step1_23] "=r" (step1_23),
- [step1_24] "=r" (step1_24), [step1_25] "=r" (step1_25)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_19_64] "r" (cospi_19_64), [cospi_13_64] "r" (cospi_13_64),
- [cospi_3_64] "r" (cospi_3_64), [cospi_29_64] "r" (cospi_29_64),
- [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64)
- );
-
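- /* Butterflies on input[2], input[30], input[18] and input[14]
-    produce step2_8/9/14/15. */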
- __asm__ __volatile__ (
- "lh %[load1], 4(%[input]) \n\t"
- "lh %[load2], 60(%[input]) \n\t"
- "lh %[load3], 36(%[input]) \n\t"
- "lh %[load4], 28(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_30_64] \n\t"
- "msub $ac1, %[load2], %[cospi_2_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
- "madd $ac3, %[load1], %[cospi_2_64] \n\t"
- "madd $ac3, %[load2], %[cospi_30_64] \n\t"
- "extp %[temp3], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac2, %[load3], %[cospi_14_64] \n\t"
- "msub $ac2, %[load4], %[cospi_18_64] \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
- "madd $ac1, %[load3], %[cospi_18_64] \n\t"
- "madd $ac1, %[load4], %[cospi_14_64] \n\t"
- "extp %[temp2], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[temp0], %[temp1] \n\t"
- "sub %[load2], %[temp3], %[temp2] \n\t"
- "msub $ac1, %[load1], %[cospi_8_64] \n\t"
- "madd $ac1, %[load2], %[cospi_24_64] \n\t"
- "madd $ac3, %[load1], %[cospi_24_64] \n\t"
- "madd $ac3, %[load2], %[cospi_8_64] \n\t"
- "extp %[step2_9], $ac1, 31 \n\t"
- "extp %[step2_14], $ac3, 31 \n\t"
- "add %[step2_8], %[temp0], %[temp1] \n\t"
- "add %[step2_15], %[temp2], %[temp3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3),
- [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step2_8] "=r" (step2_8), [step2_9] "=r" (step2_9),
- [step2_14] "=r" (step2_14), [step2_15] "=r" (step2_15)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_30_64] "r" (cospi_30_64), [cospi_2_64] "r" (cospi_2_64),
- [cospi_14_64] "r" (cospi_14_64), [cospi_18_64] "r" (cospi_18_64),
- [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
- );
-
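- /* Butterflies on input[10], input[22], input[26] and input[6]
-    produce step2_10/11/12/13. */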
- __asm__ __volatile__ (
- "lh %[load1], 20(%[input]) \n\t"
- "lh %[load2], 44(%[input]) \n\t"
- "lh %[load3], 52(%[input]) \n\t"
- "lh %[load4], 12(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_22_64] \n\t"
- "msub $ac1, %[load2], %[cospi_10_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
- "madd $ac3, %[load1], %[cospi_10_64] \n\t"
- "madd $ac3, %[load2], %[cospi_22_64] \n\t"
- "extp %[temp3], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac2, %[load3], %[cospi_6_64] \n\t"
- "msub $ac2, %[load4], %[cospi_26_64] \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
- "madd $ac1, %[load3], %[cospi_26_64] \n\t"
- "madd $ac1, %[load4], %[cospi_6_64] \n\t"
- "extp %[temp2], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[temp1], %[temp0] \n\t"
- "sub %[load2], %[temp2], %[temp3] \n\t"
- "msub $ac1, %[load1], %[cospi_24_64] \n\t"
- "msub $ac1, %[load2], %[cospi_8_64] \n\t"
- "madd $ac3, %[load2], %[cospi_24_64] \n\t"
- "msub $ac3, %[load1], %[cospi_8_64] \n\t"
- "extp %[step2_10], $ac1, 31 \n\t"
- "extp %[step2_13], $ac3, 31 \n\t"
- "add %[step2_11], %[temp0], %[temp1] \n\t"
- "add %[step2_12], %[temp2], %[temp3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3),
- [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step2_10] "=r" (step2_10), [step2_11] "=r" (step2_11),
- [step2_12] "=r" (step2_12), [step2_13] "=r" (step2_13)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_22_64] "r" (cospi_22_64), [cospi_10_64] "r" (cospi_10_64),
- [cospi_6_64] "r" (cospi_6_64), [cospi_26_64] "r" (cospi_26_64),
- [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
- );
-
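- /* Combine step2_8..15 into step3_8..15; the four middle terms are
-    scaled by cospi_16_64 in the accumulators. */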
- __asm__ __volatile__ (
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "sub %[temp0], %[step2_14], %[step2_13] \n\t"
- "sub %[temp0], %[temp0], %[step2_9] \n\t"
- "add %[temp0], %[temp0], %[step2_10] \n\t"
- "madd $ac0, %[temp0], %[cospi_16_64] \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "sub %[temp1], %[step2_14], %[step2_13] \n\t"
- "add %[temp1], %[temp1], %[step2_9] \n\t"
- "sub %[temp1], %[temp1], %[step2_10] \n\t"
- "madd $ac1, %[temp1], %[cospi_16_64] \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
- "sub %[temp0], %[step2_15], %[step2_12] \n\t"
- "sub %[temp0], %[temp0], %[step2_8] \n\t"
- "add %[temp0], %[temp0], %[step2_11] \n\t"
- "madd $ac2, %[temp0], %[cospi_16_64] \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
- "sub %[temp1], %[step2_15], %[step2_12] \n\t"
- "add %[temp1], %[temp1], %[step2_8] \n\t"
- "sub %[temp1], %[temp1], %[step2_11] \n\t"
- "madd $ac3, %[temp1], %[cospi_16_64] \n\t"
-
- "add %[step3_8], %[step2_8], %[step2_11] \n\t"
- "add %[step3_9], %[step2_9], %[step2_10] \n\t"
- "add %[step3_14], %[step2_13], %[step2_14] \n\t"
- "add %[step3_15], %[step2_12], %[step2_15] \n\t"
- "extp %[step3_10], $ac0, 31 \n\t"
- "extp %[step3_13], $ac1, 31 \n\t"
- "extp %[step3_11], $ac2, 31 \n\t"
- "extp %[step3_12], $ac3, 31 \n\t"
-
- : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [step3_8] "=r" (step3_8), [step3_9] "=r" (step3_9),
- [step3_10] "=r" (step3_10), [step3_11] "=r" (step3_11),
- [step3_12] "=r" (step3_12), [step3_13] "=r" (step3_13),
- [step3_14] "=r" (step3_14), [step3_15] "=r" (step3_15)
- : [const_2_power_13] "r" (const_2_power_13), [step2_8] "r" (step2_8),
- [step2_9] "r" (step2_9), [step2_10] "r" (step2_10),
- [step2_11] "r" (step2_11), [step2_12] "r" (step2_12),
- [step2_13] "r" (step2_13), [step2_14] "r" (step2_14),
- [step2_15] "r" (step2_15), [cospi_16_64] "r" (cospi_16_64)
- );
-
- step2_18 = step1_17 - step1_18;
- step2_29 = step1_30 - step1_29;
-
- __asm__ __volatile__ (
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "msub $ac0, %[step2_18], %[cospi_8_64] \n\t"
- "madd $ac0, %[step2_29], %[cospi_24_64] \n\t"
- "extp %[step3_18], $ac0, 31 \n\t"
-
- : [step3_18] "=r" (step3_18)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_18] "r" (step2_18), [step2_29] "r" (step2_29),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
- );
-
- temp21 = step2_18 * cospi_24_64 + step2_29 * cospi_8_64;
- step3_29 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- step2_19 = step1_16 - step1_19;
- step2_28 = step1_31 - step1_28;
-
- __asm__ __volatile__ (
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "msub $ac0, %[step2_19], %[cospi_8_64] \n\t"
- "madd $ac0, %[step2_28], %[cospi_24_64] \n\t"
- "extp %[step3_19], $ac0, 31 \n\t"
-
- : [step3_19] "=r" (step3_19)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_19] "r" (step2_19), [step2_28] "r" (step2_28),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
- );
-
- temp21 = step2_19 * cospi_24_64 + step2_28 * cospi_8_64;
- step3_28 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- step3_16 = step1_16 + step1_19;
- step3_17 = step1_17 + step1_18;
- step3_30 = step1_29 + step1_30;
- step3_31 = step1_28 + step1_31;
-
- step2_20 = step1_23 - step1_20;
- step2_27 = step1_24 - step1_27;
-
- __asm__ __volatile__ (
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "msub $ac0, %[step2_20], %[cospi_24_64] \n\t"
- "msub $ac0, %[step2_27], %[cospi_8_64] \n\t"
- "extp %[step3_20], $ac0, 31 \n\t"
-
- : [step3_20] "=r" (step3_20)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_20] "r" (step2_20), [step2_27] "r" (step2_27),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
- );
-
- temp21 = -step2_20 * cospi_8_64 + step2_27 * cospi_24_64;
- step3_27 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- step2_21 = step1_22 - step1_21;
- step2_26 = step1_25 - step1_26;
-
- __asm__ __volatile__ (
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "msub $ac1, %[step2_21], %[cospi_24_64] \n\t"
- "msub $ac1, %[step2_26], %[cospi_8_64] \n\t"
- "extp %[step3_21], $ac1, 31 \n\t"
-
- : [step3_21] "=r" (step3_21)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_21] "r" (step2_21), [step2_26] "r" (step2_26),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
- );
-
- temp21 = -step2_21 * cospi_8_64 + step2_26 * cospi_24_64;
- step3_26 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- step3_22 = step1_21 + step1_22;
- step3_23 = step1_20 + step1_23;
- step3_24 = step1_24 + step1_27;
- step3_25 = step1_25 + step1_26;
-
- step2_16 = step3_16 + step3_23;
- step2_17 = step3_17 + step3_22;
- step2_18 = step3_18 + step3_21;
- step2_19 = step3_19 + step3_20;
- step2_20 = step3_19 - step3_20;
- step2_21 = step3_18 - step3_21;
- step2_22 = step3_17 - step3_22;
- step2_23 = step3_16 - step3_23;
-
- step2_24 = step3_31 - step3_24;
- step2_25 = step3_30 - step3_25;
- step2_26 = step3_29 - step3_26;
- step2_27 = step3_28 - step3_27;
- step2_28 = step3_28 + step3_27;
- step2_29 = step3_29 + step3_26;
- step2_30 = step3_30 + step3_25;
- step2_31 = step3_31 + step3_24;
-
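- /* Even half: input[0], input[16], input[8] and input[24] produce
-    step1_0/1/2/3. */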
- __asm__ __volatile__ (
- "lh %[load1], 0(%[input]) \n\t"
- "lh %[load2], 32(%[input]) \n\t"
- "lh %[load3], 16(%[input]) \n\t"
- "lh %[load4], 48(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
- "add %[result1], %[load1], %[load2] \n\t"
- "sub %[result2], %[load1], %[load2] \n\t"
- "madd $ac1, %[result1], %[cospi_16_64] \n\t"
- "madd $ac2, %[result2], %[cospi_16_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
- "madd $ac3, %[load3], %[cospi_24_64] \n\t"
- "msub $ac3, %[load4], %[cospi_8_64] \n\t"
- "extp %[temp2], $ac3, 31 \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "madd $ac1, %[load3], %[cospi_8_64] \n\t"
- "madd $ac1, %[load4], %[cospi_24_64] \n\t"
- "extp %[temp3], $ac1, 31 \n\t"
- "add %[step1_0], %[temp0], %[temp3] \n\t"
- "add %[step1_1], %[temp1], %[temp2] \n\t"
- "sub %[step1_2], %[temp1], %[temp2] \n\t"
- "sub %[step1_3], %[temp0], %[temp3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2),
- [load3] "=&r" (load3), [load4] "=&r" (load4),
- [result1] "=&r" (result1), [result2] "=&r" (result2),
- [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step1_0] "=r" (step1_0), [step1_1] "=r" (step1_1),
- [step1_2] "=r" (step1_2), [step1_3] "=r" (step1_3)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64),
- [cospi_16_64] "r" (cospi_16_64)
- );
-
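- /* input[4], input[28], input[20] and input[12] produce step1_4..7;
-    step1_5 and step1_6 get an extra cospi_16_64 scaling. */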
- __asm__ __volatile__ (
- "lh %[load1], 8(%[input]) \n\t"
- "lh %[load2], 56(%[input]) \n\t"
- "lh %[load3], 40(%[input]) \n\t"
- "lh %[load4], 24(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_28_64] \n\t"
- "msub $ac1, %[load2], %[cospi_4_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
- "madd $ac3, %[load1], %[cospi_4_64] \n\t"
- "madd $ac3, %[load2], %[cospi_28_64] \n\t"
- "extp %[temp3], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac2, %[load3], %[cospi_12_64] \n\t"
- "msub $ac2, %[load4], %[cospi_20_64] \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
- "madd $ac1, %[load3], %[cospi_20_64] \n\t"
- "madd $ac1, %[load4], %[cospi_12_64] \n\t"
- "extp %[temp2], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[temp3], %[temp2] \n\t"
- "sub %[load1], %[load1], %[temp0] \n\t"
- "add %[load1], %[load1], %[temp1] \n\t"
- "sub %[load2], %[temp0], %[temp1] \n\t"
- "sub %[load2], %[load2], %[temp2] \n\t"
- "add %[load2], %[load2], %[temp3] \n\t"
- "madd $ac1, %[load1], %[cospi_16_64] \n\t"
- "madd $ac3, %[load2], %[cospi_16_64] \n\t"
-
- "extp %[step1_5], $ac1, 31 \n\t"
- "extp %[step1_6], $ac3, 31 \n\t"
- "add %[step1_4], %[temp0], %[temp1] \n\t"
- "add %[step1_7], %[temp3], %[temp2] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2),
- [load3] "=&r" (load3), [load4] "=&r" (load4),
- [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step1_4] "=r" (step1_4), [step1_5] "=r" (step1_5),
- [step1_6] "=r" (step1_6), [step1_7] "=r" (step1_7)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64),
- [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64),
- [cospi_16_64] "r" (cospi_16_64)
- );
-
- step2_0 = step1_0 + step1_7;
- step2_1 = step1_1 + step1_6;
- step2_2 = step1_2 + step1_5;
- step2_3 = step1_3 + step1_4;
- step2_4 = step1_3 - step1_4;
- step2_5 = step1_2 - step1_5;
- step2_6 = step1_1 - step1_6;
- step2_7 = step1_0 - step1_7;
-
- // stage 7
- step1_0 = step2_0 + step3_15;
- step1_1 = step2_1 + step3_14;
- step1_2 = step2_2 + step3_13;
- step1_3 = step2_3 + step3_12;
- step1_4 = step2_4 + step3_11;
- step1_5 = step2_5 + step3_10;
- step1_6 = step2_6 + step3_9;
- step1_7 = step2_7 + step3_8;
- step1_8 = step2_7 - step3_8;
- step1_9 = step2_6 - step3_9;
- step1_10 = step2_5 - step3_10;
- step1_11 = step2_4 - step3_11;
- step1_12 = step2_3 - step3_12;
- step1_13 = step2_2 - step3_13;
- step1_14 = step2_1 - step3_14;
- step1_15 = step2_0 - step3_15;
-
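- /* step1_20..23 come from (step2_{27-k} - step2_{20+k}) * cospi_16_64;
-    the matching sums give step1_24..27 in the C code below. */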
- __asm__ __volatile__ (
- "sub %[temp0], %[step2_27], %[step2_20] \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "madd $ac0, %[temp0], %[cospi_16_64] \n\t"
- "extp %[step1_20], $ac0, 31 \n\t"
-
- : [temp0] "=&r" (temp0), [step1_20] "=r" (step1_20)
- : [const_2_power_13] "r" (const_2_power_13), [step2_20] "r" (step2_20),
- [step2_27] "r" (step2_27), [cospi_16_64] "r" (cospi_16_64)
- );
-
- temp21 = (step2_20 + step2_27) * cospi_16_64;
- step1_27 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- __asm__ __volatile__ (
- "sub %[temp0], %[step2_26], %[step2_21] \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "madd $ac0, %[temp0], %[cospi_16_64] \n\t"
- "extp %[step1_21], $ac0, 31 \n\t"
-
- : [temp0] "=&r" (temp0), [step1_21] "=r" (step1_21)
- : [const_2_power_13] "r" (const_2_power_13), [step2_26] "r" (step2_26),
- [step2_21] "r" (step2_21), [cospi_16_64] "r" (cospi_16_64)
- );
-
- temp21 = (step2_21 + step2_26) * cospi_16_64;
- step1_26 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- __asm__ __volatile__ (
- "sub %[temp0], %[step2_25], %[step2_22] \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "madd $ac0, %[temp0], %[cospi_16_64] \n\t"
- "extp %[step1_22], $ac0, 31 \n\t"
-
- : [temp0] "=&r" (temp0), [step1_22] "=r" (step1_22)
- : [const_2_power_13] "r" (const_2_power_13), [step2_25] "r" (step2_25),
- [step2_22] "r" (step2_22), [cospi_16_64] "r" (cospi_16_64)
- );
-
- temp21 = (step2_22 + step2_25) * cospi_16_64;
- step1_25 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- __asm__ __volatile__ (
- "sub %[temp0], %[step2_24], %[step2_23] \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "madd $ac0, %[temp0], %[cospi_16_64] \n\t"
- "extp %[step1_23], $ac0, 31 \n\t"
-
- : [temp0] "=&r" (temp0), [step1_23] "=r" (step1_23)
- : [const_2_power_13] "r" (const_2_power_13), [step2_24] "r" (step2_24),
- [step2_23] "r" (step2_23), [cospi_16_64] "r" (cospi_16_64)
- );
-
- temp21 = (step2_23 + step2_24) * cospi_16_64;
- step1_24 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
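- /* Write output rows 0-3 of this column: each residual is rounded
-    as (x + 32) >> 6, added to dest and clamped via the cm table. */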
- __asm__ __volatile__ (
- "lbu %[temp2], 0(%[dest_pix]) \n\t"
- "add %[temp0], %[step1_0], %[step2_31] \n\t"
- "addi %[temp0], %[temp0], 32 \n\t"
- "sra %[temp0], %[temp0], 6 \n\t"
- "add %[temp2], %[temp2], %[temp0] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "add %[temp1], %[step1_1], %[step2_30] \n\t"
- "sb %[temp0], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix]) \n\t"
- "addi %[temp1], %[temp1], 32 \n\t"
- "sra %[temp1], %[temp1], 6 \n\t"
- "add %[temp3], %[temp3], %[temp1] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[temp2], 0(%[dest_pix]) \n\t"
- "add %[temp0], %[step1_2], %[step2_29] \n\t"
- "addi %[temp0], %[temp0], 32 \n\t"
- "sra %[temp0], %[temp0], 6 \n\t"
- "add %[temp2], %[temp2], %[temp0] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "add %[temp1], %[step1_3], %[step2_28] \n\t"
- "sb %[temp0], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix]) \n\t"
- "addi %[temp1], %[temp1], 32 \n\t"
- "sra %[temp1], %[temp1], 6 \n\t"
- "add %[temp3], %[temp3], %[temp1] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
- [temp3] "=&r" (temp3), [dest_pix] "+r" (dest_pix)
- : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
- [step1_0] "r" (step1_0), [step1_1] "r" (step1_1),
- [step1_2] "r" (step1_2), [step1_3] "r" (step1_3),
- [step2_28] "r" (step2_28), [step2_29] "r" (step2_29),
- [step2_30] "r" (step2_30), [step2_31] "r" (step2_31)
- );
-
- step3_12 = ROUND_POWER_OF_TWO((step1_3 - step2_28), 6);
- step3_13 = ROUND_POWER_OF_TWO((step1_2 - step2_29), 6);
- step3_14 = ROUND_POWER_OF_TWO((step1_1 - step2_30), 6);
- step3_15 = ROUND_POWER_OF_TWO((step1_0 - step2_31), 6);
-
- __asm__ __volatile__ (
- "lbu %[temp2], 0(%[dest_pix1]) \n\t"
- "add %[temp2], %[temp2], %[step3_15] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "sb %[temp0], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix1]) \n\t"
- "add %[temp3], %[temp3], %[step3_14] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
-
- "lbu %[temp2], 0(%[dest_pix1]) \n\t"
- "add %[temp2], %[temp2], %[step3_13] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "sb %[temp0], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix1]) \n\t"
- "add %[temp3], %[temp3], %[step3_12] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
-
- : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
- [temp3] "=&r" (temp3), [dest_pix1] "+r" (dest_pix1)
- : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
- [step3_12] "r" (step3_12), [step3_13] "r" (step3_13),
- [step3_14] "r" (step3_14), [step3_15] "r" (step3_15)
- );
-
- __asm__ __volatile__ (
- "lbu %[temp2], 0(%[dest_pix]) \n\t"
- "add %[temp0], %[step1_4], %[step1_27] \n\t"
- "addi %[temp0], %[temp0], 32 \n\t"
- "sra %[temp0], %[temp0], 6 \n\t"
- "add %[temp2], %[temp2], %[temp0] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "add %[temp1], %[step1_5], %[step1_26] \n\t"
- "sb %[temp0], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix]) \n\t"
- "addi %[temp1], %[temp1], 32 \n\t"
- "sra %[temp1], %[temp1], 6 \n\t"
- "add %[temp3], %[temp3], %[temp1] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[temp2], 0(%[dest_pix]) \n\t"
- "add %[temp0], %[step1_6], %[step1_25] \n\t"
- "addi %[temp0], %[temp0], 32 \n\t"
- "sra %[temp0], %[temp0], 6 \n\t"
- "add %[temp2], %[temp2], %[temp0] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "add %[temp1], %[step1_7], %[step1_24] \n\t"
- "sb %[temp0], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix]) \n\t"
- "addi %[temp1], %[temp1], 32 \n\t"
- "sra %[temp1], %[temp1], 6 \n\t"
- "add %[temp3], %[temp3], %[temp1] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
- [temp3] "=&r" (temp3), [dest_pix] "+r" (dest_pix)
- : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
- [step1_4] "r" (step1_4), [step1_5] "r" (step1_5),
- [step1_6] "r" (step1_6), [step1_7] "r" (step1_7),
- [step1_24] "r" (step1_24), [step1_25] "r" (step1_25),
- [step1_26] "r" (step1_26), [step1_27] "r" (step1_27)
- );
-
- step3_12 = ROUND_POWER_OF_TWO((step1_7 - step1_24), 6);
- step3_13 = ROUND_POWER_OF_TWO((step1_6 - step1_25), 6);
- step3_14 = ROUND_POWER_OF_TWO((step1_5 - step1_26), 6);
- step3_15 = ROUND_POWER_OF_TWO((step1_4 - step1_27), 6);
-
- __asm__ __volatile__ (
- "lbu %[temp2], 0(%[dest_pix1]) \n\t"
- "add %[temp2], %[temp2], %[step3_15] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "sb %[temp0], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix1]) \n\t"
- "add %[temp3], %[temp3], %[step3_14] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
-
- "lbu %[temp2], 0(%[dest_pix1]) \n\t"
- "add %[temp2], %[temp2], %[step3_13] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "sb %[temp0], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix1]) \n\t"
- "add %[temp3], %[temp3], %[step3_12] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
-
- : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
- [temp3] "=&r" (temp3), [dest_pix1] "+r" (dest_pix1)
- : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
- [step3_12] "r" (step3_12), [step3_13] "r" (step3_13),
- [step3_14] "r" (step3_14), [step3_15] "r" (step3_15)
- );
-
- __asm__ __volatile__ (
- "lbu %[temp2], 0(%[dest_pix]) \n\t"
- "add %[temp0], %[step1_8], %[step1_23] \n\t"
- "addi %[temp0], %[temp0], 32 \n\t"
- "sra %[temp0], %[temp0], 6 \n\t"
- "add %[temp2], %[temp2], %[temp0] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "add %[temp1], %[step1_9], %[step1_22] \n\t"
- "sb %[temp0], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix]) \n\t"
- "addi %[temp1], %[temp1], 32 \n\t"
- "sra %[temp1], %[temp1], 6 \n\t"
- "add %[temp3], %[temp3], %[temp1] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[temp2], 0(%[dest_pix]) \n\t"
- "add %[temp0], %[step1_10], %[step1_21] \n\t"
- "addi %[temp0], %[temp0], 32 \n\t"
- "sra %[temp0], %[temp0], 6 \n\t"
- "add %[temp2], %[temp2], %[temp0] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "add %[temp1], %[step1_11], %[step1_20] \n\t"
- "sb %[temp0], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix]) \n\t"
- "addi %[temp1], %[temp1], 32 \n\t"
- "sra %[temp1], %[temp1], 6 \n\t"
- "add %[temp3], %[temp3], %[temp1] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
- [temp3] "=&r" (temp3), [dest_pix] "+r" (dest_pix)
- : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
- [step1_8] "r" (step1_8), [step1_9] "r" (step1_9),
- [step1_10] "r" (step1_10), [step1_11] "r" (step1_11),
- [step1_20] "r" (step1_20), [step1_21] "r" (step1_21),
- [step1_22] "r" (step1_22), [step1_23] "r" (step1_23)
- );
-
- step3_12 = ROUND_POWER_OF_TWO((step1_11 - step1_20), 6);
- step3_13 = ROUND_POWER_OF_TWO((step1_10 - step1_21), 6);
- step3_14 = ROUND_POWER_OF_TWO((step1_9 - step1_22), 6);
- step3_15 = ROUND_POWER_OF_TWO((step1_8 - step1_23), 6);
-
- __asm__ __volatile__ (
- "lbu %[temp2], 0(%[dest_pix1]) \n\t"
- "add %[temp2], %[temp2], %[step3_15] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "sb %[temp0], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix1]) \n\t"
- "add %[temp3], %[temp3], %[step3_14] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
-
- "lbu %[temp2], 0(%[dest_pix1]) \n\t"
- "add %[temp2], %[temp2], %[step3_13] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "sb %[temp0], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix1]) \n\t"
- "add %[temp3], %[temp3], %[step3_12] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
-
- : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
- [temp3] "=&r" (temp3), [dest_pix1] "+r" (dest_pix1)
- : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
- [step3_12] "r" (step3_12), [step3_13] "r" (step3_13),
- [step3_14] "r" (step3_14), [step3_15] "r" (step3_15)
- );
-
- __asm__ __volatile__ (
- "lbu %[temp2], 0(%[dest_pix]) \n\t"
- "add %[temp0], %[step1_12], %[step2_19] \n\t"
- "addi %[temp0], %[temp0], 32 \n\t"
- "sra %[temp0], %[temp0], 6 \n\t"
- "add %[temp2], %[temp2], %[temp0] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "add %[temp1], %[step1_13], %[step2_18] \n\t"
- "sb %[temp0], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix]) \n\t"
- "addi %[temp1], %[temp1], 32 \n\t"
- "sra %[temp1], %[temp1], 6 \n\t"
- "add %[temp3], %[temp3], %[temp1] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[temp2], 0(%[dest_pix]) \n\t"
- "add %[temp0], %[step1_14], %[step2_17] \n\t"
- "addi %[temp0], %[temp0], 32 \n\t"
- "sra %[temp0], %[temp0], 6 \n\t"
- "add %[temp2], %[temp2], %[temp0] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "add %[temp1], %[step1_15], %[step2_16] \n\t"
- "sb %[temp0], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix]) \n\t"
- "addi %[temp1], %[temp1], 32 \n\t"
- "sra %[temp1], %[temp1], 6 \n\t"
- "add %[temp3], %[temp3], %[temp1] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix]) \n\t"
-
- : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
- [temp3] "=&r" (temp3), [dest_pix] "+r" (dest_pix)
- : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
- [step1_12] "r" (step1_12), [step1_13] "r" (step1_13),
- [step1_14] "r" (step1_14), [step1_15] "r" (step1_15),
- [step2_16] "r" (step2_16), [step2_17] "r" (step2_17),
- [step2_18] "r" (step2_18), [step2_19] "r" (step2_19)
- );
-
- step3_12 = ROUND_POWER_OF_TWO((step1_15 - step2_16), 6);
- step3_13 = ROUND_POWER_OF_TWO((step1_14 - step2_17), 6);
- step3_14 = ROUND_POWER_OF_TWO((step1_13 - step2_18), 6);
- step3_15 = ROUND_POWER_OF_TWO((step1_12 - step2_19), 6);
-
- __asm__ __volatile__ (
- "lbu %[temp2], 0(%[dest_pix1]) \n\t"
- "add %[temp2], %[temp2], %[step3_15] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "sb %[temp0], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix1]) \n\t"
- "add %[temp3], %[temp3], %[step3_14] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
-
- "lbu %[temp2], 0(%[dest_pix1]) \n\t"
- "add %[temp2], %[temp2], %[step3_13] \n\t"
- "lbux %[temp0], %[temp2](%[cm]) \n\t"
- "sb %[temp0], 0(%[dest_pix1]) \n\t"
- "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t"
- "lbu %[temp3], 0(%[dest_pix1]) \n\t"
- "add %[temp3], %[temp3], %[step3_12] \n\t"
- "lbux %[temp1], %[temp3](%[cm]) \n\t"
- "sb %[temp1], 0(%[dest_pix1]) \n\t"
-
- : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
- [temp3] "=&r" (temp3), [dest_pix1] "+r" (dest_pix1)
- : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
- [step3_12] "r" (step3_12), [step3_13] "r" (step3_13),
- [step3_14] "r" (step3_14), [step3_15] "r" (step3_15)
- );
-
- input += 32;
- }
-}
-#endif // #if HAVE_DSPR2
diff --git a/vp9/common/mips/dspr2/vp9_itrans32_dspr2.c b/vp9/common/mips/dspr2/vp9_itrans32_dspr2.c
deleted file mode 100644
index b4b0d248c..000000000
--- a/vp9/common/mips/dspr2/vp9_itrans32_dspr2.c
+++ /dev/null
@@ -1,1076 +0,0 @@
-/*
- * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./vpx_config.h"
-#include "./vp9_rtcd.h"
-#include "vp9/common/vp9_common.h"
-#include "vp9/common/vp9_blockd.h"
-#include "vp9/common/mips/dspr2/vp9_common_dspr2.h"
-#include "vpx_dsp/txfm_common.h"
-
-#if HAVE_DSPR2
-static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
- uint32_t no_rows) {
- int16_t step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6;
- int16_t step1_7, step1_8, step1_9, step1_10, step1_11, step1_12, step1_13;
- int16_t step1_14, step1_15, step1_16, step1_17, step1_18, step1_19, step1_20;
- int16_t step1_21, step1_22, step1_23, step1_24, step1_25, step1_26, step1_27;
- int16_t step1_28, step1_29, step1_30, step1_31;
- int16_t step2_0, step2_1, step2_2, step2_3, step2_4, step2_5, step2_6;
- int16_t step2_7, step2_8, step2_9, step2_10, step2_11, step2_12, step2_13;
- int16_t step2_14, step2_15, step2_16, step2_17, step2_18, step2_19, step2_20;
- int16_t step2_21, step2_22, step2_23, step2_24, step2_25, step2_26, step2_27;
- int16_t step2_28, step2_29, step2_30, step2_31;
- int16_t step3_8, step3_9, step3_10, step3_11, step3_12, step3_13, step3_14;
- int16_t step3_15, step3_16, step3_17, step3_18, step3_19, step3_20, step3_21;
- int16_t step3_22, step3_23, step3_24, step3_25, step3_26, step3_27, step3_28;
- int16_t step3_29, step3_30, step3_31;
- int temp0, temp1, temp2, temp3;
- int load1, load2, load3, load4;
- int result1, result2;
- int temp21;
- int i;
- const int const_2_power_13 = 8192;
- const int32_t *input_int;
-
- for (i = no_rows; i--; ) {
- input_int = (const int32_t *)input;
-
- if (!(input_int[0] | input_int[1] | input_int[2] | input_int[3] |
- input_int[4] | input_int[5] | input_int[6] | input_int[7] |
- input_int[8] | input_int[9] | input_int[10] | input_int[11] |
- input_int[12] | input_int[13] | input_int[14] | input_int[15])) {
- input += 32;
-
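- /* All-zero input row: store zeros down one output column
-    (every 32nd int16). */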
- __asm__ __volatile__ (
- "sh $zero, 0(%[output]) \n\t"
- "sh $zero, 64(%[output]) \n\t"
- "sh $zero, 128(%[output]) \n\t"
- "sh $zero, 192(%[output]) \n\t"
- "sh $zero, 256(%[output]) \n\t"
- "sh $zero, 320(%[output]) \n\t"
- "sh $zero, 384(%[output]) \n\t"
- "sh $zero, 448(%[output]) \n\t"
- "sh $zero, 512(%[output]) \n\t"
- "sh $zero, 576(%[output]) \n\t"
- "sh $zero, 640(%[output]) \n\t"
- "sh $zero, 704(%[output]) \n\t"
- "sh $zero, 768(%[output]) \n\t"
- "sh $zero, 832(%[output]) \n\t"
- "sh $zero, 896(%[output]) \n\t"
- "sh $zero, 960(%[output]) \n\t"
- "sh $zero, 1024(%[output]) \n\t"
- "sh $zero, 1088(%[output]) \n\t"
- "sh $zero, 1152(%[output]) \n\t"
- "sh $zero, 1216(%[output]) \n\t"
- "sh $zero, 1280(%[output]) \n\t"
- "sh $zero, 1344(%[output]) \n\t"
- "sh $zero, 1408(%[output]) \n\t"
- "sh $zero, 1472(%[output]) \n\t"
- "sh $zero, 1536(%[output]) \n\t"
- "sh $zero, 1600(%[output]) \n\t"
- "sh $zero, 1664(%[output]) \n\t"
- "sh $zero, 1728(%[output]) \n\t"
- "sh $zero, 1792(%[output]) \n\t"
- "sh $zero, 1856(%[output]) \n\t"
- "sh $zero, 1920(%[output]) \n\t"
- "sh $zero, 1984(%[output]) \n\t"
-
- :
- : [output] "r" (output)
- );
-
- output += 1;
-
- continue;
- }
-
- /* prefetch row */
- prefetch_load((const uint8_t *)(input + 32));
- prefetch_load((const uint8_t *)(input + 48));
-
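- /* Butterflies on input[1], input[31], input[17] and input[15]
-    produce step1_16/17/30/31. */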
- __asm__ __volatile__ (
- "lh %[load1], 2(%[input]) \n\t"
- "lh %[load2], 62(%[input]) \n\t"
- "lh %[load3], 34(%[input]) \n\t"
- "lh %[load4], 30(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_31_64] \n\t"
- "msub $ac1, %[load2], %[cospi_1_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
-
- "madd $ac3, %[load1], %[cospi_1_64] \n\t"
- "madd $ac3, %[load2], %[cospi_31_64] \n\t"
- "extp %[temp3], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac2, %[load3], %[cospi_15_64] \n\t"
- "msub $ac2, %[load4], %[cospi_17_64] \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
-
- "madd $ac1, %[load3], %[cospi_17_64] \n\t"
- "madd $ac1, %[load4], %[cospi_15_64] \n\t"
- "extp %[temp2], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[temp3], %[temp2] \n\t"
- "sub %[load2], %[temp0], %[temp1] \n\t"
-
- "madd $ac1, %[load1], %[cospi_28_64] \n\t"
- "msub $ac1, %[load2], %[cospi_4_64] \n\t"
- "madd $ac3, %[load1], %[cospi_4_64] \n\t"
- "madd $ac3, %[load2], %[cospi_28_64] \n\t"
-
- "extp %[step1_17], $ac1, 31 \n\t"
- "extp %[step1_30], $ac3, 31 \n\t"
- "add %[step1_16], %[temp0], %[temp1] \n\t"
- "add %[step1_31], %[temp2], %[temp3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2),
- [load3] "=&r" (load3), [load4] "=&r" (load4),
- [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step1_16] "=r" (step1_16), [step1_17] "=r" (step1_17),
- [step1_30] "=r" (step1_30), [step1_31] "=r" (step1_31)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_31_64] "r" (cospi_31_64), [cospi_1_64] "r" (cospi_1_64),
- [cospi_4_64] "r" (cospi_4_64), [cospi_17_64] "r" (cospi_17_64),
- [cospi_15_64] "r" (cospi_15_64), [cospi_28_64] "r" (cospi_28_64)
- );
-
- __asm__ __volatile__ (
- "lh %[load1], 18(%[input]) \n\t"
- "lh %[load2], 46(%[input]) \n\t"
- "lh %[load3], 50(%[input]) \n\t"
- "lh %[load4], 14(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_23_64] \n\t"
- "msub $ac1, %[load2], %[cospi_9_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
-
- "madd $ac3, %[load1], %[cospi_9_64] \n\t"
- "madd $ac3, %[load2], %[cospi_23_64] \n\t"
- "extp %[temp3], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac2, %[load3], %[cospi_7_64] \n\t"
- "msub $ac2, %[load4], %[cospi_25_64] \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
-
- "madd $ac1, %[load3], %[cospi_25_64] \n\t"
- "madd $ac1, %[load4], %[cospi_7_64] \n\t"
- "extp %[temp2], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[temp1], %[temp0] \n\t"
- "sub %[load2], %[temp2], %[temp3] \n\t"
-
- "msub $ac1, %[load1], %[cospi_28_64] \n\t"
- "msub $ac1, %[load2], %[cospi_4_64] \n\t"
- "msub $ac3, %[load1], %[cospi_4_64] \n\t"
- "madd $ac3, %[load2], %[cospi_28_64] \n\t"
-
- "extp %[step1_18], $ac1, 31 \n\t"
- "extp %[step1_29], $ac3, 31 \n\t"
- "add %[step1_19], %[temp0], %[temp1] \n\t"
- "add %[step1_28], %[temp2], %[temp3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2),
- [load3] "=&r" (load3), [load4] "=&r" (load4),
- [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step1_18] "=r" (step1_18), [step1_19] "=r" (step1_19),
- [step1_28] "=r" (step1_28), [step1_29] "=r" (step1_29)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_23_64] "r" (cospi_23_64), [cospi_9_64] "r" (cospi_9_64),
- [cospi_4_64] "r" (cospi_4_64), [cospi_7_64] "r" (cospi_7_64),
- [cospi_25_64] "r" (cospi_25_64), [cospi_28_64] "r" (cospi_28_64)
- );
-
- __asm__ __volatile__ (
- "lh %[load1], 10(%[input]) \n\t"
- "lh %[load2], 54(%[input]) \n\t"
- "lh %[load3], 42(%[input]) \n\t"
- "lh %[load4], 22(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_27_64] \n\t"
- "msub $ac1, %[load2], %[cospi_5_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
-
- "madd $ac3, %[load1], %[cospi_5_64] \n\t"
- "madd $ac3, %[load2], %[cospi_27_64] \n\t"
- "extp %[temp3], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac2, %[load3], %[cospi_11_64] \n\t"
- "msub $ac2, %[load4], %[cospi_21_64] \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
-
- "madd $ac1, %[load3], %[cospi_21_64] \n\t"
- "madd $ac1, %[load4], %[cospi_11_64] \n\t"
- "extp %[temp2], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[temp0], %[temp1] \n\t"
- "sub %[load2], %[temp3], %[temp2] \n\t"
-
- "madd $ac1, %[load2], %[cospi_12_64] \n\t"
- "msub $ac1, %[load1], %[cospi_20_64] \n\t"
- "madd $ac3, %[load1], %[cospi_12_64] \n\t"
- "madd $ac3, %[load2], %[cospi_20_64] \n\t"
-
- "extp %[step1_21], $ac1, 31 \n\t"
- "extp %[step1_26], $ac3, 31 \n\t"
- "add %[step1_20], %[temp0], %[temp1] \n\t"
- "add %[step1_27], %[temp2], %[temp3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2),
- [load3] "=&r" (load3), [load4] "=&r" (load4),
- [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step1_20] "=r" (step1_20), [step1_21] "=r" (step1_21),
- [step1_26] "=r" (step1_26), [step1_27] "=r" (step1_27)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_27_64] "r" (cospi_27_64), [cospi_5_64] "r" (cospi_5_64),
- [cospi_11_64] "r" (cospi_11_64), [cospi_21_64] "r" (cospi_21_64),
- [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64)
- );
-
- __asm__ __volatile__ (
- "lh %[load1], 26(%[input]) \n\t"
- "lh %[load2], 38(%[input]) \n\t"
- "lh %[load3], 58(%[input]) \n\t"
- "lh %[load4], 6(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_19_64] \n\t"
- "msub $ac1, %[load2], %[cospi_13_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
-
- "madd $ac3, %[load1], %[cospi_13_64] \n\t"
- "madd $ac3, %[load2], %[cospi_19_64] \n\t"
- "extp %[temp3], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac2, %[load3], %[cospi_3_64] \n\t"
- "msub $ac2, %[load4], %[cospi_29_64] \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
-
- "madd $ac1, %[load3], %[cospi_29_64] \n\t"
- "madd $ac1, %[load4], %[cospi_3_64] \n\t"
- "extp %[temp2], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[temp1], %[temp0] \n\t"
- "sub %[load2], %[temp2], %[temp3] \n\t"
-
- "msub $ac1, %[load1], %[cospi_12_64] \n\t"
- "msub $ac1, %[load2], %[cospi_20_64] \n\t"
- "msub $ac3, %[load1], %[cospi_20_64] \n\t"
- "madd $ac3, %[load2], %[cospi_12_64] \n\t"
-
- "extp %[step1_22], $ac1, 31 \n\t"
- "extp %[step1_25], $ac3, 31 \n\t"
- "add %[step1_23], %[temp0], %[temp1] \n\t"
- "add %[step1_24], %[temp2], %[temp3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2),
- [load3] "=&r" (load3), [load4] "=&r" (load4),
- [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step1_22] "=r" (step1_22), [step1_23] "=r" (step1_23),
- [step1_24] "=r" (step1_24), [step1_25] "=r" (step1_25)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_19_64] "r" (cospi_19_64), [cospi_13_64] "r" (cospi_13_64),
- [cospi_3_64] "r" (cospi_3_64), [cospi_29_64] "r" (cospi_29_64),
- [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64)
- );
-
- __asm__ __volatile__ (
- "lh %[load1], 4(%[input]) \n\t"
- "lh %[load2], 60(%[input]) \n\t"
- "lh %[load3], 36(%[input]) \n\t"
- "lh %[load4], 28(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_30_64] \n\t"
- "msub $ac1, %[load2], %[cospi_2_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
-
- "madd $ac3, %[load1], %[cospi_2_64] \n\t"
- "madd $ac3, %[load2], %[cospi_30_64] \n\t"
- "extp %[temp3], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac2, %[load3], %[cospi_14_64] \n\t"
- "msub $ac2, %[load4], %[cospi_18_64] \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
-
- "madd $ac1, %[load3], %[cospi_18_64] \n\t"
- "madd $ac1, %[load4], %[cospi_14_64] \n\t"
- "extp %[temp2], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[temp0], %[temp1] \n\t"
- "sub %[load2], %[temp3], %[temp2] \n\t"
-
- "msub $ac1, %[load1], %[cospi_8_64] \n\t"
- "madd $ac1, %[load2], %[cospi_24_64] \n\t"
- "madd $ac3, %[load1], %[cospi_24_64] \n\t"
- "madd $ac3, %[load2], %[cospi_8_64] \n\t"
-
- "extp %[step2_9], $ac1, 31 \n\t"
- "extp %[step2_14], $ac3, 31 \n\t"
- "add %[step2_8], %[temp0], %[temp1] \n\t"
- "add %[step2_15], %[temp2], %[temp3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2),
- [load3] "=&r" (load3), [load4] "=&r" (load4),
- [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step2_8] "=r" (step2_8), [step2_9] "=r" (step2_9),
- [step2_14] "=r" (step2_14), [step2_15] "=r" (step2_15)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_30_64] "r" (cospi_30_64), [cospi_2_64] "r" (cospi_2_64),
- [cospi_14_64] "r" (cospi_14_64), [cospi_18_64] "r" (cospi_18_64),
- [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
- );
-
- __asm__ __volatile__ (
- "lh %[load1], 20(%[input]) \n\t"
- "lh %[load2], 44(%[input]) \n\t"
- "lh %[load3], 52(%[input]) \n\t"
- "lh %[load4], 12(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_22_64] \n\t"
- "msub $ac1, %[load2], %[cospi_10_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
-
- "madd $ac3, %[load1], %[cospi_10_64] \n\t"
- "madd $ac3, %[load2], %[cospi_22_64] \n\t"
- "extp %[temp3], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac2, %[load3], %[cospi_6_64] \n\t"
- "msub $ac2, %[load4], %[cospi_26_64] \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
-
- "madd $ac1, %[load3], %[cospi_26_64] \n\t"
- "madd $ac1, %[load4], %[cospi_6_64] \n\t"
- "extp %[temp2], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[temp1], %[temp0] \n\t"
- "sub %[load2], %[temp2], %[temp3] \n\t"
-
- "msub $ac1, %[load1], %[cospi_24_64] \n\t"
- "msub $ac1, %[load2], %[cospi_8_64] \n\t"
- "madd $ac3, %[load2], %[cospi_24_64] \n\t"
- "msub $ac3, %[load1], %[cospi_8_64] \n\t"
-
- "extp %[step2_10], $ac1, 31 \n\t"
- "extp %[step2_13], $ac3, 31 \n\t"
- "add %[step2_11], %[temp0], %[temp1] \n\t"
- "add %[step2_12], %[temp2], %[temp3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2),
- [load3] "=&r" (load3), [load4] "=&r" (load4),
- [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step2_10] "=r" (step2_10), [step2_11] "=r" (step2_11),
- [step2_12] "=r" (step2_12), [step2_13] "=r" (step2_13)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_22_64] "r" (cospi_22_64), [cospi_10_64] "r" (cospi_10_64),
- [cospi_6_64] "r" (cospi_6_64), [cospi_26_64] "r" (cospi_26_64),
- [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
- );
-
- __asm__ __volatile__ (
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "sub %[temp0], %[step2_14], %[step2_13] \n\t"
- "sub %[temp0], %[temp0], %[step2_9] \n\t"
- "add %[temp0], %[temp0], %[step2_10] \n\t"
- "madd $ac0, %[temp0], %[cospi_16_64] \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "sub %[temp1], %[step2_14], %[step2_13] \n\t"
- "add %[temp1], %[temp1], %[step2_9] \n\t"
- "sub %[temp1], %[temp1], %[step2_10] \n\t"
- "madd $ac1, %[temp1], %[cospi_16_64] \n\t"
-
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
- "sub %[temp0], %[step2_15], %[step2_12] \n\t"
- "sub %[temp0], %[temp0], %[step2_8] \n\t"
- "add %[temp0], %[temp0], %[step2_11] \n\t"
- "madd $ac2, %[temp0], %[cospi_16_64] \n\t"
-
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
- "sub %[temp1], %[step2_15], %[step2_12] \n\t"
- "add %[temp1], %[temp1], %[step2_8] \n\t"
- "sub %[temp1], %[temp1], %[step2_11] \n\t"
- "madd $ac3, %[temp1], %[cospi_16_64] \n\t"
-
- "add %[step3_8], %[step2_8], %[step2_11] \n\t"
- "add %[step3_9], %[step2_9], %[step2_10] \n\t"
- "add %[step3_14], %[step2_13], %[step2_14] \n\t"
- "add %[step3_15], %[step2_12], %[step2_15] \n\t"
-
- "extp %[step3_10], $ac0, 31 \n\t"
- "extp %[step3_13], $ac1, 31 \n\t"
- "extp %[step3_11], $ac2, 31 \n\t"
- "extp %[step3_12], $ac3, 31 \n\t"
-
- : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [step3_8] "=r" (step3_8), [step3_9] "=r" (step3_9),
- [step3_10] "=r" (step3_10), [step3_11] "=r" (step3_11),
- [step3_12] "=r" (step3_12), [step3_13] "=r" (step3_13),
- [step3_14] "=r" (step3_14), [step3_15] "=r" (step3_15)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_8] "r" (step2_8), [step2_9] "r" (step2_9),
- [step2_10] "r" (step2_10), [step2_11] "r" (step2_11),
- [step2_12] "r" (step2_12), [step2_13] "r" (step2_13),
- [step2_14] "r" (step2_14), [step2_15] "r" (step2_15),
- [cospi_16_64] "r" (cospi_16_64)
- );
-
- step2_18 = step1_17 - step1_18;
- step2_29 = step1_30 - step1_29;
-
- __asm__ __volatile__ (
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "msub $ac0, %[step2_18], %[cospi_8_64] \n\t"
- "madd $ac0, %[step2_29], %[cospi_24_64] \n\t"
- "extp %[step3_18], $ac0, 31 \n\t"
-
- : [step3_18] "=r" (step3_18)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_18] "r" (step2_18), [step2_29] "r" (step2_29),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
- );
-
- temp21 = step2_18 * cospi_24_64 + step2_29 * cospi_8_64;
- step3_29 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- step2_19 = step1_16 - step1_19;
- step2_28 = step1_31 - step1_28;
-
- __asm__ __volatile__ (
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "msub $ac0, %[step2_19], %[cospi_8_64] \n\t"
- "madd $ac0, %[step2_28], %[cospi_24_64] \n\t"
- "extp %[step3_19], $ac0, 31 \n\t"
-
- : [step3_19] "=r" (step3_19)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_19] "r" (step2_19), [step2_28] "r" (step2_28),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
- );
-
- temp21 = step2_19 * cospi_24_64 + step2_28 * cospi_8_64;
- step3_28 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- step3_16 = step1_16 + step1_19;
- step3_17 = step1_17 + step1_18;
- step3_30 = step1_29 + step1_30;
- step3_31 = step1_28 + step1_31;
-
- step2_20 = step1_23 - step1_20;
- step2_27 = step1_24 - step1_27;
-
- __asm__ __volatile__ (
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "msub $ac0, %[step2_20], %[cospi_24_64] \n\t"
- "msub $ac0, %[step2_27], %[cospi_8_64] \n\t"
- "extp %[step3_20], $ac0, 31 \n\t"
-
- : [step3_20] "=r" (step3_20)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_20] "r" (step2_20), [step2_27] "r" (step2_27),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
- );
-
- temp21 = -step2_20 * cospi_8_64 + step2_27 * cospi_24_64;
- step3_27 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- step2_21 = step1_22 - step1_21;
- step2_26 = step1_25 - step1_26;
-
- __asm__ __volatile__ (
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "msub $ac1, %[step2_21], %[cospi_24_64] \n\t"
- "msub $ac1, %[step2_26], %[cospi_8_64] \n\t"
- "extp %[step3_21], $ac1, 31 \n\t"
-
- : [step3_21] "=r" (step3_21)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_21] "r" (step2_21), [step2_26] "r" (step2_26),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
- );
-
- temp21 = -step2_21 * cospi_8_64 + step2_26 * cospi_24_64;
- step3_26 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- step3_22 = step1_21 + step1_22;
- step3_23 = step1_20 + step1_23;
- step3_24 = step1_24 + step1_27;
- step3_25 = step1_25 + step1_26;
-
- step2_16 = step3_16 + step3_23;
- step2_17 = step3_17 + step3_22;
- step2_18 = step3_18 + step3_21;
- step2_19 = step3_19 + step3_20;
- step2_20 = step3_19 - step3_20;
- step2_21 = step3_18 - step3_21;
- step2_22 = step3_17 - step3_22;
- step2_23 = step3_16 - step3_23;
-
- step2_24 = step3_31 - step3_24;
- step2_25 = step3_30 - step3_25;
- step2_26 = step3_29 - step3_26;
- step2_27 = step3_28 - step3_27;
- step2_28 = step3_28 + step3_27;
- step2_29 = step3_29 + step3_26;
- step2_30 = step3_30 + step3_25;
- step2_31 = step3_31 + step3_24;
-
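- /* Even half: input[0], input[16], input[8] and input[24] produce
-    step1_0/1/2/3. */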
- __asm__ __volatile__ (
- "lh %[load1], 0(%[input]) \n\t"
- "lh %[load2], 32(%[input]) \n\t"
- "lh %[load3], 16(%[input]) \n\t"
- "lh %[load4], 48(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
- "add %[result1], %[load1], %[load2] \n\t"
- "sub %[result2], %[load1], %[load2] \n\t"
- "madd $ac1, %[result1], %[cospi_16_64] \n\t"
- "madd $ac2, %[result2], %[cospi_16_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
- "madd $ac3, %[load3], %[cospi_24_64] \n\t"
- "msub $ac3, %[load4], %[cospi_8_64] \n\t"
- "extp %[temp2], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "madd $ac1, %[load3], %[cospi_8_64] \n\t"
- "madd $ac1, %[load4], %[cospi_24_64] \n\t"
- "extp %[temp3], $ac1, 31 \n\t"
-
- "add %[step1_0], %[temp0], %[temp3] \n\t"
- "add %[step1_1], %[temp1], %[temp2] \n\t"
- "sub %[step1_2], %[temp1], %[temp2] \n\t"
- "sub %[step1_3], %[temp0], %[temp3] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2),
- [load3] "=&r" (load3), [load4] "=&r" (load4),
- [result1] "=&r" (result1), [result2] "=&r" (result2),
- [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step1_0] "=r" (step1_0), [step1_1] "=r" (step1_1),
- [step1_2] "=r" (step1_2), [step1_3] "=r" (step1_3)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_16_64] "r" (cospi_16_64),
- [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
-
- );
-
- __asm__ __volatile__ (
- "lh %[load1], 8(%[input]) \n\t"
- "lh %[load2], 56(%[input]) \n\t"
- "lh %[load3], 40(%[input]) \n\t"
- "lh %[load4], 24(%[input]) \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "madd $ac1, %[load1], %[cospi_28_64] \n\t"
- "msub $ac1, %[load2], %[cospi_4_64] \n\t"
- "extp %[temp0], $ac1, 31 \n\t"
-
- "madd $ac3, %[load1], %[cospi_4_64] \n\t"
- "madd $ac3, %[load2], %[cospi_28_64] \n\t"
- "extp %[temp3], $ac3, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
-
- "madd $ac2, %[load3], %[cospi_12_64] \n\t"
- "msub $ac2, %[load4], %[cospi_20_64] \n\t"
- "extp %[temp1], $ac2, 31 \n\t"
-
- "madd $ac1, %[load3], %[cospi_20_64] \n\t"
- "madd $ac1, %[load4], %[cospi_12_64] \n\t"
- "extp %[temp2], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "mtlo %[const_2_power_13], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
-
- "sub %[load1], %[temp3], %[temp2] \n\t"
- "sub %[load1], %[load1], %[temp0] \n\t"
- "add %[load1], %[load1], %[temp1] \n\t"
-
- "sub %[load2], %[temp0], %[temp1] \n\t"
- "sub %[load2], %[load2], %[temp2] \n\t"
- "add %[load2], %[load2], %[temp3] \n\t"
-
- "madd $ac1, %[load1], %[cospi_16_64] \n\t"
- "madd $ac3, %[load2], %[cospi_16_64] \n\t"
-
- "extp %[step1_5], $ac1, 31 \n\t"
- "extp %[step1_6], $ac3, 31 \n\t"
- "add %[step1_4], %[temp0], %[temp1] \n\t"
- "add %[step1_7], %[temp3], %[temp2] \n\t"
-
- : [load1] "=&r" (load1), [load2] "=&r" (load2),
- [load3] "=&r" (load3), [load4] "=&r" (load4),
- [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
- [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
- [step1_4] "=r" (step1_4), [step1_5] "=r" (step1_5),
- [step1_6] "=r" (step1_6), [step1_7] "=r" (step1_7)
- : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
- [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64),
- [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64),
- [cospi_16_64] "r" (cospi_16_64)
- );
-
- step2_0 = step1_0 + step1_7;
- step2_1 = step1_1 + step1_6;
- step2_2 = step1_2 + step1_5;
- step2_3 = step1_3 + step1_4;
- step2_4 = step1_3 - step1_4;
- step2_5 = step1_2 - step1_5;
- step2_6 = step1_1 - step1_6;
- step2_7 = step1_0 - step1_7;
-
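- // stage 7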
- step1_0 = step2_0 + step3_15;
- step1_1 = step2_1 + step3_14;
- step1_2 = step2_2 + step3_13;
- step1_3 = step2_3 + step3_12;
- step1_4 = step2_4 + step3_11;
- step1_5 = step2_5 + step3_10;
- step1_6 = step2_6 + step3_9;
- step1_7 = step2_7 + step3_8;
- step1_8 = step2_7 - step3_8;
- step1_9 = step2_6 - step3_9;
- step1_10 = step2_5 - step3_10;
- step1_11 = step2_4 - step3_11;
- step1_12 = step2_3 - step3_12;
- step1_13 = step2_2 - step3_13;
- step1_14 = step2_1 - step3_14;
- step1_15 = step2_0 - step3_15;
-
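- /* step1_20..23 come from (step2_{27-k} - step2_{20+k}) * cospi_16_64;
-    the matching sums give step1_24..27 in the C code below. */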
- __asm__ __volatile__ (
- "sub %[temp0], %[step2_27], %[step2_20] \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "madd $ac0, %[temp0], %[cospi_16_64] \n\t"
- "extp %[step1_20], $ac0, 31 \n\t"
-
- : [temp0] "=&r" (temp0), [step1_20] "=r" (step1_20)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_20] "r" (step2_20), [step2_27] "r" (step2_27),
- [cospi_16_64] "r" (cospi_16_64)
- );
-
- temp21 = (step2_20 + step2_27) * cospi_16_64;
- step1_27 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- __asm__ __volatile__ (
- "sub %[temp0], %[step2_26], %[step2_21] \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "madd $ac0, %[temp0], %[cospi_16_64] \n\t"
- "extp %[step1_21], $ac0, 31 \n\t"
-
- : [temp0] "=&r" (temp0), [step1_21] "=r" (step1_21)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_26] "r" (step2_26), [step2_21] "r" (step2_21),
- [cospi_16_64] "r" (cospi_16_64)
- );
-
- temp21 = (step2_21 + step2_26) * cospi_16_64;
- step1_26 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- __asm__ __volatile__ (
- "sub %[temp0], %[step2_25], %[step2_22] \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "madd $ac0, %[temp0], %[cospi_16_64] \n\t"
- "extp %[step1_22], $ac0, 31 \n\t"
-
- : [temp0] "=&r" (temp0), [step1_22] "=r" (step1_22)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_25] "r" (step2_25), [step2_22] "r" (step2_22),
- [cospi_16_64] "r" (cospi_16_64)
- );
-
- temp21 = (step2_22 + step2_25) * cospi_16_64;
- step1_25 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- __asm__ __volatile__ (
- "sub %[temp0], %[step2_24], %[step2_23] \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "madd $ac0, %[temp0], %[cospi_16_64] \n\t"
- "extp %[step1_23], $ac0, 31 \n\t"
-
- : [temp0] "=&r" (temp0), [step1_23] "=r" (step1_23)
- : [const_2_power_13] "r" (const_2_power_13),
- [step2_24] "r" (step2_24), [step2_23] "r" (step2_23),
- [cospi_16_64] "r" (cospi_16_64)
- );
-
- temp21 = (step2_23 + step2_24) * cospi_16_64;
- step1_24 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
-
- // final stage
- output[0 * 32] = step1_0 + step2_31;
- output[1 * 32] = step1_1 + step2_30;
- output[2 * 32] = step1_2 + step2_29;
- output[3 * 32] = step1_3 + step2_28;
- output[4 * 32] = step1_4 + step1_27;
- output[5 * 32] = step1_5 + step1_26;
- output[6 * 32] = step1_6 + step1_25;
- output[7 * 32] = step1_7 + step1_24;
- output[8 * 32] = step1_8 + step1_23;
- output[9 * 32] = step1_9 + step1_22;
- output[10 * 32] = step1_10 + step1_21;
- output[11 * 32] = step1_11 + step1_20;
- output[12 * 32] = step1_12 + step2_19;
- output[13 * 32] = step1_13 + step2_18;
- output[14 * 32] = step1_14 + step2_17;
- output[15 * 32] = step1_15 + step2_16;
- output[16 * 32] = step1_15 - step2_16;
- output[17 * 32] = step1_14 - step2_17;
- output[18 * 32] = step1_13 - step2_18;
- output[19 * 32] = step1_12 - step2_19;
- output[20 * 32] = step1_11 - step1_20;
- output[21 * 32] = step1_10 - step1_21;
- output[22 * 32] = step1_9 - step1_22;
- output[23 * 32] = step1_8 - step1_23;
- output[24 * 32] = step1_7 - step1_24;
- output[25 * 32] = step1_6 - step1_25;
- output[26 * 32] = step1_5 - step1_26;
- output[27 * 32] = step1_4 - step1_27;
- output[28 * 32] = step1_3 - step2_28;
- output[29 * 32] = step1_2 - step2_29;
- output[30 * 32] = step1_1 - step2_30;
- output[31 * 32] = step1_0 - step2_31;
-
- input += 32;
- output += 1;
- }
-}
-
-void vp9_idct32x32_1024_add_dspr2(const int16_t *input, uint8_t *dest,
- int dest_stride) {
- DECLARE_ALIGNED(32, int16_t, out[32 * 32]);
- int16_t *outptr = out;
- uint32_t pos = 45;
-
-  /* bit position for extract from acc */
- __asm__ __volatile__ (
- "wrdsp %[pos], 1 \n\t"
- :
- : [pos] "r" (pos)
- );
-
- // Rows
- idct32_rows_dspr2(input, outptr, 32);
-
- // Columns
- vp9_idct32_cols_add_blk_dspr2(out, dest, dest_stride);
-}
-
-void vp9_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest,
- int stride) {
- DECLARE_ALIGNED(32, int16_t, out[32 * 32]);
- int16_t *outptr = out;
- uint32_t i;
- uint32_t pos = 45;
-
-  /* bit position for extract from acc */
- __asm__ __volatile__ (
- "wrdsp %[pos], 1 \n\t"
- :
- : [pos] "r" (pos)
- );
-
- // Rows
- idct32_rows_dspr2(input, outptr, 8);
-
- outptr += 8;
- __asm__ __volatile__ (
- "sw $zero, 0(%[outptr]) \n\t"
- "sw $zero, 4(%[outptr]) \n\t"
- "sw $zero, 8(%[outptr]) \n\t"
- "sw $zero, 12(%[outptr]) \n\t"
- "sw $zero, 16(%[outptr]) \n\t"
- "sw $zero, 20(%[outptr]) \n\t"
- "sw $zero, 24(%[outptr]) \n\t"
- "sw $zero, 28(%[outptr]) \n\t"
- "sw $zero, 32(%[outptr]) \n\t"
- "sw $zero, 36(%[outptr]) \n\t"
- "sw $zero, 40(%[outptr]) \n\t"
- "sw $zero, 44(%[outptr]) \n\t"
-
- :
- : [outptr] "r" (outptr)
- );
-
- for (i = 0; i < 31; ++i) {
- outptr += 32;
-
- __asm__ __volatile__ (
- "sw $zero, 0(%[outptr]) \n\t"
- "sw $zero, 4(%[outptr]) \n\t"
- "sw $zero, 8(%[outptr]) \n\t"
- "sw $zero, 12(%[outptr]) \n\t"
- "sw $zero, 16(%[outptr]) \n\t"
- "sw $zero, 20(%[outptr]) \n\t"
- "sw $zero, 24(%[outptr]) \n\t"
- "sw $zero, 28(%[outptr]) \n\t"
- "sw $zero, 32(%[outptr]) \n\t"
- "sw $zero, 36(%[outptr]) \n\t"
- "sw $zero, 40(%[outptr]) \n\t"
- "sw $zero, 44(%[outptr]) \n\t"
-
- :
- : [outptr] "r" (outptr)
- );
- }
-
- // Columns
- vp9_idct32_cols_add_blk_dspr2(out, dest, stride);
-}
-
-void vp9_idct32x32_1_add_dspr2(const int16_t *input, uint8_t *dest,
- int stride) {
- int r, out;
- int32_t a1, absa1;
- int32_t vector_a1;
- int32_t t1, t2, t3, t4;
- int32_t vector_1, vector_2, vector_3, vector_4;
- uint32_t pos = 45;
-
-  /* bit position for extract from acc */
- __asm__ __volatile__ (
- "wrdsp %[pos], 1 \n\t"
-
- :
- : [pos] "r" (pos)
- );
-
- out = DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input[0]);
- __asm__ __volatile__ (
- "addi %[out], %[out], 32 \n\t"
- "sra %[a1], %[out], 6 \n\t"
-
- : [out] "+r" (out), [a1] "=r" (a1)
- :
- );
-
- if (a1 < 0) {
- /* use quad-byte
- * input and output memory are four byte aligned */
- __asm__ __volatile__ (
- "abs %[absa1], %[a1] \n\t"
- "replv.qb %[vector_a1], %[absa1] \n\t"
-
- : [absa1] "=r" (absa1), [vector_a1] "=r" (vector_a1)
- : [a1] "r" (a1)
- );
-
- for (r = 32; r--;) {
- __asm__ __volatile__ (
- "lw %[t1], 0(%[dest]) \n\t"
- "lw %[t2], 4(%[dest]) \n\t"
- "lw %[t3], 8(%[dest]) \n\t"
- "lw %[t4], 12(%[dest]) \n\t"
- "subu_s.qb %[vector_1], %[t1], %[vector_a1] \n\t"
- "subu_s.qb %[vector_2], %[t2], %[vector_a1] \n\t"
- "subu_s.qb %[vector_3], %[t3], %[vector_a1] \n\t"
- "subu_s.qb %[vector_4], %[t4], %[vector_a1] \n\t"
- "sw %[vector_1], 0(%[dest]) \n\t"
- "sw %[vector_2], 4(%[dest]) \n\t"
- "sw %[vector_3], 8(%[dest]) \n\t"
- "sw %[vector_4], 12(%[dest]) \n\t"
-
- "lw %[t1], 16(%[dest]) \n\t"
- "lw %[t2], 20(%[dest]) \n\t"
- "lw %[t3], 24(%[dest]) \n\t"
- "lw %[t4], 28(%[dest]) \n\t"
- "subu_s.qb %[vector_1], %[t1], %[vector_a1] \n\t"
- "subu_s.qb %[vector_2], %[t2], %[vector_a1] \n\t"
- "subu_s.qb %[vector_3], %[t3], %[vector_a1] \n\t"
- "subu_s.qb %[vector_4], %[t4], %[vector_a1] \n\t"
- "sw %[vector_1], 16(%[dest]) \n\t"
- "sw %[vector_2], 20(%[dest]) \n\t"
- "sw %[vector_3], 24(%[dest]) \n\t"
- "sw %[vector_4], 28(%[dest]) \n\t"
-
- "add %[dest], %[dest], %[stride] \n\t"
-
- : [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3), [t4] "=&r" (t4),
- [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2),
- [vector_3] "=&r" (vector_3), [vector_4] "=&r" (vector_4),
- [dest] "+&r" (dest)
- : [stride] "r" (stride), [vector_a1] "r" (vector_a1)
- );
- }
- } else {
- /* use quad-byte
- * input and output memory are four byte aligned */
- __asm__ __volatile__ (
- "replv.qb %[vector_a1], %[a1] \n\t"
-
- : [vector_a1] "=r" (vector_a1)
- : [a1] "r" (a1)
- );
-
- for (r = 32; r--;) {
- __asm__ __volatile__ (
- "lw %[t1], 0(%[dest]) \n\t"
- "lw %[t2], 4(%[dest]) \n\t"
- "lw %[t3], 8(%[dest]) \n\t"
- "lw %[t4], 12(%[dest]) \n\t"
- "addu_s.qb %[vector_1], %[t1], %[vector_a1] \n\t"
- "addu_s.qb %[vector_2], %[t2], %[vector_a1] \n\t"
- "addu_s.qb %[vector_3], %[t3], %[vector_a1] \n\t"
- "addu_s.qb %[vector_4], %[t4], %[vector_a1] \n\t"
- "sw %[vector_1], 0(%[dest]) \n\t"
- "sw %[vector_2], 4(%[dest]) \n\t"
- "sw %[vector_3], 8(%[dest]) \n\t"
- "sw %[vector_4], 12(%[dest]) \n\t"
-
- "lw %[t1], 16(%[dest]) \n\t"
- "lw %[t2], 20(%[dest]) \n\t"
- "lw %[t3], 24(%[dest]) \n\t"
- "lw %[t4], 28(%[dest]) \n\t"
- "addu_s.qb %[vector_1], %[t1], %[vector_a1] \n\t"
- "addu_s.qb %[vector_2], %[t2], %[vector_a1] \n\t"
- "addu_s.qb %[vector_3], %[t3], %[vector_a1] \n\t"
- "addu_s.qb %[vector_4], %[t4], %[vector_a1] \n\t"
- "sw %[vector_1], 16(%[dest]) \n\t"
- "sw %[vector_2], 20(%[dest]) \n\t"
- "sw %[vector_3], 24(%[dest]) \n\t"
- "sw %[vector_4], 28(%[dest]) \n\t"
-
- "add %[dest], %[dest], %[stride] \n\t"
-
- : [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3), [t4] "=&r" (t4),
- [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2),
- [vector_3] "=&r" (vector_3), [vector_4] "=&r" (vector_4),
- [dest] "+&r" (dest)
- : [stride] "r" (stride), [vector_a1] "r" (vector_a1)
- );
- }
- }
-}
-#endif // #if HAVE_DSPR2
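The DC-only kernel deleted above (vp9_idct32x32_1_add_dspr2) never runs a full transform: it scales input[0] through dct_const_round_shift twice, rounds down to pixel precision, and then does a saturating per-byte add over the whole block (the addu_s.qb/subu_s.qb pairs). A minimal portable sketch of the same arithmetic, assuming the usual libvpx constants (cospi_16_64 == 11585, DCT_CONST_BITS == 14); the function names here are illustrative, not the dspr2 entry points:

    #include <stdint.h>

    static int dct_round(int x) {              /* dct_const_round_shift */
      return (x + (1 << 13)) >> 14;            /* 1 << 13 == const_2_power_13 */
    }

    static void idct32x32_1_add_sketch(const int16_t *input, uint8_t *dest,
                                       int stride) {
      const int out = dct_round(dct_round(input[0] * 11585) * 11585);
      const int a1 = (out + 32) >> 6;          /* the "addi 32; sra 6" pair */
      int r, c;
      for (r = 0; r < 32; ++r, dest += stride)
        for (c = 0; c < 32; ++c) {
          const int v = dest[c] + a1;          /* saturate like addu_s.qb */
          dest[c] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
        }
    }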
diff --git a/vp9/common/mips/dspr2/vp9_itrans4_dspr2.c b/vp9/common/mips/dspr2/vp9_itrans4_dspr2.c
index aa801ecb6..c10979b64 100644
--- a/vp9/common/mips/dspr2/vp9_itrans4_dspr2.c
+++ b/vp9/common/mips/dspr2/vp9_itrans4_dspr2.c
@@ -16,354 +16,11 @@
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_idct.h"
-#include "vp9/common/mips/dspr2/vp9_common_dspr2.h"
+#include "vpx_dsp/mips/inv_txfm_dspr2.h"
#include "vpx_dsp/txfm_common.h"
#include "vpx_ports/mem.h"
#if HAVE_DSPR2
-static void vp9_idct4_rows_dspr2(const int16_t *input, int16_t *output) {
- int16_t step_0, step_1, step_2, step_3;
- int Temp0, Temp1, Temp2, Temp3;
- const int const_2_power_13 = 8192;
- int i;
-
- for (i = 4; i--; ) {
- __asm__ __volatile__ (
- /*
- temp_1 = (input[0] + input[2]) * cospi_16_64;
- step_0 = dct_const_round_shift(temp_1);
-
- temp_2 = (input[0] - input[2]) * cospi_16_64;
- step_1 = dct_const_round_shift(temp_2);
- */
- "lh %[Temp0], 0(%[input]) \n\t"
- "lh %[Temp1], 4(%[input]) \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "add %[Temp2], %[Temp0], %[Temp1] \n\t"
- "sub %[Temp3], %[Temp0], %[Temp1] \n\t"
- "madd $ac0, %[Temp2], %[cospi_16_64] \n\t"
- "lh %[Temp0], 2(%[input]) \n\t"
- "lh %[Temp1], 6(%[input]) \n\t"
- "extp %[step_0], $ac0, 31 \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
-
- "madd $ac1, %[Temp3], %[cospi_16_64] \n\t"
- "extp %[step_1], $ac1, 31 \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
-
- /*
- temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
- step_2 = dct_const_round_shift(temp1);
- */
- "madd $ac0, %[Temp0], %[cospi_24_64] \n\t"
- "msub $ac0, %[Temp1], %[cospi_8_64] \n\t"
- "extp %[step_2], $ac0, 31 \n\t"
-
- /*
- temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
- step_3 = dct_const_round_shift(temp2);
- */
- "madd $ac1, %[Temp0], %[cospi_8_64] \n\t"
- "madd $ac1, %[Temp1], %[cospi_24_64] \n\t"
- "extp %[step_3], $ac1, 31 \n\t"
-
- /*
- output[0] = step_0 + step_3;
- output[4] = step_1 + step_2;
- output[8] = step_1 - step_2;
- output[12] = step_0 - step_3;
- */
- "add %[Temp0], %[step_0], %[step_3] \n\t"
- "sh %[Temp0], 0(%[output]) \n\t"
-
- "add %[Temp1], %[step_1], %[step_2] \n\t"
- "sh %[Temp1], 8(%[output]) \n\t"
-
- "sub %[Temp2], %[step_1], %[step_2] \n\t"
- "sh %[Temp2], 16(%[output]) \n\t"
-
- "sub %[Temp3], %[step_0], %[step_3] \n\t"
- "sh %[Temp3], 24(%[output]) \n\t"
-
- : [Temp0] "=&r" (Temp0), [Temp1] "=&r" (Temp1),
- [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
- [step_0] "=&r" (step_0), [step_1] "=&r" (step_1),
- [step_2] "=&r" (step_2), [step_3] "=&r" (step_3),
- [output] "+r" (output)
- : [const_2_power_13] "r" (const_2_power_13),
- [cospi_8_64] "r" (cospi_8_64), [cospi_16_64] "r" (cospi_16_64),
- [cospi_24_64] "r" (cospi_24_64),
- [input] "r" (input)
- );
-
- input += 4;
- output += 1;
- }
-}
-
-static void vp9_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
- int dest_stride) {
- int16_t step_0, step_1, step_2, step_3;
- int Temp0, Temp1, Temp2, Temp3;
- const int const_2_power_13 = 8192;
- int i;
- uint8_t *dest_pix;
- uint8_t *cm = vpx_ff_cropTbl;
-
- /* prefetch vpx_ff_cropTbl */
- prefetch_load(vpx_ff_cropTbl);
- prefetch_load(vpx_ff_cropTbl + 32);
- prefetch_load(vpx_ff_cropTbl + 64);
- prefetch_load(vpx_ff_cropTbl + 96);
- prefetch_load(vpx_ff_cropTbl + 128);
- prefetch_load(vpx_ff_cropTbl + 160);
- prefetch_load(vpx_ff_cropTbl + 192);
- prefetch_load(vpx_ff_cropTbl + 224);
-
- for (i = 0; i < 4; ++i) {
- dest_pix = (dest + i);
-
- __asm__ __volatile__ (
- /*
- temp_1 = (input[0] + input[2]) * cospi_16_64;
- step_0 = dct_const_round_shift(temp_1);
-
- temp_2 = (input[0] - input[2]) * cospi_16_64;
- step_1 = dct_const_round_shift(temp_2);
- */
- "lh %[Temp0], 0(%[input]) \n\t"
- "lh %[Temp1], 4(%[input]) \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "add %[Temp2], %[Temp0], %[Temp1] \n\t"
- "sub %[Temp3], %[Temp0], %[Temp1] \n\t"
- "madd $ac0, %[Temp2], %[cospi_16_64] \n\t"
- "lh %[Temp0], 2(%[input]) \n\t"
- "lh %[Temp1], 6(%[input]) \n\t"
- "extp %[step_0], $ac0, 31 \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
-
- "madd $ac1, %[Temp3], %[cospi_16_64] \n\t"
- "extp %[step_1], $ac1, 31 \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
-
- /*
- temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
- step_2 = dct_const_round_shift(temp1);
- */
- "madd $ac0, %[Temp0], %[cospi_24_64] \n\t"
- "msub $ac0, %[Temp1], %[cospi_8_64] \n\t"
- "extp %[step_2], $ac0, 31 \n\t"
-
- /*
- temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
- step_3 = dct_const_round_shift(temp2);
- */
- "madd $ac1, %[Temp0], %[cospi_8_64] \n\t"
- "madd $ac1, %[Temp1], %[cospi_24_64] \n\t"
- "extp %[step_3], $ac1, 31 \n\t"
-
- /*
- output[0] = step_0 + step_3;
- output[4] = step_1 + step_2;
- output[8] = step_1 - step_2;
- output[12] = step_0 - step_3;
- */
- "add %[Temp0], %[step_0], %[step_3] \n\t"
- "addi %[Temp0], %[Temp0], 8 \n\t"
- "sra %[Temp0], %[Temp0], 4 \n\t"
- "lbu %[Temp1], 0(%[dest_pix]) \n\t"
- "add %[Temp1], %[Temp1], %[Temp0] \n\t"
- "add %[Temp0], %[step_1], %[step_2] \n\t"
- "lbux %[Temp2], %[Temp1](%[cm]) \n\t"
- "sb %[Temp2], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "addi %[Temp0], %[Temp0], 8 \n\t"
- "sra %[Temp0], %[Temp0], 4 \n\t"
- "lbu %[Temp1], 0(%[dest_pix]) \n\t"
- "add %[Temp1], %[Temp1], %[Temp0] \n\t"
- "sub %[Temp0], %[step_1], %[step_2] \n\t"
- "lbux %[Temp2], %[Temp1](%[cm]) \n\t"
- "sb %[Temp2], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "addi %[Temp0], %[Temp0], 8 \n\t"
- "sra %[Temp0], %[Temp0], 4 \n\t"
- "lbu %[Temp1], 0(%[dest_pix]) \n\t"
- "add %[Temp1], %[Temp1], %[Temp0] \n\t"
- "sub %[Temp0], %[step_0], %[step_3] \n\t"
- "lbux %[Temp2], %[Temp1](%[cm]) \n\t"
- "sb %[Temp2], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "addi %[Temp0], %[Temp0], 8 \n\t"
- "sra %[Temp0], %[Temp0], 4 \n\t"
- "lbu %[Temp1], 0(%[dest_pix]) \n\t"
- "add %[Temp1], %[Temp1], %[Temp0] \n\t"
- "lbux %[Temp2], %[Temp1](%[cm]) \n\t"
- "sb %[Temp2], 0(%[dest_pix]) \n\t"
-
- : [Temp0] "=&r" (Temp0), [Temp1] "=&r" (Temp1),
- [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
- [step_0] "=&r" (step_0), [step_1] "=&r" (step_1),
- [step_2] "=&r" (step_2), [step_3] "=&r" (step_3),
- [dest_pix] "+r" (dest_pix)
- : [const_2_power_13] "r" (const_2_power_13),
- [cospi_8_64] "r" (cospi_8_64), [cospi_16_64] "r" (cospi_16_64),
- [cospi_24_64] "r" (cospi_24_64),
- [input] "r" (input), [cm] "r" (cm), [dest_stride] "r" (dest_stride)
- );
-
- input += 4;
- }
-}
-
-void vp9_idct4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
- int dest_stride) {
- DECLARE_ALIGNED(32, int16_t, out[4 * 4]);
- int16_t *outptr = out;
- uint32_t pos = 45;
-
-  /* bit position for extract from acc */
- __asm__ __volatile__ (
- "wrdsp %[pos], 1 \n\t"
- :
- : [pos] "r" (pos)
- );
-
- // Rows
- vp9_idct4_rows_dspr2(input, outptr);
-
- // Columns
- vp9_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride);
-}
-
-void vp9_idct4x4_1_add_dspr2(const int16_t *input, uint8_t *dest,
- int dest_stride) {
- int a1, absa1;
- int r;
- int32_t out;
- int t2, vector_a1, vector_a;
- uint32_t pos = 45;
- int16_t input_dc = input[0];
-
-  /* bit position for extract from acc */
- __asm__ __volatile__ (
- "wrdsp %[pos], 1 \n\t"
-
- :
- : [pos] "r" (pos)
- );
-
- out = DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input_dc);
- __asm__ __volatile__ (
- "addi %[out], %[out], 8 \n\t"
- "sra %[a1], %[out], 4 \n\t"
-
- : [out] "+r" (out), [a1] "=r" (a1)
- :
- );
-
- if (a1 < 0) {
- /* use quad-byte
- * input and output memory are four byte aligned */
- __asm__ __volatile__ (
- "abs %[absa1], %[a1] \n\t"
- "replv.qb %[vector_a1], %[absa1] \n\t"
-
- : [absa1] "=r" (absa1), [vector_a1] "=r" (vector_a1)
- : [a1] "r" (a1)
- );
-
- for (r = 4; r--;) {
- __asm__ __volatile__ (
- "lw %[t2], 0(%[dest]) \n\t"
- "subu_s.qb %[vector_a], %[t2], %[vector_a1] \n\t"
- "sw %[vector_a], 0(%[dest]) \n\t"
- "add %[dest], %[dest], %[dest_stride] \n\t"
-
- : [t2] "=&r" (t2), [vector_a] "=&r" (vector_a),
- [dest] "+&r" (dest)
- : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1)
- );
- }
- } else {
- /* use quad-byte
- * input and output memory are four byte aligned */
- __asm__ __volatile__ (
- "replv.qb %[vector_a1], %[a1] \n\t"
- : [vector_a1] "=r" (vector_a1)
- : [a1] "r" (a1)
- );
-
- for (r = 4; r--;) {
- __asm__ __volatile__ (
- "lw %[t2], 0(%[dest]) \n\t"
- "addu_s.qb %[vector_a], %[t2], %[vector_a1] \n\t"
- "sw %[vector_a], 0(%[dest]) \n\t"
- "add %[dest], %[dest], %[dest_stride] \n\t"
-
- : [t2] "=&r" (t2), [vector_a] "=&r" (vector_a),
- [dest] "+&r" (dest)
- : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1)
- );
- }
- }
-}
-
-static void iadst4_dspr2(const int16_t *input, int16_t *output) {
- int s0, s1, s2, s3, s4, s5, s6, s7;
- int x0, x1, x2, x3;
-
- x0 = input[0];
- x1 = input[1];
- x2 = input[2];
- x3 = input[3];
-
- if (!(x0 | x1 | x2 | x3)) {
- output[0] = output[1] = output[2] = output[3] = 0;
- return;
- }
-
- s0 = sinpi_1_9 * x0;
- s1 = sinpi_2_9 * x0;
- s2 = sinpi_3_9 * x1;
- s3 = sinpi_4_9 * x2;
- s4 = sinpi_1_9 * x2;
- s5 = sinpi_2_9 * x3;
- s6 = sinpi_4_9 * x3;
- s7 = x0 - x2 + x3;
-
- x0 = s0 + s3 + s5;
- x1 = s1 - s4 - s6;
- x2 = sinpi_3_9 * s7;
- x3 = s2;
-
- s0 = x0 + x3;
- s1 = x1 + x3;
- s2 = x2;
- s3 = x0 + x1 - x3;
-
- // 1-D transform scaling factor is sqrt(2).
- // The overall dynamic range is 14b (input) + 14b (multiplication scaling)
- // + 1b (addition) = 29b.
- // Hence the output bit depth is 15b.
- output[0] = dct_const_round_shift(s0);
- output[1] = dct_const_round_shift(s1);
- output[2] = dct_const_round_shift(s2);
- output[3] = dct_const_round_shift(s3);
-}
-
void vp9_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
int dest_stride, int tx_type) {
int i, j;
@@ -381,11 +38,11 @@ void vp9_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
switch (tx_type) {
case DCT_DCT: // DCT in both horizontal and vertical
- vp9_idct4_rows_dspr2(input, outptr);
- vp9_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride);
+ vpx_idct4_rows_dspr2(input, outptr);
+ vpx_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride);
break;
case ADST_DCT: // ADST in vertical, DCT in horizontal
- vp9_idct4_rows_dspr2(input, outptr);
+ vpx_idct4_rows_dspr2(input, outptr);
outptr = out;
@@ -412,7 +69,7 @@ void vp9_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
temp_in[i * 4 + j] = out[j * 4 + i];
}
}
- vp9_idct4_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride);
+ vpx_idct4_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride);
break;
case ADST_ADST: // ADST in both directions
for (i = 0; i < 4; ++i) {
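For reference, the factored iadst4 deleted above is algebraically a dense 4x4 multiply; the five-multiply factoring through s7 = x0 - x2 + x3 works because of the identities sinpi_1_9 + sinpi_2_9 == sinpi_4_9 and sinpi_4_9 - sinpi_1_9 == sinpi_2_9 (5283 + 9929 == 15212 and 15212 - 5283 == 9929). A dense sketch that must reproduce the same rounded outputs; the constants are the vpx_dsp/txfm_common.h values and the function name is illustrative:

    #include <stdint.h>

    #define DCT_ROUND(x) (((x) + (1 << 13)) >> 14)   /* dct_const_round_shift */

    static void iadst4_dense_sketch(const int16_t *in, int16_t *out) {
      const int s1 = 5283, s2 = 9929, s3 = 13377, s4 = 15212;  /* sinpi_k_9 */
      const int x0 = in[0], x1 = in[1], x2 = in[2], x3 = in[3];
      out[0] = (int16_t)DCT_ROUND(s1 * x0 + s3 * x1 + s4 * x2 + s2 * x3);
      out[1] = (int16_t)DCT_ROUND(s2 * x0 + s3 * x1 - s1 * x2 - s4 * x3);
      out[2] = (int16_t)DCT_ROUND(s3 * (x0 - x2 + x3));        /* the s7 row */
      out[3] = (int16_t)DCT_ROUND(s4 * x0 - s3 * x1 + s2 * x2 - s1 * x3);
    }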
diff --git a/vp9/common/mips/dspr2/vp9_itrans8_dspr2.c b/vp9/common/mips/dspr2/vp9_itrans8_dspr2.c
index 5270fa17f..37f3ca9fc 100644
--- a/vp9/common/mips/dspr2/vp9_itrans8_dspr2.c
+++ b/vp9/common/mips/dspr2/vp9_itrans8_dspr2.c
@@ -15,538 +15,11 @@
#include "./vp9_rtcd.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_blockd.h"
-#include "vp9/common/mips/dspr2/vp9_common_dspr2.h"
+#include "vpx_dsp/mips/inv_txfm_dspr2.h"
#include "vpx_dsp/txfm_common.h"
#include "vpx_ports/mem.h"
#if HAVE_DSPR2
-static void idct8_rows_dspr2(const int16_t *input, int16_t *output,
- uint32_t no_rows) {
- int step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6, step1_7;
- const int const_2_power_13 = 8192;
- int Temp0, Temp1, Temp2, Temp3, Temp4;
- int i;
-
- for (i = no_rows; i--; ) {
- __asm__ __volatile__ (
- /*
- temp_1 = (input[0] + input[4]) * cospi_16_64;
- step2_0 = dct_const_round_shift(temp_1);
-
- temp_2 = (input[0] - input[4]) * cospi_16_64;
- step2_1 = dct_const_round_shift(temp_2);
- */
- "lh %[Temp0], 0(%[input]) \n\t"
- "lh %[Temp1], 8(%[input]) \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "add %[Temp2], %[Temp0], %[Temp1] \n\t"
- "madd $ac0, %[Temp2], %[cospi_16_64] \n\t"
- "extp %[Temp4], $ac0, 31 \n\t"
-
- "sub %[Temp3], %[Temp0], %[Temp1] \n\t"
- "madd $ac1, %[Temp3], %[cospi_16_64] \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "extp %[Temp2], $ac1, 31 \n\t"
-
- /*
- temp_1 = input[2] * cospi_24_64 - input[6] * cospi_8_64;
- step2_2 = dct_const_round_shift(temp_1);
- */
- "lh %[Temp0], 4(%[input]) \n\t"
- "lh %[Temp1], 12(%[input]) \n\t"
- "madd $ac0, %[Temp0], %[cospi_24_64] \n\t"
- "msub $ac0, %[Temp1], %[cospi_8_64] \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "extp %[Temp3], $ac0, 31 \n\t"
-
- /*
- step1_1 = step2_1 + step2_2;
- step1_2 = step2_1 - step2_2;
- */
- "add %[step1_1], %[Temp2], %[Temp3] \n\t"
- "sub %[step1_2], %[Temp2], %[Temp3] \n\t"
-
- /*
- temp_2 = input[2] * cospi_8_64 + input[6] * cospi_24_64;
- step2_3 = dct_const_round_shift(temp_2);
- */
- "madd $ac1, %[Temp0], %[cospi_8_64] \n\t"
- "madd $ac1, %[Temp1], %[cospi_24_64] \n\t"
- "extp %[Temp1], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
-
- /*
- step1_0 = step2_0 + step2_3;
- step1_3 = step2_0 - step2_3;
- */
- "add %[step1_0], %[Temp4], %[Temp1] \n\t"
- "sub %[step1_3], %[Temp4], %[Temp1] \n\t"
-
- /*
- temp_1 = input[1] * cospi_28_64 - input[7] * cospi_4_64;
- step1_4 = dct_const_round_shift(temp_1);
- */
- "lh %[Temp0], 2(%[input]) \n\t"
- "madd $ac0, %[Temp0], %[cospi_28_64] \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "lh %[Temp1], 14(%[input]) \n\t"
- "lh %[Temp0], 2(%[input]) \n\t"
- "msub $ac0, %[Temp1], %[cospi_4_64] \n\t"
- "extp %[step1_4], $ac0, 31 \n\t"
-
- /*
- temp_2 = input[1] * cospi_4_64 + input[7] * cospi_28_64;
- step1_7 = dct_const_round_shift(temp_2);
- */
- "madd $ac1, %[Temp0], %[cospi_4_64] \n\t"
- "madd $ac1, %[Temp1], %[cospi_28_64] \n\t"
- "extp %[step1_7], $ac1, 31 \n\t"
-
- /*
- temp_1 = input[5] * cospi_12_64 - input[3] * cospi_20_64;
- step1_5 = dct_const_round_shift(temp_1);
- */
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "lh %[Temp0], 10(%[input]) \n\t"
- "madd $ac0, %[Temp0], %[cospi_12_64] \n\t"
- "lh %[Temp1], 6(%[input]) \n\t"
- "msub $ac0, %[Temp1], %[cospi_20_64] \n\t"
- "extp %[step1_5], $ac0, 31 \n\t"
-
- /*
- temp_2 = input[5] * cospi_20_64 + input[3] * cospi_12_64;
- step1_6 = dct_const_round_shift(temp_2);
- */
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "lh %[Temp0], 10(%[input]) \n\t"
- "madd $ac1, %[Temp0], %[cospi_20_64] \n\t"
- "lh %[Temp1], 6(%[input]) \n\t"
- "madd $ac1, %[Temp1], %[cospi_12_64] \n\t"
- "extp %[step1_6], $ac1, 31 \n\t"
-
- /*
- temp_1 = (step1_7 - step1_6 - step1_4 + step1_5) * cospi_16_64;
- temp_2 = (step1_4 - step1_5 - step1_6 + step1_7) * cospi_16_64;
- */
- "sub %[Temp0], %[step1_7], %[step1_6] \n\t"
- "sub %[Temp0], %[Temp0], %[step1_4] \n\t"
- "add %[Temp0], %[Temp0], %[step1_5] \n\t"
- "sub %[Temp1], %[step1_4], %[step1_5] \n\t"
- "sub %[Temp1], %[Temp1], %[step1_6] \n\t"
- "add %[Temp1], %[Temp1], %[step1_7] \n\t"
-
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
-
- "madd $ac0, %[Temp0], %[cospi_16_64] \n\t"
- "madd $ac1, %[Temp1], %[cospi_16_64] \n\t"
-
- /*
- step1_4 = step1_4 + step1_5;
- step1_7 = step1_6 + step1_7;
- */
- "add %[step1_4], %[step1_4], %[step1_5] \n\t"
- "add %[step1_7], %[step1_7], %[step1_6] \n\t"
-
- "extp %[step1_5], $ac0, 31 \n\t"
- "extp %[step1_6], $ac1, 31 \n\t"
-
- "add %[Temp0], %[step1_0], %[step1_7] \n\t"
- "sh %[Temp0], 0(%[output]) \n\t"
- "add %[Temp1], %[step1_1], %[step1_6] \n\t"
- "sh %[Temp1], 16(%[output]) \n\t"
- "add %[Temp0], %[step1_2], %[step1_5] \n\t"
- "sh %[Temp0], 32(%[output]) \n\t"
- "add %[Temp1], %[step1_3], %[step1_4] \n\t"
- "sh %[Temp1], 48(%[output]) \n\t"
-
- "sub %[Temp0], %[step1_3], %[step1_4] \n\t"
- "sh %[Temp0], 64(%[output]) \n\t"
- "sub %[Temp1], %[step1_2], %[step1_5] \n\t"
- "sh %[Temp1], 80(%[output]) \n\t"
- "sub %[Temp0], %[step1_1], %[step1_6] \n\t"
- "sh %[Temp0], 96(%[output]) \n\t"
- "sub %[Temp1], %[step1_0], %[step1_7] \n\t"
- "sh %[Temp1], 112(%[output]) \n\t"
-
- : [step1_0] "=&r" (step1_0), [step1_1] "=&r" (step1_1),
- [step1_2] "=&r" (step1_2), [step1_3] "=&r" (step1_3),
- [step1_4] "=&r" (step1_4), [step1_5] "=&r" (step1_5),
- [step1_6] "=&r" (step1_6), [step1_7] "=&r" (step1_7),
- [Temp0] "=&r" (Temp0), [Temp1] "=&r" (Temp1),
- [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
- [Temp4] "=&r" (Temp4)
- : [const_2_power_13] "r" (const_2_power_13),
- [cospi_16_64] "r" (cospi_16_64), [cospi_28_64] "r" (cospi_28_64),
- [cospi_4_64] "r" (cospi_4_64), [cospi_12_64] "r" (cospi_12_64),
- [cospi_20_64] "r" (cospi_20_64), [cospi_8_64] "r" (cospi_8_64),
- [cospi_24_64] "r" (cospi_24_64),
- [output] "r" (output), [input] "r" (input)
- );
-
- input += 8;
- output += 1;
- }
-}
-
-static void idct8_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
- int dest_stride) {
- int step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6, step1_7;
- int Temp0, Temp1, Temp2, Temp3;
- int i;
- const int const_2_power_13 = 8192;
- uint8_t *dest_pix;
- uint8_t *cm = vpx_ff_cropTbl;
-
- /* prefetch vpx_ff_cropTbl */
- prefetch_load(vpx_ff_cropTbl);
- prefetch_load(vpx_ff_cropTbl + 32);
- prefetch_load(vpx_ff_cropTbl + 64);
- prefetch_load(vpx_ff_cropTbl + 96);
- prefetch_load(vpx_ff_cropTbl + 128);
- prefetch_load(vpx_ff_cropTbl + 160);
- prefetch_load(vpx_ff_cropTbl + 192);
- prefetch_load(vpx_ff_cropTbl + 224);
-
- for (i = 0; i < 8; ++i) {
- dest_pix = (dest + i);
-
- __asm__ __volatile__ (
- /*
- temp_1 = (input[0] + input[4]) * cospi_16_64;
- step2_0 = dct_const_round_shift(temp_1);
-
- temp_2 = (input[0] - input[4]) * cospi_16_64;
- step2_1 = dct_const_round_shift(temp_2);
- */
- "lh %[Temp0], 0(%[input]) \n\t"
- "lh %[Temp1], 8(%[input]) \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "add %[Temp2], %[Temp0], %[Temp1] \n\t"
- "madd $ac0, %[Temp2], %[cospi_16_64] \n\t"
- "extp %[step1_6], $ac0, 31 \n\t"
-
- "sub %[Temp3], %[Temp0], %[Temp1] \n\t"
- "madd $ac1, %[Temp3], %[cospi_16_64] \n\t"
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "extp %[Temp2], $ac1, 31 \n\t"
-
- /*
- temp_1 = input[2] * cospi_24_64 - input[6] * cospi_8_64;
- step2_2 = dct_const_round_shift(temp_1);
- */
- "lh %[Temp0], 4(%[input]) \n\t"
- "lh %[Temp1], 12(%[input]) \n\t"
- "madd $ac0, %[Temp0], %[cospi_24_64] \n\t"
- "msub $ac0, %[Temp1], %[cospi_8_64] \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "extp %[Temp3], $ac0, 31 \n\t"
-
- /*
- step1_1 = step2_1 + step2_2;
- step1_2 = step2_1 - step2_2;
- */
- "add %[step1_1], %[Temp2], %[Temp3] \n\t"
- "sub %[step1_2], %[Temp2], %[Temp3] \n\t"
-
- /*
- temp_2 = input[2] * cospi_8_64 + input[6] * cospi_24_64;
- step2_3 = dct_const_round_shift(temp_2);
- */
- "madd $ac1, %[Temp0], %[cospi_8_64] \n\t"
- "madd $ac1, %[Temp1], %[cospi_24_64] \n\t"
- "extp %[Temp1], $ac1, 31 \n\t"
-
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
-
- /*
- step1_0 = step2_0 + step2_3;
- step1_3 = step2_0 - step2_3;
- */
- "add %[step1_0], %[step1_6], %[Temp1] \n\t"
- "sub %[step1_3], %[step1_6], %[Temp1] \n\t"
-
- /*
- temp_1 = input[1] * cospi_28_64 - input[7] * cospi_4_64;
- step1_4 = dct_const_round_shift(temp_1);
- */
- "lh %[Temp0], 2(%[input]) \n\t"
- "madd $ac0, %[Temp0], %[cospi_28_64] \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "lh %[Temp1], 14(%[input]) \n\t"
- "lh %[Temp0], 2(%[input]) \n\t"
- "msub $ac0, %[Temp1], %[cospi_4_64] \n\t"
- "extp %[step1_4], $ac0, 31 \n\t"
-
- /*
- temp_2 = input[1] * cospi_4_64 + input[7] * cospi_28_64;
- step1_7 = dct_const_round_shift(temp_2);
- */
- "madd $ac1, %[Temp0], %[cospi_4_64] \n\t"
- "madd $ac1, %[Temp1], %[cospi_28_64] \n\t"
- "extp %[step1_7], $ac1, 31 \n\t"
-
- /*
- temp_1 = input[5] * cospi_12_64 - input[3] * cospi_20_64;
- step1_5 = dct_const_round_shift(temp_1);
- */
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "lh %[Temp0], 10(%[input]) \n\t"
- "madd $ac0, %[Temp0], %[cospi_12_64] \n\t"
- "lh %[Temp1], 6(%[input]) \n\t"
- "msub $ac0, %[Temp1], %[cospi_20_64] \n\t"
- "extp %[step1_5], $ac0, 31 \n\t"
-
- /*
- temp_2 = input[5] * cospi_20_64 + input[3] * cospi_12_64;
- step1_6 = dct_const_round_shift(temp_2);
- */
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "lh %[Temp0], 10(%[input]) \n\t"
- "madd $ac1, %[Temp0], %[cospi_20_64] \n\t"
- "lh %[Temp1], 6(%[input]) \n\t"
- "madd $ac1, %[Temp1], %[cospi_12_64] \n\t"
- "extp %[step1_6], $ac1, 31 \n\t"
-
- /*
- temp_1 = (step1_7 - step1_6 - step1_4 + step1_5) * cospi_16_64;
- temp_2 = (step1_4 - step1_5 - step1_6 + step1_7) * cospi_16_64;
- */
- "sub %[Temp0], %[step1_7], %[step1_6] \n\t"
- "sub %[Temp0], %[Temp0], %[step1_4] \n\t"
- "add %[Temp0], %[Temp0], %[step1_5] \n\t"
- "sub %[Temp1], %[step1_4], %[step1_5] \n\t"
- "sub %[Temp1], %[Temp1], %[step1_6] \n\t"
- "add %[Temp1], %[Temp1], %[step1_7] \n\t"
-
- "mtlo %[const_2_power_13], $ac0 \n\t"
- "mthi $zero, $ac0 \n\t"
- "mtlo %[const_2_power_13], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
-
- "madd $ac0, %[Temp0], %[cospi_16_64] \n\t"
- "madd $ac1, %[Temp1], %[cospi_16_64] \n\t"
-
- /*
- step1_4 = step1_4 + step1_5;
- step1_7 = step1_6 + step1_7;
- */
- "add %[step1_4], %[step1_4], %[step1_5] \n\t"
- "add %[step1_7], %[step1_7], %[step1_6] \n\t"
-
- "extp %[step1_5], $ac0, 31 \n\t"
- "extp %[step1_6], $ac1, 31 \n\t"
-
- /* add block */
- "lbu %[Temp1], 0(%[dest_pix]) \n\t"
- "add %[Temp0], %[step1_0], %[step1_7] \n\t"
- "addi %[Temp0], %[Temp0], 16 \n\t"
- "sra %[Temp0], %[Temp0], 5 \n\t"
- "add %[Temp1], %[Temp1], %[Temp0] \n\t"
- "add %[Temp0], %[step1_1], %[step1_6] \n\t"
- "lbux %[Temp2], %[Temp1](%[cm]) \n\t"
- "sb %[Temp2], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[Temp1], 0(%[dest_pix]) \n\t"
- "addi %[Temp0], %[Temp0], 16 \n\t"
- "sra %[Temp0], %[Temp0], 5 \n\t"
- "add %[Temp1], %[Temp1], %[Temp0] \n\t"
- "add %[Temp0], %[step1_2], %[step1_5] \n\t"
- "lbux %[Temp2], %[Temp1](%[cm]) \n\t"
- "sb %[Temp2], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[Temp1], 0(%[dest_pix]) \n\t"
- "addi %[Temp0], %[Temp0], 16 \n\t"
- "sra %[Temp0], %[Temp0], 5 \n\t"
- "add %[Temp1], %[Temp1], %[Temp0] \n\t"
- "add %[Temp0], %[step1_3], %[step1_4] \n\t"
- "lbux %[Temp2], %[Temp1](%[cm]) \n\t"
- "sb %[Temp2], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[Temp1], 0(%[dest_pix]) \n\t"
- "addi %[Temp0], %[Temp0], 16 \n\t"
- "sra %[Temp0], %[Temp0], 5 \n\t"
- "add %[Temp1], %[Temp1], %[Temp0] \n\t"
- "sub %[Temp0], %[step1_3], %[step1_4] \n\t"
- "lbux %[Temp2], %[Temp1](%[cm]) \n\t"
- "sb %[Temp2], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[Temp1], 0(%[dest_pix]) \n\t"
- "addi %[Temp0], %[Temp0], 16 \n\t"
- "sra %[Temp0], %[Temp0], 5 \n\t"
- "add %[Temp1], %[Temp1], %[Temp0] \n\t"
- "sub %[Temp0], %[step1_2], %[step1_5] \n\t"
- "lbux %[Temp2], %[Temp1](%[cm]) \n\t"
- "sb %[Temp2], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[Temp1], 0(%[dest_pix]) \n\t"
- "addi %[Temp0], %[Temp0], 16 \n\t"
- "sra %[Temp0], %[Temp0], 5 \n\t"
- "add %[Temp1], %[Temp1], %[Temp0] \n\t"
- "sub %[Temp0], %[step1_1], %[step1_6] \n\t"
- "lbux %[Temp2], %[Temp1](%[cm]) \n\t"
- "sb %[Temp2], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[Temp1], 0(%[dest_pix]) \n\t"
- "addi %[Temp0], %[Temp0], 16 \n\t"
- "sra %[Temp0], %[Temp0], 5 \n\t"
- "add %[Temp1], %[Temp1], %[Temp0] \n\t"
- "sub %[Temp0], %[step1_0], %[step1_7] \n\t"
- "lbux %[Temp2], %[Temp1](%[cm]) \n\t"
- "sb %[Temp2], 0(%[dest_pix]) \n\t"
- "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t"
-
- "lbu %[Temp1], 0(%[dest_pix]) \n\t"
- "addi %[Temp0], %[Temp0], 16 \n\t"
- "sra %[Temp0], %[Temp0], 5 \n\t"
- "add %[Temp1], %[Temp1], %[Temp0] \n\t"
- "lbux %[Temp2], %[Temp1](%[cm]) \n\t"
- "sb %[Temp2], 0(%[dest_pix]) \n\t"
-
- : [step1_0] "=&r" (step1_0), [step1_1] "=&r" (step1_1),
- [step1_2] "=&r" (step1_2), [step1_3] "=&r" (step1_3),
- [step1_4] "=&r" (step1_4), [step1_5] "=&r" (step1_5),
- [step1_6] "=&r" (step1_6), [step1_7] "=&r" (step1_7),
- [Temp0] "=&r" (Temp0), [Temp1] "=&r" (Temp1),
- [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
- [dest_pix] "+r" (dest_pix)
- : [const_2_power_13] "r" (const_2_power_13),
- [cospi_16_64] "r" (cospi_16_64), [cospi_28_64] "r" (cospi_28_64),
- [cospi_4_64] "r" (cospi_4_64), [cospi_12_64] "r" (cospi_12_64),
- [cospi_20_64] "r" (cospi_20_64), [cospi_8_64] "r" (cospi_8_64),
- [cospi_24_64] "r" (cospi_24_64),
- [input] "r" (input), [cm] "r" (cm), [dest_stride] "r" (dest_stride)
- );
-
- input += 8;
- }
-}
-
-void vp9_idct8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
- int dest_stride) {
- DECLARE_ALIGNED(32, int16_t, out[8 * 8]);
- int16_t *outptr = out;
- uint32_t pos = 45;
-
-  /* bit position for extract from acc */
- __asm__ __volatile__ (
- "wrdsp %[pos], 1 \n\t"
- :
- : [pos] "r" (pos)
- );
-
- // First transform rows
- idct8_rows_dspr2(input, outptr, 8);
-
- // Then transform columns and add to dest
- idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride);
-}
-
-static void iadst8_dspr2(const int16_t *input, int16_t *output) {
- int s0, s1, s2, s3, s4, s5, s6, s7;
- int x0, x1, x2, x3, x4, x5, x6, x7;
-
- x0 = input[7];
- x1 = input[0];
- x2 = input[5];
- x3 = input[2];
- x4 = input[3];
- x5 = input[4];
- x6 = input[1];
- x7 = input[6];
-
- if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) {
- output[0] = output[1] = output[2] = output[3] = output[4]
- = output[5] = output[6] = output[7] = 0;
- return;
- }
-
- // stage 1
- s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
- s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
- s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
- s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
- s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
- s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
- s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
- s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
-
- x0 = ROUND_POWER_OF_TWO((s0 + s4), DCT_CONST_BITS);
- x1 = ROUND_POWER_OF_TWO((s1 + s5), DCT_CONST_BITS);
- x2 = ROUND_POWER_OF_TWO((s2 + s6), DCT_CONST_BITS);
- x3 = ROUND_POWER_OF_TWO((s3 + s7), DCT_CONST_BITS);
- x4 = ROUND_POWER_OF_TWO((s0 - s4), DCT_CONST_BITS);
- x5 = ROUND_POWER_OF_TWO((s1 - s5), DCT_CONST_BITS);
- x6 = ROUND_POWER_OF_TWO((s2 - s6), DCT_CONST_BITS);
- x7 = ROUND_POWER_OF_TWO((s3 - s7), DCT_CONST_BITS);
-
- // stage 2
- s0 = x0;
- s1 = x1;
- s2 = x2;
- s3 = x3;
- s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
- s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
- s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
- s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
-
- x0 = s0 + s2;
- x1 = s1 + s3;
- x2 = s0 - s2;
- x3 = s1 - s3;
- x4 = ROUND_POWER_OF_TWO((s4 + s6), DCT_CONST_BITS);
- x5 = ROUND_POWER_OF_TWO((s5 + s7), DCT_CONST_BITS);
- x6 = ROUND_POWER_OF_TWO((s4 - s6), DCT_CONST_BITS);
- x7 = ROUND_POWER_OF_TWO((s5 - s7), DCT_CONST_BITS);
-
- // stage 3
- s2 = cospi_16_64 * (x2 + x3);
- s3 = cospi_16_64 * (x2 - x3);
- s6 = cospi_16_64 * (x6 + x7);
- s7 = cospi_16_64 * (x6 - x7);
-
- x2 = ROUND_POWER_OF_TWO((s2), DCT_CONST_BITS);
- x3 = ROUND_POWER_OF_TWO((s3), DCT_CONST_BITS);
- x6 = ROUND_POWER_OF_TWO((s6), DCT_CONST_BITS);
- x7 = ROUND_POWER_OF_TWO((s7), DCT_CONST_BITS);
-
- output[0] = x0;
- output[1] = -x4;
- output[2] = x6;
- output[3] = -x2;
- output[4] = x3;
- output[5] = -x7;
- output[6] = x5;
- output[7] = -x1;
-}
-
void vp9_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
int dest_stride, int tx_type) {
int i, j;
@@ -617,130 +90,4 @@ void vp9_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
break;
}
}
-
-void vp9_idct8x8_12_add_dspr2(const int16_t *input, uint8_t *dest,
- int dest_stride) {
- DECLARE_ALIGNED(32, int16_t, out[8 * 8]);
- int16_t *outptr = out;
- uint32_t pos = 45;
-
-  /* bit position for extract from acc */
- __asm__ __volatile__ (
- "wrdsp %[pos], 1 \n\t"
- :
- : [pos] "r" (pos)
- );
-
- // First transform rows
- idct8_rows_dspr2(input, outptr, 4);
-
- outptr += 4;
-
- __asm__ __volatile__ (
- "sw $zero, 0(%[outptr]) \n\t"
- "sw $zero, 4(%[outptr]) \n\t"
- "sw $zero, 16(%[outptr]) \n\t"
- "sw $zero, 20(%[outptr]) \n\t"
- "sw $zero, 32(%[outptr]) \n\t"
- "sw $zero, 36(%[outptr]) \n\t"
- "sw $zero, 48(%[outptr]) \n\t"
- "sw $zero, 52(%[outptr]) \n\t"
- "sw $zero, 64(%[outptr]) \n\t"
- "sw $zero, 68(%[outptr]) \n\t"
- "sw $zero, 80(%[outptr]) \n\t"
- "sw $zero, 84(%[outptr]) \n\t"
- "sw $zero, 96(%[outptr]) \n\t"
- "sw $zero, 100(%[outptr]) \n\t"
- "sw $zero, 112(%[outptr]) \n\t"
- "sw $zero, 116(%[outptr]) \n\t"
-
- :
- : [outptr] "r" (outptr)
- );
-
-
- // Then transform columns and add to dest
- idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride);
-}
-
-void vp9_idct8x8_1_add_dspr2(const int16_t *input, uint8_t *dest,
- int dest_stride) {
- uint32_t pos = 45;
- int32_t out;
- int32_t r;
- int32_t a1, absa1;
- int32_t t1, t2, vector_a1, vector_1, vector_2;
-
-  /* bit position for extract from acc */
- __asm__ __volatile__ (
- "wrdsp %[pos], 1 \n\t"
-
- :
- : [pos] "r" (pos)
- );
-
- out = DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input[0]);
- __asm__ __volatile__ (
- "addi %[out], %[out], 16 \n\t"
- "sra %[a1], %[out], 5 \n\t"
-
- : [out] "+r" (out), [a1] "=r" (a1)
- :
- );
-
- if (a1 < 0) {
- /* use quad-byte
- * input and output memory are four byte aligned */
- __asm__ __volatile__ (
- "abs %[absa1], %[a1] \n\t"
- "replv.qb %[vector_a1], %[absa1] \n\t"
-
- : [absa1] "=r" (absa1), [vector_a1] "=r" (vector_a1)
- : [a1] "r" (a1)
- );
-
- for (r = 8; r--;) {
- __asm__ __volatile__ (
- "lw %[t1], 0(%[dest]) \n\t"
- "lw %[t2], 4(%[dest]) \n\t"
- "subu_s.qb %[vector_1], %[t1], %[vector_a1] \n\t"
- "subu_s.qb %[vector_2], %[t2], %[vector_a1] \n\t"
- "sw %[vector_1], 0(%[dest]) \n\t"
- "sw %[vector_2], 4(%[dest]) \n\t"
- "add %[dest], %[dest], %[dest_stride] \n\t"
-
- : [t1] "=&r" (t1), [t2] "=&r" (t2),
- [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2),
- [dest] "+&r" (dest)
- : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1)
- );
- }
- } else {
- /* use quad-byte
- * input and output memory are four byte aligned */
- __asm__ __volatile__ (
- "replv.qb %[vector_a1], %[a1] \n\t"
-
- : [vector_a1] "=r" (vector_a1)
- : [a1] "r" (a1)
- );
-
- for (r = 8; r--;) {
- __asm__ __volatile__ (
- "lw %[t1], 0(%[dest]) \n\t"
- "lw %[t2], 4(%[dest]) \n\t"
- "addu_s.qb %[vector_1], %[t1], %[vector_a1] \n\t"
- "addu_s.qb %[vector_2], %[t2], %[vector_a1] \n\t"
- "sw %[vector_1], 0(%[dest]) \n\t"
- "sw %[vector_2], 4(%[dest]) \n\t"
- "add %[dest], %[dest], %[dest_stride] \n\t"
-
- : [t1] "=&r" (t1), [t2] "=&r" (t2),
- [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2),
- [dest] "+r" (dest)
- : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1)
- );
- }
- }
-}
#endif // #if HAVE_DSPR2
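The column passes deleted above clamp each reconstructed pixel through vpx_ff_cropTbl, a padded clip table that the lbux instruction indexes in one step (the exported pointer is pre-biased, so negative sums land on entry 0 and sums above 255 land on entry 255). The add-round-clip step those lookups implement, written portably; the 8x8 pass rounds with +16 >> 5, matching the "addi 16 / sra 5" pairs above, and the helper name is illustrative:

    #include <stdint.h>

    static void idct8_col_add_clip_sketch(const int16_t *col, uint8_t *dest,
                                          int stride) {
      int i;
      for (i = 0; i < 8; ++i, dest += stride) {
        const int v = *dest + ((col[i] + 16) >> 5);   /* ROUND_POWER_OF_TWO(x, 5) */
        *dest = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);  /* the table clamp */
      }
    }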
diff --git a/vp9/common/mips/msa/vp9_idct16x16_msa.c b/vp9/common/mips/msa/vp9_idct16x16_msa.c
index 59b8930b5..5adf0aaac 100644
--- a/vp9/common/mips/msa/vp9_idct16x16_msa.c
+++ b/vp9/common/mips/msa/vp9_idct16x16_msa.c
@@ -24,13 +24,13 @@ void vp9_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
/* transform rows */
for (i = 0; i < 2; ++i) {
/* process 16 * 8 block */
- vp9_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+ vpx_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
}
/* transform columns */
for (i = 0; i < 2; ++i) {
/* process 8 * 16 block */
- vp9_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
+ vpx_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
dst_stride);
}
break;
@@ -38,12 +38,12 @@ void vp9_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
/* transform rows */
for (i = 0; i < 2; ++i) {
/* process 16 * 8 block */
- vp9_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+ vpx_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
}
/* transform columns */
for (i = 0; i < 2; ++i) {
- vp9_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
+ vpx_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
(dst + (i << 3)), dst_stride);
}
break;
@@ -51,13 +51,13 @@ void vp9_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
/* transform rows */
for (i = 0; i < 2; ++i) {
/* process 16 * 8 block */
- vp9_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+ vpx_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
}
/* transform columns */
for (i = 0; i < 2; ++i) {
/* process 8 * 16 block */
- vp9_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
+ vpx_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
dst_stride);
}
break;
@@ -65,12 +65,12 @@ void vp9_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
/* transform rows */
for (i = 0; i < 2; ++i) {
/* process 16 * 8 block */
- vp9_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+ vpx_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
}
/* transform columns */
for (i = 0; i < 2; ++i) {
- vp9_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
+ vpx_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
(dst + (i << 3)), dst_stride);
}
break;
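The MSA hunks above are pure renames, but they also show the standard separable structure: a 16x16 inverse transform is one 1D pass over every row followed by one 1D pass over every column of the intermediate (the MSA code does each pass in two 8-wide stripes, which is what the i << 7 and i << 3 offsets select). A scalar sketch of that structure, assuming a hypothetical 1D helper idct16_1d():

    #include <stdint.h>

    void idct16_1d(const int16_t *in, int16_t *out);   /* hypothetical 1D idct */

    static void idct16x16_sketch(const int16_t *input, int16_t *out) {
      int16_t tmp[16];
      int r, c, i;
      for (r = 0; r < 16; ++r)                         /* row pass */
        idct16_1d(input + r * 16, out + r * 16);
      for (c = 0; c < 16; ++c) {                       /* column pass */
        for (i = 0; i < 16; ++i) tmp[i] = out[i * 16 + c];
        idct16_1d(tmp, tmp);
        for (i = 0; i < 16; ++i) out[i * 16 + c] = tmp[i];
      }
    }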
diff --git a/vp9/common/vp9_alloccommon.c b/vp9/common/vp9_alloccommon.c
index 8eda491de..a3e01bf38 100644
--- a/vp9/common/vp9_alloccommon.c
+++ b/vp9/common/vp9_alloccommon.c
@@ -16,7 +16,6 @@
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_onyxc_int.h"
-#include "vp9/common/vp9_systemdependent.h"
// TODO(hkuang): Don't need to lock the whole pool after implementing atomic
// frame reference count.
diff --git a/vp9/common/vp9_common.h b/vp9/common/vp9_common.h
index c249ad4d7..76e7cd440 100644
--- a/vp9/common/vp9_common.h
+++ b/vp9/common/vp9_common.h
@@ -19,7 +19,7 @@
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx/vpx_integer.h"
-#include "vp9/common/vp9_systemdependent.h"
+#include "vpx_ports/bitops.h"
#ifdef __cplusplus
extern "C" {
diff --git a/vp9/common/vp9_idct.c b/vp9/common/vp9_idct.c
index b15f7f370..d12cd76db 100644
--- a/vp9/common/vp9_idct.c
+++ b/vp9/common/vp9_idct.c
@@ -14,7 +14,6 @@
#include "./vpx_dsp_rtcd.h"
#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_idct.h"
-#include "vp9/common/vp9_systemdependent.h"
#include "vpx_dsp/inv_txfm.h"
#include "vpx_ports/mem.h"
@@ -123,18 +122,18 @@ void vp9_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
void vp9_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob) {
if (eob > 1)
- vp9_idct4x4_16_add(input, dest, stride);
+ vpx_idct4x4_16_add(input, dest, stride);
else
- vp9_idct4x4_1_add(input, dest, stride);
+ vpx_idct4x4_1_add(input, dest, stride);
}
void vp9_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob) {
if (eob > 1)
- vp9_iwht4x4_16_add(input, dest, stride);
+ vpx_iwht4x4_16_add(input, dest, stride);
else
- vp9_iwht4x4_1_add(input, dest, stride);
+ vpx_iwht4x4_1_add(input, dest, stride);
}
void vp9_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
@@ -148,11 +147,11 @@ void vp9_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
// Combine that with code here.
if (eob == 1)
// DC only DCT coefficient
- vp9_idct8x8_1_add(input, dest, stride);
+ vpx_idct8x8_1_add(input, dest, stride);
else if (eob <= 12)
- vp9_idct8x8_12_add(input, dest, stride);
+ vpx_idct8x8_12_add(input, dest, stride);
else
- vp9_idct8x8_64_add(input, dest, stride);
+ vpx_idct8x8_64_add(input, dest, stride);
}
void vp9_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
@@ -161,22 +160,22 @@ void vp9_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
* coefficients. Use eobs to separate different cases. */
if (eob == 1)
/* DC only DCT coefficient. */
- vp9_idct16x16_1_add(input, dest, stride);
+ vpx_idct16x16_1_add(input, dest, stride);
else if (eob <= 10)
- vp9_idct16x16_10_add(input, dest, stride);
+ vpx_idct16x16_10_add(input, dest, stride);
else
- vp9_idct16x16_256_add(input, dest, stride);
+ vpx_idct16x16_256_add(input, dest, stride);
}
void vp9_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob) {
if (eob == 1)
- vp9_idct32x32_1_add(input, dest, stride);
+ vpx_idct32x32_1_add(input, dest, stride);
else if (eob <= 34)
// non-zero coeff only in upper-left 8x8
- vp9_idct32x32_34_add(input, dest, stride);
+ vpx_idct32x32_34_add(input, dest, stride);
else
- vp9_idct32x32_1024_add(input, dest, stride);
+ vpx_idct32x32_1024_add(input, dest, stride);
}
// iht
@@ -210,10 +209,10 @@ void vp9_iht16x16_add(TX_TYPE tx_type, const tran_low_t *input, uint8_t *dest,
void vp9_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int tx_type, int bd) {
const highbd_transform_2d IHT_4[] = {
- { vp9_highbd_idct4_c, vp9_highbd_idct4_c }, // DCT_DCT = 0
- { highbd_iadst4_c, vp9_highbd_idct4_c }, // ADST_DCT = 1
- { vp9_highbd_idct4_c, highbd_iadst4_c }, // DCT_ADST = 2
- { highbd_iadst4_c, highbd_iadst4_c } // ADST_ADST = 3
+ { vpx_highbd_idct4_c, vpx_highbd_idct4_c }, // DCT_DCT = 0
+ { vpx_highbd_iadst4_c, vpx_highbd_idct4_c }, // ADST_DCT = 1
+ { vpx_highbd_idct4_c, vpx_highbd_iadst4_c }, // DCT_ADST = 2
+ { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c } // ADST_ADST = 3
};
uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
@@ -242,10 +241,10 @@ void vp9_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
}
static const highbd_transform_2d HIGH_IHT_8[] = {
- { vp9_highbd_idct8_c, vp9_highbd_idct8_c }, // DCT_DCT = 0
- { highbd_iadst8_c, vp9_highbd_idct8_c }, // ADST_DCT = 1
- { vp9_highbd_idct8_c, highbd_iadst8_c }, // DCT_ADST = 2
- { highbd_iadst8_c, highbd_iadst8_c } // ADST_ADST = 3
+ { vpx_highbd_idct8_c, vpx_highbd_idct8_c }, // DCT_DCT = 0
+ { vpx_highbd_iadst8_c, vpx_highbd_idct8_c }, // ADST_DCT = 1
+ { vpx_highbd_idct8_c, vpx_highbd_iadst8_c }, // DCT_ADST = 2
+ { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c } // ADST_ADST = 3
};
void vp9_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
@@ -277,10 +276,10 @@ void vp9_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
}
static const highbd_transform_2d HIGH_IHT_16[] = {
- { vp9_highbd_idct16_c, vp9_highbd_idct16_c }, // DCT_DCT = 0
- { highbd_iadst16_c, vp9_highbd_idct16_c }, // ADST_DCT = 1
- { vp9_highbd_idct16_c, highbd_iadst16_c }, // DCT_ADST = 2
- { highbd_iadst16_c, highbd_iadst16_c } // ADST_ADST = 3
+ { vpx_highbd_idct16_c, vpx_highbd_idct16_c }, // DCT_DCT = 0
+ { vpx_highbd_iadst16_c, vpx_highbd_idct16_c }, // ADST_DCT = 1
+ { vpx_highbd_idct16_c, vpx_highbd_iadst16_c }, // DCT_ADST = 2
+ { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c } // ADST_ADST = 3
};
void vp9_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
@@ -315,18 +314,18 @@ void vp9_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
void vp9_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob, int bd) {
if (eob > 1)
- vp9_highbd_idct4x4_16_add(input, dest, stride, bd);
+ vpx_highbd_idct4x4_16_add(input, dest, stride, bd);
else
- vp9_highbd_idct4x4_1_add(input, dest, stride, bd);
+ vpx_highbd_idct4x4_1_add(input, dest, stride, bd);
}
void vp9_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob, int bd) {
if (eob > 1)
- vp9_highbd_iwht4x4_16_add(input, dest, stride, bd);
+ vpx_highbd_iwht4x4_16_add(input, dest, stride, bd);
else
- vp9_highbd_iwht4x4_1_add(input, dest, stride, bd);
+ vpx_highbd_iwht4x4_1_add(input, dest, stride, bd);
}
void vp9_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
@@ -340,11 +339,11 @@ void vp9_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
// Combine that with code here.
// DC only DCT coefficient
if (eob == 1) {
- vp9_highbd_idct8x8_1_add(input, dest, stride, bd);
+ vpx_highbd_idct8x8_1_add(input, dest, stride, bd);
} else if (eob <= 10) {
- vp9_highbd_idct8x8_10_add(input, dest, stride, bd);
+ vpx_highbd_idct8x8_10_add(input, dest, stride, bd);
} else {
- vp9_highbd_idct8x8_64_add(input, dest, stride, bd);
+ vpx_highbd_idct8x8_64_add(input, dest, stride, bd);
}
}
@@ -354,11 +353,11 @@ void vp9_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
// coefficients. Use eobs to separate different cases.
// DC only DCT coefficient.
if (eob == 1) {
- vp9_highbd_idct16x16_1_add(input, dest, stride, bd);
+ vpx_highbd_idct16x16_1_add(input, dest, stride, bd);
} else if (eob <= 10) {
- vp9_highbd_idct16x16_10_add(input, dest, stride, bd);
+ vpx_highbd_idct16x16_10_add(input, dest, stride, bd);
} else {
- vp9_highbd_idct16x16_256_add(input, dest, stride, bd);
+ vpx_highbd_idct16x16_256_add(input, dest, stride, bd);
}
}
@@ -366,11 +365,11 @@ void vp9_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd) {
// Non-zero coeff only in upper-left 8x8
if (eob == 1) {
- vp9_highbd_idct32x32_1_add(input, dest, stride, bd);
+ vpx_highbd_idct32x32_1_add(input, dest, stride, bd);
} else if (eob <= 34) {
- vp9_highbd_idct32x32_34_add(input, dest, stride, bd);
+ vpx_highbd_idct32x32_34_add(input, dest, stride, bd);
} else {
- vp9_highbd_idct32x32_1024_add(input, dest, stride, bd);
+ vpx_highbd_idct32x32_1024_add(input, dest, stride, bd);
}
}
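The thresholds in the dispatch above come straight from the scan order: eob == 1 means only the DC coefficient survived quantization, and for 32x32 an eob of at most 34 confines every nonzero coefficient to the upper-left 8x8, so a reduced kernel that never reads the rest of the block is safe. The selection pattern, isolated (the vpx_idct32x32_*_add functions are the real vpx_dsp entry points; the wrapper name is illustrative):

    static void idct32x32_dispatch_sketch(const tran_low_t *coeffs, uint8_t *dst,
                                          int stride, int eob) {
      if (eob == 1)
        vpx_idct32x32_1_add(coeffs, dst, stride);      /* DC only */
      else if (eob <= 34)
        vpx_idct32x32_34_add(coeffs, dst, stride);     /* upper-left 8x8 */
      else
        vpx_idct32x32_1024_add(coeffs, dst, stride);   /* full block */
    }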
diff --git a/vp9/common/vp9_idct.h b/vp9/common/vp9_idct.h
index 7a7dc1d64..b5a3fbf36 100644
--- a/vp9/common/vp9_idct.h
+++ b/vp9/common/vp9_idct.h
@@ -44,8 +44,8 @@ void vp9_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob);
void vp9_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob);
-void vp9_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride, int
- eob);
+void vp9_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob);
void vp9_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob);
diff --git a/vp9/common/vp9_mfqe.c b/vp9/common/vp9_mfqe.c
index bebb37eda..6d560f438 100644
--- a/vp9/common/vp9_mfqe.c
+++ b/vp9/common/vp9_mfqe.c
@@ -120,8 +120,8 @@ static void copy_mem32x32(const uint8_t *src, int src_stride,
dst + dst_stride * 16 + 16, dst_stride);
}
-void copy_mem64x64(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride) {
+static void copy_mem64x64(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride) {
copy_mem32x32(src, src_stride, dst, dst_stride);
copy_mem32x32(src + 32, src_stride, dst + 32, dst_stride);
copy_mem32x32(src + src_stride * 32, src_stride,
diff --git a/vp9/common/vp9_postproc.c b/vp9/common/vp9_postproc.c
index d26a6eb5c..1843bb95c 100644
--- a/vp9/common/vp9_postproc.c
+++ b/vp9/common/vp9_postproc.c
@@ -17,20 +17,20 @@
#include "./vp9_rtcd.h"
#include "vpx_ports/mem.h"
+#include "vpx_ports/system_state.h"
#include "vpx_scale/vpx_scale.h"
#include "vpx_scale/yv12config.h"
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_postproc.h"
-#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_textblit.h"
#if CONFIG_VP9_POSTPROC
-static const short kernel5[] = {
+static const int16_t kernel5[] = {
1, 1, 4, 1, 1
};
-const short vp9_rv[] = {
+const int16_t vp9_rv[] = {
8, 5, 2, 2, 8, 12, 4, 9, 8, 3,
0, 3, 9, 0, 0, 0, 8, 3, 14, 4,
10, 1, 11, 14, 1, 14, 9, 6, 12, 11,
@@ -320,7 +320,7 @@ void vp9_mbpost_proc_down_c(uint8_t *dst, int pitch,
int sumsq = 0;
int sum = 0;
uint8_t d[16];
- const short *rv2 = rv3 + ((c * 17) & 127);
+ const int16_t *rv2 = rv3 + ((c * 17) & 127);
for (i = -8; i <= 6; i++) {
sumsq += s[i * pitch] * s[i * pitch];
@@ -544,7 +544,7 @@ static void fillrd(struct postproc_state *state, int q, int a) {
double sigma;
int ai = a, qi = q, i;
- vp9_clear_system_state();
+ vpx_clear_system_state();
sigma = ai + .5 + .6 * (63 - qi) / 63.0;
@@ -638,7 +638,7 @@ int vp9_post_proc_frame(struct VP9Common *cm,
return 0;
}
- vp9_clear_system_state();
+ vpx_clear_system_state();
// Alloc memory for prev_mip in the first frame.
if (cm->current_video_frame == 1) {
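The vpx_clear_system_state() calls that replace vp9_clear_system_state here matter because this file mixes SIMD kernels with double arithmetic (fillrd's sigma computation): on x86 the MMX and x87 register files alias, so an EMMS must execute before floating point is touched again. Roughly what the call reduces to; a sketch only, the real x86 implementation is vpx_reset_mmx_state in vpx_ports:

    #if defined(__i386__) || defined(__x86_64__)
    #define clear_system_state_sketch() __asm__ __volatile__("emms")
    #else
    #define clear_system_state_sketch() do {} while (0)   /* no-op elsewhere */
    #endif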
diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c
index db9971da7..f83f8257a 100644
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -153,7 +153,7 @@ MV average_split_mvs(const struct macroblockd_plane *pd,
return res;
}
-void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
+static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
int bw, int bh,
int x, int y, int w, int h,
int mi_x, int mi_y) {
diff --git a/vp9/common/vp9_reconinter.h b/vp9/common/vp9_reconinter.h
index 9bc62900a..7d907748e 100644
--- a/vp9/common/vp9_reconinter.h
+++ b/vp9/common/vp9_reconinter.h
@@ -50,11 +50,6 @@ MV average_split_mvs(const struct macroblockd_plane *pd, const MODE_INFO *mi,
MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv,
int bw, int bh, int ss_x, int ss_y);
-void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
- int bw, int bh,
- int x, int y, int w, int h,
- int mi_x, int mi_y);
-
void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize);
diff --git a/vp9/common/vp9_rtcd_defs.pl b/vp9/common/vp9_rtcd_defs.pl
index c1731012f..737fc56dc 100644
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -209,7 +209,7 @@ add_proto qw/int16_t vp9_int_pro_col/, "uint8_t const *ref, const int width";
specialize qw/vp9_int_pro_col sse2 neon/;
add_proto qw/int vp9_vector_var/, "int16_t const *ref, int16_t const *src, const int bwl";
-specialize qw/vp9_vector_var sse2/;
+specialize qw/vp9_vector_var neon sse2/;
if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
add_proto qw/unsigned int vp9_highbd_avg_8x8/, "const uint8_t *, int p";
@@ -261,17 +261,6 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
specialize qw/vp9_fdct8x8_quant sse2 ssse3 neon/;
}
-#
-# Structured Similarity (SSIM)
-#
-if (vpx_config("CONFIG_INTERNAL_STATS") eq "yes") {
- add_proto qw/void vp9_ssim_parms_8x8/, "uint8_t *s, int sp, uint8_t *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr";
- specialize qw/vp9_ssim_parms_8x8/, "$sse2_x86_64";
-
- add_proto qw/void vp9_ssim_parms_16x16/, "uint8_t *s, int sp, uint8_t *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr";
- specialize qw/vp9_ssim_parms_16x16/, "$sse2_x86_64";
-}
-
# fdct functions
if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
@@ -330,14 +319,6 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
add_proto qw/void vp9_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_highbd_quantize_fp_32x32/;
- #
- # Structured Similarity (SSIM)
- #
- if (vpx_config("CONFIG_INTERNAL_STATS") eq "yes") {
- add_proto qw/void vp9_highbd_ssim_parms_8x8/, "uint16_t *s, int sp, uint16_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
- specialize qw/vp9_highbd_ssim_parms_8x8/;
- }
-
# fdct functions
add_proto qw/void vp9_highbd_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
specialize qw/vp9_highbd_fht4x4/;
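Each add_proto/specialize pair in this file feeds the RTCD generator: add_proto fixes the signature, and specialize lists the SIMD variants that runtime CPU detection may substitute (the hunk above adds a NEON variant for vp9_vector_var). The generated dispatch has roughly this shape; a sketch of the x86 side, where HAS_SSE2 is the real vpx_ports CPU flag:

    int vp9_vector_var_c(int16_t const *ref, int16_t const *src, const int bwl);
    int vp9_vector_var_sse2(int16_t const *ref, int16_t const *src, const int bwl);
    int (*vp9_vector_var)(int16_t const *ref, int16_t const *src, const int bwl);

    static void rtcd_setup_sketch(int flags) {
      vp9_vector_var = vp9_vector_var_c;           /* C fallback first */
      if (flags & HAS_SSE2)
        vp9_vector_var = vp9_vector_var_sse2;      /* then best available SIMD */
    }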
diff --git a/vp9/common/vp9_systemdependent.h b/vp9/common/vp9_systemdependent.h
deleted file mode 100644
index fc77762de..000000000
--- a/vp9/common/vp9_systemdependent.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VP9_COMMON_VP9_SYSTEMDEPENDENT_H_
-#define VP9_COMMON_VP9_SYSTEMDEPENDENT_H_
-
-#include "vpx_ports/msvc.h"
-
-#ifdef _MSC_VER
-# include <math.h> // the ceil() definition must precede intrin.h
-# if _MSC_VER > 1310 && (defined(_M_X64) || defined(_M_IX86))
-# include <intrin.h>
-# define USE_MSC_INTRINSICS
-# endif
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "./vpx_config.h"
-#if ARCH_X86 || ARCH_X86_64
-void vpx_reset_mmx_state(void);
-#define vp9_clear_system_state() vpx_reset_mmx_state()
-#else
-#define vp9_clear_system_state()
-#endif
-
-#if defined(_MSC_VER) && _MSC_VER < 1800
-// round is not defined in MSVC before VS2013.
-static INLINE int round(double x) {
- if (x < 0)
- return (int)ceil(x - 0.5);
- else
- return (int)floor(x + 0.5);
-}
-#endif
-
-// use GNU builtins where available.
-#if defined(__GNUC__) && \
- ((__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || __GNUC__ >= 4)
-static INLINE int get_msb(unsigned int n) {
- return 31 ^ __builtin_clz(n);
-}
-#elif defined(USE_MSC_INTRINSICS)
-#pragma intrinsic(_BitScanReverse)
-
-static INLINE int get_msb(unsigned int n) {
- unsigned long first_set_bit;
- _BitScanReverse(&first_set_bit, n);
- return first_set_bit;
-}
-#undef USE_MSC_INTRINSICS
-#else
-// Returns (int)floor(log2(n)). n must be > 0.
-static INLINE int get_msb(unsigned int n) {
- int log = 0;
- unsigned int value = n;
- int i;
-
- for (i = 4; i >= 0; --i) {
- const int shift = (1 << i);
- const unsigned int x = value >> shift;
- if (x != 0) {
- value = x;
- log += shift;
- }
- }
- return log;
-}
-#endif
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif // VP9_COMMON_VP9_SYSTEMDEPENDENT_H_
diff --git a/vp9/decoder/vp9_decodeframe.c b/vp9/decoder/vp9_decodeframe.c
index ecebe1efb..58b79ea27 100644
--- a/vp9/decoder/vp9_decodeframe.c
+++ b/vp9/decoder/vp9_decodeframe.c
@@ -2219,7 +2219,6 @@ void vp9_decode_frame(VP9Decoder *pbi,
} else {
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
"Decode failed. Frame data is corrupted.");
-
}
} else {
*p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index 341e6d792..33818a993 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -560,7 +560,7 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi,
is_compound, allow_hp, r)) {
xd->corrupted |= 1;
break;
- };
+ }
mi->bmi[j].as_mv[0].as_int = block[0].as_int;
if (is_compound)
diff --git a/vp9/decoder/vp9_decoder.c b/vp9/decoder/vp9_decoder.c
index 801145654..6734d0029 100644
--- a/vp9/decoder/vp9_decoder.c
+++ b/vp9/decoder/vp9_decoder.c
@@ -17,6 +17,7 @@
#include "./vpx_scale_rtcd.h"
#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/system_state.h"
#include "vpx_ports/vpx_once.h"
#include "vpx_ports/vpx_timer.h"
#include "vpx_scale/vpx_scale.h"
@@ -30,7 +31,6 @@
#endif
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
-#include "vp9/common/vp9_systemdependent.h"
#include "vp9/decoder/vp9_decodeframe.h"
#include "vp9/decoder/vp9_decoder.h"
@@ -365,7 +365,7 @@ int vp9_receive_compressed_data(VP9Decoder *pbi,
decrease_ref_count(cm->new_fb_idx, frame_bufs, pool);
unlock_buffer_pool(pool);
- vp9_clear_system_state();
+ vpx_clear_system_state();
return -1;
}
@@ -374,7 +374,7 @@ int vp9_receive_compressed_data(VP9Decoder *pbi,
swap_frame_buffers(pbi);
- vp9_clear_system_state();
+ vpx_clear_system_state();
if (!cm->show_existing_frame) {
cm->last_show_frame = cm->show_frame;
@@ -440,7 +440,7 @@ int vp9_get_raw_frame(VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd,
*sd = *cm->frame_to_show;
ret = 0;
#endif /*!CONFIG_POSTPROC*/
- vp9_clear_system_state();
+ vpx_clear_system_state();
return ret;
}
diff --git a/vp9/encoder/arm/neon/vp9_avg_neon.c b/vp9/encoder/arm/neon/vp9_avg_neon.c
index 40d7e8779..d569ec95d 100644
--- a/vp9/encoder/arm/neon/vp9_avg_neon.c
+++ b/vp9/encoder/arm/neon/vp9_avg_neon.c
@@ -9,6 +9,8 @@
*/
#include <arm_neon.h>
+#include <assert.h>
+
#include "./vp9_rtcd.h"
#include "./vpx_config.h"
@@ -114,3 +116,45 @@ int16_t vp9_int_pro_col_neon(uint8_t const *ref, const int width) {
return horizontal_add_u16x8(vec_sum);
}
+
+// ref, src = [0, 510]; their element-wise difference fits easily in 16 bits.
+// bwl = {2, 3, 4}, width = {16, 32, 64}
+int vp9_vector_var_neon(int16_t const *ref, int16_t const *src, const int bwl) {
+ int width = 4 << bwl;
+ int32x4_t sse = vdupq_n_s32(0);
+ int16x8_t total = vdupq_n_s16(0);
+
+ assert(width >= 8);
+ assert((width % 8) == 0);
+
+ do {
+ const int16x8_t r = vld1q_s16(ref);
+ const int16x8_t s = vld1q_s16(src);
+ const int16x8_t diff = vsubq_s16(r, s); // [-510, 510], 10 bits.
+ const int16x4_t diff_lo = vget_low_s16(diff);
+ const int16x4_t diff_hi = vget_high_s16(diff);
+ sse = vmlal_s16(sse, diff_lo, diff_lo); // dynamic range 26 bits.
+ sse = vmlal_s16(sse, diff_hi, diff_hi);
+ total = vaddq_s16(total, diff); // dynamic range 16 bits.
+
+ ref += 8;
+ src += 8;
+ width -= 8;
+ } while (width != 0);
+
+ {
+    // Note: 'total' could be reduced pairwise as in horizontal_add_u16x8(),
+    // but using one fewer vpaddl on 'total' and interleaving its reduction
+    // with the summation of 'sse' performed better on a Cortex-A15.
+ const int32x4_t t0 = vpaddlq_s16(total); // cascading summation of 'total'
+ const int32x2_t t1 = vadd_s32(vget_low_s32(t0), vget_high_s32(t0));
+ const int32x2_t t2 = vpadd_s32(t1, t1);
+ const int t = vget_lane_s32(t2, 0);
+ const int64x2_t s0 = vpaddlq_s32(sse); // cascading summation of 'sse'.
+ const int32x2_t s1 = vadd_s32(vreinterpret_s32_s64(vget_low_s64(s0)),
+ vreinterpret_s32_s64(vget_high_s64(s0)));
+ const int s = vget_lane_s32(s1, 0);
+ const int shift_factor = bwl + 2;
+ return s - ((t * t) >> shift_factor);
+ }
+}
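For reference, the NEON routine above computes the same quantity as the following minimal scalar sketch: the sum of squared differences minus sum squared over width, with the division done as a shift since width = 4 << bwl. The helper name is illustrative and not part of the tree.

#include <stdint.h>

// Scalar sketch of vp9_vector_var_neon(): SSE of the difference minus
// sum^2 / width, where width = 4 << bwl makes the division a shift by
// bwl + 2.
static int vector_var_sketch(const int16_t *ref, const int16_t *src,
                             int bwl) {
  const int width = 4 << bwl;  // 16, 32 or 64 elements.
  int i, sum = 0, sse = 0;
  for (i = 0; i < width; ++i) {
    const int diff = ref[i] - src[i];  // [-510, 510].
    sum += diff;
    sse += diff * diff;
  }
  return sse - ((sum * sum) >> (bwl + 2));
}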
diff --git a/vp9/encoder/vp9_aq_complexity.c b/vp9/encoder/vp9_aq_complexity.c
index bea7653d2..15f227fb8 100644
--- a/vp9/encoder/vp9_aq_complexity.c
+++ b/vp9/encoder/vp9_aq_complexity.c
@@ -10,6 +10,7 @@
#include <limits.h>
#include <math.h>
+#include "vpx_ports/system_state.h"
#include "vp9/encoder/vp9_aq_complexity.h"
#include "vp9/encoder/vp9_aq_variance.h"
@@ -47,7 +48,7 @@ void vp9_setup_in_frame_q_adj(VP9_COMP *cpi) {
struct segmentation *const seg = &cm->seg;
// Make SURE use of floating point in this function is safe.
- vp9_clear_system_state();
+ vpx_clear_system_state();
if (cm->frame_type == KEY_FRAME ||
cpi->refresh_alt_ref_frame ||
@@ -133,7 +134,7 @@ void vp9_caq_select_segment(VP9_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
double low_var_thresh;
const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth);
- vp9_clear_system_state();
+ vpx_clear_system_state();
low_var_thresh = (cpi->oxcf.pass == 2)
? MAX(cpi->twopass.mb_av_energy, MIN_DEFAULT_LV_THRESH)
: DEFAULT_LV_THRESH;
diff --git a/vp9/encoder/vp9_aq_cyclicrefresh.c b/vp9/encoder/vp9_aq_cyclicrefresh.c
index b61906388..e6b368617 100644
--- a/vp9/encoder/vp9_aq_cyclicrefresh.c
+++ b/vp9/encoder/vp9_aq_cyclicrefresh.c
@@ -11,6 +11,8 @@
#include <limits.h>
#include <math.h>
+#include "vpx_ports/system_state.h"
+
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/common/vp9_seg_common.h"
@@ -459,7 +461,10 @@ void vp9_cyclic_refresh_update_parameters(VP9_COMP *const cpi) {
cr->time_for_refresh = 0;
// Use larger delta-qp (increase rate_ratio_qdelta) for first few (~4)
// periods of the refresh cycle, after a key frame.
- if (rc->frames_since_key < 4 * cr->percent_refresh)
+ // Account for larger interval on base layer for temporal layers.
+ if (cr->percent_refresh > 0 &&
+ rc->frames_since_key < (4 * cpi->svc.number_temporal_layers) *
+ (100 / cr->percent_refresh))
cr->rate_ratio_qdelta = 3.0;
else
cr->rate_ratio_qdelta = 2.0;
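The guard on cr->percent_refresh avoids a division by zero, and the arithmetic works out as follows: 100 / percent_refresh is the number of frames in one full refresh cycle, so with, say, percent_refresh = 10 and a single temporal layer the boosted window is (4 * 1) * (100 / 10) = 40 frames, matching the old 4 * percent_refresh check at that setting, while with three temporal layers it extends to (4 * 3) * 10 = 120 frames to cover the base layer's longer refresh interval.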
@@ -503,7 +508,7 @@ void vp9_cyclic_refresh_setup(VP9_COMP *const cpi) {
int qindex_delta = 0;
int qindex2;
const double q = vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth);
- vp9_clear_system_state();
+ vpx_clear_system_state();
// Set rate threshold to some multiple (set to 2 for now) of the target
// rate (target is given by sb64_target_rate and scaled by 256).
cr->thresh_rate_sb = ((int64_t)(rc->sb64_target_rate) << 8) << 2;
diff --git a/vp9/encoder/vp9_aq_variance.c b/vp9/encoder/vp9_aq_variance.c
index f072717f1..1c99105d1 100644
--- a/vp9/encoder/vp9_aq_variance.c
+++ b/vp9/encoder/vp9_aq_variance.c
@@ -11,6 +11,7 @@
#include <math.h>
#include "vpx_ports/mem.h"
+#include "vpx_ports/system_state.h"
#include "vp9/encoder/vp9_aq_variance.h"
@@ -19,7 +20,6 @@
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_segmentation.h"
-#include "vp9/common/vp9_systemdependent.h"
#define ENERGY_MIN (-4)
#define ENERGY_MAX (1)
@@ -56,7 +56,7 @@ void vp9_vaq_frame_setup(VP9_COMP *cpi) {
seg->abs_delta = SEGMENT_DELTADATA;
- vp9_clear_system_state();
+ vpx_clear_system_state();
for (i = 0; i < MAX_SEGMENTS; ++i) {
int qindex_delta =
@@ -191,7 +191,7 @@ static unsigned int block_variance(VP9_COMP *cpi, MACROBLOCK *x,
double vp9_log_block_var(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
unsigned int var = block_variance(cpi, x, bs);
- vp9_clear_system_state();
+ vpx_clear_system_state();
return log(var + 1.0);
}
@@ -199,7 +199,7 @@ double vp9_log_block_var(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
int vp9_block_energy(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
double energy;
double energy_midpoint;
- vp9_clear_system_state();
+ vpx_clear_system_state();
energy_midpoint =
(cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT;
energy = vp9_log_block_var(cpi, x, bs) - energy_midpoint;
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index f06bd56ea..d0de095ac 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -16,6 +16,7 @@
#include "vpx_dsp/bitwriter_buffer.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem_ops.h"
+#include "vpx_ports/system_state.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
@@ -23,7 +24,6 @@
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_seg_common.h"
-#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"
#include "vp9/encoder/vp9_cost.h"
@@ -1240,7 +1240,7 @@ void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) {
uncompressed_hdr_size = vpx_wb_bytes_written(&wb);
data += uncompressed_hdr_size;
- vp9_clear_system_state();
+ vpx_clear_system_state();
first_part_size = write_compressed_header(cpi, data);
data += first_part_size;
diff --git a/vp9/encoder/vp9_blockiness.c b/vp9/encoder/vp9_blockiness.c
index fc3eac6c7..661496ee0 100644
--- a/vp9/encoder/vp9_blockiness.c
+++ b/vp9/encoder/vp9_blockiness.c
@@ -17,6 +17,7 @@
#include "vpx_dsp/vpx_convolve.h"
#include "vpx_dsp/vpx_filter.h"
#include "vpx_ports/mem.h"
+#include "vpx_ports/system_state.h"
static int horizontal_filter(const uint8_t *s) {
return (s[1] - s[-2]) * 2 + (s[-1] - s[0]) * 6;
@@ -123,7 +124,7 @@ double vp9_get_blockiness(const unsigned char *img1, int img1_pitch,
int width, int height ) {
double blockiness = 0;
int i, j;
- vp9_clear_system_state();
+ vpx_clear_system_state();
for (i = 0; i < height; i += 4, img1 += img1_pitch * 4,
img2 += img2_pitch * 4) {
for (j = 0; j < width; j += 4) {
diff --git a/vp9/encoder/vp9_dct.c b/vp9/encoder/vp9_dct.c
index 8d0d10f81..f94540baa 100644
--- a/vp9/encoder/vp9_dct.c
+++ b/vp9/encoder/vp9_dct.c
@@ -17,7 +17,6 @@
#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_idct.h"
-#include "vp9/common/vp9_systemdependent.h"
#include "vpx_dsp/fwd_txfm.h"
#include "vpx_ports/mem.h"
diff --git a/vp9/encoder/vp9_denoiser.c b/vp9/encoder/vp9_denoiser.c
index f1d73790a..1f9a28c1e 100644
--- a/vp9/encoder/vp9_denoiser.c
+++ b/vp9/encoder/vp9_denoiser.c
@@ -63,10 +63,6 @@ static int sse_diff_thresh(BLOCK_SIZE bs, int increase_denoising,
}
}
-int total_adj_strong_thresh(BLOCK_SIZE bs, int increase_denoising) {
- return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 3 : 2);
-}
-
static int total_adj_weak_thresh(BLOCK_SIZE bs, int increase_denoising) {
return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 3 : 2);
}
diff --git a/vp9/encoder/vp9_denoiser.h b/vp9/encoder/vp9_denoiser.h
index 8eb5da1b8..b2af792bb 100644
--- a/vp9/encoder/vp9_denoiser.h
+++ b/vp9/encoder/vp9_denoiser.h
@@ -57,7 +57,12 @@ int vp9_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height,
int border);
#if CONFIG_VP9_TEMPORAL_DENOISING
-int total_adj_strong_thresh(BLOCK_SIZE bs, int increase_denoising);
+// This function is used by both the C and SSE2 denoiser implementations.
+// Define it as a static function within the scope where vp9_denoiser.h
+// is referenced.
+static int total_adj_strong_thresh(BLOCK_SIZE bs, int increase_denoising) {
+ return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 3 : 2);
+}
#endif
void vp9_denoiser_free(VP9_DENOISER *denoiser);
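With the definition in the header, every translation unit that includes vp9_denoiser.h (the C denoiser and its SSE2 counterpart alike) compiles its own static copy of total_adj_strong_thresh, which is why the extern declaration here and the out-of-line definition in vp9_denoiser.c are removed in the same change.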
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index e7f14eb7e..63ab15773 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -18,6 +18,7 @@
#include "vpx_ports/mem.h"
#include "vpx_ports/vpx_timer.h"
+#include "vpx_ports/system_state.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
@@ -29,7 +30,6 @@
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
-#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"
#include "vp9/encoder/vp9_aq_complexity.h"
@@ -1152,7 +1152,7 @@ static int set_segment_rdmult(VP9_COMP *const cpi,
int segment_qindex;
VP9_COMMON *const cm = &cpi->common;
vp9_init_plane_quantizers(cpi, x);
- vp9_clear_system_state();
+ vpx_clear_system_state();
segment_qindex = vp9_get_qindex(&cm->seg, segment_id,
cm->base_qindex);
return vp9_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
@@ -1173,7 +1173,7 @@ static void rd_pick_sb_modes(VP9_COMP *cpi,
const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
int i, orig_rdmult;
- vp9_clear_system_state();
+ vpx_clear_system_state();
// Use the lower precision, but faster, 32x32 fdct for mode selection.
x->use_lp32x32fdct = 1;
@@ -3925,7 +3925,9 @@ static void encode_frame_internal(VP9_COMP *cpi) {
}
vp9_zero(x->zcoeff_blk);
- if (cm->frame_type != KEY_FRAME && cpi->rc.frames_since_golden == 0)
+ if (cm->frame_type != KEY_FRAME &&
+ cpi->rc.frames_since_golden == 0 &&
+ !cpi->use_svc)
cpi->ref_frame_flags &= (~VP9_GOLD_FLAG);
if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION)
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index 903839743..00e4c610b 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -21,7 +21,6 @@
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_scan.h"
-#include "vp9/common/vp9_systemdependent.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_rd.h"
@@ -58,7 +57,7 @@ typedef struct vp9_token_state {
int error;
int next;
int16_t token;
- short qc;
+ int16_t qc;
} vp9_token_state;
// TODO(jimbankoski): experiment to find optimal RD numbers.
@@ -434,25 +433,25 @@ void vp9_xform_quant_dc(MACROBLOCK *x, int plane, int block,
switch (tx_size) {
case TX_32X32:
vpx_highbd_fdct32x32_1(src_diff, coeff, diff_stride);
- vp9_highbd_quantize_dc_32x32(coeff, x->skip_block, p->round,
+ vpx_highbd_quantize_dc_32x32(coeff, x->skip_block, p->round,
p->quant_fp[0], qcoeff, dqcoeff,
pd->dequant[0], eob);
break;
case TX_16X16:
vpx_highbd_fdct16x16_1(src_diff, coeff, diff_stride);
- vp9_highbd_quantize_dc(coeff, 256, x->skip_block, p->round,
+ vpx_highbd_quantize_dc(coeff, 256, x->skip_block, p->round,
p->quant_fp[0], qcoeff, dqcoeff,
pd->dequant[0], eob);
break;
case TX_8X8:
vpx_highbd_fdct8x8_1(src_diff, coeff, diff_stride);
- vp9_highbd_quantize_dc(coeff, 64, x->skip_block, p->round,
+ vpx_highbd_quantize_dc(coeff, 64, x->skip_block, p->round,
p->quant_fp[0], qcoeff, dqcoeff,
pd->dequant[0], eob);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
- vp9_highbd_quantize_dc(coeff, 16, x->skip_block, p->round,
+ vpx_highbd_quantize_dc(coeff, 16, x->skip_block, p->round,
p->quant_fp[0], qcoeff, dqcoeff,
pd->dequant[0], eob);
break;
@@ -466,25 +465,25 @@ void vp9_xform_quant_dc(MACROBLOCK *x, int plane, int block,
switch (tx_size) {
case TX_32X32:
vpx_fdct32x32_1(src_diff, coeff, diff_stride);
- vp9_quantize_dc_32x32(coeff, x->skip_block, p->round,
+ vpx_quantize_dc_32x32(coeff, x->skip_block, p->round,
p->quant_fp[0], qcoeff, dqcoeff,
pd->dequant[0], eob);
break;
case TX_16X16:
vpx_fdct16x16_1(src_diff, coeff, diff_stride);
- vp9_quantize_dc(coeff, 256, x->skip_block, p->round,
+ vpx_quantize_dc(coeff, 256, x->skip_block, p->round,
p->quant_fp[0], qcoeff, dqcoeff,
pd->dequant[0], eob);
break;
case TX_8X8:
vpx_fdct8x8_1(src_diff, coeff, diff_stride);
- vp9_quantize_dc(coeff, 64, x->skip_block, p->round,
+ vpx_quantize_dc(coeff, 64, x->skip_block, p->round,
p->quant_fp[0], qcoeff, dqcoeff,
pd->dequant[0], eob);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
- vp9_quantize_dc(coeff, 16, x->skip_block, p->round,
+ vpx_quantize_dc(coeff, 16, x->skip_block, p->round,
p->quant_fp[0], qcoeff, dqcoeff,
pd->dequant[0], eob);
break;
@@ -515,28 +514,28 @@ void vp9_xform_quant(MACROBLOCK *x, int plane, int block,
switch (tx_size) {
case TX_32X32:
highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
- vp9_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,
+ vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,
p->round, p->quant, p->quant_shift, qcoeff,
dqcoeff, pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_16X16:
vpx_highbd_fdct16x16(src_diff, coeff, diff_stride);
- vp9_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
+ vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_8X8:
vpx_highbd_fdct8x8(src_diff, coeff, diff_stride);
- vp9_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
+ vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
- vp9_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
+ vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob,
scan_order->scan, scan_order->iscan);
@@ -551,28 +550,28 @@ void vp9_xform_quant(MACROBLOCK *x, int plane, int block,
switch (tx_size) {
case TX_32X32:
fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
- vp9_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
+ vpx_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob, scan_order->scan,
scan_order->iscan);
break;
case TX_16X16:
vpx_fdct16x16(src_diff, coeff, diff_stride);
- vp9_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
+ vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_8X8:
vpx_fdct8x8(src_diff, coeff, diff_stride);
- vp9_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
+ vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
- vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
+ vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob,
scan_order->scan, scan_order->iscan);
@@ -815,7 +814,7 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
vpx_highbd_subtract_block(32, 32, src_diff, diff_stride,
src, src_stride, dst, dst_stride, xd->bd);
highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
- vp9_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,
+ vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,
p->round, p->quant, p->quant_shift,
qcoeff, dqcoeff, pd->dequant, eob,
scan_order->scan, scan_order->iscan);
@@ -832,7 +831,7 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
vpx_highbd_fdct16x16(src_diff, coeff, diff_stride);
else
vp9_highbd_fht16x16(src_diff, coeff, diff_stride, tx_type);
- vp9_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
+ vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob,
scan_order->scan, scan_order->iscan);
@@ -850,7 +849,7 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
vpx_highbd_fdct8x8(src_diff, coeff, diff_stride);
else
vp9_highbd_fht8x8(src_diff, coeff, diff_stride, tx_type);
- vp9_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
+ vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob,
scan_order->scan, scan_order->iscan);
@@ -868,7 +867,7 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
vp9_highbd_fht4x4(src_diff, coeff, diff_stride, tx_type);
else
x->fwd_txm4x4(src_diff, coeff, diff_stride);
- vp9_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
+ vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob,
scan_order->scan, scan_order->iscan);
@@ -901,7 +900,7 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
vpx_subtract_block(32, 32, src_diff, diff_stride,
src, src_stride, dst, dst_stride);
fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
- vp9_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
+ vpx_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob, scan_order->scan,
scan_order->iscan);
@@ -914,7 +913,7 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
vpx_subtract_block(16, 16, src_diff, diff_stride,
src, src_stride, dst, dst_stride);
vp9_fht16x16(src_diff, coeff, diff_stride, tx_type);
- vp9_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
+ vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob, scan_order->scan,
scan_order->iscan);
@@ -927,7 +926,7 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
vpx_subtract_block(8, 8, src_diff, diff_stride,
src, src_stride, dst, dst_stride);
vp9_fht8x8(src_diff, coeff, diff_stride, tx_type);
- vp9_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant,
+ vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant,
p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob, scan_order->scan,
scan_order->iscan);
@@ -943,7 +942,7 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
vp9_fht4x4(src_diff, coeff, diff_stride, tx_type);
else
x->fwd_txm4x4(src_diff, coeff, diff_stride);
- vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
+ vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob, scan_order->scan,
scan_order->iscan);
diff --git a/vp9/encoder/vp9_encodemv.c b/vp9/encoder/vp9_encodemv.c
index a1d77dba8..7848c93a8 100644
--- a/vp9/encoder/vp9_encodemv.c
+++ b/vp9/encoder/vp9_encodemv.c
@@ -12,7 +12,6 @@
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropymode.h"
-#include "vp9/common/vp9_systemdependent.h"
#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_encodemv.h"
diff --git a/vp9/encoder/vp9_encoder.c b/vp9/encoder/vp9_encoder.c
index 8aee22756..370a581fd 100644
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -18,7 +18,11 @@
#include "./vpx_scale_rtcd.h"
#include "vpx/internal/vpx_psnr.h"
#include "vpx_dsp/vpx_filter.h"
+#if CONFIG_INTERNAL_STATS
+#include "vpx_dsp/ssim.h"
+#endif
#include "vpx_ports/mem.h"
+#include "vpx_ports/system_state.h"
#include "vpx_ports/vpx_timer.h"
#include "vpx_scale/vpx_scale.h"
@@ -30,7 +34,6 @@
#endif
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
-#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"
#include "vp9/encoder/vp9_aq_complexity.h"
@@ -51,9 +54,6 @@
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_skin_detection.h"
#include "vp9/encoder/vp9_speed_features.h"
-#if CONFIG_INTERNAL_STATS
-#include "vp9/encoder/vp9_ssim.h"
-#endif
#include "vp9/encoder/vp9_svc_layercontext.h"
#include "vp9/encoder/vp9_temporal_filter.h"
@@ -1709,7 +1709,8 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf,
}
if (cpi->b_calculate_consistency) {
- cpi->ssim_vars = vpx_malloc(sizeof(*cpi->ssim_vars)*720*480);
+ cpi->ssim_vars = vpx_malloc(sizeof(*cpi->ssim_vars) *
+ 4 * cpi->common.mi_rows * cpi->common.mi_cols);
cpi->worst_consistency = 100.0;
}
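For scale, assuming the usual 8x8 mode-info granularity: a 1920x1080 frame has mi_rows = 135 and mi_cols = 240, so the new allocation is 4 * 135 * 240 = 129,600 elements, sized to the frame. The old fixed 720 * 480 = 345,600-element buffer over-allocated for frames up to 1080p yet would be exceeded by larger frames (a 3840x2160 frame needs 4 * 270 * 480 = 518,400 elements).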
@@ -1925,7 +1926,7 @@ void vp9_remove_compressor(VP9_COMP *cpi) {
if (cpi && (cm->current_video_frame > 0)) {
#if CONFIG_INTERNAL_STATS
- vp9_clear_system_state();
+ vpx_clear_system_state();
if (cpi->oxcf.pass != 1) {
char headings[512] = {0};
@@ -2747,7 +2748,7 @@ static void loopfilter_frame(VP9_COMP *cpi, VP9_COMMON *cm) {
} else {
struct vpx_usec_timer timer;
- vp9_clear_system_state();
+ vpx_clear_system_state();
vpx_usec_timer_start(&timer);
@@ -2931,7 +2932,7 @@ static void output_frame_level_debug_stats(VP9_COMP *cpi) {
FILE *const f = fopen("tmp.stt", cm->current_video_frame ? "a" : "w");
int64_t recon_err;
- vp9_clear_system_state();
+ vpx_clear_system_state();
recon_err = vp9_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
@@ -3183,7 +3184,7 @@ static void encode_without_recode_loop(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
int q = 0, bottom_index = 0, top_index = 0; // Dummy variables.
- vp9_clear_system_state();
+ vpx_clear_system_state();
set_frame_size(cpi);
@@ -3247,7 +3248,7 @@ static void encode_without_recode_loop(VP9_COMP *cpi) {
// Update the skip mb flag probabilities based on the distribution
// seen in the last encoder iteration.
// update_base_skip_probs(cpi);
- vp9_clear_system_state();
+ vpx_clear_system_state();
}
static void encode_with_recode_loop(VP9_COMP *cpi,
@@ -3268,7 +3269,7 @@ static void encode_with_recode_loop(VP9_COMP *cpi,
set_size_independent_vars(cpi);
do {
- vp9_clear_system_state();
+ vpx_clear_system_state();
set_frame_size(cpi);
@@ -3332,7 +3333,7 @@ static void encode_with_recode_loop(VP9_COMP *cpi,
// seen in the last encoder iteration.
// update_base_skip_probs(cpi);
- vp9_clear_system_state();
+ vpx_clear_system_state();
// Dummy pack of the bitstream using up to date stats to get an
// accurate estimate of output frame size to determine if we need
@@ -3636,7 +3637,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
TX_SIZE t;
set_ext_overrides(cpi);
- vp9_clear_system_state();
+ vpx_clear_system_state();
// Set the arf sign bias for this frame.
set_arf_sign_bias(cpi);
@@ -3727,7 +3728,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
}
}
- vp9_clear_system_state();
+ vpx_clear_system_state();
#if CONFIG_INTERNAL_STATS
memset(cpi->mode_chosen_counts, 0,
@@ -4247,7 +4248,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
}
// Clear down mmx registers
- vp9_clear_system_state();
+ vpx_clear_system_state();
// adjust frame rates based on timestamps given
if (cm->show_frame) {
@@ -4399,7 +4400,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
vp9_deblock(cm->frame_to_show, &cm->post_proc_buffer,
cm->lf.filter_level * 10 / 6);
#endif
- vp9_clear_system_state();
+ vpx_clear_system_state();
#if CONFIG_VP9_HIGHBITDEPTH
calc_highbd_psnr(orig, pp, &psnr2, cpi->td.mb.e_mbd.bd,
@@ -4415,13 +4416,13 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- frame_ssim2 = vp9_highbd_calc_ssim(orig, recon, &weight,
+ frame_ssim2 = vpx_highbd_calc_ssim(orig, recon, &weight,
(int)cm->bit_depth);
} else {
- frame_ssim2 = vp9_calc_ssim(orig, recon, &weight);
+ frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
}
#else
- frame_ssim2 = vp9_calc_ssim(orig, recon, &weight);
+ frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
#endif // CONFIG_VP9_HIGHBITDEPTH
cpi->worst_ssim= MIN(cpi->worst_ssim, frame_ssim2);
@@ -4430,13 +4431,13 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- frame_ssim2 = vp9_highbd_calc_ssim(
+ frame_ssim2 = vpx_highbd_calc_ssim(
orig, &cm->post_proc_buffer, &weight, (int)cm->bit_depth);
} else {
- frame_ssim2 = vp9_calc_ssim(orig, &cm->post_proc_buffer, &weight);
+ frame_ssim2 = vpx_calc_ssim(orig, &cm->post_proc_buffer, &weight);
}
#else
- frame_ssim2 = vp9_calc_ssim(orig, &cm->post_proc_buffer, &weight);
+ frame_ssim2 = vpx_calc_ssim(orig, &cm->post_proc_buffer, &weight);
#endif // CONFIG_VP9_HIGHBITDEPTH
cpi->summedp_quality += frame_ssim2 * weight;
@@ -4471,7 +4472,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
if (!cm->use_highbitdepth)
#endif
{
- double this_inconsistency = vp9_get_ssim_metrics(
+ double this_inconsistency = vpx_get_ssim_metrics(
cpi->Source->y_buffer, cpi->Source->y_stride,
cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride,
cpi->Source->y_width, cpi->Source->y_height, cpi->ssim_vars,
@@ -4491,14 +4492,14 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
double y, u, v, frame_all;
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- frame_all = vp9_highbd_calc_ssimg(cpi->Source, cm->frame_to_show, &y,
+ frame_all = vpx_highbd_calc_ssimg(cpi->Source, cm->frame_to_show, &y,
&u, &v, (int)cm->bit_depth);
} else {
- frame_all = vp9_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u,
+ frame_all = vpx_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u,
&v);
}
#else
- frame_all = vp9_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, &v);
+ frame_all = vpx_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, &v);
#endif // CONFIG_VP9_HIGHBITDEPTH
adjust_image_stat(y, u, v, frame_all, &cpi->ssimg);
}
@@ -4507,7 +4508,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
#endif
{
double y, u, v, frame_all;
- frame_all = vp9_calc_fastssim(cpi->Source, cm->frame_to_show, &y, &u,
+ frame_all = vpx_calc_fastssim(cpi->Source, cm->frame_to_show, &y, &u,
&v);
adjust_image_stat(y, u, v, frame_all, &cpi->fastssim);
/* TODO(JBB): add 10/12 bit support */
@@ -4517,7 +4518,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
#endif
{
double y, u, v, frame_all;
- frame_all = vp9_psnrhvs(cpi->Source, cm->frame_to_show, &y, &u, &v);
+ frame_all = vpx_psnrhvs(cpi->Source, cm->frame_to_show, &y, &u, &v);
adjust_image_stat(y, u, v, frame_all, &cpi->psnrhvs);
}
}
@@ -4574,7 +4575,7 @@ int vp9_get_preview_raw_frame(VP9_COMP *cpi, YV12_BUFFER_CONFIG *dest,
ret = -1;
}
#endif // !CONFIG_VP9_POSTPROC
- vp9_clear_system_state();
+ vpx_clear_system_state();
return ret;
}
}
diff --git a/vp9/encoder/vp9_encoder.h b/vp9/encoder/vp9_encoder.h
index 4d4da9283..78d55e1ea 100644
--- a/vp9/encoder/vp9_encoder.h
+++ b/vp9/encoder/vp9_encoder.h
@@ -16,6 +16,10 @@
#include "./vpx_config.h"
#include "vpx/internal/vpx_codec_internal.h"
#include "vpx/vp8cx.h"
+#if CONFIG_INTERNAL_STATS
+#include "vpx_dsp/ssim.h"
+#endif
+#include "vpx_dsp/variance.h"
#include "vpx_util/vpx_thread.h"
#include "vp9/common/vp9_alloccommon.h"
@@ -34,13 +38,9 @@
#include "vp9/encoder/vp9_quantize.h"
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_rd.h"
-#if CONFIG_INTERNAL_STATS
-#include "vp9/encoder/vp9_ssim.h"
-#endif
#include "vp9/encoder/vp9_speed_features.h"
#include "vp9/encoder/vp9_svc_layercontext.h"
#include "vp9/encoder/vp9_tokenize.h"
-#include "vpx_dsp/variance.h"
#if CONFIG_VP9_TEMPORAL_DENOISING
#include "vp9/encoder/vp9_denoiser.h"
diff --git a/vp9/encoder/vp9_fastssim.c b/vp9/encoder/vp9_fastssim.c
deleted file mode 100644
index f1d408cbe..000000000
--- a/vp9/encoder/vp9_fastssim.c
+++ /dev/null
@@ -1,465 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- *
- * This code was originally written by: Nathan E. Egge, at the Daala
- * project.
- */
-#include <math.h>
-#include <string.h>
-#include "./vpx_config.h"
-#include "./vp9_rtcd.h"
-#include "vp9/encoder/vp9_ssim.h"
-/* TODO(jbb): High bit depth version of this code needed */
-typedef struct fs_level fs_level;
-typedef struct fs_ctx fs_ctx;
-
-#define SSIM_C1 (255 * 255 * 0.01 * 0.01)
-#define SSIM_C2 (255 * 255 * 0.03 * 0.03)
-
-#define FS_MINI(_a, _b) ((_a) < (_b) ? (_a) : (_b))
-#define FS_MAXI(_a, _b) ((_a) > (_b) ? (_a) : (_b))
-
-struct fs_level {
- uint16_t *im1;
- uint16_t *im2;
- double *ssim;
- int w;
- int h;
-};
-
-struct fs_ctx {
- fs_level *level;
- int nlevels;
- unsigned *col_buf;
-};
-
-static void fs_ctx_init(fs_ctx *_ctx, int _w, int _h, int _nlevels) {
- unsigned char *data;
- size_t data_size;
- int lw;
- int lh;
- int l;
- lw = (_w + 1) >> 1;
- lh = (_h + 1) >> 1;
- data_size = _nlevels * sizeof(fs_level)
- + 2 * (lw + 8) * 8 * sizeof(*_ctx->col_buf);
- for (l = 0; l < _nlevels; l++) {
- size_t im_size;
- size_t level_size;
- im_size = lw * (size_t) lh;
- level_size = 2 * im_size * sizeof(*_ctx->level[l].im1);
- level_size += sizeof(*_ctx->level[l].ssim) - 1;
- level_size /= sizeof(*_ctx->level[l].ssim);
- level_size += im_size;
- level_size *= sizeof(*_ctx->level[l].ssim);
- data_size += level_size;
- lw = (lw + 1) >> 1;
- lh = (lh + 1) >> 1;
- }
- data = (unsigned char *) malloc(data_size);
- _ctx->level = (fs_level *) data;
- _ctx->nlevels = _nlevels;
- data += _nlevels * sizeof(*_ctx->level);
- lw = (_w + 1) >> 1;
- lh = (_h + 1) >> 1;
- for (l = 0; l < _nlevels; l++) {
- size_t im_size;
- size_t level_size;
- _ctx->level[l].w = lw;
- _ctx->level[l].h = lh;
- im_size = lw * (size_t) lh;
- level_size = 2 * im_size * sizeof(*_ctx->level[l].im1);
- level_size += sizeof(*_ctx->level[l].ssim) - 1;
- level_size /= sizeof(*_ctx->level[l].ssim);
- level_size *= sizeof(*_ctx->level[l].ssim);
- _ctx->level[l].im1 = (uint16_t *) data;
- _ctx->level[l].im2 = _ctx->level[l].im1 + im_size;
- data += level_size;
- _ctx->level[l].ssim = (double *) data;
- data += im_size * sizeof(*_ctx->level[l].ssim);
- lw = (lw + 1) >> 1;
- lh = (lh + 1) >> 1;
- }
- _ctx->col_buf = (unsigned *) data;
-}
-
-static void fs_ctx_clear(fs_ctx *_ctx) {
- free(_ctx->level);
-}
-
-static void fs_downsample_level(fs_ctx *_ctx, int _l) {
- const uint16_t *src1;
- const uint16_t *src2;
- uint16_t *dst1;
- uint16_t *dst2;
- int w2;
- int h2;
- int w;
- int h;
- int i;
- int j;
- w = _ctx->level[_l].w;
- h = _ctx->level[_l].h;
- dst1 = _ctx->level[_l].im1;
- dst2 = _ctx->level[_l].im2;
- w2 = _ctx->level[_l - 1].w;
- h2 = _ctx->level[_l - 1].h;
- src1 = _ctx->level[_l - 1].im1;
- src2 = _ctx->level[_l - 1].im2;
- for (j = 0; j < h; j++) {
- int j0offs;
- int j1offs;
- j0offs = 2 * j * w2;
- j1offs = FS_MINI(2 * j + 1, h2) * w2;
- for (i = 0; i < w; i++) {
- int i0;
- int i1;
- i0 = 2 * i;
- i1 = FS_MINI(i0 + 1, w2);
- dst1[j * w + i] = src1[j0offs + i0] + src1[j0offs + i1]
- + src1[j1offs + i0] + src1[j1offs + i1];
- dst2[j * w + i] = src2[j0offs + i0] + src2[j0offs + i1]
- + src2[j1offs + i0] + src2[j1offs + i1];
- }
- }
-}
-
-static void fs_downsample_level0(fs_ctx *_ctx, const unsigned char *_src1,
- int _s1ystride, const unsigned char *_src2,
- int _s2ystride, int _w, int _h) {
- uint16_t *dst1;
- uint16_t *dst2;
- int w;
- int h;
- int i;
- int j;
- w = _ctx->level[0].w;
- h = _ctx->level[0].h;
- dst1 = _ctx->level[0].im1;
- dst2 = _ctx->level[0].im2;
- for (j = 0; j < h; j++) {
- int j0;
- int j1;
- j0 = 2 * j;
- j1 = FS_MINI(j0 + 1, _h);
- for (i = 0; i < w; i++) {
- int i0;
- int i1;
- i0 = 2 * i;
- i1 = FS_MINI(i0 + 1, _w);
- dst1[j * w + i] = _src1[j0 * _s1ystride + i0]
- + _src1[j0 * _s1ystride + i1] + _src1[j1 * _s1ystride + i0]
- + _src1[j1 * _s1ystride + i1];
- dst2[j * w + i] = _src2[j0 * _s2ystride + i0]
- + _src2[j0 * _s2ystride + i1] + _src2[j1 * _s2ystride + i0]
- + _src2[j1 * _s2ystride + i1];
- }
- }
-}
-
-static void fs_apply_luminance(fs_ctx *_ctx, int _l) {
- unsigned *col_sums_x;
- unsigned *col_sums_y;
- uint16_t *im1;
- uint16_t *im2;
- double *ssim;
- double c1;
- int w;
- int h;
- int j0offs;
- int j1offs;
- int i;
- int j;
- w = _ctx->level[_l].w;
- h = _ctx->level[_l].h;
- col_sums_x = _ctx->col_buf;
- col_sums_y = col_sums_x + w;
- im1 = _ctx->level[_l].im1;
- im2 = _ctx->level[_l].im2;
- for (i = 0; i < w; i++)
- col_sums_x[i] = 5 * im1[i];
- for (i = 0; i < w; i++)
- col_sums_y[i] = 5 * im2[i];
- for (j = 1; j < 4; j++) {
- j1offs = FS_MINI(j, h - 1) * w;
- for (i = 0; i < w; i++)
- col_sums_x[i] += im1[j1offs + i];
- for (i = 0; i < w; i++)
- col_sums_y[i] += im2[j1offs + i];
- }
- ssim = _ctx->level[_l].ssim;
- c1 = (double) (SSIM_C1 * 4096 * (1 << 4 * _l));
- for (j = 0; j < h; j++) {
- unsigned mux;
- unsigned muy;
- int i0;
- int i1;
- mux = 5 * col_sums_x[0];
- muy = 5 * col_sums_y[0];
- for (i = 1; i < 4; i++) {
- i1 = FS_MINI(i, w - 1);
- mux += col_sums_x[i1];
- muy += col_sums_y[i1];
- }
- for (i = 0; i < w; i++) {
- ssim[j * w + i] *= (2 * mux * (double) muy + c1)
- / (mux * (double) mux + muy * (double) muy + c1);
- if (i + 1 < w) {
- i0 = FS_MAXI(0, i - 4);
- i1 = FS_MINI(i + 4, w - 1);
- mux += col_sums_x[i1] - col_sums_x[i0];
- muy += col_sums_x[i1] - col_sums_x[i0];
- }
- }
- if (j + 1 < h) {
- j0offs = FS_MAXI(0, j - 4) * w;
- for (i = 0; i < w; i++)
- col_sums_x[i] -= im1[j0offs + i];
- for (i = 0; i < w; i++)
- col_sums_y[i] -= im2[j0offs + i];
- j1offs = FS_MINI(j + 4, h - 1) * w;
- for (i = 0; i < w; i++)
- col_sums_x[i] += im1[j1offs + i];
- for (i = 0; i < w; i++)
- col_sums_y[i] += im2[j1offs + i];
- }
- }
-}
-
-#define FS_COL_SET(_col, _joffs, _ioffs) \
- do { \
- unsigned gx; \
- unsigned gy; \
- gx = gx_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \
- gy = gy_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \
- col_sums_gx2[(_col)] = gx * (double)gx; \
- col_sums_gy2[(_col)] = gy * (double)gy; \
- col_sums_gxgy[(_col)] = gx * (double)gy; \
- } \
- while (0)
-
-#define FS_COL_ADD(_col, _joffs, _ioffs) \
- do { \
- unsigned gx; \
- unsigned gy; \
- gx = gx_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \
- gy = gy_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \
- col_sums_gx2[(_col)] += gx * (double)gx; \
- col_sums_gy2[(_col)] += gy * (double)gy; \
- col_sums_gxgy[(_col)] += gx * (double)gy; \
- } \
- while (0)
-
-#define FS_COL_SUB(_col, _joffs, _ioffs) \
- do { \
- unsigned gx; \
- unsigned gy; \
- gx = gx_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \
- gy = gy_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \
- col_sums_gx2[(_col)] -= gx * (double)gx; \
- col_sums_gy2[(_col)] -= gy * (double)gy; \
- col_sums_gxgy[(_col)] -= gx * (double)gy; \
- } \
- while (0)
-
-#define FS_COL_COPY(_col1, _col2) \
- do { \
- col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)]; \
- col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)]; \
- col_sums_gxgy[(_col1)] = col_sums_gxgy[(_col2)]; \
- } \
- while (0)
-
-#define FS_COL_HALVE(_col1, _col2) \
- do { \
- col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)] * 0.5; \
- col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)] * 0.5; \
- col_sums_gxgy[(_col1)] = col_sums_gxgy[(_col2)] * 0.5; \
- } \
- while (0)
-
-#define FS_COL_DOUBLE(_col1, _col2) \
- do { \
- col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)] * 2; \
- col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)] * 2; \
- col_sums_gxgy[(_col1)] = col_sums_gxgy[(_col2)] * 2; \
- } \
- while (0)
-
-static void fs_calc_structure(fs_ctx *_ctx, int _l) {
- uint16_t *im1;
- uint16_t *im2;
- unsigned *gx_buf;
- unsigned *gy_buf;
- double *ssim;
- double col_sums_gx2[8];
- double col_sums_gy2[8];
- double col_sums_gxgy[8];
- double c2;
- int stride;
- int w;
- int h;
- int i;
- int j;
- w = _ctx->level[_l].w;
- h = _ctx->level[_l].h;
- im1 = _ctx->level[_l].im1;
- im2 = _ctx->level[_l].im2;
- ssim = _ctx->level[_l].ssim;
- gx_buf = _ctx->col_buf;
- stride = w + 8;
- gy_buf = gx_buf + 8 * stride;
- memset(gx_buf, 0, 2 * 8 * stride * sizeof(*gx_buf));
- c2 = SSIM_C2 * (1 << 4 * _l) * 16 * 104;
- for (j = 0; j < h + 4; j++) {
- if (j < h - 1) {
- for (i = 0; i < w - 1; i++) {
- unsigned g1;
- unsigned g2;
- unsigned gx;
- unsigned gy;
- g1 = abs(im1[(j + 1) * w + i + 1] - im1[j * w + i]);
- g2 = abs(im1[(j + 1) * w + i] - im1[j * w + i + 1]);
- gx = 4 * FS_MAXI(g1, g2) + FS_MINI(g1, g2);
- g1 = abs(im2[(j + 1) * w + i + 1] - im2[j * w + i]);
- g2 = abs(im2[(j + 1) * w + i] - im2[j * w + i + 1]);
- gy = 4 * FS_MAXI(g1, g2) + FS_MINI(g1, g2);
- gx_buf[(j & 7) * stride + i + 4] = gx;
- gy_buf[(j & 7) * stride + i + 4] = gy;
- }
- } else {
- memset(gx_buf + (j & 7) * stride, 0, stride * sizeof(*gx_buf));
- memset(gy_buf + (j & 7) * stride, 0, stride * sizeof(*gy_buf));
- }
- if (j >= 4) {
- int k;
- col_sums_gx2[3] = col_sums_gx2[2] = col_sums_gx2[1] = col_sums_gx2[0] = 0;
- col_sums_gy2[3] = col_sums_gy2[2] = col_sums_gy2[1] = col_sums_gy2[0] = 0;
- col_sums_gxgy[3] = col_sums_gxgy[2] = col_sums_gxgy[1] =
- col_sums_gxgy[0] = 0;
- for (i = 4; i < 8; i++) {
- FS_COL_SET(i, -1, 0);
- FS_COL_ADD(i, 0, 0);
- for (k = 1; k < 8 - i; k++) {
- FS_COL_DOUBLE(i, i);
- FS_COL_ADD(i, -k - 1, 0);
- FS_COL_ADD(i, k, 0);
- }
- }
- for (i = 0; i < w; i++) {
- double mugx2;
- double mugy2;
- double mugxgy;
- mugx2 = col_sums_gx2[0];
- for (k = 1; k < 8; k++)
- mugx2 += col_sums_gx2[k];
- mugy2 = col_sums_gy2[0];
- for (k = 1; k < 8; k++)
- mugy2 += col_sums_gy2[k];
- mugxgy = col_sums_gxgy[0];
- for (k = 1; k < 8; k++)
- mugxgy += col_sums_gxgy[k];
- ssim[(j - 4) * w + i] = (2 * mugxgy + c2) / (mugx2 + mugy2 + c2);
- if (i + 1 < w) {
- FS_COL_SET(0, -1, 1);
- FS_COL_ADD(0, 0, 1);
- FS_COL_SUB(2, -3, 2);
- FS_COL_SUB(2, 2, 2);
- FS_COL_HALVE(1, 2);
- FS_COL_SUB(3, -4, 3);
- FS_COL_SUB(3, 3, 3);
- FS_COL_HALVE(2, 3);
- FS_COL_COPY(3, 4);
- FS_COL_DOUBLE(4, 5);
- FS_COL_ADD(4, -4, 5);
- FS_COL_ADD(4, 3, 5);
- FS_COL_DOUBLE(5, 6);
- FS_COL_ADD(5, -3, 6);
- FS_COL_ADD(5, 2, 6);
- FS_COL_DOUBLE(6, 7);
- FS_COL_ADD(6, -2, 7);
- FS_COL_ADD(6, 1, 7);
- FS_COL_SET(7, -1, 8);
- FS_COL_ADD(7, 0, 8);
- }
- }
- }
- }
-}
-
-#define FS_NLEVELS (4)
-
-/*These weights were derived from the default weights found in Wang's original
- Matlab implementation: {0.0448, 0.2856, 0.2363, 0.1333}.
- We drop the finest scale and renormalize the rest to sum to 1.*/
-
-static const double FS_WEIGHTS[FS_NLEVELS] = {0.2989654541015625,
- 0.3141326904296875, 0.2473602294921875, 0.1395416259765625};
-
-static double fs_average(fs_ctx *_ctx, int _l) {
- double *ssim;
- double ret;
- int w;
- int h;
- int i;
- int j;
- w = _ctx->level[_l].w;
- h = _ctx->level[_l].h;
- ssim = _ctx->level[_l].ssim;
- ret = 0;
- for (j = 0; j < h; j++)
- for (i = 0; i < w; i++)
- ret += ssim[j * w + i];
- return pow(ret / (w * h), FS_WEIGHTS[_l]);
-}
-
-static double calc_ssim(const unsigned char *_src, int _systride,
- const unsigned char *_dst, int _dystride, int _w, int _h) {
- fs_ctx ctx;
- double ret;
- int l;
- ret = 1;
- fs_ctx_init(&ctx, _w, _h, FS_NLEVELS);
- fs_downsample_level0(&ctx, _src, _systride, _dst, _dystride, _w, _h);
- for (l = 0; l < FS_NLEVELS - 1; l++) {
- fs_calc_structure(&ctx, l);
- ret *= fs_average(&ctx, l);
- fs_downsample_level(&ctx, l + 1);
- }
- fs_calc_structure(&ctx, l);
- fs_apply_luminance(&ctx, l);
- ret *= fs_average(&ctx, l);
- fs_ctx_clear(&ctx);
- return ret;
-}
-
-static double convert_ssim_db(double _ssim, double _weight) {
- return 10 * (log10(_weight) - log10(_weight - _ssim));
-}
-
-double vp9_calc_fastssim(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
- double *ssim_y, double *ssim_u, double *ssim_v) {
- double ssimv;
- vp9_clear_system_state();
-
- *ssim_y = calc_ssim(source->y_buffer, source->y_stride, dest->y_buffer,
- dest->y_stride, source->y_crop_width,
- source->y_crop_height);
-
- *ssim_u = calc_ssim(source->u_buffer, source->uv_stride, dest->u_buffer,
- dest->uv_stride, source->uv_crop_width,
- source->uv_crop_height);
-
- *ssim_v = calc_ssim(source->v_buffer, source->uv_stride, dest->v_buffer,
- dest->uv_stride, source->uv_crop_width,
- source->uv_crop_height);
- ssimv = (*ssim_y) * .8 + .1 * ((*ssim_u) + (*ssim_v));
-
- return convert_ssim_db(ssimv, 1.0);
-}
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index 5caf2cbd8..3c2545452 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -17,13 +17,13 @@
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
+#include "vpx_ports/system_state.h"
#include "vpx_scale/vpx_scale.h"
#include "vpx_scale/yv12config.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconinter.h" // vp9_setup_dst_planes()
-#include "vp9/common/vp9_systemdependent.h"
#include "vp9/encoder/vp9_aq_variance.h"
#include "vp9/encoder/vp9_block.h"
#include "vp9/encoder/vp9_encodeframe.h"
@@ -237,8 +237,7 @@ static void subtract_stats(FIRSTPASS_STATS *section,
#define MIN_ACTIVE_AREA 0.5
#define MAX_ACTIVE_AREA 1.0
static double calculate_active_area(const VP9_COMP *cpi,
- const FIRSTPASS_STATS *this_frame)
-{
+ const FIRSTPASS_STATS *this_frame) {
double active_pct;
active_pct = 1.0 -
@@ -546,7 +545,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
}
#endif
- vp9_clear_system_state();
+ vpx_clear_system_state();
intra_factor = 0.0;
brightness_factor = 0.0;
@@ -657,7 +656,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
const int mb_index = mb_row * cm->mb_cols + mb_col;
#endif
- vp9_clear_system_state();
+ vpx_clear_system_state();
xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
xd->plane[1].dst.buf = new_yv12->u_buffer + recon_uvoffset;
@@ -708,7 +707,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
}
#endif // CONFIG_VP9_HIGHBITDEPTH
- vp9_clear_system_state();
+ vpx_clear_system_state();
log_intra = log(this_error + 1.0);
if (log_intra < 10.0)
intra_factor += 1.0 + ((10.0 - log_intra) * 0.05);
@@ -878,7 +877,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
#endif
if (motion_error <= this_error) {
- vp9_clear_system_state();
+ vpx_clear_system_state();
// Keep a count of cases where the inter and intra were very close
// and very low. This helps with scene cut detection for example in
@@ -1013,7 +1012,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
x->plane[2].src.buf += uv_mb_height * x->plane[1].src.stride -
uv_mb_height * cm->mb_cols;
- vp9_clear_system_state();
+ vpx_clear_system_state();
}
// Clamp the image start to rows/2. This number of rows is discarded top
@@ -1891,7 +1890,7 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
vp9_zero(twopass->gf_group);
}
- vp9_clear_system_state();
+ vpx_clear_system_state();
vp9_zero(next_frame);
// Load stats for the current frame.
@@ -2632,7 +2631,7 @@ void vp9_rc_get_second_pass_params(VP9_COMP *cpi) {
return;
}
- vp9_clear_system_state();
+ vpx_clear_system_state();
if (cpi->oxcf.rc_mode == VPX_Q) {
twopass->active_worst_quality = cpi->oxcf.cq_level;
diff --git a/vp9/encoder/vp9_mbgraph.c b/vp9/encoder/vp9_mbgraph.c
index acbd7dd43..d59f3157a 100644
--- a/vp9/encoder/vp9_mbgraph.c
+++ b/vp9/encoder/vp9_mbgraph.c
@@ -14,12 +14,12 @@
#include "./vpx_dsp_rtcd.h"
#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/system_state.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
-#include "vp9/common/vp9_systemdependent.h"
static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi,
@@ -410,7 +410,7 @@ void vp9_update_mbgraph_stats(VP9_COMP *cpi) {
golden_ref, cpi->Source);
}
- vp9_clear_system_state();
+ vpx_clear_system_state();
separate_arf_mbs(cpi);
}
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index 081b99f9f..aa3e51ceb 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -256,6 +256,27 @@ static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) {
} \
}
+// TODO(yunqingwang): SECOND_LEVEL_CHECKS_BEST is a rewrite of
+// SECOND_LEVEL_CHECKS, and SECOND_LEVEL_CHECKS should be rewritten
+// in the same way later.
+#define SECOND_LEVEL_CHECKS_BEST \
+ { \
+ unsigned int second; \
+ int br0 = br; \
+ int bc0 = bc; \
+ assert(tr == br || tc == bc); \
+ if (tr == br && tc != bc) { \
+ kc = bc - tc; \
+ } else if (tr != br && tc == bc) { \
+ kr = br - tr; \
+ } \
+ CHECK_BETTER(second, br0 + kr, bc0); \
+ CHECK_BETTER(second, br0, bc0 + kc); \
+ if (br0 != br || bc0 != bc) { \
+ CHECK_BETTER(second, br0 + kr, bc0 + kc); \
+ } \
+ }
+
#define SETUP_SUBPEL_SEARCH \
const uint8_t *const z = x->plane[0].src.buf; \
const int src_stride = x->plane[0].src.stride; \
@@ -636,7 +657,6 @@ int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x,
const MACROBLOCKD *xd = &x->e_mbd;
unsigned int besterr = INT_MAX;
unsigned int sse;
- unsigned int whichdir = 0;
int thismse;
const int y_stride = xd->plane[0].pre[0].stride;
const int offset = bestmv->row * y_stride + bestmv->col;
@@ -657,6 +677,7 @@ int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x,
const MV *search_step = search_step_table;
int idx, best_idx = -1;
unsigned int cost_array[5];
+ int kr, kc;
if (!(allow_hp && vp9_use_mv_hp(ref_mv)))
if (round == 3)
@@ -703,8 +724,11 @@ int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x,
}
// Check diagonal sub-pixel position
- tc = bc + (cost_array[0] < cost_array[1] ? -hstep : hstep);
- tr = br + (cost_array[2] < cost_array[3] ? -hstep : hstep);
+ kc = (cost_array[0] <= cost_array[1] ? -hstep : hstep);
+ kr = (cost_array[2] <= cost_array[3] ? -hstep : hstep);
+
+ tc = bc + kc;
+ tr = br + kr;
if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) {
const uint8_t *const pre_address = y + (tr >> 3) * y_stride + (tc >> 3);
MV this_mv = {tr, tc};
@@ -735,8 +759,8 @@ int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x,
bc = tc;
}
- if (iters_per_step > 1)
- SECOND_LEVEL_CHECKS;
+ if (iters_per_step > 1 && best_idx != -1)
+ SECOND_LEVEL_CHECKS_BEST;
tr = br;
tc = bc;
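Taken together, the kc/kr step selection above and the new SECOND_LEVEL_CHECKS_BEST macro implement this pattern: from the current best position, probe one further step along each axis in the directions that won the first-level checks, and probe the diagonal only when an axis probe improved the best. A minimal standalone sketch, with a cost callback standing in for CHECK_BETTER and all names illustrative:

// Sketch of the second-level refinement: kr/kc are the winning step
// directions; cost() plays the role of CHECK_BETTER.
static void second_level_sketch(int *br, int *bc, int kr, int kc,
                                unsigned int (*cost)(int r, int c),
                                unsigned int *best_cost) {
  const int br0 = *br, bc0 = *bc;
  unsigned int c;
  c = cost(br0 + kr, bc0);  // one more step along the row axis
  if (c < *best_cost) { *best_cost = c; *br = br0 + kr; *bc = bc0; }
  c = cost(br0, bc0 + kc);  // one more step along the column axis
  if (c < *best_cost) { *best_cost = c; *br = br0; *bc = bc0 + kc; }
  if (*br != br0 || *bc != bc0) {  // diagonal only if an axis probe won
    c = cost(br0 + kr, bc0 + kc);
    if (c < *best_cost) { *best_cost = c; *br = br0 + kr; *bc = bc0 + kc; }
  }
}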
@@ -1704,7 +1728,7 @@ int vp9_diamond_search_sad_c(const MACROBLOCK *x,
}
}
break;
- };
+ }
#endif
} else if (best_address == in_what) {
(*num00)++;
diff --git a/vp9/encoder/vp9_pickmode.c b/vp9/encoder/vp9_pickmode.c
index 6831be8d5..cc018fcbe 100644
--- a/vp9/encoder/vp9_pickmode.c
+++ b/vp9/encoder/vp9_pickmode.c
@@ -1050,6 +1050,16 @@ static const REF_MODE ref_mode_set[RT_INTER_MODES] = {
{GOLDEN_FRAME, NEARMV},
{GOLDEN_FRAME, NEWMV}
};
+static const REF_MODE ref_mode_set_svc[RT_INTER_MODES] = {
+ {LAST_FRAME, ZEROMV},
+ {GOLDEN_FRAME, ZEROMV},
+ {LAST_FRAME, NEARESTMV},
+ {LAST_FRAME, NEARMV},
+ {GOLDEN_FRAME, NEARESTMV},
+ {GOLDEN_FRAME, NEARMV},
+ {LAST_FRAME, NEWMV},
+ {GOLDEN_FRAME, NEWMV}
+};
// TODO(jingning) placeholder for inter-frame non-RD mode decision.
// this needs various further optimizations. to be continued..
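The SVC table tries the zero-motion candidates for both reference frames before any of the NEAREST/NEAR candidates and defers NEWMV to the very end, an ordering suited to scalable coding, where the golden slot commonly holds an inter-layer or longer-term reference and explicit motion search is the most expensive option.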
@@ -1157,12 +1167,11 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
vp9_denoiser_reset_frame_stats(ctx);
#endif
- if (cpi->rc.frames_since_golden == 0) {
+ if (cpi->rc.frames_since_golden == 0 && !cpi->use_svc) {
usable_ref_frame = LAST_FRAME;
} else {
usable_ref_frame = GOLDEN_FRAME;
}
-
for (ref_frame = LAST_FRAME; ref_frame <= usable_ref_frame; ++ref_frame) {
const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
@@ -1204,15 +1213,19 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int mode_rd_thresh;
int mode_index;
int i;
- PREDICTION_MODE this_mode = ref_mode_set[idx].pred_mode;
int64_t this_sse;
int is_skippable;
int this_early_term = 0;
+ PREDICTION_MODE this_mode = ref_mode_set[idx].pred_mode;
+ if (cpi->use_svc)
+ this_mode = ref_mode_set_svc[idx].pred_mode;
if (!(cpi->sf.inter_mode_mask[bsize] & (1 << this_mode)))
continue;
ref_frame = ref_mode_set[idx].ref_frame;
+ if (cpi->use_svc)
+ ref_frame = ref_mode_set_svc[idx].ref_frame;
if (!(cpi->ref_frame_flags & flag_list[ref_frame]))
continue;
if (const_motion[ref_frame] && this_mode == NEARMV)
@@ -1240,7 +1253,7 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
continue;
if (this_mode == NEWMV) {
- if (ref_frame > LAST_FRAME) {
+ if (ref_frame > LAST_FRAME && !cpi->use_svc) {
int tmp_sad;
int dis, cost_list[5];
@@ -1289,6 +1302,21 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
x->pred_mv_sad[LAST_FRAME] = best_pred_sad;
}
+ if (cpi->use_svc) {
+ if (this_mode == NEWMV && ref_frame == GOLDEN_FRAME &&
+ frame_mv[NEWMV][GOLDEN_FRAME].as_int != INVALID_MV) {
+ const int pre_stride = xd->plane[0].pre[0].stride;
+ const uint8_t * const pre_buf = xd->plane[0].pre[0].buf +
+ (frame_mv[NEWMV][GOLDEN_FRAME].as_mv.row >> 3) * pre_stride +
+ (frame_mv[NEWMV][GOLDEN_FRAME].as_mv.col >> 3);
+ best_pred_sad = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf,
+ x->plane[0].src.stride,
+ pre_buf, pre_stride);
+ x->pred_mv_sad[GOLDEN_FRAME] = best_pred_sad;
+ }
+ }
+
+
if (this_mode != NEARESTMV &&
frame_mv[this_mode][ref_frame].as_int ==
frame_mv[NEARESTMV][ref_frame].as_int)
@@ -1311,7 +1339,8 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
}
if ((this_mode == NEWMV || filter_ref == SWITCHABLE) && pred_filter_search
- && (ref_frame == LAST_FRAME)
+ && (ref_frame == LAST_FRAME ||
+ (ref_frame == GOLDEN_FRAME && cpi->use_svc))
&& (((mbmi->mv[0].as_mv.row | mbmi->mv[0].as_mv.col) & 0x07) != 0)) {
int pf_rate[3];
int64_t pf_dist[3];
diff --git a/vp9/encoder/vp9_psnrhvs.c b/vp9/encoder/vp9_psnrhvs.c
deleted file mode 100644
index 5104b9af6..000000000
--- a/vp9/encoder/vp9_psnrhvs.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- *
- * This code was originally written by: Gregory Maxwell, at the Daala
- * project.
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <math.h>
-
-#include "./vpx_config.h"
-#include "./vp9_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
-#include "vp9/encoder/vp9_ssim.h"
-
-#if !defined(M_PI)
-# define M_PI (3.141592653589793238462643)
-#endif
-#include <string.h>
-
-void od_bin_fdct8x8(tran_low_t *y, int ystride, const int16_t *x, int xstride) {
- (void) xstride;
- vpx_fdct8x8(x, y, ystride);
-}
-
-/* Normalized inverse quantization matrix for 8x8 DCT at the point of
- * transparency. This is not the JPEG based matrix from the paper,
- this one gives a slightly higher MOS agreement.*/
-float csf_y[8][8] = {{1.6193873005, 2.2901594831, 2.08509755623, 1.48366094411,
- 1.00227514334, 0.678296995242, 0.466224900598, 0.3265091542}, {2.2901594831,
- 1.94321815382, 2.04793073064, 1.68731108984, 1.2305666963, 0.868920337363,
- 0.61280991668, 0.436405793551}, {2.08509755623, 2.04793073064,
- 1.34329019223, 1.09205635862, 0.875748795257, 0.670882927016,
- 0.501731932449, 0.372504254596}, {1.48366094411, 1.68731108984,
- 1.09205635862, 0.772819797575, 0.605636379554, 0.48309405692,
- 0.380429446972, 0.295774038565}, {1.00227514334, 1.2305666963,
- 0.875748795257, 0.605636379554, 0.448996256676, 0.352889268808,
- 0.283006984131, 0.226951348204}, {0.678296995242, 0.868920337363,
- 0.670882927016, 0.48309405692, 0.352889268808, 0.27032073436,
- 0.215017739696, 0.17408067321}, {0.466224900598, 0.61280991668,
- 0.501731932449, 0.380429446972, 0.283006984131, 0.215017739696,
- 0.168869545842, 0.136153931001}, {0.3265091542, 0.436405793551,
- 0.372504254596, 0.295774038565, 0.226951348204, 0.17408067321,
- 0.136153931001, 0.109083846276}};
-float csf_cb420[8][8] = {
- {1.91113096927, 2.46074210438, 1.18284184739, 1.14982565193, 1.05017074788,
- 0.898018824055, 0.74725392039, 0.615105596242}, {2.46074210438,
- 1.58529308355, 1.21363250036, 1.38190029285, 1.33100189972,
- 1.17428548929, 0.996404342439, 0.830890433625}, {1.18284184739,
- 1.21363250036, 0.978712413627, 1.02624506078, 1.03145147362,
- 0.960060382087, 0.849823426169, 0.731221236837}, {1.14982565193,
- 1.38190029285, 1.02624506078, 0.861317501629, 0.801821139099,
- 0.751437590932, 0.685398513368, 0.608694761374}, {1.05017074788,
- 1.33100189972, 1.03145147362, 0.801821139099, 0.676555426187,
- 0.605503172737, 0.55002013668, 0.495804539034}, {0.898018824055,
- 1.17428548929, 0.960060382087, 0.751437590932, 0.605503172737,
- 0.514674450957, 0.454353482512, 0.407050308965}, {0.74725392039,
- 0.996404342439, 0.849823426169, 0.685398513368, 0.55002013668,
- 0.454353482512, 0.389234902883, 0.342353999733}, {0.615105596242,
- 0.830890433625, 0.731221236837, 0.608694761374, 0.495804539034,
- 0.407050308965, 0.342353999733, 0.295530605237}};
-float csf_cr420[8][8] = {
- {2.03871978502, 2.62502345193, 1.26180942886, 1.11019789803, 1.01397751469,
- 0.867069376285, 0.721500455585, 0.593906509971}, {2.62502345193,
- 1.69112867013, 1.17180569821, 1.3342742857, 1.28513006198,
- 1.13381474809, 0.962064122248, 0.802254508198}, {1.26180942886,
- 1.17180569821, 0.944981930573, 0.990876405848, 0.995903384143,
- 0.926972725286, 0.820534991409, 0.706020324706}, {1.11019789803,
- 1.3342742857, 0.990876405848, 0.831632933426, 0.77418706195,
- 0.725539939514, 0.661776842059, 0.587716619023}, {1.01397751469,
- 1.28513006198, 0.995903384143, 0.77418706195, 0.653238524286,
- 0.584635025748, 0.531064164893, 0.478717061273}, {0.867069376285,
- 1.13381474809, 0.926972725286, 0.725539939514, 0.584635025748,
- 0.496936637883, 0.438694579826, 0.393021669543}, {0.721500455585,
- 0.962064122248, 0.820534991409, 0.661776842059, 0.531064164893,
- 0.438694579826, 0.375820256136, 0.330555063063}, {0.593906509971,
- 0.802254508198, 0.706020324706, 0.587716619023, 0.478717061273,
- 0.393021669543, 0.330555063063, 0.285345396658}};
-
-static double convert_score_db(double _score, double _weight) {
- return 10 * (log10(255 * 255) - log10(_weight * _score));
-}
-
-static double calc_psnrhvs(const unsigned char *_src, int _systride,
- const unsigned char *_dst, int _dystride,
- double _par, int _w, int _h, int _step,
- float _csf[8][8]) {
- float ret;
- int16_t dct_s[8 * 8], dct_d[8 * 8];
- tran_low_t dct_s_coef[8 * 8], dct_d_coef[8 * 8];
- float mask[8][8];
- int pixels;
- int x;
- int y;
- (void) _par;
- ret = pixels = 0;
- /*In the PSNR-HVS-M paper[1] the authors describe the construction of
- their masking table as "we have used the quantization table for the
- color component Y of JPEG [6] that has been also obtained on the
- basis of CSF. Note that the values in quantization table JPEG have
- been normalized and then squared." Their CSF matrix (from PSNR-HVS)
- was also constructed from the JPEG matrices. I cannot find any obvious
- normalization scheme that reproduces their table, but if I multiply their
- CSF by 0.38857 and square the result I get their masking table.
- I have no idea where this constant comes from, but deviating from it
- too greatly hurts MOS agreement.
-
- [1] Nikolay Ponomarenko, Flavia Silvestri, Karen Egiazarian, Marco Carli,
- Jaakko Astola, Vladimir Lukin, "On between-coefficient contrast masking
- of DCT basis functions", CD-ROM Proceedings of the Third
- International Workshop on Video Processing and Quality Metrics for Consumer
- Electronics VPQM-07, Scottsdale, Arizona, USA, 25-26 January, 2007, 4 p.*/
- for (x = 0; x < 8; x++)
- for (y = 0; y < 8; y++)
- mask[x][y] = (_csf[x][y] * 0.3885746225901003)
- * (_csf[x][y] * 0.3885746225901003);
- for (y = 0; y < _h - 7; y += _step) {
- for (x = 0; x < _w - 7; x += _step) {
- int i;
- int j;
- float s_means[4];
- float d_means[4];
- float s_vars[4];
- float d_vars[4];
- float s_gmean = 0;
- float d_gmean = 0;
- float s_gvar = 0;
- float d_gvar = 0;
- float s_mask = 0;
- float d_mask = 0;
- for (i = 0; i < 4; i++)
- s_means[i] = d_means[i] = s_vars[i] = d_vars[i] = 0;
- for (i = 0; i < 8; i++) {
- for (j = 0; j < 8; j++) {
- int sub = ((i & 12) >> 2) + ((j & 12) >> 1);
- dct_s[i * 8 + j] = _src[(y + i) * _systride + (j + x)];
- dct_d[i * 8 + j] = _dst[(y + i) * _dystride + (j + x)];
- s_gmean += dct_s[i * 8 + j];
- d_gmean += dct_d[i * 8 + j];
- s_means[sub] += dct_s[i * 8 + j];
- d_means[sub] += dct_d[i * 8 + j];
- }
- }
- s_gmean /= 64.f;
- d_gmean /= 64.f;
- for (i = 0; i < 4; i++)
- s_means[i] /= 16.f;
- for (i = 0; i < 4; i++)
- d_means[i] /= 16.f;
- for (i = 0; i < 8; i++) {
- for (j = 0; j < 8; j++) {
- int sub = ((i & 12) >> 2) + ((j & 12) >> 1);
- s_gvar += (dct_s[i * 8 + j] - s_gmean) * (dct_s[i * 8 + j] - s_gmean);
- d_gvar += (dct_d[i * 8 + j] - d_gmean) * (dct_d[i * 8 + j] - d_gmean);
- s_vars[sub] += (dct_s[i * 8 + j] - s_means[sub])
- * (dct_s[i * 8 + j] - s_means[sub]);
- d_vars[sub] += (dct_d[i * 8 + j] - d_means[sub])
- * (dct_d[i * 8 + j] - d_means[sub]);
- }
- }
- s_gvar *= 1 / 63.f * 64;
- d_gvar *= 1 / 63.f * 64;
- for (i = 0; i < 4; i++)
- s_vars[i] *= 1 / 15.f * 16;
- for (i = 0; i < 4; i++)
- d_vars[i] *= 1 / 15.f * 16;
- if (s_gvar > 0)
- s_gvar = (s_vars[0] + s_vars[1] + s_vars[2] + s_vars[3]) / s_gvar;
- if (d_gvar > 0)
- d_gvar = (d_vars[0] + d_vars[1] + d_vars[2] + d_vars[3]) / d_gvar;
- od_bin_fdct8x8(dct_s_coef, 8, dct_s, 8);
- od_bin_fdct8x8(dct_d_coef, 8, dct_d, 8);
- for (i = 0; i < 8; i++)
- for (j = (i == 0); j < 8; j++)
- s_mask += dct_s_coef[i * 8 + j] * dct_s_coef[i * 8 + j] * mask[i][j];
- for (i = 0; i < 8; i++)
- for (j = (i == 0); j < 8; j++)
- d_mask += dct_d_coef[i * 8 + j] * dct_d_coef[i * 8 + j] * mask[i][j];
- s_mask = sqrt(s_mask * s_gvar) / 32.f;
- d_mask = sqrt(d_mask * d_gvar) / 32.f;
- if (d_mask > s_mask)
- s_mask = d_mask;
- for (i = 0; i < 8; i++) {
- for (j = 0; j < 8; j++) {
- float err;
- err = fabs(dct_s_coef[i * 8 + j] - dct_d_coef[i * 8 + j]);
- if (i != 0 || j != 0)
- err = err < s_mask / mask[i][j] ? 0 : err - s_mask / mask[i][j];
- ret += (err * _csf[i][j]) * (err * _csf[i][j]);
- pixels++;
- }
- }
- }
- }
- ret /= pixels;
- return ret;
-}
-double vp9_psnrhvs(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
- double *y_psnrhvs, double *u_psnrhvs, double *v_psnrhvs) {
- double psnrhvs;
- double par = 1.0;
- int step = 7;
- vp9_clear_system_state();
- *y_psnrhvs = calc_psnrhvs(source->y_buffer, source->y_stride, dest->y_buffer,
- dest->y_stride, par, source->y_crop_width,
- source->y_crop_height, step, csf_y);
-
- *u_psnrhvs = calc_psnrhvs(source->u_buffer, source->uv_stride, dest->u_buffer,
- dest->uv_stride, par, source->uv_crop_width,
- source->uv_crop_height, step, csf_cb420);
-
- *v_psnrhvs = calc_psnrhvs(source->v_buffer, source->uv_stride, dest->v_buffer,
- dest->uv_stride, par, source->uv_crop_width,
- source->uv_crop_height, step, csf_cr420);
- psnrhvs = (*y_psnrhvs) * .8 + .1 * ((*u_psnrhvs) + (*v_psnrhvs));
-
- return convert_score_db(psnrhvs, 1.0);
-}
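
Note: for readers tracking where this logic went after its removal from vp9/, the core of the metric is small: the masking table is the CSF scaled by the empirical constant 0.3885746225901003 and squared, and the final score is converted to decibels against the 8-bit peak. A sketch of those two steps, using the constants and weights from the deleted file (the helper names build_mask and score_to_db are hypothetical):

    #include <math.h>

    /* Sketch of the masking-table construction from the removed
     * vp9_psnrhvs.c; csf is one of the 8x8 CSF tables above. */
    static void build_mask(const float csf[8][8], float mask[8][8]) {
      int x, y;
      for (x = 0; x < 8; x++)
        for (y = 0; y < 8; y++)
          mask[x][y] = (csf[x][y] * 0.3885746225901003f) *
                       (csf[x][y] * 0.3885746225901003f);
    }

    /* 10 * log10(255^2 / (weight * score)), as in convert_score_db(). */
    static double score_to_db(double score, double weight) {
      return 10 * (log10(255 * 255) - log10(weight * score));
    }

The frame-level score then folds the planes with the same 0.8/0.1/0.1 luma/chroma weighting seen at the end of the deleted vp9_psnrhvs().
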
diff --git a/vp9/encoder/vp9_quantize.c b/vp9/encoder/vp9_quantize.c
index d53d95d29..cb3e21a56 100644
--- a/vp9/encoder/vp9_quantize.c
+++ b/vp9/encoder/vp9_quantize.c
@@ -199,7 +199,7 @@ void vp9_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vp9_highbd_quantize_b(BLOCK_OFFSET(p->coeff, block),
+ vpx_highbd_quantize_b(BLOCK_OFFSET(p->coeff, block),
16, x->skip_block,
p->zbin, p->round, p->quant, p->quant_shift,
BLOCK_OFFSET(p->qcoeff, block),
@@ -209,7 +209,7 @@ void vp9_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
return;
}
#endif
- vp9_quantize_b(BLOCK_OFFSET(p->coeff, block),
+ vpx_quantize_b(BLOCK_OFFSET(p->coeff, block),
16, x->skip_block,
p->zbin, p->round, p->quant, p->quant_shift,
BLOCK_OFFSET(p->qcoeff, block),
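
Note: the hunk above only retargets the calls from the vp9_-prefixed quantizers to the shared vpx_dsp implementations; the argument lists are unchanged. For orientation, the b-style quantizer is a dead-zone scalar quantizer: coefficients under the zero-bin threshold are forced to zero, the rest are rounded, scaled, and dequantized. Below is a deliberately simplified sketch of that idea, not the library's exact fixed-point pipeline (the real vpx_quantize_b() uses separate DC/AC parameters, a two-stage quant/quant_shift multiply, and tracks eob in scan order):

    #include <stdint.h>
    #include <stdlib.h>

    /* Simplified dead-zone quantizer sketch. zbin/round/quant/dequant
     * play the same roles as the identically named vpx_quantize_b()
     * parameters; quant_q16 is a Q16 fixed-point multiplier. */
    static void quantize_block_sketch(const int16_t *coeff, int n,
                                      int zbin, int round, int quant_q16,
                                      int dequant,
                                      int16_t *qcoeff, int16_t *dqcoeff) {
      int i;
      for (i = 0; i < n; i++) {
        const int abs_c = abs(coeff[i]);
        int q = 0;
        if (abs_c >= zbin)  /* dead zone: small coefficients become zero */
          q = (int)(((int64_t)(abs_c + round) * quant_q16) >> 16);
        qcoeff[i] = (int16_t)(coeff[i] < 0 ? -q : q);
        dqcoeff[i] = (int16_t)(qcoeff[i] * dequant);
      }
    }
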
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index 7427ccfb9..a0b3f8a75 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -17,6 +17,7 @@
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
+#include "vpx_ports/system_state.h"
#include "vp9/common/vp9_alloccommon.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
@@ -24,7 +25,6 @@
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_seg_common.h"
-#include "vp9/common/vp9_systemdependent.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_ratectrl.h"
@@ -455,7 +455,7 @@ void vp9_rc_update_rate_correction_factors(VP9_COMP *cpi) {
return;
// Clear down mmx registers to allow floating point in what follows
- vp9_clear_system_state();
+ vpx_clear_system_state();
// Work out how big we would have expected the frame to be at this Q given
// the current correction factor.
@@ -635,6 +635,7 @@ static int calc_active_worst_quality_one_pass_cbr(const VP9_COMP *cpi) {
int adjustment = 0;
int active_worst_quality;
int ambient_qp;
+ unsigned int num_frames_weight_key = 5 * cpi->svc.number_temporal_layers;
if (cm->frame_type == KEY_FRAME)
return rc->worst_quality;
// For ambient_qp we use minimum of avg_frame_qindex[KEY_FRAME/INTER_FRAME]
@@ -642,7 +643,7 @@ static int calc_active_worst_quality_one_pass_cbr(const VP9_COMP *cpi) {
// to worst_quality and updated with (3/4, 1/4) average in postencode_update.
// So for first few frames following key, the qp of that key frame is weighted
// into the active_worst_quality setting.
- ambient_qp = (cm->current_video_frame < 5) ?
+ ambient_qp = (cm->current_video_frame < num_frames_weight_key) ?
MIN(rc->avg_frame_qindex[INTER_FRAME], rc->avg_frame_qindex[KEY_FRAME]) :
rc->avg_frame_qindex[INTER_FRAME];
active_worst_quality = MIN(rc->worst_quality,
@@ -764,7 +765,7 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP9_COMP *cpi,
!rc->this_key_frame_forced &&
!(cm->current_video_frame == 0)) {
int qdelta = 0;
- vp9_clear_system_state();
+ vpx_clear_system_state();
qdelta = vp9_compute_qdelta_by_rate(&cpi->rc, cm->frame_type,
active_worst_quality, 2.0,
cm->bit_depth);
@@ -823,7 +824,6 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi,
ASSIGN_MINQ_TABLE(cm->bit_depth, inter_minq);
if (frame_is_intra_only(cm)) {
-
// Handle the special case for key frames forced when we have reached
// the maximum key frame interval. Here force the Q to a range
// based on the ambient Q to reduce the risk of popping.
@@ -915,7 +915,7 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi,
#if LIMIT_QRANGE_FOR_ALTREF_AND_KEY
{
int qdelta = 0;
- vp9_clear_system_state();
+ vpx_clear_system_state();
// Limit Q range for the adaptive loop.
if (cm->frame_type == KEY_FRAME &&
@@ -1109,7 +1109,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi,
}
#if LIMIT_QRANGE_FOR_ALTREF_AND_KEY
- vp9_clear_system_state();
+ vpx_clear_system_state();
// Static forced key frames Q restrictions dealt with elsewhere.
if (!((frame_is_intra_only(cm) || vp9_is_upper_layer_key_frame(cpi))) ||
!rc->this_key_frame_forced ||
@@ -1286,6 +1286,18 @@ void vp9_rc_postencode_update(VP9_COMP *cpi, uint64_t bytes_used) {
rc->last_q[KEY_FRAME] = qindex;
rc->avg_frame_qindex[KEY_FRAME] =
ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[KEY_FRAME] + qindex, 2);
+ if (cpi->use_svc) {
+ int i = 0;
+ SVC *svc = &cpi->svc;
+ for (i = 0; i < svc->number_temporal_layers; ++i) {
+ const int layer = LAYER_IDS_TO_IDX(svc->spatial_layer_id, i,
+ svc->number_temporal_layers);
+ LAYER_CONTEXT *lc = &svc->layer_context[layer];
+ RATE_CONTROL *lrc = &lc->rc;
+ lrc->last_q[KEY_FRAME] = rc->last_q[KEY_FRAME];
+ lrc->avg_frame_qindex[KEY_FRAME] = rc->avg_frame_qindex[KEY_FRAME];
+ }
+ }
} else {
if (rc->is_src_frame_alt_ref ||
!(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) ||
@@ -1339,13 +1351,15 @@ void vp9_rc_postencode_update(VP9_COMP *cpi, uint64_t bytes_used) {
rc->total_target_vs_actual = rc->total_actual_bits - rc->total_target_bits;
- if (is_altref_enabled(cpi) && cpi->refresh_alt_ref_frame &&
- (cm->frame_type != KEY_FRAME))
- // Update the alternate reference frame stats as appropriate.
- update_alt_ref_frame_stats(cpi);
- else
- // Update the Golden frame stats as appropriate.
- update_golden_frame_stats(cpi);
+ if (!cpi->use_svc) {
+ if (is_altref_enabled(cpi) && cpi->refresh_alt_ref_frame &&
+ (cm->frame_type != KEY_FRAME))
+ // Update the alternate reference frame stats as appropriate.
+ update_alt_ref_frame_stats(cpi);
+ else
+ // Update the Golden frame stats as appropriate.
+ update_golden_frame_stats(cpi);
+ }
if (cm->frame_type == KEY_FRAME)
rc->frames_since_key = 0;
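
Note: two of the rate-control changes above interact. The "frames since key" window used to weight the key-frame Q into ambient_qp now scales with the number of temporal layers, and on key frames the per-layer rate-control contexts are synchronized with the base key-frame Q stats. A compact sketch of the ambient-QP selection under the new rule (field names follow the patch; the standalone function is hypothetical):

    /* Sketch of the ambient-QP choice from
     * calc_active_worst_quality_one_pass_cbr() after this change. */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static int ambient_qp_sketch(unsigned int current_video_frame,
                                 int number_temporal_layers,
                                 int avg_qindex_inter, int avg_qindex_key) {
      /* With more temporal layers, key frames recur only at the base
       * layer, so the key-frame Q is blended in over a longer window. */
      const unsigned int num_frames_weight_key =
          5 * number_temporal_layers;
      return (current_video_frame < num_frames_weight_key)
                 ? MIN(avg_qindex_inter, avg_qindex_key)
                 : avg_qindex_inter;
    }
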
diff --git a/vp9/encoder/vp9_rd.c b/vp9/encoder/vp9_rd.c
index 3f6de42e3..3ee6fbec3 100644
--- a/vp9/encoder/vp9_rd.c
+++ b/vp9/encoder/vp9_rd.c
@@ -15,7 +15,9 @@
#include "./vp9_rtcd.h"
#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/bitops.h"
#include "vpx_ports/mem.h"
+#include "vpx_ports/system_state.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
@@ -26,7 +28,6 @@
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_seg_common.h"
-#include "vp9/common/vp9_systemdependent.h"
#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_encodemb.h"
@@ -268,7 +269,7 @@ void vp9_initialize_rd_consts(VP9_COMP *cpi) {
RD_OPT *const rd = &cpi->rd;
int i;
- vp9_clear_system_state();
+ vpx_clear_system_state();
rd->RDDIV = RDDIV_BITS; // In bits (to multiply D by 128).
rd->RDMULT = vp9_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index b6cb8b57f..9f3873600 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -16,6 +16,7 @@
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
+#include "vpx_ports/system_state.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
@@ -28,7 +29,6 @@
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_scan.h"
#include "vp9/common/vp9_seg_common.h"
-#include "vp9/common/vp9_systemdependent.h"
#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_encodemb.h"
@@ -1045,7 +1045,6 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
/* Y Search for intra prediction mode */
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
-
if (cpi->sf.use_nonrd_pick_mode) {
// These speed features are turned on in hybrid non-RD and RD mode
// for key frame coding in the context of real-time setting.
@@ -2489,9 +2488,8 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
}
// We don't include the cost of the second reference here, because there
- // are only three options: Last/Golden, ARF/Last or Golden/ARF, or in other
- // words if you present them in that order, the second one is always known
- // if the first is known.
+ // are only two options: Last/ARF or Golden/ARF; the second one is always
+ // known: it is ARF.
//
// Under some circumstances we discount the cost of new mv mode to encourage
// initiation of a motion field.
diff --git a/vp9/encoder/vp9_ssim.c b/vp9/encoder/vp9_ssim.c
deleted file mode 100644
index 172de5d1d..000000000
--- a/vp9/encoder/vp9_ssim.c
+++ /dev/null
@@ -1,500 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <math.h>
-#include "./vp9_rtcd.h"
-#include "vpx_ports/mem.h"
-#include "vp9/encoder/vp9_ssim.h"
-
-void vp9_ssim_parms_16x16_c(uint8_t *s, int sp, uint8_t *r,
- int rp, unsigned long *sum_s, unsigned long *sum_r,
- unsigned long *sum_sq_s, unsigned long *sum_sq_r,
- unsigned long *sum_sxr) {
- int i, j;
- for (i = 0; i < 16; i++, s += sp, r += rp) {
- for (j = 0; j < 16; j++) {
- *sum_s += s[j];
- *sum_r += r[j];
- *sum_sq_s += s[j] * s[j];
- *sum_sq_r += r[j] * r[j];
- *sum_sxr += s[j] * r[j];
- }
- }
-}
-void vp9_ssim_parms_8x8_c(uint8_t *s, int sp, uint8_t *r, int rp,
- unsigned long *sum_s, unsigned long *sum_r,
- unsigned long *sum_sq_s, unsigned long *sum_sq_r,
- unsigned long *sum_sxr) {
- int i, j;
- for (i = 0; i < 8; i++, s += sp, r += rp) {
- for (j = 0; j < 8; j++) {
- *sum_s += s[j];
- *sum_r += r[j];
- *sum_sq_s += s[j] * s[j];
- *sum_sq_r += r[j] * r[j];
- *sum_sxr += s[j] * r[j];
- }
- }
-}
-
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp9_highbd_ssim_parms_8x8_c(uint16_t *s, int sp, uint16_t *r, int rp,
- uint32_t *sum_s, uint32_t *sum_r,
- uint32_t *sum_sq_s, uint32_t *sum_sq_r,
- uint32_t *sum_sxr) {
- int i, j;
- for (i = 0; i < 8; i++, s += sp, r += rp) {
- for (j = 0; j < 8; j++) {
- *sum_s += s[j];
- *sum_r += r[j];
- *sum_sq_s += s[j] * s[j];
- *sum_sq_r += r[j] * r[j];
- *sum_sxr += s[j] * r[j];
- }
- }
-}
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
-static const int64_t cc1 = 26634; // 64^2*(.01*255)^2
-static const int64_t cc2 = 239708; // 64^2*(.03*255)^2
-
-static double similarity(unsigned long sum_s, unsigned long sum_r,
- unsigned long sum_sq_s, unsigned long sum_sq_r,
- unsigned long sum_sxr, int count) {
- int64_t ssim_n, ssim_d;
- int64_t c1, c2;
-
- // scale the constants by number of pixels
- c1 = (cc1 * count * count) >> 12;
- c2 = (cc2 * count * count) >> 12;
-
- ssim_n = (2 * sum_s * sum_r + c1) * ((int64_t) 2 * count * sum_sxr -
- (int64_t) 2 * sum_s * sum_r + c2);
-
- ssim_d = (sum_s * sum_s + sum_r * sum_r + c1) *
- ((int64_t)count * sum_sq_s - (int64_t)sum_s * sum_s +
- (int64_t)count * sum_sq_r - (int64_t) sum_r * sum_r + c2);
-
- return ssim_n * 1.0 / ssim_d;
-}
-
-static double ssim_8x8(uint8_t *s, int sp, uint8_t *r, int rp) {
- unsigned long sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
- vp9_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
- &sum_sxr);
- return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64);
-}
-
-#if CONFIG_VP9_HIGHBITDEPTH
-static double highbd_ssim_8x8(uint16_t *s, int sp, uint16_t *r, int rp,
- unsigned int bd) {
- uint32_t sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
- const int oshift = bd - 8;
- vp9_highbd_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
- &sum_sxr);
- return similarity(sum_s >> oshift,
- sum_r >> oshift,
- sum_sq_s >> (2 * oshift),
- sum_sq_r >> (2 * oshift),
- sum_sxr >> (2 * oshift),
- 64);
-}
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
-// We are using an 8x8 moving window, with each 8x8 window starting on the
-// 4x4 pixel grid. This arrangement allows the windows to overlap
-// block boundaries to penalize blocking artifacts.
-double vp9_ssim2(uint8_t *img1, uint8_t *img2, int stride_img1,
- int stride_img2, int width, int height) {
- int i, j;
- int samples = 0;
- double ssim_total = 0;
-
-  // sample points start at each 4x4 location
- for (i = 0; i <= height - 8;
- i += 4, img1 += stride_img1 * 4, img2 += stride_img2 * 4) {
- for (j = 0; j <= width - 8; j += 4) {
- double v = ssim_8x8(img1 + j, stride_img1, img2 + j, stride_img2);
- ssim_total += v;
- samples++;
- }
- }
- ssim_total /= samples;
- return ssim_total;
-}
-
-#if CONFIG_VP9_HIGHBITDEPTH
-double vp9_highbd_ssim2(uint8_t *img1, uint8_t *img2, int stride_img1,
- int stride_img2, int width, int height,
- unsigned int bd) {
- int i, j;
- int samples = 0;
- double ssim_total = 0;
-
-  // sample points start at each 4x4 location
- for (i = 0; i <= height - 8;
- i += 4, img1 += stride_img1 * 4, img2 += stride_img2 * 4) {
- for (j = 0; j <= width - 8; j += 4) {
- double v = highbd_ssim_8x8(CONVERT_TO_SHORTPTR(img1 + j), stride_img1,
- CONVERT_TO_SHORTPTR(img2 + j), stride_img2,
- bd);
- ssim_total += v;
- samples++;
- }
- }
- ssim_total /= samples;
- return ssim_total;
-}
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
-double vp9_calc_ssim(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
- double *weight) {
- double a, b, c;
- double ssimv;
-
- a = vp9_ssim2(source->y_buffer, dest->y_buffer,
- source->y_stride, dest->y_stride,
- source->y_crop_width, source->y_crop_height);
-
- b = vp9_ssim2(source->u_buffer, dest->u_buffer,
- source->uv_stride, dest->uv_stride,
- source->uv_crop_width, source->uv_crop_height);
-
- c = vp9_ssim2(source->v_buffer, dest->v_buffer,
- source->uv_stride, dest->uv_stride,
- source->uv_crop_width, source->uv_crop_height);
-
- ssimv = a * .8 + .1 * (b + c);
-
- *weight = 1;
-
- return ssimv;
-}
-
-double vp9_calc_ssimg(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
- double *ssim_y, double *ssim_u, double *ssim_v) {
- double ssim_all = 0;
- double a, b, c;
-
- a = vp9_ssim2(source->y_buffer, dest->y_buffer,
- source->y_stride, dest->y_stride,
- source->y_crop_width, source->y_crop_height);
-
- b = vp9_ssim2(source->u_buffer, dest->u_buffer,
- source->uv_stride, dest->uv_stride,
- source->uv_crop_width, source->uv_crop_height);
-
- c = vp9_ssim2(source->v_buffer, dest->v_buffer,
- source->uv_stride, dest->uv_stride,
- source->uv_crop_width, source->uv_crop_height);
- *ssim_y = a;
- *ssim_u = b;
- *ssim_v = c;
- ssim_all = (a * 4 + b + c) / 6;
-
- return ssim_all;
-}
-
-// traditional ssim as per: http://en.wikipedia.org/wiki/Structural_similarity
-//
-// Reworking the math:
-//
-// ssim(x,y) = (2*mean(x)*mean(y) + c1)*(2*cov(x,y)+c2) /
-// ((mean(x)^2+mean(y)^2+c1)*(var(x)+var(y)+c2))
-//
-// mean(x) = sum(x) / n
-//
-// cov(x,y) = (n*sum(xi*yi)-sum(x)*sum(y))/(n*n)
-//
-// var(x) = (n*sum(xi*xi)-sum(xi)*sum(xi))/(n*n)
-//
-// ssim(x,y) =
-// (2*sum(x)*sum(y)/(n*n) + c1)*(2*(n*sum(xi*yi)-sum(x)*sum(y))/(n*n)+c2) /
-// (((sum(x)*sum(x)+sum(y)*sum(y))/(n*n) +c1) *
-// ((n*sum(xi*xi) - sum(xi)*sum(xi))/(n*n)+
-// (n*sum(yi*yi) - sum(yi)*sum(yi))/(n*n)+c2)))
-//
-// factoring out n*n
-//
-// ssim(x,y) =
-// (2*sum(x)*sum(y) + n*n*c1)*(2*(n*sum(xi*yi)-sum(x)*sum(y))+n*n*c2) /
-// (((sum(x)*sum(x)+sum(y)*sum(y)) + n*n*c1) *
-// (n*sum(xi*xi)-sum(xi)*sum(xi)+n*sum(yi*yi)-sum(yi)*sum(yi)+n*n*c2))
-//
-// Replace c1 with n*n * c1 for the final step that leads to this code:
-// The final step scales by 12 bits so we don't lose precision in the constants.
-
-double ssimv_similarity(Ssimv *sv, int64_t n) {
- // Scale the constants by number of pixels.
- const int64_t c1 = (cc1 * n * n) >> 12;
- const int64_t c2 = (cc2 * n * n) >> 12;
-
- const double l = 1.0 * (2 * sv->sum_s * sv->sum_r + c1) /
- (sv->sum_s * sv->sum_s + sv->sum_r * sv->sum_r + c1);
-
- // Since these variables are unsigned sums, convert to double so
- // math is done in double arithmetic.
- const double v = (2.0 * n * sv->sum_sxr - 2 * sv->sum_s * sv->sum_r + c2)
- / (n * sv->sum_sq_s - sv->sum_s * sv->sum_s + n * sv->sum_sq_r
- - sv->sum_r * sv->sum_r + c2);
-
- return l * v;
-}
-
-// The first term of the ssim metric is a luminance factor.
-//
-// (2*mean(x)*mean(y) + c1)/ (mean(x)^2+mean(y)^2+c1)
-//
-// This luminance factor is very sensitive at the dark end of the luminance
-// range and almost completely insensitive at the white end. Compare two
-// pairs: for (1,3) the term gives 2*1*3/(1+9) = .60, while for (250,252)
-// it gives 2*250*252/(250^2+252^2) => .99999997.
-//
-// As a result, this tweaked version of the calculation takes luminance as
-// a percentage off from the peak possible value:
-//
-// 255 * 255 - (sum_s - sum_r) / count * (sum_s - sum_r) / count
-//
-double ssimv_similarity2(Ssimv *sv, int64_t n) {
- // Scale the constants by number of pixels.
- const int64_t c1 = (cc1 * n * n) >> 12;
- const int64_t c2 = (cc2 * n * n) >> 12;
-
- const double mean_diff = (1.0 * sv->sum_s - sv->sum_r) / n;
- const double l = (255 * 255 - mean_diff * mean_diff + c1) / (255 * 255 + c1);
-
-  // Since these variables are unsigned sums, convert to double so
- // math is done in double arithmetic.
- const double v = (2.0 * n * sv->sum_sxr - 2 * sv->sum_s * sv->sum_r + c2)
- / (n * sv->sum_sq_s - sv->sum_s * sv->sum_s +
- n * sv->sum_sq_r - sv->sum_r * sv->sum_r + c2);
-
- return l * v;
-}
-void ssimv_parms(uint8_t *img1, int img1_pitch, uint8_t *img2, int img2_pitch,
- Ssimv *sv) {
- vp9_ssim_parms_8x8(img1, img1_pitch, img2, img2_pitch,
- &sv->sum_s, &sv->sum_r, &sv->sum_sq_s, &sv->sum_sq_r,
- &sv->sum_sxr);
-}
-
-double vp9_get_ssim_metrics(uint8_t *img1, int img1_pitch,
- uint8_t *img2, int img2_pitch,
- int width, int height,
- Ssimv *sv2, Metrics *m,
- int do_inconsistency) {
- double dssim_total = 0;
- double ssim_total = 0;
- double ssim2_total = 0;
- double inconsistency_total = 0;
- int i, j;
- int c = 0;
- double norm;
- double old_ssim_total = 0;
- vp9_clear_system_state();
-  // We can sample points as frequently as we like; start with 1 per 4x4.
- for (i = 0; i < height; i += 4,
- img1 += img1_pitch * 4, img2 += img2_pitch * 4) {
- for (j = 0; j < width; j += 4, ++c) {
- Ssimv sv = {0};
- double ssim;
- double ssim2;
- double dssim;
- uint32_t var_new;
- uint32_t var_old;
- uint32_t mean_new;
- uint32_t mean_old;
- double ssim_new;
- double ssim_old;
-
- // Not sure there's a great way to handle the edge pixels
- // in ssim when using a window. Seems biased against edge pixels
- // however you handle this. This uses only samples that are
- // fully in the frame.
- if (j + 8 <= width && i + 8 <= height) {
- ssimv_parms(img1 + j, img1_pitch, img2 + j, img2_pitch, &sv);
- }
-
- ssim = ssimv_similarity(&sv, 64);
- ssim2 = ssimv_similarity2(&sv, 64);
-
- sv.ssim = ssim2;
-
- // dssim is calculated to use as an actual error metric and
- // is scaled up to the same range as sum square error.
- // Since we are subsampling every 16th point maybe this should be
- // *16 ?
- dssim = 255 * 255 * (1 - ssim2) / 2;
-
- // Here I introduce a new error metric: consistency-weighted
- // SSIM-inconsistency. This metric isolates frames where the
- // SSIM 'suddenly' changes, e.g. if one frame in every 8 is much
- // sharper or blurrier than the others. Higher values indicate a
- // temporally inconsistent SSIM. There are two ideas at work:
- //
- // 1) 'SSIM-inconsistency': the total inconsistency value
- // reflects how much SSIM values are changing between this
- // source / reference frame pair and the previous pair.
- //
- // 2) 'consistency-weighted': weights de-emphasize areas in the
- // frame where the scene content has changed. Changes in scene
- // content are detected via changes in local variance and local
- // mean.
- //
- // Thus the overall measure reflects how inconsistent the SSIM
- // values are, over consistent regions of the frame.
- //
- // The metric has three terms:
- //
- // term 1 -> uses change in scene Variance to weight error score
- // 2 * var(Fi)*var(Fi-1) / (var(Fi)^2+var(Fi-1)^2)
- // larger changes from one frame to the next mean we care
- // less about consistency.
- //
- // term 2 -> uses change in local scene luminance to weight error
- // 2 * avg(Fi)*avg(Fi-1) / (avg(Fi)^2+avg(Fi-1)^2)
- // larger changes from one frame to the next mean we care
- // less about consistency.
- //
- // term3 -> measures inconsistency in ssim scores between frames
-      //    1 - (2 * ssim(Fi)*ssim(Fi-1) / (ssim(Fi)^2 + ssim(Fi-1)^2)).
- //
- // This term compares the ssim score for the same location in 2
- // subsequent frames.
- var_new = sv.sum_sq_s - sv.sum_s * sv.sum_s / 64;
- var_old = sv2[c].sum_sq_s - sv2[c].sum_s * sv2[c].sum_s / 64;
- mean_new = sv.sum_s;
- mean_old = sv2[c].sum_s;
- ssim_new = sv.ssim;
- ssim_old = sv2[c].ssim;
-
- if (do_inconsistency) {
- // We do the metric once for every 4x4 block in the image. Since
- // we are scaling the error to SSE for use in a psnr calculation
- // 1.0 = 4x4x255x255 the worst error we can possibly have.
- static const double kScaling = 4. * 4 * 255 * 255;
-
-        // The constants have to be nonzero to avoid potential divide-by-zero
-        // issues; beyond that they only act as a rough weighting between the
-        // terms. No testing of what the right values should be has been
-        // done.
- static const double c1 = 1, c2 = 1, c3 = 1;
-
- // This measures how much consistent variance is in two consecutive
- // source frames. 1.0 means they have exactly the same variance.
- const double variance_term = (2.0 * var_old * var_new + c1) /
- (1.0 * var_old * var_old + 1.0 * var_new * var_new + c1);
-
-        // This measures how consistent the local means are between two
- // consecutive frames. 1.0 means they have exactly the same mean.
- const double mean_term = (2.0 * mean_old * mean_new + c2) /
- (1.0 * mean_old * mean_old + 1.0 * mean_new * mean_new + c2);
-
- // This measures how consistent the ssims of two
-        // consecutive frames are. 1.0 means they are exactly the same.
- double ssim_term = pow((2.0 * ssim_old * ssim_new + c3) /
- (ssim_old * ssim_old + ssim_new * ssim_new + c3),
- 5);
-
- double this_inconsistency;
-
- // Floating point math sometimes makes this > 1 by a tiny bit.
- // We want the metric to scale between 0 and 1.0 so we can convert
- // it to an snr scaled value.
- if (ssim_term > 1)
- ssim_term = 1;
-
-        // This converts the consistency metric to an inconsistency metric
-        // (so we can scale it like psnr to something like sum square error).
-        // The reason for the variance and mean terms is the assumption that
-        // if there are big changes in the source, inconsistency in ssim
-        // scores should be penalized less, as it will be less visible to
-        // the user.
- this_inconsistency = (1 - ssim_term) * variance_term * mean_term;
-
- this_inconsistency *= kScaling;
- inconsistency_total += this_inconsistency;
- }
- sv2[c] = sv;
- ssim_total += ssim;
- ssim2_total += ssim2;
- dssim_total += dssim;
-
- old_ssim_total += ssim_old;
- }
- old_ssim_total += 0;
- }
-
- norm = 1. / (width / 4) / (height / 4);
- ssim_total *= norm;
- ssim2_total *= norm;
- m->ssim2 = ssim2_total;
- m->ssim = ssim_total;
- if (old_ssim_total == 0)
- inconsistency_total = 0;
-
- m->ssimc = inconsistency_total;
-
- m->dssim = dssim_total;
- return inconsistency_total;
-}
-
-
-#if CONFIG_VP9_HIGHBITDEPTH
-double vp9_highbd_calc_ssim(YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest,
- double *weight, unsigned int bd) {
- double a, b, c;
- double ssimv;
-
- a = vp9_highbd_ssim2(source->y_buffer, dest->y_buffer,
- source->y_stride, dest->y_stride,
- source->y_crop_width, source->y_crop_height, bd);
-
- b = vp9_highbd_ssim2(source->u_buffer, dest->u_buffer,
- source->uv_stride, dest->uv_stride,
- source->uv_crop_width, source->uv_crop_height, bd);
-
- c = vp9_highbd_ssim2(source->v_buffer, dest->v_buffer,
- source->uv_stride, dest->uv_stride,
- source->uv_crop_width, source->uv_crop_height, bd);
-
- ssimv = a * .8 + .1 * (b + c);
-
- *weight = 1;
-
- return ssimv;
-}
-
-double vp9_highbd_calc_ssimg(YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest, double *ssim_y,
- double *ssim_u, double *ssim_v, unsigned int bd) {
- double ssim_all = 0;
- double a, b, c;
-
- a = vp9_highbd_ssim2(source->y_buffer, dest->y_buffer,
- source->y_stride, dest->y_stride,
- source->y_crop_width, source->y_crop_height, bd);
-
- b = vp9_highbd_ssim2(source->u_buffer, dest->u_buffer,
- source->uv_stride, dest->uv_stride,
- source->uv_crop_width, source->uv_crop_height, bd);
-
- c = vp9_highbd_ssim2(source->v_buffer, dest->v_buffer,
- source->uv_stride, dest->uv_stride,
- source->uv_crop_width, source->uv_crop_height, bd);
- *ssim_y = a;
- *ssim_u = b;
- *ssim_v = c;
- ssim_all = (a * 4 + b + c) / 6;
-
- return ssim_all;
-}
-#endif // CONFIG_VP9_HIGHBITDEPTH
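
Note: the long comment block above derives the integer-friendly factored form that the deleted ssimv_similarity() implements. Restated once in conventional notation (a transcription of the same derivation, not new math; c1 and c2 here denote the constants after the code's n^2 scaling and 12-bit shift):

    \mathrm{ssim}(x,y) =
      \frac{(2 S_x S_y + c_1)\,\bigl(2(n S_{xy} - S_x S_y) + c_2\bigr)}
           {(S_x^2 + S_y^2 + c_1)\,
            \bigl(n S_{xx} - S_x^2 + n S_{yy} - S_y^2 + c_2\bigr)},
      \qquad S_x = \textstyle\sum_i x_i,\;
             S_{xx} = \textstyle\sum_i x_i^2,\;
             S_{xy} = \textstyle\sum_i x_i y_i.
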
diff --git a/vp9/encoder/vp9_ssim.h b/vp9/encoder/vp9_ssim.h
deleted file mode 100644
index 10f14c4d2..000000000
--- a/vp9/encoder/vp9_ssim.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VP9_ENCODER_VP9_SSIM_H_
-#define VP9_ENCODER_VP9_SSIM_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "vpx_scale/yv12config.h"
-
-// metrics used for calculating ssim, ssim2, dssim, and ssimc
-typedef struct {
- // source sum ( over 8x8 region )
- uint64_t sum_s;
-
- // reference sum (over 8x8 region )
- uint64_t sum_r;
-
- // source sum squared ( over 8x8 region )
- uint64_t sum_sq_s;
-
- // reference sum squared (over 8x8 region )
- uint64_t sum_sq_r;
-
- // sum of source times reference (over 8x8 region)
- uint64_t sum_sxr;
-
- // calculated ssim score between source and reference
- double ssim;
-} Ssimv;
-
-// metrics collected on a frame basis
-typedef struct {
- // ssim consistency error metric ( see code for explanation )
- double ssimc;
-
- // standard ssim
- double ssim;
-
- // revised ssim ( see code for explanation)
- double ssim2;
-
- // ssim restated as an error metric like sse
- double dssim;
-
- // dssim converted to decibels
- double dssimd;
-
- // ssimc converted to decibels
- double ssimcd;
-} Metrics;
-
-double vp9_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2,
- int img2_pitch, int width, int height, Ssimv *sv2,
- Metrics *m, int do_inconsistency);
-
-double vp9_calc_ssim(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
- double *weight);
-
-double vp9_calc_ssimg(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
- double *ssim_y, double *ssim_u, double *ssim_v);
-
-double vp9_calc_fastssim(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
- double *ssim_y, double *ssim_u, double *ssim_v);
-
-double vp9_psnrhvs(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
- double *ssim_y, double *ssim_u, double *ssim_v);
-
-#if CONFIG_VP9_HIGHBITDEPTH
-double vp9_highbd_calc_ssim(YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest,
- double *weight,
- unsigned int bd);
-
-double vp9_highbd_calc_ssimg(YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest,
- double *ssim_y,
- double *ssim_u,
- double *ssim_v,
- unsigned int bd);
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif // VP9_ENCODER_VP9_SSIM_H_
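
Note: before this removal, a frame-level score came from calling vp9_calc_ssim() on the source and reconstruction; internally it scored each plane with vp9_ssim2() and folded the results with the same 0.8/0.1/0.1 weights used by the psnrhvs path. A sketch of that combination step (the helper name combine_plane_scores is hypothetical; the weighting is the one from the deleted vp9_calc_ssim()):

    /* Per-plane combination used by the removed vp9_calc_ssim():
     * luma weighted 0.8, each chroma plane 0.1. */
    static double combine_plane_scores(double y, double u, double v) {
      return y * .8 + .1 * (u + v);
    }
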
diff --git a/vp9/encoder/vp9_temporal_filter.c b/vp9/encoder/vp9_temporal_filter.c
index 3475d58da..c4d66e021 100644
--- a/vp9/encoder/vp9_temporal_filter.c
+++ b/vp9/encoder/vp9_temporal_filter.c
@@ -15,7 +15,6 @@
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconinter.h"
-#include "vp9/common/vp9_systemdependent.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_firstpass.h"
#include "vp9/encoder/vp9_mcomp.h"
diff --git a/vp9/encoder/x86/vp9_dct_mmx.asm b/vp9/encoder/x86/vp9_dct_mmx.asm
index b41fbc8b3..7a7a6b655 100644
--- a/vp9/encoder/x86/vp9_dct_mmx.asm
+++ b/vp9/encoder/x86/vp9_dct_mmx.asm
@@ -7,6 +7,9 @@
; in the file PATENTS. All contributing project authors may
; be found in the AUTHORS file in the root of the source tree.
;
+
+%define private_prefix vp9
+
%include "third_party/x86inc/x86inc.asm"
SECTION .text
diff --git a/vp9/encoder/x86/vp9_dct_ssse3_x86_64.asm b/vp9/encoder/x86/vp9_dct_ssse3_x86_64.asm
index 29074e713..74c52df19 100644
--- a/vp9/encoder/x86/vp9_dct_ssse3_x86_64.asm
+++ b/vp9/encoder/x86/vp9_dct_ssse3_x86_64.asm
@@ -7,6 +7,9 @@
; in the file PATENTS. All contributing project authors may
; be found in the AUTHORS file in the root of the source tree.
;
+
+%define private_prefix vp9
+
%include "third_party/x86inc/x86inc.asm"
; This file provides SSSE3 version of the forward transformation. Part
diff --git a/vp9/encoder/x86/vp9_error_sse2.asm b/vp9/encoder/x86/vp9_error_sse2.asm
index 56373e897..5b0238272 100644
--- a/vp9/encoder/x86/vp9_error_sse2.asm
+++ b/vp9/encoder/x86/vp9_error_sse2.asm
@@ -8,6 +8,8 @@
; be found in the AUTHORS file in the root of the source tree.
;
+%define private_prefix vp9
+
%include "third_party/x86inc/x86inc.asm"
SECTION .text
diff --git a/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm b/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
index ec2e87cb1..ec61c0c3a 100644
--- a/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
+++ b/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
@@ -8,6 +8,8 @@
; be found in the AUTHORS file in the root of the source tree.
;
+%define private_prefix vp9
+
%include "third_party/x86inc/x86inc.asm"
SECTION_RODATA
diff --git a/vp9/encoder/x86/vp9_ssim_opt_x86_64.asm b/vp9/encoder/x86/vp9_ssim_opt_x86_64.asm
deleted file mode 100644
index 455d10d2c..000000000
--- a/vp9/encoder/x86/vp9_ssim_opt_x86_64.asm
+++ /dev/null
@@ -1,216 +0,0 @@
-;
-; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-; Use of this source code is governed by a BSD-style license
-; that can be found in the LICENSE file in the root of the source
-; tree. An additional intellectual property rights grant can be found
-; in the file PATENTS. All contributing project authors may
-; be found in the AUTHORS file in the root of the source tree.
-;
-
-%include "vpx_ports/x86_abi_support.asm"
-
-; tabulate_ssim - sums sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr
-%macro TABULATE_SSIM 0
- paddusw xmm15, xmm3 ; sum_s
- paddusw xmm14, xmm4 ; sum_r
- movdqa xmm1, xmm3
- pmaddwd xmm1, xmm1
- paddd xmm13, xmm1 ; sum_sq_s
- movdqa xmm2, xmm4
- pmaddwd xmm2, xmm2
- paddd xmm12, xmm2 ; sum_sq_r
- pmaddwd xmm3, xmm4
- paddd xmm11, xmm3 ; sum_sxr
-%endmacro
-
-; Sum across the register %1 starting with q words
-%macro SUM_ACROSS_Q 1
- movdqa xmm2,%1
- punpckldq %1,xmm0
- punpckhdq xmm2,xmm0
- paddq %1,xmm2
- movdqa xmm2,%1
- punpcklqdq %1,xmm0
- punpckhqdq xmm2,xmm0
- paddq %1,xmm2
-%endmacro
-
-; Sum across the register %1 starting with word-sized elements
-%macro SUM_ACROSS_W 1
- movdqa xmm1, %1
- punpcklwd %1,xmm0
- punpckhwd xmm1,xmm0
- paddd %1, xmm1
- SUM_ACROSS_Q %1
-%endmacro
-;void vp9_ssim_parms_16x16_sse2(
-; unsigned char *s,
-; int sp,
-; unsigned char *r,
-;            int rp,
-; unsigned long *sum_s,
-; unsigned long *sum_r,
-; unsigned long *sum_sq_s,
-; unsigned long *sum_sq_r,
-; unsigned long *sum_sxr);
-;
-; TODO: Use parm passing through structure; probably don't need the pxors
-; (calling app will initialize to 0). Could easily fit everything in sse2
-; without too much hassle, and can probably do better estimates with psadw
-; or pavgb. At this point this is just meant to be a first pass for
-; calculating all the parms needed for 16x16 ssim so we can play with
-; dssim as distortion in mode selection code.
-global sym(vp9_ssim_parms_16x16_sse2) PRIVATE
-sym(vp9_ssim_parms_16x16_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 9
- SAVE_XMM 15
- push rsi
- push rdi
- ; end prolog
-
- mov rsi, arg(0) ;s
- mov rcx, arg(1) ;sp
- mov rdi, arg(2) ;r
- mov rax, arg(3) ;rp
-
- pxor xmm0, xmm0
- pxor xmm15,xmm15 ;sum_s
- pxor xmm14,xmm14 ;sum_r
- pxor xmm13,xmm13 ;sum_sq_s
- pxor xmm12,xmm12 ;sum_sq_r
- pxor xmm11,xmm11 ;sum_sxr
-
- mov rdx, 16 ;row counter
-.NextRow:
-
- ;grab source and reference pixels
- movdqu xmm5, [rsi]
- movdqu xmm6, [rdi]
- movdqa xmm3, xmm5
- movdqa xmm4, xmm6
- punpckhbw xmm3, xmm0 ; high_s
- punpckhbw xmm4, xmm0 ; high_r
-
- TABULATE_SSIM
-
- movdqa xmm3, xmm5
- movdqa xmm4, xmm6
- punpcklbw xmm3, xmm0 ; low_s
- punpcklbw xmm4, xmm0 ; low_r
-
- TABULATE_SSIM
-
- add rsi, rcx ; next s row
- add rdi, rax ; next r row
-
- dec rdx ; counter
- jnz .NextRow
-
- SUM_ACROSS_W xmm15
- SUM_ACROSS_W xmm14
- SUM_ACROSS_Q xmm13
- SUM_ACROSS_Q xmm12
- SUM_ACROSS_Q xmm11
-
- mov rdi,arg(4)
- movd [rdi], xmm15;
- mov rdi,arg(5)
- movd [rdi], xmm14;
- mov rdi,arg(6)
- movd [rdi], xmm13;
- mov rdi,arg(7)
- movd [rdi], xmm12;
- mov rdi,arg(8)
- movd [rdi], xmm11;
-
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
-
-;void vp9_ssim_parms_8x8_sse2(
-; unsigned char *s,
-; int sp,
-; unsigned char *r,
-;            int rp,
-; unsigned long *sum_s,
-; unsigned long *sum_r,
-; unsigned long *sum_sq_s,
-; unsigned long *sum_sq_r,
-; unsigned long *sum_sxr);
-;
-; TODO: Use parm passing through structure; probably don't need the pxors
-; (calling app will initialize to 0). Could easily fit everything in sse2
-; without too much hassle, and can probably do better estimates with psadw
-; or pavgb. At this point this is just meant to be a first pass for
-; calculating all the parms needed for 16x16 ssim so we can play with
-; dssim as distortion in mode selection code.
-global sym(vp9_ssim_parms_8x8_sse2) PRIVATE
-sym(vp9_ssim_parms_8x8_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 9
- SAVE_XMM 15
- push rsi
- push rdi
- ; end prolog
-
- mov rsi, arg(0) ;s
- mov rcx, arg(1) ;sp
- mov rdi, arg(2) ;r
- mov rax, arg(3) ;rp
-
- pxor xmm0, xmm0
- pxor xmm15,xmm15 ;sum_s
- pxor xmm14,xmm14 ;sum_r
- pxor xmm13,xmm13 ;sum_sq_s
- pxor xmm12,xmm12 ;sum_sq_r
- pxor xmm11,xmm11 ;sum_sxr
-
- mov rdx, 8 ;row counter
-.NextRow:
-
- ;grab source and reference pixels
- movq xmm3, [rsi]
- movq xmm4, [rdi]
- punpcklbw xmm3, xmm0 ; low_s
- punpcklbw xmm4, xmm0 ; low_r
-
- TABULATE_SSIM
-
- add rsi, rcx ; next s row
- add rdi, rax ; next r row
-
- dec rdx ; counter
- jnz .NextRow
-
- SUM_ACROSS_W xmm15
- SUM_ACROSS_W xmm14
- SUM_ACROSS_Q xmm13
- SUM_ACROSS_Q xmm12
- SUM_ACROSS_Q xmm11
-
- mov rdi,arg(4)
- movd [rdi], xmm15;
- mov rdi,arg(5)
- movd [rdi], xmm14;
- mov rdi,arg(6)
- movd [rdi], xmm13;
- mov rdi,arg(7)
- movd [rdi], xmm12;
- mov rdi,arg(8)
- movd [rdi], xmm11;
-
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
diff --git a/vp9/vp9_common.mk b/vp9/vp9_common.mk
index e95da4404..d0135c6f8 100644
--- a/vp9/vp9_common.mk
+++ b/vp9/vp9_common.mk
@@ -45,7 +45,6 @@ VP9_COMMON_SRCS-yes += common/vp9_scale.h
VP9_COMMON_SRCS-yes += common/vp9_scale.c
VP9_COMMON_SRCS-yes += common/vp9_seg_common.h
VP9_COMMON_SRCS-yes += common/vp9_seg_common.c
-VP9_COMMON_SRCS-yes += common/vp9_systemdependent.h
VP9_COMMON_SRCS-yes += common/vp9_textblit.h
VP9_COMMON_SRCS-yes += common/vp9_tile_common.h
VP9_COMMON_SRCS-yes += common/vp9_tile_common.c
@@ -71,16 +70,10 @@ VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_mfqe_sse2.asm
VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_postproc_sse2.asm
endif
-# common (c)
-VP9_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/vp9_common_dspr2.h
-VP9_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/vp9_common_dspr2.c
-
ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
VP9_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/vp9_itrans4_dspr2.c
VP9_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/vp9_itrans8_dspr2.c
VP9_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/vp9_itrans16_dspr2.c
-VP9_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/vp9_itrans32_cols_dspr2.c
-VP9_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/vp9_itrans32_dspr2.c
endif
# common (msa)
@@ -94,8 +87,6 @@ endif
VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_idct_intrin_sse2.c
-VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_save_reg_neon$(ASM)
-
ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_iht4x4_add_neon.c
VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_iht8x8_add_neon.c
diff --git a/vp9/vp9cx.mk b/vp9/vp9cx.mk
index 186ce11f0..84b12d78e 100644
--- a/vp9/vp9cx.mk
+++ b/vp9/vp9cx.mk
@@ -33,7 +33,6 @@ VP9_CX_SRCS-yes += encoder/vp9_encodemv.c
VP9_CX_SRCS-yes += encoder/vp9_ethread.h
VP9_CX_SRCS-yes += encoder/vp9_ethread.c
VP9_CX_SRCS-yes += encoder/vp9_extend.c
-VP9_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/vp9_fastssim.c
VP9_CX_SRCS-yes += encoder/vp9_firstpass.c
VP9_CX_SRCS-yes += encoder/vp9_block.h
VP9_CX_SRCS-yes += encoder/vp9_bitstream.h
@@ -57,7 +56,6 @@ VP9_CX_SRCS-yes += encoder/vp9_mcomp.c
VP9_CX_SRCS-yes += encoder/vp9_encoder.c
VP9_CX_SRCS-yes += encoder/vp9_picklpf.c
VP9_CX_SRCS-yes += encoder/vp9_picklpf.h
-VP9_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/vp9_psnrhvs.c
VP9_CX_SRCS-yes += encoder/vp9_quantize.c
VP9_CX_SRCS-yes += encoder/vp9_ratectrl.c
VP9_CX_SRCS-yes += encoder/vp9_rd.c
@@ -72,8 +70,6 @@ VP9_CX_SRCS-yes += encoder/vp9_subexp.h
VP9_CX_SRCS-yes += encoder/vp9_svc_layercontext.c
VP9_CX_SRCS-yes += encoder/vp9_resize.c
VP9_CX_SRCS-yes += encoder/vp9_resize.h
-VP9_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/vp9_ssim.c
-VP9_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/vp9_ssim.h
VP9_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/vp9_blockiness.c
VP9_CX_SRCS-yes += encoder/vp9_tokenize.c
@@ -113,7 +109,6 @@ VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_quantize_ssse3_x86_64.asm
VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_dct_ssse3_x86_64.asm
endif
endif
-VP9_CX_SRCS-$(ARCH_X86_64) += encoder/x86/vp9_ssim_opt_x86_64.asm
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_dct_sse2.c
VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_dct_ssse3.c