summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--test/dct_test.cc11
-rw-r--r--vp9/common/ppc/vp9_idct_vsx.c47
-rw-r--r--vp9/common/vp9_rtcd_defs.pl2
-rw-r--r--vp9/vp9_common.mk1
-rw-r--r--vpx_dsp/ppc/inv_txfm_vsx.c120
-rw-r--r--vpx_dsp/ppc/inv_txfm_vsx.h6
6 files changed, 161 insertions, 26 deletions
diff --git a/test/dct_test.cc b/test/dct_test.cc
index e8ad0cd5d..0d13e6b14 100644
--- a/test/dct_test.cc
+++ b/test/dct_test.cc
@@ -683,6 +683,17 @@ INSTANTIATE_TEST_CASE_P(
VPX_BITS_12)));
#endif // HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
+#if HAVE_VSX && !CONFIG_EMULATE_HARDWARE && !CONFIG_VP9_HIGHBITDEPTH
+static const FuncInfo ht_vsx_func_info[1] = {
+ { &vp9_fht4x4_c, &iht_wrapper<vp9_iht4x4_16_add_vsx>, 4, 1 }
+};
+
+INSTANTIATE_TEST_CASE_P(VSX, TransHT,
+ ::testing::Combine(::testing::Range(0, 1),
+ ::testing::Values(ht_vsx_func_info),
+ ::testing::Range(0, 4),
+ ::testing::Values(VPX_BITS_8)));
+#endif  // HAVE_VSX && !CONFIG_EMULATE_HARDWARE && !CONFIG_VP9_HIGHBITDEPTH
#endif // !CONFIG_EMULATE_HARDWARE
/* -------------------------------------------------------------------------- */
diff --git a/vp9/common/ppc/vp9_idct_vsx.c b/vp9/common/ppc/vp9_idct_vsx.c
new file mode 100644
index 000000000..a7b66e7fd
--- /dev/null
+++ b/vp9/common/ppc/vp9_idct_vsx.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_dsp/ppc/inv_txfm_vsx.h"
+#include "vpx_dsp/ppc/bitdepth_conversion_vsx.h"
+
+#include "vp9/common/vp9_enums.h"
+
+void vp9_iht4x4_16_add_vsx(const tran_low_t *input, uint8_t *dest, int stride,
+ int tx_type) {
+ int16x8_t in[2], out[2];
+
+ in[0] = load_tran_low(0, input);
+ in[1] = load_tran_low(8 * sizeof(*input), input);
+
+ switch (tx_type) {
+ case DCT_DCT:
+ vpx_idct4_vsx(in, out);
+ vpx_idct4_vsx(out, in);
+ break;
+ case ADST_DCT:
+ vpx_idct4_vsx(in, out);
+ vp9_iadst4_vsx(out, in);
+ break;
+ case DCT_ADST:
+ vp9_iadst4_vsx(in, out);
+ vpx_idct4_vsx(out, in);
+ break;
+ default:
+ assert(tx_type == ADST_ADST);
+ vp9_iadst4_vsx(in, out);
+ vp9_iadst4_vsx(out, in);
+ break;
+ }
+
+ vpx_round_store4x4_vsx(in, out, dest, stride);
+}
diff --git a/vp9/common/vp9_rtcd_defs.pl b/vp9/common/vp9_rtcd_defs.pl
index 20c9ed641..c5e99764a 100644
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -67,7 +67,7 @@ add_proto qw/void vp9_iht16x16_256_add/, "const tran_low_t *input, uint8_t *outp
if (vpx_config("CONFIG_EMULATE_HARDWARE") ne "yes") {
# Note that there are more specializations appended when
# CONFIG_VP9_HIGHBITDEPTH is off.
- specialize qw/vp9_iht4x4_16_add neon sse2/;
+ specialize qw/vp9_iht4x4_16_add neon sse2 vsx/;
specialize qw/vp9_iht8x8_64_add neon sse2/;
specialize qw/vp9_iht16x16_256_add neon sse2/;
if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") ne "yes") {
diff --git a/vp9/vp9_common.mk b/vp9/vp9_common.mk
index d40d3c445..7ca4004b0 100644
--- a/vp9/vp9_common.mk
+++ b/vp9/vp9_common.mk
@@ -68,6 +68,7 @@ VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_idct4x4_msa.c
VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_idct8x8_msa.c
VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_idct16x16_msa.c
VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_idct_intrin_sse2.c
+VP9_COMMON_SRCS-$(HAVE_VSX) += common/ppc/vp9_idct_vsx.c
VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_iht4x4_add_neon.c
VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_iht8x8_add_neon.c
VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_iht16x16_add_neon.c
diff --git a/vpx_dsp/ppc/inv_txfm_vsx.c b/vpx_dsp/ppc/inv_txfm_vsx.c
index 6603b85ac..89b5d87bf 100644
--- a/vpx_dsp/ppc/inv_txfm_vsx.c
+++ b/vpx_dsp/ppc/inv_txfm_vsx.c
@@ -14,6 +14,7 @@
#include "vpx_dsp/ppc/bitdepth_conversion_vsx.h"
#include "vpx_dsp/ppc/types_vsx.h"
+#include "vpx_dsp/ppc/inv_txfm_vsx.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/inv_txfm.h"
@@ -76,8 +77,20 @@ static int16x8_t cospi29_v = { 2404, 2404, 2404, 2404, 2404, 2404, 2404, 2404 };
static int16x8_t cospi30_v = { 1606, 1606, 1606, 1606, 1606, 1606, 1606, 1606 };
static int16x8_t cospi31_v = { 804, 804, 804, 804, 804, 804, 804, 804 };
-static uint8x16_t mask1 = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
- 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 };
+static const int16x8_t sinpi_1_9_v = { 5283, 5283, 5283, 5283,
+ 5283, 5283, 5283, 5283 };
+static const int16x8_t sinpi_2_9_v = { 9929, 9929, 9929, 9929,
+ 9929, 9929, 9929, 9929 };
+static const int16x8_t sinpi_3_9_v = { 13377, 13377, 13377, 13377,
+ 13377, 13377, 13377, 13377 };
+static const int16x8_t sinpi_4_9_v = { 15212, 15212, 15212, 15212,
+ 15212, 15212, 15212, 15212 };
+
+static const uint8x16_t mask1 = {
+ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
+};
+
#define ROUND_SHIFT_INIT \
const int32x4_t shift = vec_sl(vec_splat_s32(1), vec_splat_u32(13)); \
const uint32x4_t shift14 = vec_splat_u32(14);
@@ -118,17 +131,9 @@ static uint8x16_t mask1 = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
for (i = 0; i < 4; i++) \
for (j = 0; j < 4; j++) dest[j * stride + i] = tmp_dest[j * 4 + i];
-void vpx_idct4x4_16_add_vsx(const tran_low_t *input, uint8_t *dest,
+void vpx_round_store4x4_vsx(int16x8_t *in, int16x8_t *out, uint8_t *dest,
int stride) {
int i, j;
- int32x4_t temp1, temp2, temp3, temp4;
- int16x8_t step0, step1, tmp16_0, tmp16_1, t_out0, t_out1;
- uint8x16_t mask0 = { 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF,
- 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 };
- int16x8_t v0 = load_tran_low(0, input);
- int16x8_t v1 = load_tran_low(8 * sizeof(*input), input);
- int16x8_t t0 = vec_mergeh(v0, v1);
- int16x8_t t1 = vec_mergel(v0, v1);
uint8x16_t dest0 = vec_vsx_ld(0, dest);
uint8x16_t dest1 = vec_vsx_ld(stride, dest);
uint8x16_t dest2 = vec_vsx_ld(2 * stride, dest);
@@ -138,27 +143,45 @@ void vpx_idct4x4_16_add_vsx(const tran_low_t *input, uint8_t *dest,
int16x8_t d_u1 = (int16x8_t)vec_mergeh(dest1, zerov);
int16x8_t d_u2 = (int16x8_t)vec_mergeh(dest2, zerov);
int16x8_t d_u3 = (int16x8_t)vec_mergeh(dest3, zerov);
-
+ int16x8_t tmp16_0, tmp16_1;
uint8x16_t output_v;
uint8_t tmp_dest[16];
- ROUND_SHIFT_INIT
PIXEL_ADD_INIT;
- v0 = vec_mergeh(t0, t1);
- v1 = vec_mergel(t0, t1);
+ PIXEL_ADD4(out[0], in[0]);
+ PIXEL_ADD4(out[1], in[1]);
- IDCT4(v0, v1, t_out0, t_out1);
- // transpose
- t0 = vec_mergeh(t_out0, t_out1);
- t1 = vec_mergel(t_out0, t_out1);
- v0 = vec_mergeh(t0, t1);
- v1 = vec_mergel(t0, t1);
- IDCT4(v0, v1, t_out0, t_out1);
+ PACK_STORE(out[0], out[1]);
+}
- PIXEL_ADD4(v0, t_out0);
- PIXEL_ADD4(v1, t_out1);
+void vpx_idct4_vsx(int16x8_t *in, int16x8_t *out) {
+ int32x4_t temp1, temp2, temp3, temp4;
+ int16x8_t step0, step1, tmp16_0;
+ uint8x16_t mask0 = { 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF,
+ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 };
+ int16x8_t t0 = vec_mergeh(in[0], in[1]);
+ int16x8_t t1 = vec_mergel(in[0], in[1]);
+ ROUND_SHIFT_INIT
- PACK_STORE(v0, v1);
+ in[0] = vec_mergeh(t0, t1);
+ in[1] = vec_mergel(t0, t1);
+
+ IDCT4(in[0], in[1], out[0], out[1]);
+}
+
+void vpx_idct4x4_16_add_vsx(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ int16x8_t in[2], out[2];
+
+ in[0] = load_tran_low(0, input);
+ in[1] = load_tran_low(8 * sizeof(*input), input);
+ // Rows
+ vpx_idct4_vsx(in, out);
+
+ // Columns
+ vpx_idct4_vsx(out, in);
+
+ vpx_round_store4x4_vsx(in, out, dest, stride);
}
#define TRANSPOSE8x8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
@@ -1130,3 +1153,50 @@ void vpx_iwht4x4_16_add_vsx(const tran_low_t *input, uint8_t *dest,
PACK_STORE(v_a, v_c);
}
+
+void vp9_iadst4_vsx(int16x8_t *in, int16x8_t *out) {
+ int16x8_t sinpi_1_3_v, sinpi_4_2_v, sinpi_2_3_v, sinpi_1_4_v, sinpi_12_n3_v;
+ int32x4_t v_v[5], u_v[4];
+ int32x4_t zerov = vec_splat_s32(0);
+ int16x8_t tmp0, tmp1;
+ int16x8_t zero16v = vec_splat_s16(0);
+ uint32x4_t shift16 = vec_sl(vec_splat_u32(8), vec_splat_u32(1));
+ ROUND_SHIFT_INIT;
+
+ sinpi_1_3_v = vec_mergel(sinpi_1_9_v, sinpi_3_9_v);
+ sinpi_4_2_v = vec_mergel(sinpi_4_9_v, sinpi_2_9_v);
+ sinpi_2_3_v = vec_mergel(sinpi_2_9_v, sinpi_3_9_v);
+ sinpi_1_4_v = vec_mergel(sinpi_1_9_v, sinpi_4_9_v);
+ sinpi_12_n3_v = vec_mergel(vec_add(sinpi_1_9_v, sinpi_2_9_v),
+ vec_sub(zero16v, sinpi_3_9_v));
+
+ tmp0 = (int16x8_t)vec_mergeh((int32x4_t)in[0], (int32x4_t)in[1]);
+ tmp1 = (int16x8_t)vec_mergel((int32x4_t)in[0], (int32x4_t)in[1]);
+ in[0] = (int16x8_t)vec_mergeh((int32x4_t)tmp0, (int32x4_t)tmp1);
+ in[1] = (int16x8_t)vec_mergel((int32x4_t)tmp0, (int32x4_t)tmp1);
+
+ v_v[0] = vec_msum(in[0], sinpi_1_3_v, zerov);
+ v_v[1] = vec_msum(in[1], sinpi_4_2_v, zerov);
+ v_v[2] = vec_msum(in[0], sinpi_2_3_v, zerov);
+ v_v[3] = vec_msum(in[1], sinpi_1_4_v, zerov);
+ v_v[4] = vec_msum(in[0], sinpi_12_n3_v, zerov);
+
+ in[0] = vec_sub(in[0], in[1]);
+ in[1] = (int16x8_t)vec_sra((int32x4_t)in[1], shift16);
+ in[0] = vec_add(in[0], in[1]);
+ in[0] = (int16x8_t)vec_sl((int32x4_t)in[0], shift16);
+
+ u_v[0] = vec_add(v_v[0], v_v[1]);
+ u_v[1] = vec_sub(v_v[2], v_v[3]);
+ u_v[2] = vec_msum(in[0], sinpi_1_3_v, zerov);
+ u_v[3] = vec_sub(v_v[1], v_v[3]);
+ u_v[3] = vec_add(u_v[3], v_v[4]);
+
+ DCT_CONST_ROUND_SHIFT(u_v[0]);
+ DCT_CONST_ROUND_SHIFT(u_v[1]);
+ DCT_CONST_ROUND_SHIFT(u_v[2]);
+ DCT_CONST_ROUND_SHIFT(u_v[3]);
+
+ out[0] = vec_packs(u_v[0], u_v[1]);
+ out[1] = vec_packs(u_v[2], u_v[3]);
+}
diff --git a/vpx_dsp/ppc/inv_txfm_vsx.h b/vpx_dsp/ppc/inv_txfm_vsx.h
new file mode 100644
index 000000000..1b8c71d14
--- /dev/null
+++ b/vpx_dsp/ppc/inv_txfm_vsx.h
@@ -0,0 +1,11 @@
+#ifndef VPX_DSP_PPC_INV_TXFM_VSX_H_
+#define VPX_DSP_PPC_INV_TXFM_VSX_H_
+
+#include "vpx_dsp/ppc/types_vsx.h"
+
+void vpx_round_store4x4_vsx(int16x8_t *in, int16x8_t *out, uint8_t *dest,
+                            int stride);
+void vpx_idct4_vsx(int16x8_t *in, int16x8_t *out);
+void vp9_iadst4_vsx(int16x8_t *in, int16x8_t *out);
+
+#endif  // VPX_DSP_PPC_INV_TXFM_VSX_H_