author     James Zern <jzern@google.com>                             2017-05-12 15:24:59 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>  2017-05-12 15:24:59 +0000
commit     ac8f58f6ab73fea7948f40eaf608f832964d8a58
tree       5c46337efc9ceb0eed71c6c35acd4b6af19144bb
parent     f48532e271c89144a98b931b3cf5bb721b936a7f
parent     143b21e362efadf432352e57666d8d9853492d88
Merge changes I1b54a7a5,I3028bdad,I59788cd9

* changes:
  ppc: Add get_mb_ss_vsx
  ppc: Add get4x4sse_cs_vsx
  ppc: Add comp_avg_pred_vsx
 test/comp_avg_pred_test.cc   |   4
 test/variance_test.cc        |   9
 vpx_dsp/ppc/variance_vsx.c   | 103
 vpx_dsp/vpx_dsp.mk           |   1
 vpx_dsp/vpx_dsp_rtcd_defs.pl |   6
 5 files changed, 120 insertions(+), 3 deletions(-)
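
For orientation, the merge adds VSX (POWER vector) versions of three small DSP kernels. Their contracts, per the prototypes in vpx_dsp_rtcd_defs.pl at the bottom of this change, are:

    #include <stdint.h>

    /* Sum of squares of a 16x16 block of int16 values (256 entries). */
    unsigned int vpx_get_mb_ss_vsx(const int16_t *);

    /* Sum of squared differences between two 4x4 uint8 blocks. */
    unsigned int vpx_get4x4sse_cs_vsx(const unsigned char *src_ptr,
                                      int source_stride,
                                      const unsigned char *ref_ptr,
                                      int ref_stride);

    /* Per-pixel rounding average of pred and ref, written to comp_pred. */
    void vpx_comp_avg_pred_vsx(uint8_t *comp_pred, const uint8_t *pred,
                               int width, int height, const uint8_t *ref,
                               int ref_stride);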
diff --git a/test/comp_avg_pred_test.cc b/test/comp_avg_pred_test.cc
index c3ad28e2b..3feba7127 100644
--- a/test/comp_avg_pred_test.cc
+++ b/test/comp_avg_pred_test.cc
@@ -156,4 +156,8 @@ INSTANTIATE_TEST_CASE_P(C, AvgPredTest,
INSTANTIATE_TEST_CASE_P(SSE2, AvgPredTest,
::testing::Values(&vpx_comp_avg_pred_sse2));
#endif // HAVE_SSE2
+#if HAVE_VSX
+INSTANTIATE_TEST_CASE_P(VSX, AvgPredTest,
+ ::testing::Values(&vpx_comp_avg_pred_vsx));
+#endif // HAVE_VSX
} // namespace
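
The VSX entry is registered with the existing value-parameterized AvgPredTest fixture, so the full suite runs against it and compares its output with the C reference. A minimal sketch of that pattern, paraphrased rather than copied from the real fixture earlier in this file:

    #include <stdint.h>
    #include "gtest/gtest.h"

    typedef void (*AvgPredFunc)(uint8_t *comp_pred, const uint8_t *pred,
                                int width, int height, const uint8_t *ref,
                                int ref_stride);

    class AvgPredTest : public ::testing::TestWithParam<AvgPredFunc> {};

    TEST_P(AvgPredTest, MatchesReference) {
      const AvgPredFunc func = GetParam();
      // Fill pred/ref with random bytes, run func and vpx_comp_avg_pred_c
      // on identical inputs, then require byte-identical comp_pred output.
      (void)func;
    }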
diff --git a/test/variance_test.cc b/test/variance_test.cc
index 57d57c972..4fc5cf5d6 100644
--- a/test/variance_test.cc
+++ b/test/variance_test.cc
@@ -1338,4 +1338,13 @@ INSTANTIATE_TEST_CASE_P(
make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_msa, 0),
make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_msa, 0)));
#endif // HAVE_MSA
+
+#if HAVE_VSX
+INSTANTIATE_TEST_CASE_P(VSX, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_vsx));
+
+INSTANTIATE_TEST_CASE_P(VSX, VpxSseTest,
+ ::testing::Values(SseParams(2, 2,
+ &vpx_get4x4sse_cs_vsx)));
+#endif // HAVE_VSX
} // namespace
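
Both VSX kernels join existing fixtures here. The first two SseParams fields are log2 block width and log2 height, matching the make_tuple convention in the MSA list above (2, 3 selects 4x8; 2, 2 selects 4x4), so SseParams(2, 2, ...) exercises vpx_get4x4sse_cs_vsx on 4x4 blocks. The parameter signatures, per the rtcd prototypes (the typedef names below are this sketch's own, not necessarily those in variance_test.cc):

    #include <stdint.h>

    typedef unsigned int (*SumOfSquaresFunc)(const int16_t *src);
    typedef unsigned int (*SseFunc)(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride);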
diff --git a/vpx_dsp/ppc/variance_vsx.c b/vpx_dsp/ppc/variance_vsx.c
new file mode 100644
index 000000000..1efe2f005
--- /dev/null
+++ b/vpx_dsp/ppc/variance_vsx.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/ppc/types_vsx.h"
+
+static inline uint8x16_t read4x2(const uint8_t *a, int stride) {
+ const uint32x4_t a0 = (uint32x4_t)vec_vsx_ld(0, a);
+ const uint32x4_t a1 = (uint32x4_t)vec_vsx_ld(0, a + stride);
+
+ return (uint8x16_t)vec_mergeh(a0, a1);
+}
+
+uint32_t vpx_get4x4sse_cs_vsx(const uint8_t *a, int a_stride, const uint8_t *b,
+ int b_stride) {
+ int distortion;
+
+ const int16x8_t a0 = unpack_to_s16_h(read4x2(a, a_stride));
+ const int16x8_t a1 = unpack_to_s16_h(read4x2(a + a_stride * 2, a_stride));
+ const int16x8_t b0 = unpack_to_s16_h(read4x2(b, b_stride));
+ const int16x8_t b1 = unpack_to_s16_h(read4x2(b + b_stride * 2, b_stride));
+ const int16x8_t d0 = vec_sub(a0, b0);
+ const int16x8_t d1 = vec_sub(a1, b1);
+ const int32x4_t ds = vec_msum(d1, d1, vec_msum(d0, d0, vec_splat_s32(0)));
+ const int32x4_t d = vec_splat(vec_sums(ds, vec_splat_s32(0)), 3);
+
+ vec_ste(d, 0, &distortion);
+
+ return distortion;
+}
+
+// TODO(lu_zero): Unroll
+uint32_t vpx_get_mb_ss_vsx(const int16_t *a) {
+ unsigned int i, sum = 0;
+ int32x4_t s = vec_splat_s32(0);
+
+ for (i = 0; i < 256; i += 8) {
+ const int16x8_t v = vec_vsx_ld(0, a + i);
+ s = vec_msum(v, v, s);
+ }
+
+ s = vec_splat(vec_sums(s, vec_splat_s32(0)), 3);
+
+ vec_ste((uint32x4_t)s, 0, &sum);
+
+ return sum;
+}
+
+void vpx_comp_avg_pred_vsx(uint8_t *comp_pred, const uint8_t *pred, int width,
+ int height, const uint8_t *ref, int ref_stride) {
+ int i, j;
+ /* comp_pred and pred must be 16 byte aligned. */
+ assert(((intptr_t)comp_pred & 0xf) == 0);
+ assert(((intptr_t)pred & 0xf) == 0);
+ if (width >= 16) {
+ for (i = 0; i < height; ++i) {
+ for (j = 0; j < width; j += 16) {
+ const uint8x16_t v = vec_avg(vec_vsx_ld(j, pred), vec_vsx_ld(j, ref));
+ vec_vsx_st(v, j, comp_pred);
+ }
+ comp_pred += width;
+ pred += width;
+ ref += ref_stride;
+ }
+ } else if (width == 8) {
+ // Process 2 lines at a time
+ for (i = 0; i < height / 2; ++i) {
+ const uint8x16_t r0 = vec_vsx_ld(0, ref);
+ const uint8x16_t r1 = vec_vsx_ld(0, ref + ref_stride);
+ const uint8x16_t r = xxpermdi(r0, r1, 0);
+ const uint8x16_t v = vec_avg(vec_vsx_ld(0, pred), r);
+ vec_vsx_st(v, 0, comp_pred);
+ comp_pred += 16; // width * 2;
+ pred += 16; // width * 2;
+ ref += ref_stride * 2;
+ }
+ } else {
+ assert(width == 4);
+ // Process 4 lines at a time
+ for (i = 0; i < height / 4; ++i) {
+ const uint32x4_t r0 = (uint32x4_t)vec_vsx_ld(0, ref);
+ const uint32x4_t r1 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride);
+ const uint32x4_t r2 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride * 2);
+ const uint32x4_t r3 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride * 3);
+ const uint8x16_t r =
+ (uint8x16_t)xxpermdi(vec_mergeh(r0, r1), vec_mergeh(r2, r3), 0);
+ const uint8x16_t v = vec_avg(vec_vsx_ld(0, pred), r);
+ vec_vsx_st(v, 0, comp_pred);
+ comp_pred += 16; // width * 4;
+ pred += 16; // width * 4;
+ ref += ref_stride * 4;
+ }
+ }
+}
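
A plain-C model of what the three kernels above compute, useful for auditing the intrinsics; this is a sketch written for review, not libvpx's shipped C code. The intrinsic facts it relies on: vec_msum(v, v, acc) adds products of adjacent pairs of 16-bit lanes into four 32-bit accumulators, vec_sums folds those into element 3, and vec_avg(a, b) is the rounding byte average (a + b + 1) >> 1.

    #include <stdint.h>

    /* 4x4 sum of squared differences (vpx_get4x4sse_cs_vsx). */
    static unsigned int get4x4sse_cs_model(const uint8_t *a, int a_stride,
                                           const uint8_t *b, int b_stride) {
      unsigned int sse = 0;
      int r, c;
      for (r = 0; r < 4; ++r) {
        for (c = 0; c < 4; ++c) {
          const int d = a[c] - b[c];
          sse += d * d;
        }
        a += a_stride;
        b += b_stride;
      }
      return sse;
    }

    /* Sum of squares over 256 int16 residuals (vpx_get_mb_ss_vsx). */
    static unsigned int get_mb_ss_model(const int16_t *a) {
      unsigned int ss = 0;
      int i;
      for (i = 0; i < 256; ++i) ss += (unsigned int)(a[i] * a[i]);
      return ss;
    }

    /* Rounding average of pred and ref (vpx_comp_avg_pred_vsx). Note that
     * comp_pred and pred are packed (stride == width) while ref uses
     * ref_stride, matching the pointer bumps in the VSX loops above. */
    static void comp_avg_pred_model(uint8_t *comp_pred, const uint8_t *pred,
                                    int width, int height, const uint8_t *ref,
                                    int ref_stride) {
      int i, j;
      for (i = 0; i < height; ++i) {
        for (j = 0; j < width; ++j)
          comp_pred[j] = (uint8_t)((pred[j] + ref[j] + 1) >> 1);
        comp_pred += width;
        pred += width;
        ref += ref_stride;
      }
    }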
diff --git a/vpx_dsp/vpx_dsp.mk b/vpx_dsp/vpx_dsp.mk
index 976db2b65..8d1ecbe8c 100644
--- a/vpx_dsp/vpx_dsp.mk
+++ b/vpx_dsp/vpx_dsp.mk
@@ -335,6 +335,7 @@ DSP_SRCS-$(HAVE_SSE2) += x86/avg_pred_sse2.c
DSP_SRCS-$(HAVE_SSE2) += x86/variance_sse2.c # Contains SSE2 and SSSE3
DSP_SRCS-$(HAVE_AVX2) += x86/variance_avx2.c
DSP_SRCS-$(HAVE_AVX2) += x86/variance_impl_avx2.c
+DSP_SRCS-$(HAVE_VSX) += ppc/variance_vsx.c

ifeq ($(ARCH_X86_64),yes)
DSP_SRCS-$(HAVE_SSE2) += x86/ssim_opt_x86_64.asm
diff --git a/vpx_dsp/vpx_dsp_rtcd_defs.pl b/vpx_dsp/vpx_dsp_rtcd_defs.pl
index 3e95e4473..5acbf2d68 100644
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -1171,13 +1171,13 @@ add_proto qw/unsigned int vpx_mse8x8/, "const uint8_t *src_ptr, int source_stri
specialize qw/vpx_mse8x8 sse2 msa/;

add_proto qw/unsigned int vpx_get_mb_ss/, "const int16_t *";
- specialize qw/vpx_get_mb_ss sse2 msa/;
+ specialize qw/vpx_get_mb_ss sse2 msa vsx/;

add_proto qw/unsigned int vpx_get4x4sse_cs/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride";
- specialize qw/vpx_get4x4sse_cs neon msa/;
+ specialize qw/vpx_get4x4sse_cs neon msa vsx/;

add_proto qw/void vpx_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
- specialize qw/vpx_comp_avg_pred sse2/;
+ specialize qw/vpx_comp_avg_pred sse2 vsx/;

#
# Subpixel Variance
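
The specialize lines feed libvpx's run-time CPU detection (RTCD): the generated vpx_dsp_rtcd.h exposes each entry point as a pointer aimed at the best implementation the host supports. Roughly, for one of the three kernels; the probe name ppc_simd_caps() and the HAS_VSX flag below are assumptions modeled on vpx_ports, not copied from generated output:

    #include <stdint.h>

    unsigned int vpx_get_mb_ss_c(const int16_t *);
    unsigned int vpx_get_mb_ss_vsx(const int16_t *);
    int ppc_simd_caps(void);   /* assumed runtime probe (vpx_ports) */
    #define HAS_VSX 0x01       /* assumed capability bit */

    unsigned int (*vpx_get_mb_ss)(const int16_t *) = vpx_get_mb_ss_c;

    static void setup_rtcd_internal(void) {
      vpx_get_mb_ss = vpx_get_mb_ss_c; /* portable C fallback */
      if (ppc_simd_caps() & HAS_VSX) vpx_get_mb_ss = vpx_get_mb_ss_vsx;
    }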