author    yuanhecai <yuanhecai@loongson.cn>  2022-03-01 09:48:13 +0800
committer yuanhecai <yuanhecai@loongson.cn>  2022-03-03 20:37:26 +0800
commit    624b1367004801639ed35759d5f1759a092c8410 (patch)
tree      b0c82d005d505ef4711d5c1d3e07ccea26acf221 /vpx_dsp
parent    3b21aeac8b7d5a52b6360d878cb4df593e87113e (diff)
vp9[loongarch]: Optimize horizontal/vertical_8_c

1. vpx_lpf_vertical_8_lsx
2. vpx_lpf_horizontal_8_lsx

Bug: webm:1755
Change-Id: I6b05d6b1b2ac4d2a75beb9c9ca9700976fc3af55
Diffstat (limited to 'vpx_dsp')
-rw-r--r--  vpx_dsp/loongarch/loopfilter_8_lsx.c  199
-rw-r--r--  vpx_dsp/vpx_dsp.mk                      2
-rw-r--r--  vpx_dsp/vpx_dsp_rtcd_defs.pl            4
3 files changed, 203 insertions, 2 deletions
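
For reference, the functions being optimized smooth the three pixels on each side of a block edge with a 7-tap filter wherever the per-pixel "flat" test passes, and fall back to the narrower 4-tap filter elsewhere. A minimal per-pixel sketch of that arithmetic, mirroring the scalar filter8() in vpx_dsp/loopfilter.c (filter8_sketch and its signature are illustrative; the LSX code in this diff computes the same values for 8 pixels at a time via VP9_FILTER8):

#include <stdint.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

/* 7-tap smoothing across the edge p3 p2 p1 p0 | q0 q1 q2 q3.  Each output
 * is a weighted average with total weight 8, rounded to nearest.  In the
 * real filter this runs only where the filter mask and flat test pass. */
static void filter8_sketch(uint8_t p3, uint8_t *p2, uint8_t *p1, uint8_t *p0,
                           uint8_t *q0, uint8_t *q1, uint8_t *q2, uint8_t q3) {
  const int tp2 = *p2, tp1 = *p1, tp0 = *p0;
  const int tq0 = *q0, tq1 = *q1, tq2 = *q2;
  *p2 = (uint8_t)ROUND_POWER_OF_TWO(3 * p3 + 2 * tp2 + tp1 + tp0 + tq0, 3);
  *p1 = (uint8_t)ROUND_POWER_OF_TWO(2 * p3 + tp2 + 2 * tp1 + tp0 + tq0 + tq1, 3);
  *p0 = (uint8_t)ROUND_POWER_OF_TWO(p3 + tp2 + tp1 + 2 * tp0 + tq0 + tq1 + tq2, 3);
  *q0 = (uint8_t)ROUND_POWER_OF_TWO(tp2 + tp1 + tp0 + 2 * tq0 + tq1 + tq2 + q3, 3);
  *q1 = (uint8_t)ROUND_POWER_OF_TWO(tp1 + tp0 + tq0 + 2 * tq1 + tq2 + 2 * q3, 3);
  *q2 = (uint8_t)ROUND_POWER_OF_TWO(tp0 + tq0 + tq1 + 2 * tq2 + 3 * q3, 3);
}
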
diff --git a/vpx_dsp/loongarch/loopfilter_8_lsx.c b/vpx_dsp/loongarch/loopfilter_8_lsx.c
new file mode 100644
index 000000000..facf6f30e
--- /dev/null
+++ b/vpx_dsp/loongarch/loopfilter_8_lsx.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2022 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/loongarch/loopfilter_lsx.h"
+
+void vpx_lpf_horizontal_8_lsx(uint8_t *dst, int32_t stride,
+ const uint8_t *b_limit_ptr,
+ const uint8_t *limit_ptr,
+ const uint8_t *thresh_ptr) {
+ __m128i mask, hev, flat, thresh, b_limit, limit;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
+ __m128i p2_filter8, p1_filter8, p0_filter8;
+ __m128i q0_filter8, q1_filter8, q2_filter8;
+ __m128i p3_l, p2_l, p1_l, p0_l, q3_l, q2_l, q1_l, q0_l;
+ __m128i zero = __lsx_vldi(0);
+
+ int32_t stride2 = stride << 1;
+ int32_t stride3 = stride2 + stride;
+ int32_t stride4 = stride2 << 1;
+
+ /* load vector elements */
+ DUP4_ARG2(__lsx_vldx, dst, -stride4, dst, -stride3, dst, -stride2, dst,
+ -stride, p3, p2, p1, p0);
+ q0 = __lsx_vld(dst, 0);
+ DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
+ q3 = __lsx_vldx(dst, stride3);
+
+ thresh = __lsx_vreplgr2vr_b(*thresh_ptr);
+ b_limit = __lsx_vreplgr2vr_b(*b_limit_ptr);
+ limit = __lsx_vreplgr2vr_b(*limit_ptr);
+
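+  /* mask and hev */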
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+ mask, flat);
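+  /* flat4 */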
+ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
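+  /* filter4 */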
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+
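+  /* keep only the low 64 bits of flat: this call filters 8 pixels */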
+ flat = __lsx_vilvl_d(zero, flat);
+
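+  /* if flat is zero for all pixels, then no need to calculate other filter */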
+ if (__lsx_bz_v(flat)) {
+ __lsx_vstelm_d(p1_out, dst - stride2, 0, 0);
+ __lsx_vstelm_d(p0_out, dst - stride, 0, 0);
+ __lsx_vstelm_d(q0_out, dst, 0, 0);
+ __lsx_vstelm_d(q1_out, dst + stride, 0, 0);
+ } else {
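+    /* widen pixels to 16 bits for the filter8 arithmetic */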
+ DUP4_ARG2(__lsx_vilvl_b, zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l,
+ p1_l, p0_l);
+ DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l,
+ q2_l, q3_l);
+ VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filter8,
+ p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8);
+
+ /* convert 16 bit output data into 8 bit */
+ DUP4_ARG2(__lsx_vpickev_b, zero, p2_filter8, zero, p1_filter8, zero,
+ p0_filter8, zero, q0_filter8, p2_filter8, p1_filter8, p0_filter8,
+ q0_filter8);
+ DUP2_ARG2(__lsx_vpickev_b, zero, q1_filter8, zero, q2_filter8, q1_filter8,
+ q2_filter8);
+ DUP4_ARG3(__lsx_vbitsel_v, p2, p2_filter8, flat, p1_out, p1_filter8, flat,
+ p0_out, p0_filter8, flat, q0_out, q0_filter8, flat, p2_out,
+ p1_out, p0_out, q0_out);
+ DUP2_ARG3(__lsx_vbitsel_v, q1_out, q1_filter8, flat, q2, q2_filter8, flat,
+ q1_out, q2_out);
+ dst -= stride3;
+
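+    /* store 6 filtered pixels p2 - q2 */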
+ __lsx_vstelm_d(p2_out, dst, 0, 0);
+ __lsx_vstelm_d(p1_out, dst + stride, 0, 0);
+ __lsx_vstelm_d(p0_out, dst + stride2, 0, 0);
+ __lsx_vstelm_d(q0_out, dst + stride3, 0, 0);
+
+ dst += stride4;
+ __lsx_vstelm_d(q1_out, dst, 0, 0);
+ dst += stride;
+ __lsx_vstelm_d(q2_out, dst, 0, 0);
+ }
+}
+
+void vpx_lpf_vertical_8_lsx(uint8_t *dst, int32_t stride,
+ const uint8_t *b_limit_ptr,
+ const uint8_t *limit_ptr,
+ const uint8_t *thresh_ptr) {
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i p1_out, p0_out, q0_out, q1_out;
+ __m128i flat, mask, hev, thresh, b_limit, limit;
+ __m128i p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
+ __m128i p2_filt8_l, p1_filt8_l, p0_filt8_l;
+ __m128i q0_filt8_l, q1_filt8_l, q2_filt8_l;
+ __m128i vec0, vec1, vec2, vec3, vec4;
+ __m128i zero = __lsx_vldi(0);
+
+ int32_t stride2 = stride << 1;
+ int32_t stride3 = stride2 + stride;
+ int32_t stride4 = stride2 << 1;
+ uint8_t *dst_tmp = dst - 4;
+
+ /* load vector elements */
+ p3 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, p2, p1);
+ p0 = __lsx_vldx(dst_tmp, stride3);
+ dst_tmp += stride4;
+ q0 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, q1, q2);
+ q3 = __lsx_vldx(dst_tmp, stride3);
+
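+  /* transpose the 8x8 block so each output row holds one pixel column */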
+ LSX_TRANSPOSE8x8_B(p3, p2, p1, p0, q0, q1, q2, q3, p3, p2, p1, p0, q0, q1, q2,
+ q3);
+
+ thresh = __lsx_vreplgr2vr_b(*thresh_ptr);
+ b_limit = __lsx_vreplgr2vr_b(*b_limit_ptr);
+ limit = __lsx_vreplgr2vr_b(*limit_ptr);
+
+ /* mask and hev */
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+ mask, flat);
+ /* flat4 */
+ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ /* filter4 */
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+
+ flat = __lsx_vilvl_d(zero, flat);
+
+ /* if flat is zero for all pixels, then no need to calculate other filter */
+ if (__lsx_bz_v(flat)) {
+    /* Store 4 pixels p1 - q1 */
+ DUP2_ARG2(__lsx_vilvl_b, p0_out, p1_out, q1_out, q0_out, vec0, vec1);
+ vec2 = __lsx_vilvl_h(vec1, vec0);
+ vec3 = __lsx_vilvh_h(vec1, vec0);
+
+ dst -= 2;
+ __lsx_vstelm_w(vec2, dst, 0, 0);
+ __lsx_vstelm_w(vec2, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec2, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec2, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(vec3, dst, 0, 0);
+ __lsx_vstelm_w(vec3, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec3, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec3, dst + stride3, 0, 3);
+ } else {
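+    /* widen pixels to 16 bits for the filter8 arithmetic */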
+ DUP4_ARG2(__lsx_vilvl_b, zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l,
+ p1_l, p0_l);
+ DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l,
+ q2_l, q3_l);
+ VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+ p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
+ /* convert 16 bit output data into 8 bit */
+ DUP4_ARG2(__lsx_vpickev_b, p2_filt8_l, p2_filt8_l, p1_filt8_l, p1_filt8_l,
+ p0_filt8_l, p0_filt8_l, q0_filt8_l, q0_filt8_l, p2_filt8_l,
+ p1_filt8_l, p0_filt8_l, q0_filt8_l);
+ DUP2_ARG2(__lsx_vpickev_b, q1_filt8_l, q1_filt8_l, q2_filt8_l, q2_filt8_l,
+ q1_filt8_l, q2_filt8_l);
+ /* store pixel values */
+ p2 = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
+ p1 = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
+ p0 = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
+ q0 = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
+ q1 = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
+ q2 = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
+
+    /* Store 6 pixels p2 - q2 */
+ DUP2_ARG2(__lsx_vilvl_b, p1, p2, q0, p0, vec0, vec1);
+ vec2 = __lsx_vilvl_h(vec1, vec0);
+ vec3 = __lsx_vilvh_h(vec1, vec0);
+ vec4 = __lsx_vilvl_b(q2, q1);
+
+ dst -= 3;
+ __lsx_vstelm_w(vec2, dst, 0, 0);
+ __lsx_vstelm_h(vec4, dst, 4, 0);
+ dst += stride;
+ __lsx_vstelm_w(vec2, dst, 0, 1);
+ __lsx_vstelm_h(vec4, dst, 4, 1);
+ dst += stride;
+ __lsx_vstelm_w(vec2, dst, 0, 2);
+ __lsx_vstelm_h(vec4, dst, 4, 2);
+ dst += stride;
+ __lsx_vstelm_w(vec2, dst, 0, 3);
+ __lsx_vstelm_h(vec4, dst, 4, 3);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 0);
+ __lsx_vstelm_h(vec4, dst, 4, 4);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 1);
+ __lsx_vstelm_h(vec4, dst, 4, 5);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 2);
+ __lsx_vstelm_h(vec4, dst, 4, 6);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 3);
+ __lsx_vstelm_h(vec4, dst, 4, 7);
+ }
+}
diff --git a/vpx_dsp/vpx_dsp.mk b/vpx_dsp/vpx_dsp.mk
index eb530db5a..976c65272 100644
--- a/vpx_dsp/vpx_dsp.mk
+++ b/vpx_dsp/vpx_dsp.mk
@@ -189,7 +189,9 @@ DSP_SRCS-$(HAVE_DSPR2) += mips/loopfilter_mb_dspr2.c
 DSP_SRCS-$(HAVE_DSPR2) += mips/loopfilter_mb_horiz_dspr2.c
 DSP_SRCS-$(HAVE_DSPR2) += mips/loopfilter_mb_vert_dspr2.c
 
+DSP_SRCS-$(HAVE_LSX) += loongarch/loopfilter_lsx.h
 DSP_SRCS-$(HAVE_LSX) += loongarch/loopfilter_16_lsx.c
+DSP_SRCS-$(HAVE_LSX) += loongarch/loopfilter_8_lsx.c
 
 ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_NEON) += arm/highbd_loopfilter_neon.c
diff --git a/vpx_dsp/vpx_dsp_rtcd_defs.pl b/vpx_dsp/vpx_dsp_rtcd_defs.pl
index 9cd58d3b8..ce0780fda 100644
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -445,7 +445,7 @@ add_proto qw/void vpx_lpf_vertical_16_dual/, "uint8_t *s, int pitch, const uint8
 specialize qw/vpx_lpf_vertical_16_dual sse2 neon dspr2 msa lsx/;
 
 add_proto qw/void vpx_lpf_vertical_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_vertical_8 sse2 neon dspr2 msa/;
+specialize qw/vpx_lpf_vertical_8 sse2 neon dspr2 msa lsx/;
 
 add_proto qw/void vpx_lpf_vertical_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
 specialize qw/vpx_lpf_vertical_8_dual sse2 neon dspr2 msa/;
@@ -463,7 +463,7 @@ add_proto qw/void vpx_lpf_horizontal_16_dual/, "uint8_t *s, int pitch, const uin
 specialize qw/vpx_lpf_horizontal_16_dual sse2 avx2 neon dspr2 msa lsx/;
 
 add_proto qw/void vpx_lpf_horizontal_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_horizontal_8 sse2 neon dspr2 msa/;
+specialize qw/vpx_lpf_horizontal_8 sse2 neon dspr2 msa lsx/;
 
 add_proto qw/void vpx_lpf_horizontal_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
 specialize qw/vpx_lpf_horizontal_8_dual sse2 neon dspr2 msa/;
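
With the specialize lines above, the generated vpx_dsp_rtcd.h resolves the generic vpx_lpf_*_8 entry points to the new _lsx implementations on LoongArch targets that report LSX support, so callers keep using the generic names. A minimal call-site sketch, assuming a configured libvpx build (filter_edge_sketch is an illustrative wrapper, not part of the library):

#include <stdint.h>
#include "./vpx_dsp_rtcd.h"

/* Filters one 8-pixel horizontal block edge in place; the generic name
 * dispatches to vpx_lpf_horizontal_8_lsx when LSX is available. */
static void filter_edge_sketch(uint8_t *s, int pitch, const uint8_t *blimit,
                               const uint8_t *limit, const uint8_t *thresh) {
  vpx_lpf_horizontal_8(s, pitch, blimit, limit, thresh);
}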