From 1c39c625264fa64db1c573cbac1f3a4f24c660d3 Mon Sep 17 00:00:00 2001 From: yuanhecai Date: Sun, 24 Apr 2022 10:34:21 +0800 Subject: vp8[loongarch]: Optimize vp8_sixtap_predict4x4 1. vp8_sixtap_predict4x4 Bug: webm:1755 Change-Id: If7d844496ef2cfe2252f2ef12bb7cded63ad03dd --- vp8/common/loongarch/sixtap_filter_lsx.c | 745 ++++++++++++++++++++++++++++++- vp8/common/rtcd_defs.pl | 2 +- 2 files changed, 743 insertions(+), 4 deletions(-) diff --git a/vp8/common/loongarch/sixtap_filter_lsx.c b/vp8/common/loongarch/sixtap_filter_lsx.c index 75fe533d9..a23ed16d2 100644 --- a/vp8/common/loongarch/sixtap_filter_lsx.c +++ b/vp8/common/loongarch/sixtap_filter_lsx.c @@ -50,9 +50,9 @@ static const uint8_t vp8_mc_filt_mask_arr[16 * 3] = { __m128i vec0_m, vec1_m, vec2_m; \ __m128i hz_out_m; \ \ - DUP2_ARG3(__lsx_vshuf_b, src0, src1, mask0, src0, src1, mask1, vec0_m, \ + DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask0, src1, src0, mask1, vec0_m, \ vec1_m); \ - vec2_m = __lsx_vshuf_b(src0, src1, mask2); \ + vec2_m = __lsx_vshuf_b(src1, src0, mask2); \ hz_out_m = DPADD_H3(vec0_m, vec1_m, vec2_m, filt_h0, filt_h1, filt_h2); \ \ hz_out_m = __lsx_vsrari_h(hz_out_m, VP8_FILTER_SHIFT); \ @@ -61,6 +61,24 @@ static const uint8_t vp8_mc_filt_mask_arr[16 * 3] = { hz_out_m; \ }) +#define HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ + mask2, filt0, filt1, filt2, out0, out1) \ + { \ + __m128i vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m; \ + \ + DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask0, src3, src2, mask0, vec0_m, \ + vec1_m); \ + DUP2_ARG2(__lsx_vdp2_h_b, vec0_m, filt0, vec1_m, filt0, out0, out1); \ + DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask1, src3, src2, mask1, vec2_m, \ + vec3_m); \ + DUP2_ARG3(__lsx_vdp2add_h_b, out0, vec2_m, filt1, out1, vec3_m, filt1, \ + out0, out1); \ + DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask2, src3, src2, mask2, vec4_m, \ + vec5_m); \ + DUP2_ARG3(__lsx_vdp2add_h_b, out0, vec4_m, filt2, out1, vec5_m, filt2, \ + out0, out1); \ + } + #define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ mask2, filt0, filt1, filt2, out0, out1, \ out2, out3) \ @@ -104,7 +122,7 @@ static const uint8_t vp8_mc_filt_mask_arr[16 * 3] = { __m128i vec0_m, vec1_m; \ __m128i hz_out_m; \ \ - DUP2_ARG3(__lsx_vshuf_b, src0, src1, mask0, src0, src1, mask1, vec0_m, \ + DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask0, src1, src0, mask1, vec0_m, \ vec1_m); \ hz_out_m = FILT_4TAP_DPADD_H(vec0_m, vec1_m, filt_h0, filt_h1); \ hz_out_m = __lsx_vsrari_h(hz_out_m, VP8_FILTER_SHIFT); \ @@ -113,6 +131,20 @@ static const uint8_t vp8_mc_filt_mask_arr[16 * 3] = { hz_out_m; \ }) +#define HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ + filt0, filt1, out0, out1) \ + { \ + __m128i vec0_m, vec1_m, vec2_m, vec3_m; \ + \ + DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask0, src3, src2, mask0, vec0_m, \ + vec1_m); \ + DUP2_ARG2(__lsx_vdp2_h_b, vec0_m, filt0, vec1_m, filt0, out0, out1); \ + DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask1, src3, src2, mask1, vec2_m, \ + vec3_m); \ + DUP2_ARG3(__lsx_vdp2add_h_b, out0, vec2_m, filt1, out1, vec3_m, filt1, \ + out0, out1); \ + } + #define HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ filt0, filt1, out0, out1, out2, out3) \ ({ \ @@ -133,6 +165,107 @@ static const uint8_t vp8_mc_filt_mask_arr[16 * 3] = { out3); \ }) +static inline void common_hz_6t_4x4_lsx(uint8_t *RESTRICT src, + int32_t src_stride, + uint8_t *RESTRICT dst, + int32_t dst_stride, + const int8_t *filter) { + __m128i src0, src1, src2, src3, filt0, filt1, filt2; + __m128i 
mask0, mask1, mask2, out0, out1; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16); + src -= 2; + + DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1); + filt2 = __lsx_vldrepl_h(filter, 4); + + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2); + src0 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src1, src2); + src3 = __lsx_vldx(src, src_stride_x3); + + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0, + filt1, filt2, out0, out1); + out0 = __lsx_vssrarni_b_h(out1, out0, VP8_FILTER_SHIFT); + out0 = __lsx_vxori_b(out0, 128); + + __lsx_vstelm_w(out0, dst, 0, 0); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 1); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 2); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 3); +} + +static void common_hz_6t_4x8_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter) { + __m128i src0, src1, src2, src3, filt0, filt1, filt2; + __m128i mask0, mask1, mask2, out0, out1, out2, out3; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride_x2 << 1; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16); + src -= 2; + + DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1); + filt2 = __lsx_vldrepl_h(filter, 4); + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2); + + src0 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src1, src2); + src3 = __lsx_vldx(src, src_stride_x3); + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + src += src_stride_x4; + HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0, + filt1, filt2, out0, out1); + + src0 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src1, src2); + src3 = __lsx_vldx(src, src_stride_x3); + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0, + filt1, filt2, out2, out3); + + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, VP8_FILTER_SHIFT, out3, out2, + VP8_FILTER_SHIFT, out0, out1); + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1); + __lsx_vstelm_w(out0, dst, 0, 0); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 1); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 2); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 3); + dst += dst_stride; + + __lsx_vstelm_w(out1, dst, 0, 0); + dst += dst_stride; + __lsx_vstelm_w(out1, dst, 0, 1); + dst += dst_stride; + __lsx_vstelm_w(out1, dst, 0, 2); + dst += dst_stride; + __lsx_vstelm_w(out1, dst, 0, 3); +} + +static void common_hz_6t_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) { + if (height == 4) { + common_hz_6t_4x4_lsx(src, src_stride, dst, dst_stride, filter); + } else if (height == 8) { + common_hz_6t_4x8_lsx(src, src_stride, dst, dst_stride, filter); + } +} + static void common_hz_6t_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter, int32_t height) { @@ -254,6 +387,64 @@ static void 
common_hz_6t_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, } } +static void common_vt_6t_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8; + __m128i src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r; + __m128i src87_r, src2110, src4332, src6554, src8776, filt0, filt1, filt2; + __m128i out0, out1; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + + DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1); + filt2 = __lsx_vldrepl_h(filter, 4); + + DUP2_ARG2(__lsx_vldx, src, -src_stride_x2, src, -src_stride, src0, src1); + src2 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src3, src4); + src += src_stride_x3; + + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src3, src2, src4, src3, + src10_r, src21_r, src32_r, src43_r); + DUP2_ARG2(__lsx_vilvl_d, src21_r, src10_r, src43_r, src32_r, src2110, + src4332); + DUP2_ARG2(__lsx_vxori_b, src2110, 128, src4332, 128, src2110, src4332); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + src5 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src6, src7); + src8 = __lsx_vldx(src, src_stride_x3); + src += src_stride_x4; + + DUP4_ARG2(__lsx_vilvl_b, src5, src4, src6, src5, src7, src6, src8, src7, + src54_r, src65_r, src76_r, src87_r); + DUP2_ARG2(__lsx_vilvl_d, src65_r, src54_r, src87_r, src76_r, src6554, + src8776); + DUP2_ARG2(__lsx_vxori_b, src6554, 128, src8776, 128, src6554, src8776); + out0 = DPADD_H3(src2110, src4332, src6554, filt0, filt1, filt2); + out1 = DPADD_H3(src4332, src6554, src8776, filt0, filt1, filt2); + + out0 = __lsx_vssrarni_b_h(out1, out0, VP8_FILTER_SHIFT); + out0 = __lsx_vxori_b(out0, 128); + + __lsx_vstelm_w(out0, dst, 0, 0); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 1); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 2); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 3); + dst += dst_stride; + + src2110 = src6554; + src4332 = src8776; + src4 = src8; + } +} + static void common_vt_6t_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter, int32_t height) { @@ -395,6 +586,92 @@ static void common_vt_6t_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, } } +static void common_hv_6ht_6vt_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) { + uint32_t loop_cnt; + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, tmp0, tmp1; + __m128i filt_hz0, filt_hz1, filt_hz2, mask0, mask1, mask2; + __m128i hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; + __m128i hz_out7, filt_vt0, filt_vt1, filt_vt2, out0, out1, out2, out3; + __m128i shuff = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 }; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16); + src -= 2; + + DUP2_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filt_hz0, + filt_hz1); + filt_hz2 = __lsx_vldrepl_h(filter_horiz, 4); + DUP2_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filt_vt0, + filt_vt1); + filt_vt2 = __lsx_vldrepl_h(filter_vert, 4); + + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2); + + 
DUP2_ARG2(__lsx_vldx, src, -src_stride_x2, src, -src_stride, src0, src1); + src2 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src3, src4); + src += src_stride_x3; + + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + src4 = __lsx_vxori_b(src4, 128); + + hz_out0 = HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out2 = HORIZ_6TAP_FILT(src2, src3, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out1 = __lsx_vshuf_b(hz_out2, hz_out0, shuff); + hz_out3 = HORIZ_6TAP_FILT(src3, src4, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + DUP2_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out3, hz_out2, out0, out1); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + src5 = __lsx_vld(src, 0); + src6 = __lsx_vldx(src, src_stride); + src += src_stride_x2; + + DUP2_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src5, src6); + hz_out5 = HORIZ_6TAP_FILT(src5, src6, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out4 = __lsx_vshuf_b(hz_out5, hz_out3, shuff); + + src7 = __lsx_vld(src, 0); + src8 = __lsx_vldx(src, src_stride); + src += src_stride_x2; + + DUP2_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src7, src8); + hz_out7 = HORIZ_6TAP_FILT(src7, src8, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out6 = __lsx_vshuf_b(hz_out7, hz_out5, shuff); + + out2 = __lsx_vpackev_b(hz_out5, hz_out4); + tmp0 = DPADD_H3(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); + + out3 = __lsx_vpackev_b(hz_out7, hz_out6); + tmp1 = DPADD_H3(out1, out2, out3, filt_vt0, filt_vt1, filt_vt2); + + tmp0 = __lsx_vssrarni_b_h(tmp1, tmp0, 7); + tmp0 = __lsx_vxori_b(tmp0, 128); + __lsx_vstelm_w(tmp0, dst, 0, 0); + dst += dst_stride; + __lsx_vstelm_w(tmp0, dst, 0, 1); + dst += dst_stride; + __lsx_vstelm_w(tmp0, dst, 0, 2); + dst += dst_stride; + __lsx_vstelm_w(tmp0, dst, 0, 3); + dst += dst_stride; + + hz_out3 = hz_out7; + out0 = out2; + out1 = out3; + } +} + static void common_hv_6ht_6vt_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, @@ -506,6 +783,102 @@ static void common_hv_6ht_6vt_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, filter_horiz, filter_vert, height); } +static void common_hz_4t_4x4_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter) { + __m128i src0, src1, src2, src3, filt0, filt1, mask0, mask1; + __m128i out0, out1; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16); + src -= 1; + + DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1); + mask1 = __lsx_vaddi_bu(mask0, 2); + + src0 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src1, src2); + src3 = __lsx_vldx(src, src_stride_x3); + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, filt1, + out0, out1); + + out0 = __lsx_vssrarni_b_h(out1, out0, VP8_FILTER_SHIFT); + out0 = __lsx_vxori_b(out0, 128); + + __lsx_vstelm_w(out0, dst, 0, 0); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 1); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 2); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 3); +} + +static void common_hz_4t_4x8_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const 
int8_t *filter) { + __m128i src0, src1, src2, src3, filt0, filt1, mask0, mask1; + __m128i out0, out1, out2, out3; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16); + src -= 1; + + DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1); + mask1 = __lsx_vaddi_bu(mask0, 2); + + src0 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src1, src2); + src3 = __lsx_vldx(src, src_stride_x3); + src += src_stride_x4; + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, filt1, + out0, out1); + + src0 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src1, src2); + src3 = __lsx_vldx(src, src_stride_x3); + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, filt1, + out2, out3); + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, VP8_FILTER_SHIFT, out3, out2, + VP8_FILTER_SHIFT, out0, out1); + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1); + __lsx_vstelm_w(out0, dst, 0, 0); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 1); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 2); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 3); + dst += dst_stride; + + __lsx_vstelm_w(out1, dst, 0, 0); + dst += dst_stride; + __lsx_vstelm_w(out1, dst, 0, 1); + dst += dst_stride; + __lsx_vstelm_w(out1, dst, 0, 2); + dst += dst_stride; + __lsx_vstelm_w(out1, dst, 0, 3); +} + +static void common_hz_4t_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) { + if (height == 4) { + common_hz_4t_4x4_lsx(src, src_stride, dst, dst_stride, filter); + } else if (height == 8) { + common_hz_4t_4x8_lsx(src, src_stride, dst, dst_stride, filter); + } +} + static void common_hz_4t_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter, int32_t height) { @@ -597,6 +970,55 @@ static void common_hz_4t_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, } } +static void common_vt_4t_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + __m128i src0, src1, src2, src3, src4, src5; + __m128i src10_r, src32_r, src54_r, src21_r, src43_r, src65_r; + __m128i src2110, src4332, filt0, filt1, out0, out1; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + + DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1); + DUP2_ARG2(__lsx_vldx, src, -src_stride, src, src_stride, src0, src2); + src1 = __lsx_vld(src, 0); + src += src_stride_x2; + + DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_r, src21_r); + + src2110 = __lsx_vilvl_d(src21_r, src10_r); + src2110 = __lsx_vxori_b(src2110, 128); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + src3 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src4, src5); + src += src_stride_x3; + DUP2_ARG2(__lsx_vilvl_b, src3, src2, src4, src3, src32_r, src43_r); + src4332 = __lsx_vilvl_d(src43_r, src32_r); + src4332 = __lsx_vxori_b(src4332, 128); + out0 = FILT_4TAP_DPADD_H(src2110, src4332, filt0, filt1); + + src2 = __lsx_vld(src, 0); 
+ src += src_stride; + DUP2_ARG2(__lsx_vilvl_b, src5, src4, src2, src5, src54_r, src65_r); + src2110 = __lsx_vilvl_d(src65_r, src54_r); + src2110 = __lsx_vxori_b(src2110, 128); + out1 = FILT_4TAP_DPADD_H(src4332, src2110, filt0, filt1); + out0 = __lsx_vssrarni_b_h(out1, out0, VP8_FILTER_SHIFT); + out0 = __lsx_vxori_b(out0, 128); + + __lsx_vstelm_w(out0, dst, 0, 0); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 1); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 2); + dst += dst_stride; + __lsx_vstelm_w(out0, dst, 0, 3); + dst += dst_stride; + } +} + static void common_vt_4t_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter, int32_t height) { @@ -719,6 +1141,74 @@ static void common_vt_4t_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, } } +static void common_hv_4ht_4vt_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) { + uint32_t loop_cnt; + __m128i src0, src1, src2, src3, src4, src5, src6, filt_hz0, filt_hz1; + __m128i mask0, mask1, filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2; + __m128i hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5; + __m128i shuff = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 }; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16); + src -= 1; + + DUP2_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filt_hz0, + filt_hz1); + mask1 = __lsx_vaddi_bu(mask0, 2); + + src1 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, -src_stride, src, src_stride, src0, src2); + src += src_stride_x2; + + DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1); + src2 = __lsx_vxori_b(src2, 128); + hz_out0 = HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_hz0, filt_hz1); + hz_out1 = HORIZ_4TAP_FILT(src1, src2, mask0, mask1, filt_hz0, filt_hz1); + vec0 = __lsx_vpackev_b(hz_out1, hz_out0); + + DUP2_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filt_vt0, + filt_vt1); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + src3 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src4, src5); + src6 = __lsx_vldx(src, src_stride_x3); + src += src_stride_x4; + + DUP2_ARG2(__lsx_vxori_b, src3, 128, src4, 128, src3, src4); + hz_out3 = HORIZ_4TAP_FILT(src3, src4, mask0, mask1, filt_hz0, filt_hz1); + hz_out2 = __lsx_vshuf_b(hz_out3, hz_out1, shuff); + vec1 = __lsx_vpackev_b(hz_out3, hz_out2); + tmp0 = FILT_4TAP_DPADD_H(vec0, vec1, filt_vt0, filt_vt1); + + DUP2_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src5, src6); + hz_out5 = HORIZ_4TAP_FILT(src5, src6, mask0, mask1, filt_hz0, filt_hz1); + hz_out4 = __lsx_vshuf_b(hz_out5, hz_out3, shuff); + vec2 = __lsx_vpackev_b(hz_out5, hz_out4); + tmp1 = FILT_4TAP_DPADD_H(vec1, vec2, filt_vt0, filt_vt1); + + tmp0 = __lsx_vssrarni_b_h(tmp1, tmp0, 7); + tmp0 = __lsx_vxori_b(tmp0, 128); + __lsx_vstelm_w(tmp0, dst, 0, 0); + dst += dst_stride; + __lsx_vstelm_w(tmp0, dst, 0, 1); + dst += dst_stride; + __lsx_vstelm_w(tmp0, dst, 0, 2); + dst += dst_stride; + __lsx_vstelm_w(tmp0, dst, 0, 3); + dst += dst_stride; + + hz_out1 = hz_out5; + vec0 = vec2; + } +} + static inline void common_hv_4ht_4vt_8w_lsx( uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, @@ -804,6 +1294,82 @@ static void 
common_hv_4ht_4vt_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, filter_horiz, filter_vert, height); } +static void common_hv_6ht_4vt_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) { + uint32_t loop_cnt; + __m128i src0, src1, src2, src3, src4, src5, src6; + __m128i filt_hz0, filt_hz1, filt_hz2, mask0, mask1, mask2; + __m128i filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2; + __m128i hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5; + __m128i shuff = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 }; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16); + src -= 2; + + DUP2_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filt_hz0, + filt_hz1); + filt_hz2 = __lsx_vldrepl_h(filter_horiz, 4); + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2); + + src1 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, -src_stride, src, src_stride, src0, src2); + src += src_stride_x2; + + DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1); + src2 = __lsx_vxori_b(src2, 128); + + hz_out0 = HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out1 = HORIZ_6TAP_FILT(src1, src2, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + vec0 = __lsx_vpackev_b(hz_out1, hz_out0); + + DUP2_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filt_vt0, + filt_vt1); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + src3 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src4, src5); + src6 = __lsx_vldx(src, src_stride_x3); + src += src_stride_x4; + DUP4_ARG2(__lsx_vxori_b, src3, 128, src4, 128, src5, 128, src6, 128, src3, + src4, src5, src6); + + hz_out3 = HORIZ_6TAP_FILT(src3, src4, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out2 = __lsx_vshuf_b(hz_out3, hz_out1, shuff); + vec1 = __lsx_vpackev_b(hz_out3, hz_out2); + tmp0 = FILT_4TAP_DPADD_H(vec0, vec1, filt_vt0, filt_vt1); + + hz_out5 = HORIZ_6TAP_FILT(src5, src6, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out4 = __lsx_vshuf_b(hz_out5, hz_out3, shuff); + vec2 = __lsx_vpackev_b(hz_out5, hz_out4); + tmp1 = FILT_4TAP_DPADD_H(vec1, vec2, filt_vt0, filt_vt1); + + DUP2_ARG3(__lsx_vssrarni_b_h, tmp0, tmp0, 7, tmp1, tmp1, 7, tmp0, tmp1); + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1); + + __lsx_vstelm_w(tmp0, dst, 0, 0); + dst += dst_stride; + __lsx_vstelm_w(tmp0, dst, 0, 1); + dst += dst_stride; + __lsx_vstelm_w(tmp1, dst, 0, 0); + dst += dst_stride; + __lsx_vstelm_w(tmp1, dst, 0, 1); + dst += dst_stride; + + hz_out1 = hz_out5; + vec0 = vec2; + } +} + static inline void common_hv_6ht_4vt_8w_lsx( uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, @@ -895,6 +1461,82 @@ static void common_hv_6ht_4vt_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, filter_horiz, filter_vert, height); } +static void common_hv_4ht_6vt_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) { + uint32_t loop_cnt; + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8; + __m128i filt_hz0, filt_hz1, filt_vt0, filt_vt1, filt_vt2, mask0, mask1; + __m128i hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, 
hz_out5, hz_out6; + __m128i hz_out7, tmp0, tmp1, out0, out1, out2, out3; + __m128i shuff = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 }; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16); + + src -= 1; + + DUP2_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filt_hz0, + filt_hz1); + mask1 = __lsx_vaddi_bu(mask0, 2); + + DUP4_ARG2(__lsx_vldx, src, -src_stride_x2, src, -src_stride, src, src_stride, + src, src_stride_x2, src0, src1, src3, src4); + src2 = __lsx_vld(src, 0); + src += src_stride_x3; + + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + src4 = __lsx_vxori_b(src4, 128); + hz_out0 = HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_hz0, filt_hz1); + hz_out2 = HORIZ_4TAP_FILT(src2, src3, mask0, mask1, filt_hz0, filt_hz1); + hz_out3 = HORIZ_4TAP_FILT(src3, src4, mask0, mask1, filt_hz0, filt_hz1); + hz_out1 = __lsx_vshuf_b(hz_out2, hz_out0, shuff); + DUP2_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out3, hz_out2, out0, out1); + + DUP2_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filt_vt0, + filt_vt1); + filt_vt2 = __lsx_vldrepl_h(filter_vert, 4); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + src5 = __lsx_vld(src, 0); + DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src6, src7); + src8 = __lsx_vldx(src, src_stride_x3); + DUP4_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src7, 128, src8, 128, src5, + src6, src7, src8); + src += src_stride_x4; + + hz_out5 = HORIZ_4TAP_FILT(src5, src6, mask0, mask1, filt_hz0, filt_hz1); + hz_out4 = __lsx_vshuf_b(hz_out5, hz_out3, shuff); + out2 = __lsx_vpackev_b(hz_out5, hz_out4); + tmp0 = DPADD_H3(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); + + hz_out7 = HORIZ_4TAP_FILT(src7, src8, mask0, mask1, filt_hz0, filt_hz1); + hz_out6 = __lsx_vshuf_b(hz_out7, hz_out5, shuff); + out3 = __lsx_vpackev_b(hz_out7, hz_out6); + tmp1 = DPADD_H3(out1, out2, out3, filt_vt0, filt_vt1, filt_vt2); + + tmp0 = __lsx_vssrarni_b_h(tmp1, tmp0, 7); + tmp0 = __lsx_vxori_b(tmp0, 128); + __lsx_vstelm_w(tmp0, dst, 0, 0); + dst += dst_stride; + __lsx_vstelm_w(tmp0, dst, 0, 1); + dst += dst_stride; + __lsx_vstelm_w(tmp0, dst, 0, 2); + dst += dst_stride; + __lsx_vstelm_w(tmp0, dst, 0, 3); + dst += dst_stride; + + hz_out3 = hz_out7; + out0 = out2; + out1 = out3; + } +} + static inline void common_hv_4ht_6vt_8w_lsx( uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, @@ -1000,6 +1642,103 @@ typedef void (*PVp8SixtapPredictFunc2)(uint8_t *RESTRICT src, int32_t dst_stride, const int8_t *filter, int32_t height); +void vp8_sixtap_predict4x4_lsx(uint8_t *RESTRICT src, int32_t src_stride, + int32_t xoffset, int32_t yoffset, + uint8_t *RESTRICT dst, int32_t dst_stride) { + const int8_t *h_filter = vp8_subpel_filters_lsx[xoffset - 1]; + const int8_t *v_filter = vp8_subpel_filters_lsx[yoffset - 1]; + + static PVp8SixtapPredictFunc1 Predict4x4Funcs1[4] = { + common_hv_6ht_6vt_4w_lsx, + common_hv_6ht_4vt_4w_lsx, + common_hv_4ht_6vt_4w_lsx, + common_hv_4ht_4vt_4w_lsx, + }; + + static PVp8SixtapPredictFunc2 Predict4x4Funcs2[4] = { common_vt_6t_4w_lsx, + common_vt_4t_4w_lsx, + common_hz_6t_4w_lsx, + common_hz_4t_4w_lsx }; + if (yoffset < 8 && xoffset < 8) { + if (yoffset) { + if (xoffset) { + switch (xoffset & 1) { + case 0: + switch (yoffset & 1) { + case 0: + Predict4x4Funcs1[0](src, src_stride, dst, 
dst_stride, h_filter,
+                                    v_filter, 4);
+                break;
+              case 1:
+                Predict4x4Funcs1[1](src, src_stride, dst, dst_stride, h_filter,
+                                    v_filter + 1, 4);
+                break;
+            }
+            break;
+
+          case 1:
+            switch (yoffset & 1) {
+              case 0:
+                Predict4x4Funcs1[2](src, src_stride, dst, dst_stride,
+                                    h_filter + 1, v_filter, 4);
+                break;
+
+              case 1:
+                Predict4x4Funcs1[3](src, src_stride, dst, dst_stride,
+                                    h_filter + 1, v_filter + 1, 4);
+                break;
+            }
+            break;
+        }
+      } else {
+        switch (yoffset & 1) {
+          case 0:
+            Predict4x4Funcs2[0](src, src_stride, dst, dst_stride, v_filter, 4);
+            break;
+
+          case 1:
+            Predict4x4Funcs2[1](src, src_stride, dst, dst_stride, v_filter + 1,
+                                4);
+            break;
+        }
+      }
+    } else {
+      switch (xoffset) {
+        case 0: {
+          __m128i tp0, tp1, tp2, tp3; /* 4x4 copy: both offsets are zero */
+          tp0 = __lsx_vldrepl_w(src, 0);
+          src += src_stride;
+          tp1 = __lsx_vldrepl_w(src, 0);
+          src += src_stride;
+          tp2 = __lsx_vldrepl_w(src, 0);
+          src += src_stride;
+          tp3 = __lsx_vldrepl_w(src, 0);
+
+          __lsx_vstelm_w(tp0, dst, 0, 0);
+          dst += dst_stride;
+          __lsx_vstelm_w(tp1, dst, 0, 0);
+          dst += dst_stride;
+          __lsx_vstelm_w(tp2, dst, 0, 0);
+          dst += dst_stride;
+          __lsx_vstelm_w(tp3, dst, 0, 0);
+          break;
+        }
+        case 2:
+        case 4:
+        case 6:
+          Predict4x4Funcs2[2](src, src_stride, dst, dst_stride, h_filter, 4);
+          break;
+      }
+      switch (xoffset & 1) {
+        case 1:
+          Predict4x4Funcs2[3](src, src_stride, dst, dst_stride, h_filter + 1,
+                              4);
+          break;
+      }
+    }
+  }
+}
+
 void vp8_sixtap_predict8x8_lsx(uint8_t *RESTRICT src, int32_t src_stride,
                                int32_t xoffset, int32_t yoffset,
                                uint8_t *RESTRICT dst, int32_t dst_stride) {
diff --git a/vp8/common/rtcd_defs.pl b/vp8/common/rtcd_defs.pl
index 7bc866faa..739a61284 100644
--- a/vp8/common/rtcd_defs.pl
+++ b/vp8/common/rtcd_defs.pl
@@ -155,7 +155,7 @@ add_proto qw/void vp8_sixtap_predict8x4/, "unsigned char *src_ptr, int src_pixel
 specialize qw/vp8_sixtap_predict8x4 sse2 ssse3 neon dspr2 msa mmi/;
 
 add_proto qw/void vp8_sixtap_predict4x4/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
-specialize qw/vp8_sixtap_predict4x4 mmx ssse3 neon dspr2 msa mmi/;
+specialize qw/vp8_sixtap_predict4x4 mmx ssse3 neon dspr2 msa mmi lsx/;
 
 add_proto qw/void vp8_bilinear_predict16x16/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
 specialize qw/vp8_bilinear_predict16x16 sse2 ssse3 neon msa/;
-- 
cgit v1.2.3
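
Note (not part of the patch): rtcd treats vp8_sixtap_predict4x4_lsx as a drop-in
specialization of vp8_sixtap_predict4x4, so every (xoffset, yoffset) branch above
must stay bit-exact with vp8_sixtap_predict4x4_c. A quick standalone check of that
property could look like the sketch below. The two prototypes are copied from the
add_proto line in rtcd_defs.pl; the buffer geometry, the fixed seed and the main()
harness are illustrative assumptions, and the program only links on a LoongArch
target built with LSX enabled (e.g. against libvpx.a).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Prototypes as declared by rtcd_defs.pl (normally pulled in via vp8_rtcd.h). */
void vp8_sixtap_predict4x4_c(unsigned char *src_ptr, int src_pixels_per_line,
                             int xoffset, int yoffset, unsigned char *dst_ptr,
                             int dst_pitch);
void vp8_sixtap_predict4x4_lsx(unsigned char *src_ptr, int src_pixels_per_line,
                               int xoffset, int yoffset, unsigned char *dst_ptr,
                               int dst_pitch);

int main(void) {
  enum { kStride = 32, kRows = 16 };
  unsigned char src[kStride * kRows];
  unsigned char ref[4 * 4], out[4 * 4];
  int i, xoffset, yoffset;

  srand(0);
  for (i = 0; i < (int)sizeof(src); ++i) {
    src[i] = (unsigned char)(rand() & 0xff);
  }

  for (yoffset = 0; yoffset < 8; ++yoffset) {
    for (xoffset = 0; xoffset < 8; ++xoffset) {
      /* Start 5 rows/columns into the buffer so the six-tap footprint
       * (2 pixels before and 3 after each output pixel) stays in bounds. */
      unsigned char *src_ptr = src + 5 * kStride + 5;

      vp8_sixtap_predict4x4_c(src_ptr, kStride, xoffset, yoffset, ref, 4);
      vp8_sixtap_predict4x4_lsx(src_ptr, kStride, xoffset, yoffset, out, 4);
      if (memcmp(ref, out, sizeof(ref)) != 0) {
        printf("mismatch at xoffset=%d yoffset=%d\n", xoffset, yoffset);
        return 1;
      }
    }
  }
  printf("vp8_sixtap_predict4x4_lsx matches the C reference\n");
  return 0;
}

libvpx's own SixtapPredict unit tests exercise the same paths once the lsx entry is
registered in rtcd_defs.pl, so a sketch like this is only a convenience for quick
local checks.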