author    levytamar82 <levytamar82@gmail.com>    2014-02-03 18:40:33 -0700
committer levytamar82 <levytamar82@gmail.com>    2014-02-14 16:59:11 -0700
commit    52dac5d1cb08a563551e312929910be736f708c6 (patch)
tree      bf2407997bc82ab659b084290e95e73a07595858 /vp9
parent    0876302e4e2b1d83bacdf0395c50ee118cfafef3 (diff)
AVX2 SubPixel Variance Optimization
Optimize two functions to process 32 elements in parallel instead of 16:
  1. vp9_sub_pixel_variance64x64
  2. vp9_sub_pixel_variance32x32
Both functions were calling vp9_sub_pixel_variance16xh_ssse3; they now call vp9_sub_pixel_variance32xh_avx2 instead, which is written with AVX2 intrinsics and processes 32 elements in parallel. This optimization gave a 70% function-level gain and a 2% user-level gain.
Change-Id: I4f5cb386b346ff6c878a094e1c3b37e418e50bde
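For context, this is how the new 64x64 wrapper (added in vp9/encoder/x86/vp9_variance_avx2.c, shown in full in the diff below) composes the 32-wide AVX2 helper; the sketch simply restates that code, using variance = SSE - sum^2 / N with N = 64 * 64 = 4096 (hence the shift by 12):

    unsigned int sse, sse2;
    int se  = vp9_sub_pixel_variance32xh_avx2(src,      src_stride, x_offset, y_offset,
                                              dst,      dst_stride, 64, &sse);
    int se2 = vp9_sub_pixel_variance32xh_avx2(src + 32, src_stride, x_offset, y_offset,
                                              dst + 32, dst_stride, 64, &sse2);
    se  += se2;                               // combine the sums of the two 32-wide halves
    sse += sse2;                              // combine the SSEs of the two halves
    *sse_ptr = sse;
    return sse - (((int64_t)se * se) >> 12);  // variance = SSE - sum^2 / 4096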
Diffstat (limited to 'vp9')
-rw-r--r--  vp9/common/vp9_rtcd_defs.sh                              |   4
-rw-r--r--  vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c   | 640
-rw-r--r--  vp9/encoder/x86/vp9_variance_avx2.c                      |  52
-rw-r--r--  vp9/vp9cx.mk                                             |   1
4 files changed, 695 insertions, 2 deletions
diff --git a/vp9/common/vp9_rtcd_defs.sh b/vp9/common/vp9_rtcd_defs.sh
index 63171033c..a18ae9bab 100644
--- a/vp9/common/vp9_rtcd_defs.sh
+++ b/vp9/common/vp9_rtcd_defs.sh
@@ -386,7 +386,7 @@ prototype unsigned int vp9_variance4x4 "const uint8_t *src_ptr, int source_strid
specialize vp9_variance4x4 mmx $sse2_x86inc
prototype unsigned int vp9_sub_pixel_variance64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp9_sub_pixel_variance64x64 $sse2_x86inc $ssse3_x86inc
+specialize vp9_sub_pixel_variance64x64 $sse2_x86inc $ssse3_x86inc avx2
prototype unsigned int vp9_sub_pixel_avg_variance64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
specialize vp9_sub_pixel_avg_variance64x64 $sse2_x86inc $ssse3_x86inc
@@ -416,7 +416,7 @@ prototype unsigned int vp9_sub_pixel_avg_variance16x32 "const uint8_t *src_ptr,
specialize vp9_sub_pixel_avg_variance16x32 $sse2_x86inc $ssse3_x86inc
prototype unsigned int vp9_sub_pixel_variance32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp9_sub_pixel_variance32x32 $sse2_x86inc $ssse3_x86inc
+specialize vp9_sub_pixel_variance32x32 $sse2_x86inc $ssse3_x86inc avx2
prototype unsigned int vp9_sub_pixel_avg_variance32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
specialize vp9_sub_pixel_avg_variance32x32 $sse2_x86inc $ssse3_x86inc
diff --git a/vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c b/vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c
new file mode 100644
index 000000000..a8f98e94b
--- /dev/null
+++ b/vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c
@@ -0,0 +1,640 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <immintrin.h> // AVX2
+#include "vpx_ports/mem.h"
+#include "vp9/encoder/vp9_variance.h"
+
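+// 16 bilinear filter pairs (16 - k, k) for k = 0..15, each replicated across a full
+// 32-byte AVX2 register; the table is indexed by (offset << 5).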
+DECLARE_ALIGNED(32, const unsigned char, vp9_bilinear_filters_avx2[512]) = {
+ 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0,
+ 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0,
+ 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1,
+ 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1,
+ 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2,
+ 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2,
+ 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3,
+ 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3,
+ 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4,
+ 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4,
+ 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5,
+ 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5,
+ 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6,
+ 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6,
+ 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7,
+ 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9,
+ 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9,
+ 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10,
+ 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10,
+ 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11,
+ 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11,
+ 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12,
+ 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12,
+ 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13,
+ 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13,
+ 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14,
+ 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14,
+ 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15,
+ 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15};
+
+unsigned int vp9_sub_pixel_variance32xh_avx2(const uint8_t *src,
+ int src_stride,
+ int x_offset,
+ int y_offset,
+ const uint8_t *dst,
+ int dst_stride,
+ int height,
+ unsigned int *sse) {
+ __m256i src_reg, dst_reg, exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
+ __m256i sse_reg, sum_reg, sse_reg_hi, res_cmp, sum_reg_lo, sum_reg_hi;
+ __m256i zero_reg;
+ int i, sum;
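+ // sum_reg accumulates the 16-bit pixel differences, sse_reg the 32-bit squared differences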
+ sum_reg = _mm256_set1_epi16(0);
+ sse_reg = _mm256_set1_epi16(0);
+ zero_reg = _mm256_set1_epi16(0);
+
+ if (x_offset == 0) {
+ // x_offset = 0 and y_offset = 0
+ if (y_offset == 0) {
+ for (i = 0; i < height ; i++) {
+ // load source and destination
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ dst_reg = _mm256_load_si256((__m256i const *) (dst));
+
+ // expand each byte to 2 bytes
+ exp_src_lo = _mm256_unpacklo_epi8(src_reg, zero_reg);
+ exp_src_hi = _mm256_unpackhi_epi8(src_reg, zero_reg);
+
+ exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
+ exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
+
+ // source - dest
+ exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
+ exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
+
+ // calculate sum
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
+ exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
+ exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
+
+ // calculate sse
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
+
+ src+= src_stride;
+ dst+= dst_stride;
+ }
+ // x_offset = 0 and y_offset = 8
+ } else if (y_offset == 8) {
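+ // offset 8 is the half-pel position: the (8, 8) bilinear filter reduces to a
+ // rounded average, so _mm256_avg_epu8 is used instead of the multiply/shift path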
+ __m256i src_next_reg;
+ for (i = 0; i < height ; i++) {
+ // load source + next source + destination
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ src_next_reg = _mm256_loadu_si256((__m256i const *)
+ (src + src_stride));
+ dst_reg = _mm256_load_si256((__m256i const *) (dst));
+ // average between current and next stride source
+ src_reg = _mm256_avg_epu8(src_reg, src_next_reg);
+
+ // expand each byte to 2 bytes
+ exp_src_lo = _mm256_unpacklo_epi8(src_reg, zero_reg);
+ exp_src_hi = _mm256_unpackhi_epi8(src_reg, zero_reg);
+
+ exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
+ exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
+
+ // source - dest
+ exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
+ exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
+
+ // calculate sum
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
+ exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
+ exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
+
+ // calculate sse
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
+
+ src+= src_stride;
+ dst+= dst_stride;
+ }
+ // x_offset = 0 and y_offset = bilin interpolation
+ } else {
+ __m256i filter, pw8, src_next_reg;
+#if (ARCH_X86_64)
+ int64_t y_offset64;
+ y_offset64 = y_offset;
+ y_offset64 <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (vp9_bilinear_filters_avx2 + y_offset64));
+#else
+ y_offset <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (vp9_bilinear_filters_avx2 + y_offset));
+#endif
+ pw8 = _mm256_set1_epi16(8);
+ for (i = 0; i < height ; i++) {
+ // load current and next source + destination
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ src_next_reg = _mm256_loadu_si256((__m256i const *)
+ (src + src_stride));
+ dst_reg = _mm256_load_si256((__m256i const *) (dst));
+
+ // merge current and next source
+ exp_src_lo = _mm256_unpacklo_epi8(src_reg, src_next_reg);
+ exp_src_hi = _mm256_unpackhi_epi8(src_reg, src_next_reg);
+
+ // filter the source
+ exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter);
+ exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter);
+
+ // add 8 to the source
+ exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
+ exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
+
+ // divide by 16
+ exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
+ exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
+
+ // expand each destination byte to 2 bytes
+ exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
+ exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
+
+ // source - dest
+ exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
+ exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
+
+ // calculate sum
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
+ exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
+ exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
+
+ // calculate sse
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
+
+ src+= src_stride;
+ dst+= dst_stride;
+ }
+ }
+ // x_offset = 8 and y_offset = 0
+ } else if (x_offset == 8) {
+ if (y_offset == 0) {
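+ // horizontal half-pel only: average each byte with the byte one to its right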
+ __m256i src_next_reg;
+ for (i = 0; i < height ; i++) {
+ // load source and another source starting from the next
+ // following byte + destination
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
+ dst_reg = _mm256_load_si256((__m256i const *) (dst));
+
+ // average between source and the next byte following source
+ src_reg = _mm256_avg_epu8(src_reg, src_next_reg);
+
+ // expand each byte to 2 bytes
+ exp_src_lo = _mm256_unpacklo_epi8(src_reg, zero_reg);
+ exp_src_hi = _mm256_unpackhi_epi8(src_reg, zero_reg);
+
+ exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
+ exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
+
+ // source - dest
+ exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
+ exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
+
+ // calculate sum
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
+ exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
+ exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
+
+ // calculate sse
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
+
+ src+= src_stride;
+ dst+= dst_stride;
+ }
+ // x_offset = 8 and y_offset = 8
+ } else if (y_offset == 8) {
+ __m256i src_next_reg, src_avg;
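+ // half-pel in both directions: each row is averaged horizontally, then averaged
+ // vertically with the previous row's result, which is carried in src_avg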
+ // load source and another source starting from the next
+ // following byte
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
+
+ // average between source and the next byte following source
+ src_avg = _mm256_avg_epu8(src_reg, src_next_reg);
+ for (i = 0; i < height ; i++) {
+ src+= src_stride;
+ // load source and another source starting from the next
+ // following byte + destination
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
+ dst_reg = _mm256_load_si256((__m256i const *) (dst));
+ // average between source and the next byte following source
+ src_reg = _mm256_avg_epu8(src_reg, src_next_reg);
+
+ // expand each byte to 2 bytes
+ exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
+ exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
+
+ // average the previous row's average with the current one
+ src_avg = _mm256_avg_epu8(src_avg, src_reg);
+ // expand each byte to 2 bytes
+ exp_src_lo = _mm256_unpacklo_epi8(src_avg, zero_reg);
+ exp_src_hi = _mm256_unpackhi_epi8(src_avg, zero_reg);
+
+ // save current source average
+ src_avg = src_reg;
+ // source - dest
+ exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
+ exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
+
+ // calculate sum
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
+ exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
+ exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
+
+ // calculate sse
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
+
+ dst+= dst_stride;
+ }
+ // x_offset = 8 and y_offset = bilin interpolation
+ } else {
+ __m256i filter, pw8, src_next_reg, src_avg;
+#if (ARCH_X86_64)
+ int64_t y_offset64;
+ y_offset64 = y_offset;
+ y_offset64 <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (vp9_bilinear_filters_avx2+y_offset64));
+#else
+ y_offset <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (vp9_bilinear_filters_avx2 + y_offset));
+#endif
+ pw8 = _mm256_set1_epi16(8);
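+ // each row is first averaged horizontally (half-pel); the y filter then blends the
+ // previous averaged row (src_avg) with the current one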
+ // load source and another source starting from the next
+ // following byte
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
+ // average between source and the next byte following source
+ src_avg = _mm256_avg_epu8(src_reg, src_next_reg);
+ for (i = 0; i < height ; i++) {
+ src+= src_stride;
+ // load source and another source starting from the next
+ // following byte + destination
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
+ dst_reg = _mm256_load_si256((__m256i const *) (dst));
+ // average between source and the next byte following source
+ src_reg = _mm256_avg_epu8(src_reg, src_next_reg);
+
+ // merge previous average and current average
+ exp_src_lo = _mm256_unpacklo_epi8(src_avg, src_reg);
+ exp_src_hi = _mm256_unpackhi_epi8(src_avg, src_reg);
+
+ // filter the source
+ exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter);
+ exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter);
+
+ // add 8 to the source
+ exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
+ exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
+
+ // divide the source by 16
+ exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
+ exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
+
+ // expand each byte to 2 bytes
+ exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
+ exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
+
+ // save current source average
+ src_avg = src_reg;
+ // source - dest
+ exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
+ exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
+
+ // calculate sum
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
+ exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
+ exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
+
+ // calculate sse
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
+
+ dst+= dst_stride;
+ }
+ }
+ // x_offset = bilin interpolation and y_offset = 0
+ } else {
+ if (y_offset == 0) {
+ __m256i filter, pw8, src_next_reg;
+#if (ARCH_X86_64)
+ int64_t x_offset64;
+ x_offset64 = x_offset;
+ x_offset64 <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (vp9_bilinear_filters_avx2+x_offset64));
+#else
+ x_offset <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (vp9_bilinear_filters_avx2 + x_offset));
+#endif
+ pw8 = _mm256_set1_epi16(8);
+ for (i = 0; i < height ; i++) {
+ // load source and another source starting from the next
+ // following byte + destination
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
+ dst_reg = _mm256_load_si256((__m256i const *) (dst));
+
+ // merge current and next source
+ exp_src_lo = _mm256_unpacklo_epi8(src_reg, src_next_reg);
+ exp_src_hi = _mm256_unpackhi_epi8(src_reg, src_next_reg);
+
+ // filter the source
+ exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter);
+ exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter);
+
+ // add 8 to source
+ exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
+ exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
+
+ // divide the source by 16
+ exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
+ exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
+
+ // expand each byte to 2 bytes
+ exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
+ exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
+
+ // source - dest
+ exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
+ exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
+
+ // calculate sum
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
+ exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
+ exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
+
+ // calculate sse
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
+
+ src+= src_stride;
+ dst+= dst_stride;
+ }
+ // x_offset = bilin interpolation and y_offset = 8
+ } else if (y_offset == 8) {
+ __m256i filter, pw8, src_next_reg, src_pack;
+#if (ARCH_X86_64)
+ int64_t x_offset64;
+ x_offset64 = x_offset;
+ x_offset64 <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (vp9_bilinear_filters_avx2+x_offset64));
+#else
+ x_offset <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (vp9_bilinear_filters_avx2 + x_offset));
+#endif
+ pw8 = _mm256_set1_epi16(8);
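+ // each row is filtered horizontally and packed back to bytes; the vertical half-pel
+ // is the byte average of the previous packed row (src_pack) and the current one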
+ // load source and another source starting from the next
+ // following byte
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
+
+ // interleave the source with the source one byte ahead
+ exp_src_lo = _mm256_unpacklo_epi8(src_reg, src_next_reg);
+ exp_src_hi = _mm256_unpackhi_epi8(src_reg, src_next_reg);
+
+ // filter the source
+ exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter);
+ exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter);
+
+ // add 8 to source
+ exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
+ exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
+
+ // divide source by 16
+ exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
+ exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
+
+ // pack the 16-bit results back to 8-bit bytes within each lane
+ src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ for (i = 0; i < height ; i++) {
+ src+= src_stride;
+
+ // load source and another source starting from the next
+ // following byte + destination
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
+ dst_reg = _mm256_load_si256((__m256i const *) (dst));
+
+ // interleave the source with the source one byte ahead
+ exp_src_lo = _mm256_unpacklo_epi8(src_reg, src_next_reg);
+ exp_src_hi = _mm256_unpackhi_epi8(src_reg, src_next_reg);
+
+ // filter the source
+ exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter);
+ exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter);
+
+ // add 8 to source
+ exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
+ exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
+
+ // divide source by 16
+ exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
+ exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
+
+ // pack the 16-bit results back to 8-bit bytes within each lane
+ src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ // average the previous packed row with the current one
+ src_pack = _mm256_avg_epu8(src_pack, src_reg);
+
+ // expand each byte to 2 bytes
+ exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
+ exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
+
+ exp_src_lo = _mm256_unpacklo_epi8(src_pack, zero_reg);
+ exp_src_hi = _mm256_unpackhi_epi8(src_pack, zero_reg);
+
+ // source - dest
+ exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
+ exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
+
+ // calculate sum
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
+ exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
+ exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
+
+ // calculate sse
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
+
+ // save previous pack
+ src_pack = src_reg;
+ dst+= dst_stride;
+ }
+ // x_offset = bilin interpolation and y_offset = bilin interpolation
+ } else {
+ __m256i xfilter, yfilter, pw8, src_next_reg, src_pack;
+#if (ARCH_X86_64)
+ int64_t x_offset64, y_offset64;
+ x_offset64 = x_offset;
+ x_offset64 <<= 5;
+ y_offset64 = y_offset;
+ y_offset64 <<= 5;
+ xfilter = _mm256_load_si256((__m256i const *)
+ (vp9_bilinear_filters_avx2+x_offset64));
+ yfilter = _mm256_load_si256((__m256i const *)
+ (vp9_bilinear_filters_avx2+y_offset64));
+#else
+ x_offset <<= 5;
+ xfilter = _mm256_load_si256((__m256i const *)
+ (vp9_bilinear_filters_avx2 + x_offset));
+ y_offset <<= 5;
+ yfilter = _mm256_load_si256((__m256i const *)
+ (vp9_bilinear_filters_avx2 + y_offset));
+#endif
+ pw8 = _mm256_set1_epi16(8);
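+ // two-pass bilinear: x-filter each row and pack back to bytes, then y-filter the
+ // previous packed row (src_pack) against the current one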
+ // load source and another source starting from the next
+ // following byte
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
+ // interleave the source with the source one byte ahead
+ exp_src_lo = _mm256_unpacklo_epi8(src_reg, src_next_reg);
+ exp_src_hi = _mm256_unpackhi_epi8(src_reg, src_next_reg);
+
+ // filter the source
+ exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, xfilter);
+ exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, xfilter);
+
+ // add 8 to the source
+ exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
+ exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
+
+ // divide the source by 16
+ exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
+ exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
+
+ // pack the 16-bit results back to 8-bit bytes within each lane
+ src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ for (i = 0; i < height ; i++) {
+ src+= src_stride;
+ // load source and another source starting from the next
+ // following byte + destination
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
+ dst_reg = _mm256_load_si256((__m256i const *) (dst));
+
+ // interleave the source with the source one byte ahead
+ exp_src_lo = _mm256_unpacklo_epi8(src_reg, src_next_reg);
+ exp_src_hi = _mm256_unpackhi_epi8(src_reg, src_next_reg);
+
+ // filter the source
+ exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, xfilter);
+ exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, xfilter);
+
+ // add 8 to source
+ exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
+ exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
+
+ // divide source by 16
+ exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
+ exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
+
+ // pack the 16-bit results back to 8-bit bytes within each lane
+ src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+
+ // merge previous pack to current pack source
+ exp_src_lo = _mm256_unpacklo_epi8(src_pack, src_reg);
+ exp_src_hi = _mm256_unpackhi_epi8(src_pack, src_reg);
+
+ // filter the source
+ exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, yfilter);
+ exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, yfilter);
+
+ // expand each byte to 2 bytes
+ exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
+ exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
+
+ // add 8 to source
+ exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
+ exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
+
+ // divide source by 16
+ exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
+ exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
+
+ // source - dest
+ exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
+ exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
+
+ // calculate sum
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
+ exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
+ exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
+
+ // calculate sse
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
+
+ src_pack = src_reg;
+ dst+= dst_stride;
+ }
+ }
+ }
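+ // horizontal reduction: sign-extend the 16-bit sums to 32 bits, then fold the lanes
+ // of both sum and sse down to scalars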
+ // mask of 16-bit sums that are negative (sign bits for sign extension)
+ res_cmp = _mm256_cmpgt_epi16(zero_reg, sum_reg);
+ // bring the upper 8 bytes of each 128-bit lane of sse down
+ sse_reg_hi = _mm256_srli_si256(sse_reg, 8);
+ // interleave the sums with the sign mask to sign-extend each 16-bit sum to 32 bits
+ sum_reg_lo = _mm256_unpacklo_epi16(sum_reg, res_cmp);
+ sum_reg_hi = _mm256_unpackhi_epi16(sum_reg, res_cmp);
+ // add each 8 bytes from every lane of sse and sum
+ sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi);
+ sum_reg = _mm256_add_epi32(sum_reg_lo, sum_reg_hi);
+
+ // save the next 4 bytes of each lane of sse
+ sse_reg_hi = _mm256_srli_si256(sse_reg, 4);
+ // save the next 8 bytes of each lane of sum
+ sum_reg_hi = _mm256_srli_si256(sum_reg, 8);
+
+ // add the first 4 bytes to the next 4 bytes sse
+ sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi);
+ // add the first 8 bytes to the next 8 bytes
+ sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi);
+ // extract the low lane and the high lane and add the results
+ *((int*)sse) = _mm_cvtsi128_si32(_mm256_castsi256_si128(sse_reg)) +
+ _mm_cvtsi128_si32(_mm256_extractf128_si256(sse_reg, 1));
+ sum_reg_hi = _mm256_srli_si256(sum_reg, 4);
+ sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi);
+ sum = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_reg)) +
+ _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_reg, 1));
+ return sum;
+}
diff --git a/vp9/encoder/x86/vp9_variance_avx2.c b/vp9/encoder/x86/vp9_variance_avx2.c
index c9b90d52d..02007a3bd 100644
--- a/vp9/encoder/x86/vp9_variance_avx2.c
+++ b/vp9/encoder/x86/vp9_variance_avx2.c
@@ -42,6 +42,18 @@ void vp9_get32x32var_avx2
int *Sum
);
+unsigned int vp9_sub_pixel_variance32xh_avx2
+(
+ const uint8_t *src,
+ int src_stride,
+ int x_offset,
+ int y_offset,
+ const uint8_t *dst,
+ int dst_stride,
+ int height,
+ unsigned int *sse
+);
+
static void variance_avx2(const unsigned char *src_ptr, int source_stride,
const unsigned char *ref_ptr, int recon_stride,
int w, int h, unsigned int *sse, int *sum,
@@ -155,3 +167,43 @@ unsigned int vp9_variance64x32_avx2(const uint8_t *src_ptr,
*sse = var;
return (var - (((int64_t)avg * avg) >> 11));
}
+
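+// variance = SSE - sum^2 / N; a 64x64 block is handled as two 32x64 halves
+// (N = 4096, hence the shift by 12)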
+unsigned int vp9_sub_pixel_variance64x64_avx2(const uint8_t *src,
+ int src_stride,
+ int x_offset,
+ int y_offset,
+ const uint8_t *dst,
+ int dst_stride,
+ unsigned int *sse_ptr) {
+ // processing 32 elements in parallel
+ unsigned int sse;
+ int se = vp9_sub_pixel_variance32xh_avx2(src, src_stride, x_offset,
+ y_offset, dst, dst_stride,
+ 64, &sse);
+ // processing the next 32 elements in parallel
+ unsigned int sse2;
+ int se2 = vp9_sub_pixel_variance32xh_avx2(src + 32, src_stride,
+ x_offset, y_offset,
+ dst + 32, dst_stride,
+ 64, &sse2);
+ se += se2;
+ sse += sse2;
+ *sse_ptr = sse;
+ return sse - (((int64_t)se * se) >> 12);
+}
+
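+// 32x32: a single 32-wide call covers the block (N = 1024, hence the shift by 10)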
+unsigned int vp9_sub_pixel_variance32x32_avx2(const uint8_t *src,
+ int src_stride,
+ int x_offset,
+ int y_offset,
+ const uint8_t *dst,
+ int dst_stride,
+ unsigned int *sse_ptr) {
+ // processing 32 element in parallel
+ unsigned int sse;
+ int se = vp9_sub_pixel_variance32xh_avx2(src, src_stride, x_offset,
+ y_offset, dst, dst_stride,
+ 32, &sse);
+ *sse_ptr = sse;
+ return sse - (((int64_t)se * se) >> 10);
+}
diff --git a/vp9/vp9cx.mk b/vp9/vp9cx.mk
index c0d973b4f..27dd6f625 100644
--- a/vp9/vp9cx.mk
+++ b/vp9/vp9cx.mk
@@ -86,6 +86,7 @@ VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_variance_impl_sse2.asm
VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_variance_impl_intrin_avx2.c
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_sad4d_sse2.asm
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subpel_variance_impl_sse2.asm
+VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_temporal_filter_apply_sse2.asm
VP9_CX_SRCS-$(HAVE_SSE3) += encoder/x86/vp9_sad_sse3.asm