author     Yunqing Wang <yunqingwang@google.com>                 2014-03-03 09:01:36 -0800
committer  Gerrit Code Review <gerrit@gerrit.golo.chromium.org>  2014-03-03 09:01:36 -0800
commit     d4648d93f430343a99e032183de564a83f0fdf0e (patch)
tree       e9761162c59a71487c606669b974e2e42fc373f5
parent     a9288e297fd8a853e3a159e68627376c5811b1e0 (diff)
parent     ea1490968762b96f41548d792e84c7a195bb881f (diff)
Merge "AVX2 SubPixel AVG Variance Optimization"
-rw-r--r--  vp9/common/vp9_rtcd_defs.sh                                 4
-rw-r--r--  vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c   1004
-rw-r--r--  vp9/encoder/x86/vp9_variance_avx2.c                        59
3 files changed, 512 insertions, 555 deletions
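
For orientation, here is a minimal scalar model of the quantity the new AVX2 kernels compute; it is not part of the patch, the function name is made up, and it assumes the 1/16-pel bilinear taps (16 - offset, offset) and the +8 >> 4 rounding used by this file's filter table. The source is interpolated at (x_offset, y_offset), averaged with the second predictor, and the sum/SSE of the difference against the reference block is accumulated; the wrappers then form variance = sse - sum^2 / N.

#include <stdint.h>

/* Scalar reference sketch (illustrative only). Assumes the caller provides
 * the extra row/column of source needed when an offset is non-zero, as the
 * real kernels do. */
static unsigned int subpel_avg_variance_ref(const uint8_t *src, int src_stride,
                                            int x_offset, int y_offset,
                                            const uint8_t *dst, int dst_stride,
                                            const uint8_t *sec, int sec_stride,
                                            int w, int h, unsigned int *sse) {
  int64_t sum = 0;
  uint64_t sse_acc = 0;
  for (int r = 0; r < h; ++r) {
    for (int c = 0; c < w; ++c) {
      /* horizontal then vertical bilinear filter, 1/16 pel, round by +8 >> 4 */
      int a = src[r * src_stride + c];
      int b = src[r * src_stride + c + 1];
      int d = src[(r + 1) * src_stride + c];
      int e = src[(r + 1) * src_stride + c + 1];
      int top = (a * (16 - x_offset) + b * x_offset + 8) >> 4;
      int bot = (d * (16 - x_offset) + e * x_offset + 8) >> 4;
      int pred = (top * (16 - y_offset) + bot * y_offset + 8) >> 4;
      /* the "avg" part: blend with the second predictor, rounding up */
      pred = (pred + sec[r * sec_stride + c] + 1) >> 1;
      int diff = pred - dst[r * dst_stride + c];
      sum += diff;
      sse_acc += (uint64_t)(diff * diff);
    }
  }
  *sse = (unsigned int)sse_acc;
  /* the SIMD wrappers use a shift instead (>> 10 for 32x32, >> 12 for 64x64) */
  return (unsigned int)(sse_acc - (uint64_t)((sum * sum) / (w * h)));
}
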
diff --git a/vp9/common/vp9_rtcd_defs.sh b/vp9/common/vp9_rtcd_defs.sh
index 83ee69b7e..5b449705a 100644
--- a/vp9/common/vp9_rtcd_defs.sh
+++ b/vp9/common/vp9_rtcd_defs.sh
@@ -389,7 +389,7 @@ prototype unsigned int vp9_sub_pixel_variance64x64 "const uint8_t *src_ptr, int
specialize vp9_sub_pixel_variance64x64 $sse2_x86inc $ssse3_x86inc avx2
prototype unsigned int vp9_sub_pixel_avg_variance64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
-specialize vp9_sub_pixel_avg_variance64x64 $sse2_x86inc $ssse3_x86inc
+specialize vp9_sub_pixel_avg_variance64x64 $sse2_x86inc $ssse3_x86inc avx2
prototype unsigned int vp9_sub_pixel_variance32x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_sub_pixel_variance32x64 $sse2_x86inc $ssse3_x86inc
@@ -419,7 +419,7 @@ prototype unsigned int vp9_sub_pixel_variance32x32 "const uint8_t *src_ptr, int
specialize vp9_sub_pixel_variance32x32 $sse2_x86inc $ssse3_x86inc avx2
prototype unsigned int vp9_sub_pixel_avg_variance32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
-specialize vp9_sub_pixel_avg_variance32x32 $sse2_x86inc $ssse3_x86inc
+specialize vp9_sub_pixel_avg_variance32x32 $sse2_x86inc $ssse3_x86inc avx2
prototype unsigned int vp9_sub_pixel_variance16x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_sub_pixel_variance16x16 $sse2_x86inc $ssse3_x86inc
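
The specialize changes above only make the new AVX2 kernels eligible for run-time selection; the actual dispatch table is generated from this script into vp9_rtcd.h. A rough, hypothetical sketch of what that selection amounts to (the pointer and setup names below are illustrative, not the generated code):

#include <stdint.h>

typedef unsigned int subpel_avg_var_fn(const uint8_t *src_ptr, int source_stride,
                                       int xoffset, int yoffset,
                                       const uint8_t *ref_ptr, int ref_stride,
                                       unsigned int *sse,
                                       const uint8_t *second_pred);

/* variants implied by the specialize line: C baseline plus SIMD versions */
extern subpel_avg_var_fn vp9_sub_pixel_avg_variance32x32_c;
extern subpel_avg_var_fn vp9_sub_pixel_avg_variance32x32_ssse3;
extern subpel_avg_var_fn vp9_sub_pixel_avg_variance32x32_avx2;

static subpel_avg_var_fn *sub_pixel_avg_variance32x32_ptr;  /* hypothetical */

static void pick_sub_pixel_avg_variance32x32(int has_ssse3, int has_avx2) {
  /* start with the C baseline, then upgrade to the best supported version */
  sub_pixel_avg_variance32x32_ptr = vp9_sub_pixel_avg_variance32x32_c;
  if (has_ssse3) sub_pixel_avg_variance32x32_ptr = vp9_sub_pixel_avg_variance32x32_ssse3;
  if (has_avx2) sub_pixel_avg_variance32x32_ptr = vp9_sub_pixel_avg_variance32x32_avx2;
}
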
diff --git a/vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c b/vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c
index b8bfa8900..34ed1867f 100644
--- a/vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c
+++ b/vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c
@@ -47,6 +47,77 @@ DECLARE_ALIGNED(32, static const uint8_t, bilinear_filters_avx2[512]) = {
1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15
};
+#define FILTER_SRC(filter) \
+ /* filter the source */ \
+ exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter); \
+ exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter); \
+ \
+ /* add 8 to source */ \
+ exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8); \
+ exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8); \
+ \
+ /* divide source by 16 */ \
+ exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4); \
+ exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
+
+#define MERGE_WITH_SRC(src_reg, reg) \
+ exp_src_lo = _mm256_unpacklo_epi8(src_reg, reg); \
+ exp_src_hi = _mm256_unpackhi_epi8(src_reg, reg);
+
+#define LOAD_SRC_DST \
+ /* load source and destination */ \
+ src_reg = _mm256_loadu_si256((__m256i const *) (src)); \
+ dst_reg = _mm256_load_si256((__m256i const *) (dst));
+
+#define AVG_NEXT_SRC(src_reg, size_stride) \
+ src_next_reg = _mm256_loadu_si256((__m256i const *) \
+ (src + size_stride)); \
+ /* average between current and next stride source */ \
+ src_reg = _mm256_avg_epu8(src_reg, src_next_reg);
+
+#define MERGE_NEXT_SRC(src_reg, size_stride) \
+ src_next_reg = _mm256_loadu_si256((__m256i const *) \
+ (src + size_stride)); \
+ MERGE_WITH_SRC(src_reg, src_next_reg)
+
+#define CALC_SUM_SSE_INSIDE_LOOP \
+ /* expand each byte to 2 bytes */ \
+ exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg); \
+ exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg); \
+ /* source - dest */ \
+ exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo); \
+ exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi); \
+ /* calculate sum */ \
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo); \
+ exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo); \
+ sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi); \
+ exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi); \
+ /* calculate sse */ \
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo); \
+ sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
+
+// final calculation to sum and sse
+#define CALC_SUM_AND_SSE \
+ res_cmp = _mm256_cmpgt_epi16(zero_reg, sum_reg); \
+ sse_reg_hi = _mm256_srli_si256(sse_reg, 8); \
+ sum_reg_lo = _mm256_unpacklo_epi16(sum_reg, res_cmp); \
+ sum_reg_hi = _mm256_unpackhi_epi16(sum_reg, res_cmp); \
+ sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi); \
+ sum_reg = _mm256_add_epi32(sum_reg_lo, sum_reg_hi); \
+ \
+ sse_reg_hi = _mm256_srli_si256(sse_reg, 4); \
+ sum_reg_hi = _mm256_srli_si256(sum_reg, 8); \
+ \
+ sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi); \
+ sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi); \
+ *((int*)sse)= _mm_cvtsi128_si32(_mm256_castsi256_si128(sse_reg)) + \
+ _mm_cvtsi128_si32(_mm256_extractf128_si256(sse_reg, 1)); \
+ sum_reg_hi = _mm256_srli_si256(sum_reg, 4); \
+ sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi); \
+ sum = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_reg)) + \
+ _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_reg, 1));
+
+
unsigned int vp9_sub_pixel_variance32xh_avx2(const uint8_t *src,
int src_stride,
int x_offset,
@@ -55,587 +126,414 @@ unsigned int vp9_sub_pixel_variance32xh_avx2(const uint8_t *src,
int dst_stride,
int height,
unsigned int *sse) {
- __m256i src_reg, dst_reg, exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
- __m256i sse_reg, sum_reg, sse_reg_hi, res_cmp, sum_reg_lo, sum_reg_hi;
+ __m256i src_reg, dst_reg, exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
+ __m256i sse_reg, sum_reg, sse_reg_hi, res_cmp, sum_reg_lo, sum_reg_hi;
__m256i zero_reg;
int i, sum;
sum_reg = _mm256_set1_epi16(0);
sse_reg = _mm256_set1_epi16(0);
zero_reg = _mm256_set1_epi16(0);
+ // x_offset = 0 and y_offset = 0
if (x_offset == 0) {
- // x_offset = 0 and y_offset = 0
if (y_offset == 0) {
for (i = 0; i < height ; i++) {
- // load source and destination
- src_reg = _mm256_loadu_si256((__m256i const *) (src));
- dst_reg = _mm256_load_si256((__m256i const *) (dst));
-
+ LOAD_SRC_DST
// expand each byte to 2 bytes
- exp_src_lo = _mm256_unpacklo_epi8(src_reg, zero_reg);
- exp_src_hi = _mm256_unpackhi_epi8(src_reg, zero_reg);
-
- exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
- exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
-
- // source - dest
- exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
- exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
-
- // calculate sum
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
- exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
- exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
-
- // calculate sse
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
-
+ MERGE_WITH_SRC(src_reg, zero_reg)
+ CALC_SUM_SSE_INSIDE_LOOP
src+= src_stride;
dst+= dst_stride;
}
// x_offset = 0 and y_offset = 8
} else if (y_offset == 8) {
- __m256i src_next_reg;
- for (i = 0; i < height ; i++) {
- // load source + next source + destination
- src_reg = _mm256_loadu_si256((__m256i const *) (src));
- src_next_reg = _mm256_loadu_si256((__m256i const *)
- (src + src_stride));
- dst_reg = _mm256_load_si256((__m256i const *) (dst));
- // average between current and next stride source
- src_reg = _mm256_avg_epu8(src_reg, src_next_reg);
-
- // expend each byte to 2 bytes
- exp_src_lo = _mm256_unpacklo_epi8(src_reg, zero_reg);
- exp_src_hi = _mm256_unpackhi_epi8(src_reg, zero_reg);
-
- exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
- exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
-
- // source - dest
- exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
- exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
-
- // calculate sum
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
- exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
- exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
-
- // calculate sse
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
-
- src+= src_stride;
- dst+= dst_stride;
- }
+ __m256i src_next_reg;
+ for (i = 0; i < height ; i++) {
+ LOAD_SRC_DST
+ AVG_NEXT_SRC(src_reg, src_stride)
+ // expand each byte to 2 bytes
+ MERGE_WITH_SRC(src_reg, zero_reg)
+ CALC_SUM_SSE_INSIDE_LOOP
+ src+= src_stride;
+ dst+= dst_stride;
+ }
// x_offset = 0 and y_offset = bilin interpolation
} else {
- __m256i filter, pw8, src_next_reg;
-#if (ARCH_X86_64)
- int64_t y_offset64;
- y_offset64 = y_offset;
- y_offset64 <<= 5;
- filter = _mm256_load_si256(
- (__m256i const *)(bilinear_filters_avx2 + y_offset64));
-#else
- y_offset <<= 5;
- filter = _mm256_load_si256(
- (__m256i const *)(bilinear_filters_avx2 + y_offset));
-#endif
- pw8 = _mm256_set1_epi16(8);
- for (i = 0; i < height ; i++) {
- // load current and next source + destination
- src_reg = _mm256_loadu_si256((__m256i const *) (src));
- src_next_reg = _mm256_loadu_si256((__m256i const *)
- (src + src_stride));
- dst_reg = _mm256_load_si256((__m256i const *) (dst));
-
- // merge current and next source
- exp_src_lo = _mm256_unpacklo_epi8(src_reg, src_next_reg);
- exp_src_hi = _mm256_unpackhi_epi8(src_reg, src_next_reg);
-
- // filter the source
- exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter);
- exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter);
-
- // add 8 to the source
- exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
- exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
-
- // divide by 16
- exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
- exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
-
- // expand each byte to 2 byte in the destination
- exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
- exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
+ __m256i filter, pw8, src_next_reg;
- // source - dest
- exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
- exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
-
- // calculate sum
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
- exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
- exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
-
- // calculate sse
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
-
- src+= src_stride;
- dst+= dst_stride;
- }
+ y_offset <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (bilinear_filters_avx2 + y_offset));
+ pw8 = _mm256_set1_epi16(8);
+ for (i = 0; i < height ; i++) {
+ LOAD_SRC_DST
+ MERGE_NEXT_SRC(src_reg, src_stride)
+ FILTER_SRC(filter)
+ CALC_SUM_SSE_INSIDE_LOOP
+ src+= src_stride;
+ dst+= dst_stride;
+ }
}
// x_offset = 8 and y_offset = 0
} else if (x_offset == 8) {
- if (y_offset == 0) {
- __m256i src_next_reg;
- for (i = 0; i < height ; i++) {
- // load source and another source starting from the next
- // following byte + destination
- src_reg = _mm256_loadu_si256((__m256i const *) (src));
- src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
- dst_reg = _mm256_load_si256((__m256i const *) (dst));
-
- // average between source and the next byte following source
- src_reg = _mm256_avg_epu8(src_reg, src_next_reg);
-
- // expand each byte to 2 bytes
- exp_src_lo = _mm256_unpacklo_epi8(src_reg, zero_reg);
- exp_src_hi = _mm256_unpackhi_epi8(src_reg, zero_reg);
-
- exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
- exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
-
- // source - dest
- exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
- exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
-
- // calculate sum
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
- exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
- exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
-
- // calculate sse
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
-
- src+= src_stride;
- dst+= dst_stride;
- }
- // x_offset = 8 and y_offset = 8
- } else if (y_offset == 8) {
- __m256i src_next_reg, src_avg;
- // load source and another source starting from the next
- // following byte
- src_reg = _mm256_loadu_si256((__m256i const *) (src));
- src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
-
- // average between source and the next byte following source
- src_avg = _mm256_avg_epu8(src_reg, src_next_reg);
- for (i = 0; i < height ; i++) {
- src+= src_stride;
- // load source and another source starting from the next
- // following byte + destination
- src_reg = _mm256_loadu_si256((__m256i const *) (src));
- src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
- dst_reg = _mm256_load_si256((__m256i const *) (dst));
- // average between source and the next byte following source
- src_reg = _mm256_avg_epu8(src_reg, src_next_reg);
-
- // expand each byte to 2 bytes
- exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
- exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
-
- // average between previous average to current average
- src_avg = _mm256_avg_epu8(src_avg, src_reg);
- // expand each byte to 2 bytes
- exp_src_lo = _mm256_unpacklo_epi8(src_avg, zero_reg);
- exp_src_hi = _mm256_unpackhi_epi8(src_avg, zero_reg);
-
- // save current source average
- src_avg = src_reg;
- // source - dest
- exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
- exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
-
- // calculate sum
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
- exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
- exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
-
- // calculate sse
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
-
- dst+= dst_stride;
- }
- // x_offset = 8 and y_offset = bilin interpolation
- } else {
- __m256i filter, pw8, src_next_reg, src_avg;
-#if (ARCH_X86_64)
- int64_t y_offset64;
- y_offset64 = y_offset;
- y_offset64 <<= 5;
- filter = _mm256_load_si256(
- (__m256i const *)(bilinear_filters_avx2 + y_offset64));
-#else
- y_offset <<= 5;
- filter = _mm256_load_si256(
- (__m256i const *)(bilinear_filters_avx2 + y_offset));
-#endif
- pw8 = _mm256_set1_epi16(8);
- // load source and another source starting from the next
- // following byte
- src_reg = _mm256_loadu_si256((__m256i const *) (src));
- src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
- // average between source and the next byte following source
- src_avg = _mm256_avg_epu8(src_reg, src_next_reg);
- for (i = 0; i < height ; i++) {
- src+= src_stride;
- // load source and another source starting from the next
- // following byte + destination
- src_reg = _mm256_loadu_si256((__m256i const *) (src));
- src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
- dst_reg = _mm256_load_si256((__m256i const *) (dst));
- // average between source and the next byte following source
- src_reg = _mm256_avg_epu8(src_reg, src_next_reg);
-
- // merge previous average and current average
- exp_src_lo = _mm256_unpacklo_epi8(src_avg, src_reg);
- exp_src_hi = _mm256_unpackhi_epi8(src_avg, src_reg);
-
- // filter the source
- exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter);
- exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter);
-
- // add 8 to the source
- exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
- exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
-
- // divide the source by 16
- exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
- exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
-
- // expand each byte to 2 bytes
- exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
- exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
-
- // save current source average
- src_avg = src_reg;
- // source - dest
- exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
- exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
-
- // calculate sum
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
- exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
- exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
-
- // calculate sse
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
-
- dst+= dst_stride;
- }
+ if (y_offset == 0) {
+ __m256i src_next_reg;
+ for (i = 0; i < height ; i++) {
+ LOAD_SRC_DST
+ AVG_NEXT_SRC(src_reg, 1)
+ // expand each byte to 2 bytes
+ MERGE_WITH_SRC(src_reg, zero_reg)
+ CALC_SUM_SSE_INSIDE_LOOP
+ src+= src_stride;
+ dst+= dst_stride;
}
+ // x_offset = 8 and y_offset = 8
+ } else if (y_offset == 8) {
+ __m256i src_next_reg, src_avg;
+ // load source and another source starting from the next
+ // following byte
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ AVG_NEXT_SRC(src_reg, 1)
+ for (i = 0; i < height ; i++) {
+ src_avg = src_reg;
+ src+= src_stride;
+ LOAD_SRC_DST
+ AVG_NEXT_SRC(src_reg, 1)
+ // average of the previous and current source averages
+ src_avg = _mm256_avg_epu8(src_avg, src_reg);
+ // expand each byte to 2 bytes
+ MERGE_WITH_SRC(src_avg, zero_reg)
+ // save current source average
+ CALC_SUM_SSE_INSIDE_LOOP
+ dst+= dst_stride;
+ }
+ // x_offset = 8 and y_offset = bilin interpolation
+ } else {
+ __m256i filter, pw8, src_next_reg, src_avg;
+ y_offset <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (bilinear_filters_avx2 + y_offset));
+ pw8 = _mm256_set1_epi16(8);
+ // load source and another source starting from the next
+ // following byte
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ AVG_NEXT_SRC(src_reg, 1)
+ for (i = 0; i < height ; i++) {
+ // save current source average
+ src_avg = src_reg;
+ src+= src_stride;
+ LOAD_SRC_DST
+ AVG_NEXT_SRC(src_reg, 1)
+ MERGE_WITH_SRC(src_avg, src_reg)
+ FILTER_SRC(filter)
+ CALC_SUM_SSE_INSIDE_LOOP
+ dst+= dst_stride;
+ }
+ }
// x_offset = bilin interpolation and y_offset = 0
} else {
- if (y_offset == 0) {
- __m256i filter, pw8, src_next_reg;
-#if (ARCH_X86_64)
- int64_t x_offset64;
- x_offset64 = x_offset;
- x_offset64 <<= 5;
- filter = _mm256_load_si256(
- (__m256i const *)(bilinear_filters_avx2 + x_offset64));
-#else
- x_offset <<= 5;
- filter = _mm256_load_si256(
- (__m256i const *)(bilinear_filters_avx2 + x_offset));
-#endif
- pw8 = _mm256_set1_epi16(8);
- for (i = 0; i < height ; i++) {
- // load source and another source starting from the next
- // following byte + destination
- src_reg = _mm256_loadu_si256((__m256i const *) (src));
- src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
- dst_reg = _mm256_load_si256((__m256i const *) (dst));
-
- // merge current and next source
- exp_src_lo = _mm256_unpacklo_epi8(src_reg, src_next_reg);
- exp_src_hi = _mm256_unpackhi_epi8(src_reg, src_next_reg);
-
- // filter the source
- exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter);
- exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter);
-
- // add 8 to source
- exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
- exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
-
- // divide the source by 16
- exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
- exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
-
- // expand each byte to 2 bytes
- exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
- exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
-
- // source - dest
- exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
- exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
-
- // calculate sum
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
- exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
- exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
-
- // calculate sse
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
-
- src+= src_stride;
- dst+= dst_stride;
- }
- // x_offset = bilin interpolation and y_offset = 8
- } else if (y_offset == 8) {
- __m256i filter, pw8, src_next_reg, src_pack;
-#if (ARCH_X86_64)
- int64_t x_offset64;
- x_offset64 = x_offset;
- x_offset64 <<= 5;
- filter = _mm256_load_si256(
- (__m256i const *)(bilinear_filters_avx2 + x_offset64));
-#else
- x_offset <<= 5;
- filter = _mm256_load_si256(
- (__m256i const *)(bilinear_filters_avx2 + x_offset));
-#endif
- pw8 = _mm256_set1_epi16(8);
- // load source and another source starting from the next
- // following byte
- src_reg = _mm256_loadu_si256((__m256i const *) (src));
- src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
-
- // merge current and next stride source
- exp_src_lo = _mm256_unpacklo_epi8(src_reg, src_next_reg);
- exp_src_hi = _mm256_unpackhi_epi8(src_reg, src_next_reg);
-
- // filter the source
- exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter);
- exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter);
-
- // add 8 to source
- exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
- exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
-
- // divide source by 16
- exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
- exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
-
- // convert each 16 bit to 8 bit to each low and high lane source
- src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
- for (i = 0; i < height ; i++) {
- src+= src_stride;
-
- // load source and another source starting from the next
- // following byte + destination
- src_reg = _mm256_loadu_si256((__m256i const *) (src));
- src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
- dst_reg = _mm256_load_si256((__m256i const *) (dst));
-
- // merge current and next stride source
- exp_src_lo = _mm256_unpacklo_epi8(src_reg, src_next_reg);
- exp_src_hi = _mm256_unpackhi_epi8(src_reg, src_next_reg);
-
- // filter the source
- exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter);
- exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter);
-
- // add 8 to source
- exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
- exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
-
- // divide source by 16
- exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
- exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
-
- // convert each 16 bit to 8 bit to each low and high lane source
- src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
- // average between previous pack to the current
- src_pack = _mm256_avg_epu8(src_pack, src_reg);
-
- // expand each byte to 2 bytes
- exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
- exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
-
- exp_src_lo = _mm256_unpacklo_epi8(src_pack, zero_reg);
- exp_src_hi = _mm256_unpackhi_epi8(src_pack, zero_reg);
-
- // source - dest
- exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
- exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
-
- // calculate sum
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
- exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
- exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
-
- // calculate sse
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
-
- // save previous pack
- src_pack = src_reg;
- dst+= dst_stride;
- }
- // x_offset = bilin interpolation and y_offset = bilin interpolation
- } else {
- __m256i xfilter, yfilter, pw8, src_next_reg, src_pack;
-#if (ARCH_X86_64)
- int64_t x_offset64, y_offset64;
- x_offset64 = x_offset;
- x_offset64 <<= 5;
- y_offset64 = y_offset;
- y_offset64 <<= 5;
- xfilter = _mm256_load_si256(
- (__m256i const *)(bilinear_filters_avx2 + x_offset64));
- yfilter = _mm256_load_si256(
- (__m256i const *)(bilinear_filters_avx2 + y_offset64));
-#else
- x_offset <<= 5;
- xfilter = _mm256_load_si256(
- (__m256i const *)(bilinear_filters_avx2 + x_offset));
- y_offset <<= 5;
- yfilter = _mm256_load_si256(
- (__m256i const *)(bilinear_filters_avx2 + y_offset));
-#endif
- pw8 = _mm256_set1_epi16(8);
- // load source and another source starting from the next
- // following byte
- src_reg = _mm256_loadu_si256((__m256i const *) (src));
- src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
- // merge current and next stride source
- exp_src_lo = _mm256_unpacklo_epi8(src_reg, src_next_reg);
- exp_src_hi = _mm256_unpackhi_epi8(src_reg, src_next_reg);
-
- // filter the source
- exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, xfilter);
- exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, xfilter);
-
- // add 8 to the source
- exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
- exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
-
- // divide the source by 16
- exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
- exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
-
- // convert each 16 bit to 8 bit to each low and high lane source
- src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
- for (i = 0; i < height ; i++) {
- src+= src_stride;
- // load source and another source starting from the next
- // following byte + destination
- src_reg = _mm256_loadu_si256((__m256i const *) (src));
- src_next_reg = _mm256_loadu_si256((__m256i const *) (src + 1));
- dst_reg = _mm256_load_si256((__m256i const *) (dst));
-
- // merge current and next stride source
- exp_src_lo = _mm256_unpacklo_epi8(src_reg, src_next_reg);
- exp_src_hi = _mm256_unpackhi_epi8(src_reg, src_next_reg);
-
- // filter the source
- exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, xfilter);
- exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, xfilter);
-
- // add 8 to source
- exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
- exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
-
- // divide source by 16
- exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
- exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
-
- // convert each 16 bit to 8 bit to each low and high lane source
- src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
-
- // merge previous pack to current pack source
- exp_src_lo = _mm256_unpacklo_epi8(src_pack, src_reg);
- exp_src_hi = _mm256_unpackhi_epi8(src_pack, src_reg);
-
- // filter the source
- exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, yfilter);
- exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, yfilter);
-
- // expand each byte to 2 bytes
- exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);
- exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);
-
- // add 8 to source
- exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);
- exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);
-
- // divide source by 16
- exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);
- exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
-
- // source - dest
- exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);
- exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);
+ if (y_offset == 0) {
+ __m256i filter, pw8, src_next_reg;
+ x_offset <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (bilinear_filters_avx2 + x_offset));
+ pw8 = _mm256_set1_epi16(8);
+ for (i = 0; i < height ; i++) {
+ LOAD_SRC_DST
+ MERGE_NEXT_SRC(src_reg, 1)
+ FILTER_SRC(filter)
+ CALC_SUM_SSE_INSIDE_LOOP
+ src+= src_stride;
+ dst+= dst_stride;
+ }
+ // x_offset = bilin interpolation and y_offset = 8
+ } else if (y_offset == 8) {
+ __m256i filter, pw8, src_next_reg, src_pack;
+ x_offset <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (bilinear_filters_avx2 + x_offset));
+ pw8 = _mm256_set1_epi16(8);
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ MERGE_NEXT_SRC(src_reg, 1)
+ FILTER_SRC(filter)
+ // pack the 16-bit results back to 8 bits in each lane
+ src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ for (i = 0; i < height ; i++) {
+ src+= src_stride;
+ LOAD_SRC_DST
+ MERGE_NEXT_SRC(src_reg, 1)
+ FILTER_SRC(filter)
+ src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ // average of the previous and current packed source
+ src_pack = _mm256_avg_epu8(src_pack, src_reg);
+ MERGE_WITH_SRC(src_pack, zero_reg)
+ CALC_SUM_SSE_INSIDE_LOOP
+ src_pack = src_reg;
+ dst+= dst_stride;
+ }
+ // x_offset = bilin interpolation and y_offset = bilin interpolation
+ } else {
+ __m256i xfilter, yfilter, pw8, src_next_reg, src_pack;
+ x_offset <<= 5;
+ xfilter = _mm256_load_si256((__m256i const *)
+ (bilinear_filters_avx2 + x_offset));
+ y_offset <<= 5;
+ yfilter = _mm256_load_si256((__m256i const *)
+ (bilinear_filters_avx2 + y_offset));
+ pw8 = _mm256_set1_epi16(8);
+ // load source and another source starting from the next
+ // following byte
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ MERGE_NEXT_SRC(src_reg, 1)
+
+ FILTER_SRC(xfilter)
+ // pack the 16-bit results back to 8 bits in each lane
+ src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ for (i = 0; i < height ; i++) {
+ src+= src_stride;
+ LOAD_SRC_DST
+ MERGE_NEXT_SRC(src_reg, 1)
+ FILTER_SRC(xfilter)
+ src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ // merge previous pack to current pack source
+ MERGE_WITH_SRC(src_pack, src_reg)
+ // filter the source
+ FILTER_SRC(yfilter)
+ src_pack = src_reg;
+ CALC_SUM_SSE_INSIDE_LOOP
+ dst+= dst_stride;
+ }
+ }
+ }
+ CALC_SUM_AND_SSE
+ return sum;
+}
- // caculate sum
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);
- exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo);
- sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);
- exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi);
+unsigned int vp9_sub_pixel_avg_variance32xh_avx2(const uint8_t *src,
+ int src_stride,
+ int x_offset,
+ int y_offset,
+ const uint8_t *dst,
+ int dst_stride,
+ const uint8_t *sec,
+ int sec_stride,
+ int height,
+ unsigned int *sse) {
+ __m256i sec_reg;
+ __m256i src_reg, dst_reg, exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
+ __m256i sse_reg, sum_reg, sse_reg_hi, res_cmp, sum_reg_lo, sum_reg_hi;
+ __m256i zero_reg;
+ int i, sum;
+ sum_reg = _mm256_set1_epi16(0);
+ sse_reg = _mm256_set1_epi16(0);
+ zero_reg = _mm256_set1_epi16(0);
- // calculate sse
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);
- sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
+ // x_offset = 0 and y_offset = 0
+ if (x_offset == 0) {
+ if (y_offset == 0) {
+ for (i = 0; i < height ; i++) {
+ LOAD_SRC_DST
+ sec_reg = _mm256_load_si256((__m256i const *) (sec));
+ src_reg = _mm256_avg_epu8(src_reg, sec_reg);
+ sec+= sec_stride;
+ // expand each byte to 2 bytes
+ MERGE_WITH_SRC(src_reg, zero_reg)
+ CALC_SUM_SSE_INSIDE_LOOP
+ src+= src_stride;
+ dst+= dst_stride;
+ }
+ } else if (y_offset == 8) {
+ __m256i src_next_reg;
+ for (i = 0; i < height ; i++) {
+ LOAD_SRC_DST
+ AVG_NEXT_SRC(src_reg, src_stride)
+ sec_reg = _mm256_load_si256((__m256i const *) (sec));
+ src_reg = _mm256_avg_epu8(src_reg, sec_reg);
+ sec+= sec_stride;
+ // expand each byte to 2 bytes
+ MERGE_WITH_SRC(src_reg, zero_reg)
+ CALC_SUM_SSE_INSIDE_LOOP
+ src+= src_stride;
+ dst+= dst_stride;
+ }
+ // x_offset = 0 and y_offset = bilin interpolation
+ } else {
+ __m256i filter, pw8, src_next_reg;
- src_pack = src_reg;
- dst+= dst_stride;
- }
+ y_offset <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (bilinear_filters_avx2 + y_offset));
+ pw8 = _mm256_set1_epi16(8);
+ for (i = 0; i < height ; i++) {
+ LOAD_SRC_DST
+ MERGE_NEXT_SRC(src_reg, src_stride)
+ FILTER_SRC(filter)
+ src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ sec_reg = _mm256_load_si256((__m256i const *) (sec));
+ src_reg = _mm256_avg_epu8(src_reg, sec_reg);
+ sec+= sec_stride;
+ MERGE_WITH_SRC(src_reg, zero_reg)
+ CALC_SUM_SSE_INSIDE_LOOP
+ src+= src_stride;
+ dst+= dst_stride;
+ }
+ }
+ // x_offset = 8 and y_offset = 0
+ } else if (x_offset == 8) {
+ if (y_offset == 0) {
+ __m256i src_next_reg;
+ for (i = 0; i < height ; i++) {
+ LOAD_SRC_DST
+ AVG_NEXT_SRC(src_reg, 1)
+ sec_reg = _mm256_load_si256((__m256i const *) (sec));
+ src_reg = _mm256_avg_epu8(src_reg, sec_reg);
+ sec+= sec_stride;
+ // expand each byte to 2 bytes
+ MERGE_WITH_SRC(src_reg, zero_reg)
+ CALC_SUM_SSE_INSIDE_LOOP
+ src+= src_stride;
+ dst+= dst_stride;
+ }
+ // x_offset = 8 and y_offset = 8
+ } else if (y_offset == 8) {
+ __m256i src_next_reg, src_avg;
+ // load source and another source starting from the next
+ // following byte
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ AVG_NEXT_SRC(src_reg, 1)
+ for (i = 0; i < height ; i++) {
+ // save current source average
+ src_avg = src_reg;
+ src+= src_stride;
+ LOAD_SRC_DST
+ AVG_NEXT_SRC(src_reg, 1)
+ // average of the previous and current source averages
+ src_avg = _mm256_avg_epu8(src_avg, src_reg);
+ sec_reg = _mm256_load_si256((__m256i const *) (sec));
+ src_avg = _mm256_avg_epu8(src_avg, sec_reg);
+ sec+= sec_stride;
+ // expand each byte to 2 bytes
+ MERGE_WITH_SRC(src_avg, zero_reg)
+ CALC_SUM_SSE_INSIDE_LOOP
+ dst+= dst_stride;
+ }
+ // x_offset = 8 and y_offset = bilin interpolation
+ } else {
+ __m256i filter, pw8, src_next_reg, src_avg;
+ y_offset <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (bilinear_filters_avx2 + y_offset));
+ pw8 = _mm256_set1_epi16(8);
+ // load source and another source starting from the next
+ // following byte
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ AVG_NEXT_SRC(src_reg, 1)
+ for (i = 0; i < height ; i++) {
+ // save current source average
+ src_avg = src_reg;
+ src+= src_stride;
+ LOAD_SRC_DST
+ AVG_NEXT_SRC(src_reg, 1)
+ MERGE_WITH_SRC(src_avg, src_reg)
+ FILTER_SRC(filter)
+ src_avg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ sec_reg = _mm256_load_si256((__m256i const *) (sec));
+ src_avg = _mm256_avg_epu8(src_avg, sec_reg);
+ // expand each byte to 2 bytes
+ MERGE_WITH_SRC(src_avg, zero_reg)
+ sec+= sec_stride;
+ CALC_SUM_SSE_INSIDE_LOOP
+ dst+= dst_stride;
}
+ }
+ // x_offset = bilin interpolation and y_offset = 0
+ } else {
+ if (y_offset == 0) {
+ __m256i filter, pw8, src_next_reg;
+ x_offset <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (bilinear_filters_avx2 + x_offset));
+ pw8 = _mm256_set1_epi16(8);
+ for (i = 0; i < height ; i++) {
+ LOAD_SRC_DST
+ MERGE_NEXT_SRC(src_reg, 1)
+ FILTER_SRC(filter)
+ src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ sec_reg = _mm256_load_si256((__m256i const *) (sec));
+ src_reg = _mm256_avg_epu8(src_reg, sec_reg);
+ MERGE_WITH_SRC(src_reg, zero_reg)
+ sec+= sec_stride;
+ CALC_SUM_SSE_INSIDE_LOOP
+ src+= src_stride;
+ dst+= dst_stride;
+ }
+ // x_offset = bilin interpolation and y_offset = 8
+ } else if (y_offset == 8) {
+ __m256i filter, pw8, src_next_reg, src_pack;
+ x_offset <<= 5;
+ filter = _mm256_load_si256((__m256i const *)
+ (bilinear_filters_avx2 + x_offset));
+ pw8 = _mm256_set1_epi16(8);
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ MERGE_NEXT_SRC(src_reg, 1)
+ FILTER_SRC(filter)
+ // pack the 16-bit results back to 8 bits in each lane
+ src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ for (i = 0; i < height ; i++) {
+ src+= src_stride;
+ LOAD_SRC_DST
+ MERGE_NEXT_SRC(src_reg, 1)
+ FILTER_SRC(filter)
+ src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ // average of the previous and current packed source
+ src_pack = _mm256_avg_epu8(src_pack, src_reg);
+ sec_reg = _mm256_load_si256((__m256i const *) (sec));
+ src_pack = _mm256_avg_epu8(src_pack, sec_reg);
+ sec+= sec_stride;
+ MERGE_WITH_SRC(src_pack, zero_reg)
+ src_pack = src_reg;
+ CALC_SUM_SSE_INSIDE_LOOP
+ dst+= dst_stride;
+ }
+ // x_offset = bilin interpolation and y_offset = bilin interpolation
+ } else {
+ __m256i xfilter, yfilter, pw8, src_next_reg, src_pack;
+ x_offset <<= 5;
+ xfilter = _mm256_load_si256((__m256i const *)
+ (bilinear_filters_avx2 + x_offset));
+ y_offset <<= 5;
+ yfilter = _mm256_load_si256((__m256i const *)
+ (bilinear_filters_avx2 + y_offset));
+ pw8 = _mm256_set1_epi16(8);
+ // load source and another source starting from the next
+ // following byte
+ src_reg = _mm256_loadu_si256((__m256i const *) (src));
+ MERGE_NEXT_SRC(src_reg, 1)
+
+ FILTER_SRC(xfilter)
+ // pack the 16-bit results back to 8 bits in each lane
+ src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ for (i = 0; i < height ; i++) {
+ src+= src_stride;
+ LOAD_SRC_DST
+ MERGE_NEXT_SRC(src_reg, 1)
+ FILTER_SRC(xfilter)
+ src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ // merge previous pack to current pack source
+ MERGE_WITH_SRC(src_pack, src_reg)
+ // filter the source
+ FILTER_SRC(yfilter)
+ src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+ sec_reg = _mm256_load_si256((__m256i const *) (sec));
+ src_pack = _mm256_avg_epu8(src_pack, sec_reg);
+ MERGE_WITH_SRC(src_pack, zero_reg)
+ src_pack = src_reg;
+ sec+= sec_stride;
+ CALC_SUM_SSE_INSIDE_LOOP
+ dst+= dst_stride;
+ }
+ }
}
- // sum < 0
- res_cmp = _mm256_cmpgt_epi16(zero_reg, sum_reg);
- // save the next 8 bytes of each lane of sse
- sse_reg_hi = _mm256_srli_si256(sse_reg, 8);
- // merge the result of sum < 0 with sum to add sign to the next 16 bits
- sum_reg_lo = _mm256_unpacklo_epi16(sum_reg, res_cmp);
- sum_reg_hi = _mm256_unpackhi_epi16(sum_reg, res_cmp);
- // add each 8 bytes from every lane of sse and sum
- sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi);
- sum_reg = _mm256_add_epi32(sum_reg_lo, sum_reg_hi);
-
- // save the next 4 bytes of each lane sse
- sse_reg_hi = _mm256_srli_si256(sse_reg, 4);
- // save the next 8 bytes of each lane of sum
- sum_reg_hi = _mm256_srli_si256(sum_reg, 8);
-
- // add the first 4 bytes to the next 4 bytes sse
- sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi);
- // add the first 8 bytes to the next 8 bytes
- sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi);
- // extract the low lane and the high lane and add the results
- *((int*)sse)= _mm_cvtsi128_si32(_mm256_castsi256_si128(sse_reg)) +
- _mm_cvtsi128_si32(_mm256_extractf128_si256(sse_reg, 1));
- sum_reg_hi = _mm256_srli_si256(sum_reg, 4);
- sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi);
- sum = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_reg)) +
- _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_reg, 1));
+ CALC_SUM_AND_SSE
return sum;
}
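
One non-obvious piece in the file above is the final reduction in CALC_SUM_AND_SSE: the 16-bit partial sums are widened to 32 bits by interleaving each lane with the mask produced by _mm256_cmpgt_epi16(zero_reg, sum_reg), which is a lane-wise sign extension. A tiny scalar illustration of that identity (not from the patch):

#include <assert.h>
#include <stdint.h>

/* Interleave a signed 16-bit value (low half) with its "is negative" mask
 * (high half) and you get the 32-bit sign extension of that value; this is
 * what unpacklo/unpackhi_epi16(sum_reg, res_cmp) do for every lane. */
static int32_t widen_with_sign_mask(int16_t v) {
  uint16_t mask = (v < 0) ? 0xFFFFu : 0x0000u;   /* one cmpgt(0, v) lane */
  uint32_t widened = (uint32_t)(uint16_t)v       /* low 16 bits          */
                   | ((uint32_t)mask << 16);     /* high 16 bits         */
  return (int32_t)widened;
}

int main(void) {
  assert(widen_with_sign_mask(-3) == -3);
  assert(widen_with_sign_mask(12345) == 12345);
  return 0;
}
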
diff --git a/vp9/encoder/x86/vp9_variance_avx2.c b/vp9/encoder/x86/vp9_variance_avx2.c
index 02007a3bd..835c51957 100644
--- a/vp9/encoder/x86/vp9_variance_avx2.c
+++ b/vp9/encoder/x86/vp9_variance_avx2.c
@@ -54,6 +54,20 @@ unsigned int vp9_sub_pixel_variance32xh_avx2
unsigned int *sse
);
+unsigned int vp9_sub_pixel_avg_variance32xh_avx2
+(
+ const uint8_t *src,
+ int src_stride,
+ int x_offset,
+ int y_offset,
+ const uint8_t *dst,
+ int dst_stride,
+ const uint8_t *sec,
+ int sec_stride,
+ int height,
+ unsigned int *sseptr
+);
+
static void variance_avx2(const unsigned char *src_ptr, int source_stride,
const unsigned char *ref_ptr, int recon_stride,
int w, int h, unsigned int *sse, int *sum,
@@ -207,3 +221,48 @@ unsigned int vp9_sub_pixel_variance32x32_avx2(const uint8_t *src,
*sse_ptr = sse;
return sse - (((int64_t)se * se) >> 10);
}
+
+unsigned int vp9_sub_pixel_avg_variance64x64_avx2(const uint8_t *src,
+ int src_stride,
+ int x_offset,
+ int y_offset,
+ const uint8_t *dst,
+ int dst_stride,
+ unsigned int *sseptr,
+ const uint8_t *sec) {
+ // processing 32 elements in parallel
+ unsigned int sse;
+
+ int se = vp9_sub_pixel_avg_variance32xh_avx2(src, src_stride, x_offset,
+ y_offset, dst, dst_stride,
+ sec, 64, 64, &sse);
+ unsigned int sse2;
+ // processing the next 32 elements in parallel
+ int se2 = vp9_sub_pixel_avg_variance32xh_avx2(src + 32, src_stride, x_offset,
+ y_offset, dst + 32, dst_stride,
+ sec + 32, 64, 64, &sse2);
+ se += se2;
+ sse += sse2;
+ *sseptr = sse;
+
+ return sse - (((int64_t)se * se) >> 12);
+}
+
+unsigned int vp9_sub_pixel_avg_variance32x32_avx2(const uint8_t *src,
+ int src_stride,
+ int x_offset,
+ int y_offset,
+ const uint8_t *dst,
+ int dst_stride,
+ unsigned int *sseptr,
+ const uint8_t *sec) {
+ // processing 32 elements in parallel
+ unsigned int sse;
+ int se = vp9_sub_pixel_avg_variance32xh_avx2(src, src_stride, x_offset,
+ y_offset, dst, dst_stride,
+ sec, 32, 32, &sse);
+ *sseptr = sse;
+ return sse - (((int64_t)se * se) >> 10);
+}
+
+
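
A brief sketch (not part of the patch; helper names are made up) of the arithmetic the new wrappers perform: variance = SSE - sum^2 / N, with N = 32*32 (>> 10) or 64*64 (>> 12), and a 64x64 block handled as two independent 32-wide columns whose partial sums and SSEs are added before the final formula.

#include <stdint.h>

static unsigned int block_variance(unsigned int sse, int se, int log2_n) {
  return sse - (unsigned int)(((int64_t)se * se) >> log2_n);
}

/* 64x64 = two 32x64 columns: combine partial results, then apply the formula */
static unsigned int combine_64x64(unsigned int sse_l, int se_l,
                                  unsigned int sse_r, int se_r,
                                  unsigned int *sse_out) {
  unsigned int sse = sse_l + sse_r;
  int se = se_l + se_r;
  *sse_out = sse;
  return block_variance(sse, se, 12);  /* 64 * 64 = 4096 = 2^12 */
}
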