path: root/vp9/encoder
author    levytamar82 <levytamar82@gmail.com>  2014-03-13 14:47:30 -0700
committer Yunqing Wang <yunqingwang@google.com>  2014-03-21 13:53:32 -0700
commit    0fa8b668c1019005030b3f584c048d8ddf3012d8 (patch)
tree      48c8a34b3064b30ee471bef980f10afe49612cc0 /vp9/encoder
parent    3967435f655fec96dba08051a58c5f66a91b1c5b (diff)
AVX2 SAD Optimization:
Two functions were optimized for AVX2 by using the full 256-bit registers
to process 32 elements in parallel instead of only 16:
1. vp9_sad32x32x4d
2. vp9_sad64x64x4d
The function-level gain is 66% and the user-level gain is ~1%.

Change-Id: I4efbb3bc7d8bc03b64b6c98f5cd5c4a9dd3212cb
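For context, the x4d SAD kernels compute, for a single source block, the sum of absolute differences against four reference candidates at once. The following scalar sketch of that semantics is not part of the patch; the function name and the generic width/height parameters are illustrative only.

#include <stdint.h>
#include <stdlib.h>

// Illustrative scalar reference: res[i] = SAD between the src block and
// ref[i], computed independently for each of the four reference candidates.
static void sad_wxhx4d_ref(const uint8_t *src, int src_stride,
                           const uint8_t *const ref[4], int ref_stride,
                           int width, int height, unsigned int res[4]) {
  for (int i = 0; i < 4; i++) {
    const uint8_t *s = src;
    const uint8_t *r = ref[i];
    unsigned int sad = 0;
    for (int row = 0; row < height; row++) {
      for (int col = 0; col < width; col++)
        sad += (unsigned int)abs(s[col] - r[col]);
      s += src_stride;
      r += ref_stride;
    }
    res[i] = sad;
  }
}

The AVX2 versions below replace the inner column loop with 256-bit loads and _mm256_sad_epu8, so one instruction covers 32 pixels of a row per reference.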
Diffstat (limited to 'vp9/encoder')
-rw-r--r--  vp9/encoder/x86/vp9_sad4d_intrin_avx2.c  167
1 file changed, 167 insertions, 0 deletions
diff --git a/vp9/encoder/x86/vp9_sad4d_intrin_avx2.c b/vp9/encoder/x86/vp9_sad4d_intrin_avx2.c
new file mode 100644
index 000000000..f31b176e5
--- /dev/null
+++ b/vp9/encoder/x86/vp9_sad4d_intrin_avx2.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <immintrin.h> // AVX2
+#include "vpx/vpx_integer.h"
+
+void vp9_sad32x32x4d_avx2(uint8_t *src,
+ int src_stride,
+ uint8_t *ref[4],
+ int ref_stride,
+ unsigned int res[4]) {
+ __m256i src_reg, ref0_reg, ref1_reg, ref2_reg, ref3_reg;
+ __m256i sum_ref0, sum_ref1, sum_ref2, sum_ref3;
+ __m256i sum_mlow, sum_mhigh;
+ int i;
+ uint8_t *ref0, *ref1, *ref2, *ref3;
+
+ ref0 = ref[0];
+ ref1 = ref[1];
+ ref2 = ref[2];
+ ref3 = ref[3];
+ sum_ref0 = _mm256_set1_epi16(0);
+ sum_ref1 = _mm256_set1_epi16(0);
+ sum_ref2 = _mm256_set1_epi16(0);
+ sum_ref3 = _mm256_set1_epi16(0);
+ for (i = 0; i < 32 ; i++) {
+ // load src and all refs
+ src_reg = _mm256_load_si256((__m256i *)(src));
+ ref0_reg = _mm256_loadu_si256((__m256i *) (ref0));
+ ref1_reg = _mm256_loadu_si256((__m256i *) (ref1));
+ ref2_reg = _mm256_loadu_si256((__m256i *) (ref2));
+ ref3_reg = _mm256_loadu_si256((__m256i *) (ref3));
+ // sum of absolute differences between each ref-i and src
+ ref0_reg = _mm256_sad_epu8(ref0_reg, src_reg);
+ ref1_reg = _mm256_sad_epu8(ref1_reg, src_reg);
+ ref2_reg = _mm256_sad_epu8(ref2_reg, src_reg);
+ ref3_reg = _mm256_sad_epu8(ref3_reg, src_reg);
+ // accumulate the SADs for each ref-i
+ sum_ref0 = _mm256_add_epi32(sum_ref0, ref0_reg);
+ sum_ref1 = _mm256_add_epi32(sum_ref1, ref1_reg);
+ sum_ref2 = _mm256_add_epi32(sum_ref2, ref2_reg);
+ sum_ref3 = _mm256_add_epi32(sum_ref3, ref3_reg);
+
+ src += src_stride;
+ ref0 += ref_stride;
+ ref1 += ref_stride;
+ ref2 += ref_stride;
+ ref3 += ref_stride;
+ }
+ {
+ __m128i sum;
+ // each 64-bit lane of sum_ref-i holds its partial SAD in the low
+ // 4 bytes; the high 4 bytes are zero.
+ // shift sum_ref1 and sum_ref3 left by 4 bytes into those zero bytes
+ sum_ref1 = _mm256_slli_si256(sum_ref1, 4);
+ sum_ref3 = _mm256_slli_si256(sum_ref3, 4);
+
+ // merge sum_ref0 with sum_ref1, and sum_ref2 with sum_ref3
+ sum_ref0 = _mm256_or_si256(sum_ref0, sum_ref1);
+ sum_ref2 = _mm256_or_si256(sum_ref2, sum_ref3);
+
+ // merge every 64 bit from each sum_ref-i
+ sum_mlow = _mm256_unpacklo_epi64(sum_ref0, sum_ref2);
+ sum_mhigh = _mm256_unpackhi_epi64(sum_ref0, sum_ref2);
+
+ // add the low 64 bit to the high 64 bit
+ sum_mlow = _mm256_add_epi32(sum_mlow, sum_mhigh);
+
+ // add the low 128 bit to the high 128 bit
+ sum = _mm_add_epi32(_mm256_castsi256_si128(sum_mlow),
+ _mm256_extractf128_si256(sum_mlow, 1));
+
+ _mm_storeu_si128((__m128i *)(res), sum);
+ }
+}
+
+void vp9_sad64x64x4d_avx2(uint8_t *src,
+ int src_stride,
+ uint8_t *ref[4],
+ int ref_stride,
+ unsigned int res[4]) {
+ __m256i src_reg, srcnext_reg, ref0_reg, ref0next_reg;
+ __m256i ref1_reg, ref1next_reg, ref2_reg, ref2next_reg;
+ __m256i ref3_reg, ref3next_reg;
+ __m256i sum_ref0, sum_ref1, sum_ref2, sum_ref3;
+ __m256i sum_mlow, sum_mhigh;
+ int i;
+ uint8_t *ref0, *ref1, *ref2, *ref3;
+
+ ref0 = ref[0];
+ ref1 = ref[1];
+ ref2 = ref[2];
+ ref3 = ref[3];
+ sum_ref0 = _mm256_set1_epi16(0);
+ sum_ref1 = _mm256_set1_epi16(0);
+ sum_ref2 = _mm256_set1_epi16(0);
+ sum_ref3 = _mm256_set1_epi16(0);
+ for (i = 0; i < 64 ; i++) {
+ // load 64 bytes from src and all refs
+ src_reg = _mm256_load_si256((__m256i *)(src));
+ srcnext_reg = _mm256_load_si256((__m256i *)(src + 32));
+ ref0_reg = _mm256_loadu_si256((__m256i *) (ref0));
+ ref0next_reg = _mm256_loadu_si256((__m256i *) (ref0 + 32));
+ ref1_reg = _mm256_loadu_si256((__m256i *) (ref1));
+ ref1next_reg = _mm256_loadu_si256((__m256i *) (ref1 + 32));
+ ref2_reg = _mm256_loadu_si256((__m256i *) (ref2));
+ ref2next_reg = _mm256_loadu_si256((__m256i *) (ref2 + 32));
+ ref3_reg = _mm256_loadu_si256((__m256i *) (ref3));
+ ref3next_reg = _mm256_loadu_si256((__m256i *) (ref3 + 32));
+ // sum of absolute differences between each ref-i and src
+ ref0_reg = _mm256_sad_epu8(ref0_reg, src_reg);
+ ref1_reg = _mm256_sad_epu8(ref1_reg, src_reg);
+ ref2_reg = _mm256_sad_epu8(ref2_reg, src_reg);
+ ref3_reg = _mm256_sad_epu8(ref3_reg, src_reg);
+ ref0next_reg = _mm256_sad_epu8(ref0next_reg, srcnext_reg);
+ ref1next_reg = _mm256_sad_epu8(ref1next_reg, srcnext_reg);
+ ref2next_reg = _mm256_sad_epu8(ref2next_reg, srcnext_reg);
+ ref3next_reg = _mm256_sad_epu8(ref3next_reg, srcnext_reg);
+
+ // accumulate the SADs for each ref-i
+ sum_ref0 = _mm256_add_epi32(sum_ref0, ref0_reg);
+ sum_ref1 = _mm256_add_epi32(sum_ref1, ref1_reg);
+ sum_ref2 = _mm256_add_epi32(sum_ref2, ref2_reg);
+ sum_ref3 = _mm256_add_epi32(sum_ref3, ref3_reg);
+ sum_ref0 = _mm256_add_epi32(sum_ref0, ref0next_reg);
+ sum_ref1 = _mm256_add_epi32(sum_ref1, ref1next_reg);
+ sum_ref2 = _mm256_add_epi32(sum_ref2, ref2next_reg);
+ sum_ref3 = _mm256_add_epi32(sum_ref3, ref3next_reg);
+ src += src_stride;
+ ref0 += ref_stride;
+ ref1 += ref_stride;
+ ref2 += ref_stride;
+ ref3 += ref_stride;
+ }
+ {
+ __m128i sum;
+
+ // each 64-bit lane of sum_ref-i holds its partial SAD in the low
+ // 4 bytes; the high 4 bytes are zero.
+ // shift sum_ref1 and sum_ref3 left by 4 bytes into those zero bytes
+ sum_ref1 = _mm256_slli_si256(sum_ref1, 4);
+ sum_ref3 = _mm256_slli_si256(sum_ref3, 4);
+
+ // merge sum_ref0 with sum_ref1, and sum_ref2 with sum_ref3
+ sum_ref0 = _mm256_or_si256(sum_ref0, sum_ref1);
+ sum_ref2 = _mm256_or_si256(sum_ref2, sum_ref3);
+
+ // merge every 64 bit from each sum_ref-i
+ sum_mlow = _mm256_unpacklo_epi64(sum_ref0, sum_ref2);
+ sum_mhigh = _mm256_unpackhi_epi64(sum_ref0, sum_ref2);
+
+ // add the low 64 bit to the high 64 bit
+ sum_mlow = _mm256_add_epi32(sum_mlow, sum_mhigh);
+
+ // add the low 128 bit to the high 128 bit
+ sum = _mm_add_epi32(_mm256_castsi256_si128(sum_mlow),
+ _mm256_extractf128_si256(sum_mlow, 1));
+
+ _mm_storeu_si128((__m128i *)(res), sum);
+ }
+}
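A minimal check harness for the 32x32 kernel, assuming the file above is compiled with AVX2 enabled (e.g. -mavx2) and linked alongside it; the buffer names and the main() wrapper are illustrative and not part of libvpx. Note that src must be 32-byte aligned because the kernels load it with _mm256_load_si256, while the references are loaded unaligned.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

void vp9_sad32x32x4d_avx2(uint8_t *src, int src_stride,
                          uint8_t *ref[4], int ref_stride,
                          unsigned int res[4]);

int main(void) {
  // src is read with aligned 256-bit loads, so keep it 32-byte aligned.
  static _Alignas(32) uint8_t src[32 * 32];
  static uint8_t ref_buf[4][32 * 32];
  uint8_t *ref[4] = { ref_buf[0], ref_buf[1], ref_buf[2], ref_buf[3] };
  unsigned int res[4], expect[4] = { 0, 0, 0, 0 };

  srand(0);
  for (int i = 0; i < 32 * 32; i++) {
    src[i] = (uint8_t)(rand() & 0xff);
    for (int r = 0; r < 4; r++)
      ref_buf[r][i] = (uint8_t)(rand() & 0xff);
  }

  // Scalar expectation: per-reference sum of absolute differences
  // (stride 32 with contiguous buffers, so a flat loop is equivalent).
  for (int r = 0; r < 4; r++)
    for (int i = 0; i < 32 * 32; i++)
      expect[r] += (unsigned int)abs(src[i] - ref_buf[r][i]);

  vp9_sad32x32x4d_avx2(src, 32, ref, 32, res);
  for (int r = 0; r < 4; r++)
    printf("ref[%d]: avx2=%u scalar=%u %s\n", r, res[r], expect[r],
           res[r] == expect[r] ? "OK" : "MISMATCH");
  return 0;
}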