author     levytamar82 <tamar.levy@intel.com>   2014-10-01 23:47:31 -0700
committer  levytamar82 <tamar.levy@intel.com>   2014-10-19 13:59:10 -0700
commit     7045aec00a94bd49ed979b8dbd73bb81d58670dc (patch)
tree       9db7c0d26d8eaa4d9e462961545535fd219d384e /vp9/encoder
parent     feee7d97b797dff46e9eaef0871098dee463d508 (diff)
download   libvpx-7045aec00a94bd49ed979b8dbd73bb81d58670dc.tar
           libvpx-7045aec00a94bd49ed979b8dbd73bb81d58670dc.tar.gz
           libvpx-7045aec00a94bd49ed979b8dbd73bb81d58670dc.tar.bz2
           libvpx-7045aec00a94bd49ed979b8dbd73bb81d58670dc.zip
SAD32xh and SAD64xh for AVX2
All SAD functions that process 32 or more consecutive elements are optimized for AVX2: vp9_sad64x64, vp9_sad64x32, vp9_sad32x64, vp9_sad32x32, vp9_sad32x16, vp9_sad64x64_avg, vp9_sad64x32_avg, vp9_sad32x64_avg, vp9_sad32x32_avg and vp9_sad32x16_avg. The functions that appeared as hotspots are vp9_sad32x32 and vp9_sad64x64: vp9_sad32x32 was improved by 68% and vp9_sad64x64 by 90%, together giving an overall ~2.3% user-level gain.

Change-Id: Iccf86b375a2b54c5fbbe685902ead0c9a561b9fd
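For reference, the quantity these kernels compute is the plain sum of absolute differences over a w x h block. A minimal scalar sketch for comparison (the name sad_ref_c and its signature are illustrative, not part of this patch):

#include <stdint.h>
#include <stdlib.h>

/* Scalar reference: sum of absolute differences over a w x h block. */
static unsigned int sad_ref_c(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              int w, int h) {
  int x, y;
  unsigned int sad = 0;
  for (y = 0; y < h; y++) {
    for (x = 0; x < w; x++)
      sad += abs(src[x] - ref[x]);
    src += src_stride;
    ref += ref_stride;
  }
  return sad;
}

The AVX2 functions in the new file below replace the inner loop with _mm256_sad_epu8, which handles 32 bytes per call.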
Diffstat (limited to 'vp9/encoder')
-rw-r--r--  vp9/encoder/x86/vp9_sad_intrin_avx2.c  180
1 file changed, 180 insertions, 0 deletions
diff --git a/vp9/encoder/x86/vp9_sad_intrin_avx2.c b/vp9/encoder/x86/vp9_sad_intrin_avx2.c
new file mode 100644
index 000000000..113193070
--- /dev/null
+++ b/vp9/encoder/x86/vp9_sad_intrin_avx2.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <immintrin.h>
+#include "vpx_ports/mem.h"
+
+#define FSAD64_H(h) \
+unsigned int vp9_sad64x##h##_avx2(const uint8_t *src_ptr, \
+ int src_stride, \
+ const uint8_t *ref_ptr, \
+ int ref_stride) { \
+ int i, res; \
+ __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
+ __m256i sum_sad = _mm256_setzero_si256(); \
+ __m256i sum_sad_h; \
+ __m128i sum_sad128; \
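+  /* each loop iteration computes the SAD of one 64-pixel row in two 32-byte halves */ \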
+ for (i = 0 ; i < h ; i++) { \
+ ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \
+ ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32)); \
+ sad1_reg = _mm256_sad_epu8(ref1_reg, \
+ _mm256_loadu_si256((__m256i const *)src_ptr)); \
+ sad2_reg = _mm256_sad_epu8(ref2_reg, \
+ _mm256_loadu_si256((__m256i const *)(src_ptr + 32))); \
+ sum_sad = _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg)); \
+    ref_ptr += ref_stride; \
+    src_ptr += src_stride; \
+ } \
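+  /* reduce the four 64-bit partial SADs in sum_sad to a single 32-bit total */ \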
+ sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
+ sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \
+ sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \
+ sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \
+ res = _mm_cvtsi128_si32(sum_sad128); \
+ return res; \
+}
+
+#define FSAD32_H(h) \
+unsigned int vp9_sad32x##h##_avx2(const uint8_t *src_ptr, \
+ int src_stride, \
+ const uint8_t *ref_ptr, \
+ int ref_stride) { \
+ int i, res; \
+ __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
+ __m256i sum_sad = _mm256_setzero_si256(); \
+ __m256i sum_sad_h; \
+ __m128i sum_sad128; \
+ int ref2_stride = ref_stride << 1; \
+ int src2_stride = src_stride << 1; \
+ int max = h >> 1; \
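+  /* two 32-pixel rows are processed per iteration, so the loop runs h/2 times */ \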
+ for (i = 0 ; i < max ; i++) { \
+ ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \
+ ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \
+ sad1_reg = _mm256_sad_epu8(ref1_reg, \
+ _mm256_loadu_si256((__m256i const *)src_ptr)); \
+ sad2_reg = _mm256_sad_epu8(ref2_reg, \
+ _mm256_loadu_si256((__m256i const *)(src_ptr + src_stride))); \
+ sum_sad = _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg)); \
+    ref_ptr += ref2_stride; \
+    src_ptr += src2_stride; \
+ } \
+ sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
+ sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \
+ sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \
+ sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \
+ res = _mm_cvtsi128_si32(sum_sad128); \
+ return res; \
+}
+
+#define FSAD64 \
+FSAD64_H(64); \
+FSAD64_H(32);
+
+#define FSAD32 \
+FSAD32_H(64); \
+FSAD32_H(32); \
+FSAD32_H(16);
+
+FSAD64;
+FSAD32;
+
+#undef FSAD64
+#undef FSAD32
+#undef FSAD64_H
+#undef FSAD32_H
+
+#define FSADAVG64_H(h) \
+unsigned int vp9_sad64x##h##_avg_avx2(const uint8_t *src_ptr, \
+ int src_stride, \
+ const uint8_t *ref_ptr, \
+ int ref_stride, \
+ const uint8_t *second_pred) { \
+ int i, res; \
+ __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
+ __m256i sum_sad = _mm256_setzero_si256(); \
+ __m256i sum_sad_h; \
+ __m128i sum_sad128; \
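+  /* each reference row is averaged with second_pred (contiguous, 64 bytes per row) before the SAD */ \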
+ for (i = 0 ; i < h ; i++) { \
+ ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \
+ ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32)); \
+ ref1_reg = _mm256_avg_epu8(ref1_reg, \
+ _mm256_loadu_si256((__m256i const *)second_pred)); \
+ ref2_reg = _mm256_avg_epu8(ref2_reg, \
+               _mm256_loadu_si256((__m256i const *)(second_pred + 32))); \
+ sad1_reg = _mm256_sad_epu8(ref1_reg, \
+ _mm256_loadu_si256((__m256i const *)src_ptr)); \
+ sad2_reg = _mm256_sad_epu8(ref2_reg, \
+ _mm256_loadu_si256((__m256i const *)(src_ptr + 32))); \
+ sum_sad = _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg)); \
+    ref_ptr += ref_stride; \
+    src_ptr += src_stride; \
+    second_pred += 64; \
+ } \
+ sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
+ sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \
+ sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \
+ sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \
+ res = _mm_cvtsi128_si32(sum_sad128); \
+ return res; \
+}
+
+#define FSADAVG32_H(h) \
+unsigned int vp9_sad32x##h##_avg_avx2(const uint8_t *src_ptr, \
+ int src_stride, \
+ const uint8_t *ref_ptr, \
+ int ref_stride, \
+ const uint8_t *second_pred) { \
+ int i, res; \
+ __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
+ __m256i sum_sad = _mm256_setzero_si256(); \
+ __m256i sum_sad_h; \
+ __m128i sum_sad128; \
+ int ref2_stride = ref_stride << 1; \
+ int src2_stride = src_stride << 1; \
+ int max = h >> 1; \
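+  /* two 32-pixel rows per iteration; second_pred is contiguous, so it advances by 64 */ \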
+ for (i = 0 ; i < max ; i++) { \
+ ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \
+ ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \
+ ref1_reg = _mm256_avg_epu8(ref1_reg, \
+ _mm256_loadu_si256((__m256i const *)second_pred)); \
+ ref2_reg = _mm256_avg_epu8(ref2_reg, \
+               _mm256_loadu_si256((__m256i const *)(second_pred + 32))); \
+ sad1_reg = _mm256_sad_epu8(ref1_reg, \
+ _mm256_loadu_si256((__m256i const *)src_ptr)); \
+ sad2_reg = _mm256_sad_epu8(ref2_reg, \
+ _mm256_loadu_si256((__m256i const *)(src_ptr + src_stride))); \
+ sum_sad = _mm256_add_epi32(sum_sad, \
+ _mm256_add_epi32(sad1_reg, sad2_reg)); \
+    ref_ptr += ref2_stride; \
+    src_ptr += src2_stride; \
+    second_pred += 64; \
+ } \
+ sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
+ sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \
+ sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \
+ sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \
+ res = _mm_cvtsi128_si32(sum_sad128); \
+ return res; \
+}
+
+#define FSADAVG64 \
+FSADAVG64_H(64); \
+FSADAVG64_H(32);
+
+#define FSADAVG32 \
+FSADAVG32_H(64); \
+FSADAVG32_H(32); \
+FSADAVG32_H(16);
+
+FSADAVG64;
+FSADAVG32;
+
+#undef FSADAVG64
+#undef FSADAVG32
+#undef FSADAVG64_H
+#undef FSADAVG32_H
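
For readers untangling the macros, here is a minimal non-macro sketch of the 64-wide kernel, assuming an AVX2-capable compiler (e.g. gcc/clang with -mavx2); the name sad64xh_sketch is illustrative and not part of the patch. The _avg variants differ only in averaging each reference row with second_pred via _mm256_avg_epu8 before the SAD.

#include <immintrin.h>
#include <stdint.h>

static unsigned int sad64xh_sketch(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   int h) {
  __m256i acc = _mm256_setzero_si256();
  __m128i lo, hi;
  int i;
  for (i = 0; i < h; i++) {
    /* SAD of one 64-pixel row, taken as two 32-byte halves;
     * _mm256_sad_epu8 yields four 64-bit partial sums per register. */
    const __m256i r0 = _mm256_loadu_si256((const __m256i *)ref);
    const __m256i r1 = _mm256_loadu_si256((const __m256i *)(ref + 32));
    const __m256i s0 = _mm256_loadu_si256((const __m256i *)src);
    const __m256i s1 = _mm256_loadu_si256((const __m256i *)(src + 32));
    acc = _mm256_add_epi32(acc, _mm256_add_epi32(_mm256_sad_epu8(r0, s0),
                                                 _mm256_sad_epu8(r1, s1)));
    src += src_stride;
    ref += ref_stride;
  }
  /* Reduction: add the upper 64 bits of each 128-bit lane to the lower
   * 64 bits, then add the two 128-bit lanes and take the low 32 bits. */
  acc = _mm256_add_epi32(acc, _mm256_srli_si256(acc, 8));
  lo = _mm256_castsi256_si128(acc);
  hi = _mm256_extracti128_si256(acc, 1);
  return (unsigned int)_mm_cvtsi128_si32(_mm_add_epi32(lo, hi));
}

Its result should match the scalar sad_ref_c sketch above with w = 64 for any valid block.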