Diffstat (limited to 'vpx_dsp')
-rw-r--r--  vpx_dsp/arm/variance_neon.c  | 12
-rw-r--r--  vpx_dsp/x86/inv_txfm_sse2.c  | 10
-rw-r--r--  vpx_dsp/x86/variance_avx2.c  | 16
3 files changed, 14 insertions, 24 deletions
diff --git a/vpx_dsp/arm/variance_neon.c b/vpx_dsp/arm/variance_neon.c
index f469afc4e..b6d7f86a4 100644
--- a/vpx_dsp/arm/variance_neon.c
+++ b/vpx_dsp/arm/variance_neon.c
@@ -75,7 +75,7 @@ unsigned int vpx_variance8x8_neon(const uint8_t *a, int a_stride,
unsigned int *sse) {
int sum;
variance_neon_w8(a, a_stride, b, b_stride, 8, 8, sse, &sum);
- return *sse - (((int64_t)sum * sum) >> 6); // >> 6 = / 8 * 8
+ return *sse - ((sum * sum) >> 6);
}
unsigned int vpx_variance16x16_neon(const uint8_t *a, int a_stride,
@@ -83,7 +83,7 @@ unsigned int vpx_variance16x16_neon(const uint8_t *a, int a_stride,
unsigned int *sse) {
int sum;
variance_neon_w8(a, a_stride, b, b_stride, 16, 16, sse, &sum);
- return *sse - (((int64_t)sum * sum) >> 8); // >> 8 = / 16 * 16
+ return *sse - (((uint32_t)((int64_t)sum * sum)) >> 8);
}
unsigned int vpx_variance32x32_neon(const uint8_t *a, int a_stride,
@@ -91,7 +91,7 @@ unsigned int vpx_variance32x32_neon(const uint8_t *a, int a_stride,
unsigned int *sse) {
int sum;
variance_neon_w8(a, a_stride, b, b_stride, 32, 32, sse, &sum);
- return *sse - (((int64_t)sum * sum) >> 10); // >> 10 = / 32 * 32
+ return *sse - (unsigned int)(((int64_t)sum * sum) >> 10);
}
unsigned int vpx_variance32x64_neon(const uint8_t *a, int a_stride,
@@ -104,7 +104,7 @@ unsigned int vpx_variance32x64_neon(const uint8_t *a, int a_stride,
32, 32, &sse2, &sum2);
*sse = sse1 + sse2;
sum1 += sum2;
- return *sse - (((int64_t)sum1 * sum1) >> 11); // >> 11 = / 32 * 64
+ return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 11);
}
unsigned int vpx_variance64x32_neon(const uint8_t *a, int a_stride,
@@ -117,7 +117,7 @@ unsigned int vpx_variance64x32_neon(const uint8_t *a, int a_stride,
64, 16, &sse2, &sum2);
*sse = sse1 + sse2;
sum1 += sum2;
- return *sse - (((int64_t)sum1 * sum1) >> 11); // >> 11 = / 32 * 64
+ return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 11);
}
unsigned int vpx_variance64x64_neon(const uint8_t *a, int a_stride,
@@ -141,7 +141,7 @@ unsigned int vpx_variance64x64_neon(const uint8_t *a, int a_stride,
b_stride, 64, 16, &sse2, &sum2);
*sse = sse1 + sse2;
sum1 += sum2;
- return *sse - (((int64_t)sum1 * sum1) >> 12); // >> 12 = / 64 * 64
+ return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 12);
}
unsigned int vpx_variance16x8_neon(const unsigned char *src_ptr,
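
Note: the return expressions above all implement the same identity, variance = SSE - sum^2 / (w*h), with the division done as a right shift because the pixel count is a power of two (>> 6 for 8x8, >> 8 for 16x16, >> 10 for 32x32, >> 11 for 32x64 and 64x32, >> 12 for 64x64). The widening matters because |sum| can reach w*h*255: for 8x8 that is 16320, and 16320^2 still fits in a signed 32-bit int, so the int64_t cast can be dropped there; for 16x16 and larger (|sum| up to 65280, sum^2 up to about 4.26e9) the product overflows a signed 32-bit int and must be formed in 64 bits, or at least as an unsigned 32-bit value in the 16x16 case. A minimal sketch of the shared arithmetic, using a hypothetical helper name that is not part of the patch:

  #include <stdint.h>

  /* Hypothetical helper: variance = SSE - sum*sum / N, where N = w*h and
   * shift = log2(N). The product is widened to int64_t so block sizes of
   * 16x16 and above do not overflow a signed 32-bit int. */
  static unsigned int variance_final(unsigned int sse, int sum, int shift) {
    return sse - (unsigned int)(((int64_t)sum * sum) >> shift);
  }

For example, vpx_variance16x16_neon above effectively returns variance_final(*sse, sum, 8).
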
diff --git a/vpx_dsp/x86/inv_txfm_sse2.c b/vpx_dsp/x86/inv_txfm_sse2.c
index 330ae8d6a..cb56ad078 100644
--- a/vpx_dsp/x86/inv_txfm_sse2.c
+++ b/vpx_dsp/x86/inv_txfm_sse2.c
@@ -3066,17 +3066,7 @@ void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest,
in[6] = load_input_data(input + 192);
in[7] = load_input_data(input + 224);
- for (i = 8; i < 32; ++i) {
- in[i] = _mm_setzero_si128();
- }
-
array_transpose_8x8(in, in);
- // TODO(hkuang): Following transposes are unnecessary. But remove them will
- // lead to performance drop on some devices.
- array_transpose_8x8(in + 8, in + 8);
- array_transpose_8x8(in + 16, in + 16);
- array_transpose_8x8(in + 24, in + 24);
-
IDCT32_34
// 1_D: Store 32 intermediate results for each 8x32 block.
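
Note: the eight loads kept above read rows 0..7 of the 32x32 coefficient block (row stride 32, hence the offsets 0, 32, ..., 224). With the zero-fill of in[8..31] and their transposes removed, the 34-coefficient path evidently touches only that top-left 8x8 group of potentially non-zero coefficients. A purely illustrative, equivalent form of the remaining prologue, reusing the file's own load_input_data and array_transpose_8x8:

  /* Rows 0..7 of the 32x32 input; in[8..31] are no longer needed here. */
  for (i = 0; i < 8; ++i) in[i] = load_input_data(input + 32 * i);
  array_transpose_8x8(in, in);
  IDCT32_34
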
diff --git a/vpx_dsp/x86/variance_avx2.c b/vpx_dsp/x86/variance_avx2.c
index 7bc2693cf..8428e0520 100644
--- a/vpx_dsp/x86/variance_avx2.c
+++ b/vpx_dsp/x86/variance_avx2.c
@@ -61,7 +61,7 @@ unsigned int vpx_variance32x16_avx2(const uint8_t *src, int src_stride,
int sum;
variance_avx2(src, src_stride, ref, ref_stride, 32, 16, sse, &sum,
vpx_get32x32var_avx2, 32);
- return *sse - (((int64_t)sum * sum) >> 9);
+ return *sse - (uint32_t)(((int64_t)sum * sum) >> 9);
}
unsigned int vpx_variance32x32_avx2(const uint8_t *src, int src_stride,
@@ -70,7 +70,7 @@ unsigned int vpx_variance32x32_avx2(const uint8_t *src, int src_stride,
int sum;
variance_avx2(src, src_stride, ref, ref_stride, 32, 32, sse, &sum,
vpx_get32x32var_avx2, 32);
- return *sse - (((int64_t)sum * sum) >> 10);
+ return *sse - (uint32_t)(((int64_t)sum * sum) >> 10);
}
unsigned int vpx_variance64x64_avx2(const uint8_t *src, int src_stride,
@@ -79,7 +79,7 @@ unsigned int vpx_variance64x64_avx2(const uint8_t *src, int src_stride,
int sum;
variance_avx2(src, src_stride, ref, ref_stride, 64, 64, sse, &sum,
vpx_get32x32var_avx2, 32);
- return *sse - (((int64_t)sum * sum) >> 12);
+ return *sse - (uint32_t)(((int64_t)sum * sum) >> 12);
}
unsigned int vpx_variance64x32_avx2(const uint8_t *src, int src_stride,
@@ -88,7 +88,7 @@ unsigned int vpx_variance64x32_avx2(const uint8_t *src, int src_stride,
int sum;
variance_avx2(src, src_stride, ref, ref_stride, 64, 32, sse, &sum,
vpx_get32x32var_avx2, 32);
- return *sse - (((int64_t)sum * sum) >> 11);
+ return *sse - (uint32_t)(((int64_t)sum * sum) >> 11);
}
unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride,
@@ -115,7 +115,7 @@ unsigned int vpx_sub_pixel_variance64x64_avx2(const uint8_t *src,
dst + 32, dst_stride, 64, &sse2);
const int se = se1 + se2;
*sse = sse1 + sse2;
- return *sse - (((int64_t)se * se) >> 12);
+ return *sse - (uint32_t)(((int64_t)se * se) >> 12);
}
unsigned int vpx_sub_pixel_variance32x32_avx2(const uint8_t *src,
@@ -125,7 +125,7 @@ unsigned int vpx_sub_pixel_variance32x32_avx2(const uint8_t *src,
unsigned int *sse) {
const int se = vpx_sub_pixel_variance32xh_avx2(
src, src_stride, x_offset, y_offset, dst, dst_stride, 32, sse);
- return *sse - (((int64_t)se * se) >> 10);
+ return *sse - (uint32_t)(((int64_t)se * se) >> 10);
}
unsigned int vpx_sub_pixel_avg_variance64x64_avx2(
@@ -142,7 +142,7 @@ unsigned int vpx_sub_pixel_avg_variance64x64_avx2(
*sse = sse1 + sse2;
- return *sse - (((int64_t)se * se) >> 12);
+ return *sse - (uint32_t)(((int64_t)se * se) >> 12);
}
unsigned int vpx_sub_pixel_avg_variance32x32_avx2(
@@ -151,5 +151,5 @@ unsigned int vpx_sub_pixel_avg_variance32x32_avx2(
// Process 32 elements in parallel.
const int se = vpx_sub_pixel_avg_variance32xh_avx2(
src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 32, 32, sse);
- return *sse - (((int64_t)se * se) >> 10);
+ return *sse - (uint32_t)(((int64_t)se * se) >> 10);
}
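
Note: the (uint32_t) casts added in this file make an existing 64-to-32-bit narrowing explicit (presumably to quiet implicit-conversion warnings); the value itself always fits, since 0 <= sum*sum/N <= SSE <= N*255^2, and even for the largest block here (64x64, N = 4096) that bound is 4096 * 65025 = 266342400, comfortably inside 32 bits. A small self-contained check of that worst-case arithmetic, for illustration only and not part of the patch:

  #include <assert.h>
  #include <stdint.h>

  int main(void) {
    /* Worst case for a 64x64 block: every pixel pair differs by 255. */
    const int64_t n = 64 * 64;
    const int64_t max_sum = n * 255;              /* largest possible |sum|   */
    const int64_t max_sse = n * 255 * 255;        /* largest possible SSE     */
    assert((max_sum * max_sum) >> 12 == max_sse); /* sum^2/N meets SSE here   */
    assert(max_sse <= UINT32_MAX);                /* narrowing is lossless    */
    return 0;
  }
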