Diffstat (limited to 'vpx_dsp')
 vpx_dsp/mips/sad_mmi.c                 |  2
 vpx_dsp/mips/sub_pixel_variance_msa.c  |  7
 vpx_dsp/mips/variance_mmi.c            | 72
 vpx_dsp/mips/variance_msa.c            |  5
 vpx_dsp/ppc/inv_txfm_vsx.c             | 18
 vpx_dsp/ppc/sad_vsx.c                  |  6
 vpx_dsp/ppc/types_vsx.h                |  2
 vpx_dsp/ppc/variance_vsx.c             |  2
 vpx_dsp/prob.h                         |  2
 vpx_dsp/vpx_dsp_common.h               |  4
 vpx_dsp/x86/highbd_variance_sse2.c     |  6
 11 files changed, 64 insertions(+), 62 deletions(-)
diff --git a/vpx_dsp/mips/sad_mmi.c b/vpx_dsp/mips/sad_mmi.c
index 33bd3fe7f..4368db5fd 100644
--- a/vpx_dsp/mips/sad_mmi.c
+++ b/vpx_dsp/mips/sad_mmi.c
@@ -341,7 +341,7 @@
const uint8_t *ref_array, int ref_stride, \
uint32_t *sad_array) { \
int i; \
- for (i = 0; i < k; ++i) \
+ for (i = 0; i < (k); ++i) \
sad_array[i] = \
vpx_sad##m##x##n##_mmi(src, src_stride, &ref_array[i], ref_stride); \
}
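
In the macro above, `k` is instantiated with a literal block count, so the old form happened to work; the added parentheses guard the general case, where an expression argument can bind to the `<` operator unexpectedly. A minimal standalone sketch (hypothetical macros, not libvpx code):

#include <stdio.h>

#define COUNT_BAD(k) \
  do { int i, n = 0; for (i = 0; i < k; ++i) ++n; printf("%d\n", n); } while (0)
#define COUNT_GOOD(k) \
  do { int i, n = 0; for (i = 0; i < (k); ++i) ++n; printf("%d\n", n); } while (0)

int main(void) {
  int x = 5;
  COUNT_BAD(x & 4);  /* i < x & 4 binds as (i < x) & 4 == 1 & 4 == 0: prints 0 */
  COUNT_GOOD(x & 4); /* i < (x & 4) == i < 4: prints 4 */
  return 0;
}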
diff --git a/vpx_dsp/mips/sub_pixel_variance_msa.c b/vpx_dsp/mips/sub_pixel_variance_msa.c
index 6c2649d7e..572fcabfc 100644
--- a/vpx_dsp/mips/sub_pixel_variance_msa.c
+++ b/vpx_dsp/mips/sub_pixel_variance_msa.c
@@ -27,13 +27,14 @@ static const uint8_t bilinear_filters_msa[8][2] = {
HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m); \
DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \
\
- sub += res_l0_m + res_l1_m; \
+ (sub) += res_l0_m + res_l1_m; \
}
-#define VARIANCE_WxH(sse, diff, shift) sse - (((uint32_t)diff * diff) >> shift)
+#define VARIANCE_WxH(sse, diff, shift) \
+ (sse) - (((uint32_t)(diff) * (diff)) >> (shift))
#define VARIANCE_LARGE_WxH(sse, diff, shift) \
- sse - (((int64_t)diff * diff) >> shift)
+ (sse) - (((int64_t)(diff) * (diff)) >> (shift))
static uint32_t avg_sse_diff_4width_msa(const uint8_t *src_ptr,
int32_t src_stride,
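
The VARIANCE_WxH change above is the most consequential one in this patch: the parameter `diff` is multiplied by itself, so an expression argument changes the arithmetic outright, and the `(uint32_t)` cast binds to only the first token of the expansion. A standalone sketch (hypothetical macros, not libvpx code):

#include <stdint.h>
#include <stdio.h>

#define SQ_BAD(diff) ((uint32_t)diff * diff)
#define SQ_GOOD(diff) ((uint32_t)(diff) * (diff))

int main(void) {
  int a = 10, b = 3;
  /* SQ_BAD(a - b) expands to ((uint32_t)a - b * a - b) = 10u - 30 - 3,
   * which wraps around to 4294967273. */
  printf("%u\n", SQ_BAD(a - b));
  printf("%u\n", SQ_GOOD(a - b)); /* (uint32_t)(10 - 3) * (10 - 3) = 49 */
  return 0;
}

Note that even after the fix the macro body as a whole is unparenthesized, (sse) - (...), so the result still should not be embedded in a larger expression.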
diff --git a/vpx_dsp/mips/variance_mmi.c b/vpx_dsp/mips/variance_mmi.c
index 88908e01a..c1780c33a 100644
--- a/vpx_dsp/mips/variance_mmi.c
+++ b/vpx_dsp/mips/variance_mmi.c
@@ -992,19 +992,19 @@ static inline uint32_t vpx_mse8x(const uint8_t *src_ptr, int src_stride,
vpx_mse8xN(16);
vpx_mse8xN(8);
-#define SUBPIX_VAR(W, H) \
- uint32_t vpx_sub_pixel_variance##W##x##H##_mmi( \
- const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
- const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint8_t temp2[H * W]; \
- \
- var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, H + 1, \
- W, bilinear_filters[x_offset]); \
- var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[y_offset]); \
- \
- return vpx_variance##W##x##H##_mmi(temp2, W, ref_ptr, ref_stride, sse); \
+#define SUBPIX_VAR(W, H) \
+ uint32_t vpx_sub_pixel_variance##W##x##H##_mmi( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
+ uint16_t fdata3[((H) + 1) * (W)]; \
+ uint8_t temp2[(H) * (W)]; \
+ \
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, (H) + 1, \
+ W, bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ return vpx_variance##W##x##H##_mmi(temp2, W, ref_ptr, ref_stride, sse); \
}
SUBPIX_VAR(64, 64)
@@ -1087,9 +1087,9 @@ static inline void var_filter_block2d_bil_16x(const uint8_t *src_ptr,
uint32_t vpx_sub_pixel_variance16x##H##_mmi( \
const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
- uint8_t temp2[16 * H]; \
+ uint8_t temp2[16 * (H)]; \
var_filter_block2d_bil_16x(src_ptr, src_stride, x_offset, y_offset, temp2, \
- (H - 2) / 2); \
+ ((H)-2) / 2); \
\
return vpx_variance16x##H##_mmi(temp2, 16, ref_ptr, ref_stride, sse); \
}
@@ -1169,9 +1169,9 @@ static inline void var_filter_block2d_bil_8x(const uint8_t *src_ptr,
uint32_t vpx_sub_pixel_variance8x##H##_mmi( \
const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
- uint8_t temp2[8 * H]; \
+ uint8_t temp2[8 * (H)]; \
var_filter_block2d_bil_8x(src_ptr, src_stride, x_offset, y_offset, temp2, \
- (H - 2) / 2); \
+ ((H)-2) / 2); \
\
return vpx_variance8x##H##_mmi(temp2, 8, ref_ptr, ref_stride, sse); \
}
@@ -1247,9 +1247,9 @@ static inline void var_filter_block2d_bil_4x(const uint8_t *src_ptr,
uint32_t vpx_sub_pixel_variance4x##H##_mmi( \
const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
- uint8_t temp2[4 * H]; \
+ uint8_t temp2[4 * (H)]; \
var_filter_block2d_bil_4x(src_ptr, src_stride, x_offset, y_offset, temp2, \
- (H - 2) / 2); \
+ ((H)-2) / 2); \
\
return vpx_variance4x##H##_mmi(temp2, 4, ref_ptr, ref_stride, sse); \
}
@@ -1257,23 +1257,23 @@ static inline void var_filter_block2d_bil_4x(const uint8_t *src_ptr,
SUBPIX_VAR4XN(8)
SUBPIX_VAR4XN(4)
-#define SUBPIX_AVG_VAR(W, H) \
- uint32_t vpx_sub_pixel_avg_variance##W##x##H##_mmi( \
- const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
- const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, \
- const uint8_t *second_pred) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint8_t temp2[H * W]; \
- DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
- \
- var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, H + 1, \
- W, bilinear_filters[x_offset]); \
- var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[y_offset]); \
- \
- vpx_comp_avg_pred_c(temp3, second_pred, W, H, temp2, W); \
- \
- return vpx_variance##W##x##H##_mmi(temp3, W, ref_ptr, ref_stride, sse); \
+#define SUBPIX_AVG_VAR(W, H) \
+ uint32_t vpx_sub_pixel_avg_variance##W##x##H##_mmi( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, \
+ const uint8_t *second_pred) { \
+ uint16_t fdata3[((H) + 1) * (W)]; \
+ uint8_t temp2[(H) * (W)]; \
+ DECLARE_ALIGNED(16, uint8_t, temp3[(H) * (W)]); \
+ \
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, (H) + 1, \
+ W, bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ vpx_comp_avg_pred_c(temp3, second_pred, W, H, temp2, W); \
+ \
+ return vpx_variance##W##x##H##_mmi(temp3, W, ref_ptr, ref_stride, sse); \
}
SUBPIX_AVG_VAR(64, 64)
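
The array-size changes in these macros follow the same logic: in a declaration like uint8_t temp2[H * W];, an expression argument for H silently changes the buffer size. A small sketch (hypothetical macros, C11 for static_assert; not libvpx code):

#include <assert.h>

#define AREA_BAD(H, W) (H * W)
#define AREA_GOOD(H, W) ((H) * (W))

static_assert(AREA_BAD(8 + 8, 4) == 40, "8 + 8 * 4 binds as 8 + (8 * 4)");
static_assert(AREA_GOOD(8 + 8, 4) == 64, "(8 + 8) * 4, as intended");

int main(void) { return 0; } /* compiles only if the asserts hold */

All of the W/H instantiations in this file are literals, so the change is defensive rather than a bug fix.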
diff --git a/vpx_dsp/mips/variance_msa.c b/vpx_dsp/mips/variance_msa.c
index 49b2f9923..444b086a6 100644
--- a/vpx_dsp/mips/variance_msa.c
+++ b/vpx_dsp/mips/variance_msa.c
@@ -33,10 +33,11 @@
sub += res_l0_m + res_l1_m; \
}
-#define VARIANCE_WxH(sse, diff, shift) sse - (((uint32_t)diff * diff) >> shift)
+#define VARIANCE_WxH(sse, diff, shift) \
+ (sse) - (((uint32_t)(diff) * (diff)) >> (shift))
#define VARIANCE_LARGE_WxH(sse, diff, shift) \
- sse - (((int64_t)diff * diff) >> shift)
+ (sse) - (((int64_t)(diff) * (diff)) >> (shift))
static uint32_t sse_diff_4width_msa(const uint8_t *src_ptr, int32_t src_stride,
const uint8_t *ref_ptr, int32_t ref_stride,
diff --git a/vpx_dsp/ppc/inv_txfm_vsx.c b/vpx_dsp/ppc/inv_txfm_vsx.c
index 8d8fb4401..e99412eca 100644
--- a/vpx_dsp/ppc/inv_txfm_vsx.c
+++ b/vpx_dsp/ppc/inv_txfm_vsx.c
@@ -1074,15 +1074,15 @@ void vpx_idct16x16_256_add_vsx(const tran_low_t *input, uint8_t *dest,
PIXEL_ADD(in3, d_ul, add, shift6); \
vec_vsx_st(vec_packsu(d_uh, d_ul), (step)*stride + 16, dest);
-#define ADD_STORE_BLOCK(in, offset) \
- PIXEL_ADD_STORE32(in[0][0], in[1][0], in[2][0], in[3][0], offset + 0); \
- PIXEL_ADD_STORE32(in[0][1], in[1][1], in[2][1], in[3][1], offset + 1); \
- PIXEL_ADD_STORE32(in[0][2], in[1][2], in[2][2], in[3][2], offset + 2); \
- PIXEL_ADD_STORE32(in[0][3], in[1][3], in[2][3], in[3][3], offset + 3); \
- PIXEL_ADD_STORE32(in[0][4], in[1][4], in[2][4], in[3][4], offset + 4); \
- PIXEL_ADD_STORE32(in[0][5], in[1][5], in[2][5], in[3][5], offset + 5); \
- PIXEL_ADD_STORE32(in[0][6], in[1][6], in[2][6], in[3][6], offset + 6); \
- PIXEL_ADD_STORE32(in[0][7], in[1][7], in[2][7], in[3][7], offset + 7);
+#define ADD_STORE_BLOCK(in, offset) \
+ PIXEL_ADD_STORE32(in[0][0], in[1][0], in[2][0], in[3][0], (offset) + 0); \
+ PIXEL_ADD_STORE32(in[0][1], in[1][1], in[2][1], in[3][1], (offset) + 1); \
+ PIXEL_ADD_STORE32(in[0][2], in[1][2], in[2][2], in[3][2], (offset) + 2); \
+ PIXEL_ADD_STORE32(in[0][3], in[1][3], in[2][3], in[3][3], (offset) + 3); \
+ PIXEL_ADD_STORE32(in[0][4], in[1][4], in[2][4], in[3][4], (offset) + 4); \
+ PIXEL_ADD_STORE32(in[0][5], in[1][5], in[2][5], in[3][5], (offset) + 5); \
+ PIXEL_ADD_STORE32(in[0][6], in[1][6], in[2][6], in[3][6], (offset) + 6); \
+ PIXEL_ADD_STORE32(in[0][7], in[1][7], in[2][7], in[3][7], (offset) + 7);
void vpx_idct32x32_1024_add_vsx(const tran_low_t *input, uint8_t *dest,
int stride) {
diff --git a/vpx_dsp/ppc/sad_vsx.c b/vpx_dsp/ppc/sad_vsx.c
index cdee8f3d1..a08ae1241 100644
--- a/vpx_dsp/ppc/sad_vsx.c
+++ b/vpx_dsp/ppc/sad_vsx.c
@@ -115,7 +115,7 @@ SAD64(64);
unsigned int vpx_sad16x##height##_avg_vsx( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred) { \
- DECLARE_ALIGNED(16, uint8_t, comp_pred[16 * height]); \
+ DECLARE_ALIGNED(16, uint8_t, comp_pred[16 * (height)]); \
vpx_comp_avg_pred_vsx(comp_pred, second_pred, 16, height, ref, \
ref_stride); \
\
@@ -126,7 +126,7 @@ SAD64(64);
unsigned int vpx_sad32x##height##_avg_vsx( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred) { \
- DECLARE_ALIGNED(32, uint8_t, comp_pred[32 * height]); \
+ DECLARE_ALIGNED(32, uint8_t, comp_pred[32 * (height)]); \
vpx_comp_avg_pred_vsx(comp_pred, second_pred, 32, height, ref, \
ref_stride); \
\
@@ -137,7 +137,7 @@ SAD64(64);
unsigned int vpx_sad64x##height##_avg_vsx( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred) { \
- DECLARE_ALIGNED(64, uint8_t, comp_pred[64 * height]); \
+ DECLARE_ALIGNED(64, uint8_t, comp_pred[64 * (height)]); \
vpx_comp_avg_pred_vsx(comp_pred, second_pred, 64, height, ref, \
ref_stride); \
return vpx_sad64x##height##_vsx(src, src_stride, comp_pred, 64); \
diff --git a/vpx_dsp/ppc/types_vsx.h b/vpx_dsp/ppc/types_vsx.h
index 4cba7d365..b89116924 100644
--- a/vpx_dsp/ppc/types_vsx.h
+++ b/vpx_dsp/ppc/types_vsx.h
@@ -64,7 +64,7 @@ static const uint8x16_t xxpermdi3_perm = { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
#define unpack_to_s16_l(v) \
(int16x8_t) vec_mergel((uint8x16_t)v, vec_splat_u8(0))
#ifndef xxpermdi
-#define xxpermdi(a, b, c) vec_xxpermdi(b, a, ((c >> 1) | (c & 1) << 1) ^ 3)
+#define xxpermdi(a, b, c) vec_xxpermdi(b, a, (((c) >> 1) | ((c)&1) << 1) ^ 3)
#endif
#endif
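
For context: this xxpermdi wrapper evidently emulates the big-endian xxpermdi selector on little-endian targets by swapping the operands and remapping the 2-bit immediate. The remap (((c) >> 1) | ((c) & 1) << 1) ^ 3 swaps the selector's two bits and then inverts them, mapping c = 0, 1, 2, 3 to 3, 1, 2, 0. Because `c` feeds >>, &, and << at once, an unparenthesized expression argument misparses badly; a sketch (hypothetical macros, not libvpx code):

#include <stdio.h>

#define REMAP_BAD(c) (((c >> 1) | (c & 1) << 1) ^ 3)
#define REMAP_GOOD(c) ((((c) >> 1) | ((c)&1) << 1) ^ 3)

int main(void) {
  printf("%d\n", REMAP_GOOD(2 | 1)); /* 0: selector 3 remaps to 0 */
  printf("%d\n", REMAP_BAD(2 | 1));  /* 5: | binds looser than >>, &, and << */
  return 0;
}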
diff --git a/vpx_dsp/ppc/variance_vsx.c b/vpx_dsp/ppc/variance_vsx.c
index 8926160c2..be9614a35 100644
--- a/vpx_dsp/ppc/variance_vsx.c
+++ b/vpx_dsp/ppc/variance_vsx.c
@@ -243,7 +243,7 @@ static INLINE void variance(const uint8_t *src_ptr, int src_stride,
uint32_t *sse) { \
int sum; \
variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, &sum); \
- return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / ((W) * (H))); \
}
#define VARIANCES(W, H) VAR(W, H)
diff --git a/vpx_dsp/prob.h b/vpx_dsp/prob.h
index 30d37c6a7..7a71c0041 100644
--- a/vpx_dsp/prob.h
+++ b/vpx_dsp/prob.h
@@ -32,7 +32,7 @@ typedef int8_t vpx_tree_index;
#define TREE_SIZE(leaf_count) (2 * (leaf_count)-2)
-#define vpx_complement(x) (255 - x)
+#define vpx_complement(x) (255 - (x))
#define MODE_MV_COUNT_SAT 20
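
vpx_complement is the textbook case for this whole patch: with subtraction in the body, an expression argument flips signs. A sketch (hypothetical wrapper macros, not libvpx code):

#include <stdio.h>

#define COMPL_BAD(x) (255 - x)
#define COMPL_GOOD(x) (255 - (x))

int main(void) {
  int p = 200, q = 50;
  printf("%d\n", COMPL_BAD(p - q));  /* 255 - 200 - 50 = 5 (wrong) */
  printf("%d\n", COMPL_GOOD(p - q)); /* 255 - (200 - 50) = 105 */
  return 0;
}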
diff --git a/vpx_dsp/vpx_dsp_common.h b/vpx_dsp/vpx_dsp_common.h
index 6782c9063..f1fcf2df5 100644
--- a/vpx_dsp/vpx_dsp_common.h
+++ b/vpx_dsp/vpx_dsp_common.h
@@ -25,8 +25,8 @@ extern "C" {
#define VPX_SWAP(type, a, b) \
do { \
type c = (b); \
- b = a; \
- a = c; \
+ (b) = a; \
+ (a) = c; \
} while (0)
#if CONFIG_VP9_HIGHBITDEPTH
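
For VPX_SWAP the parentheses are purely defensive consistency: with plain lvalue arguments the old and new forms expand identically. The do { ... } while (0) wrapper is what does the heavy lifting, letting the macro sit in un-braced control flow. A usage sketch with the macro exactly as defined above:

#include <stdio.h>

#define VPX_SWAP(type, a, b) \
  do {                       \
    type c = (b);            \
    (b) = a;                 \
    (a) = c;                 \
  } while (0)

int main(void) {
  int x = 1, y = 2;
  if (x > 0) VPX_SWAP(int, x, y); /* expands safely, no dangling-else hazard */
  printf("%d %d\n", x, y);        /* prints "2 1" */
  return 0;
}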
diff --git a/vpx_dsp/x86/highbd_variance_sse2.c b/vpx_dsp/x86/highbd_variance_sse2.c
index cc4e260df..dd6cfbb2c 100644
--- a/vpx_dsp/x86/highbd_variance_sse2.c
+++ b/vpx_dsp/x86/highbd_variance_sse2.c
@@ -136,7 +136,7 @@ HIGH_GET_VAR(8);
highbd_8_variance_sse2( \
src, src_stride, ref, ref_stride, w, h, sse, &sum, \
vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
- return *sse - (uint32_t)(((int64_t)sum * sum) >> shift); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) >> (shift)); \
} \
\
uint32_t vpx_highbd_10_variance##w##x##h##_sse2( \
@@ -149,7 +149,7 @@ HIGH_GET_VAR(8);
highbd_10_variance_sse2( \
src, src_stride, ref, ref_stride, w, h, sse, &sum, \
vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
- var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) >> (shift)); \
return (var >= 0) ? (uint32_t)var : 0; \
} \
\
@@ -163,7 +163,7 @@ HIGH_GET_VAR(8);
highbd_12_variance_sse2( \
src, src_stride, ref, ref_stride, w, h, sse, &sum, \
vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
- var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) >> (shift)); \
return (var >= 0) ? (uint32_t)var : 0; \
}
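
All three variants compute the integer form of the variance identity: with N = w * h a power of two and shift = log2(N), (sum * sum) >> shift is floor(sum^2 / N), so the returned value is N times the block variance, sse - sum^2 / N. The 10- and 12-bit paths additionally clamp negative results to zero, presumably because their sse/sum inputs are downscaled with rounding, which can push the difference slightly below zero. A worked numeric check (hypothetical values, not libvpx code):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* 4x4 block: fifteen pixel diffs of 3 and one diff of 7. */
  int64_t sum = 15 * 3 + 7;         /* 52 */
  int64_t sse = 15 * 3 * 3 + 7 * 7; /* 184 */
  int shift = 4;                    /* log2(4 * 4) */
  /* N * variance = sse - floor(sum^2 / N) = 184 - 169 = 15 */
  printf("%lld\n", (long long)(sse - ((sum * sum) >> shift)));
  return 0;
}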