-rw-r--r--  examples/vpx_temporal_svc_encoder.c      2
-rw-r--r--  vp8/common/common.h                     12
-rw-r--r--  vp8/common/treecoder.h                   2
-rw-r--r--  vp8/decoder/onyxd_int.h                  8
-rw-r--r--  vp8/encoder/lookahead.h                  2
-rw-r--r--  vp8/encoder/onyx_if.c                    4
-rw-r--r--  vp8/encoder/onyx_int.h                   8
-rw-r--r--  vp8/vp8_cx_iface.c                       8
-rw-r--r--  vp8/vp8_dx_iface.c                       2
-rw-r--r--  vp9/common/vp9_alloccommon.h             2
-rw-r--r--  vp9/common/vp9_blockd.h                  2
-rw-r--r--  vp9/common/vp9_common.h                 18
-rw-r--r--  vp9/common/vp9_mvref_common.h            4
-rw-r--r--  vp9/common/vp9_scale.h                   2
-rw-r--r--  vp9/encoder/vp9_encodemb.c               2
-rw-r--r--  vp9/encoder/vp9_firstpass.h              2
-rw-r--r--  vp9/encoder/vp9_rd.h                     6
-rw-r--r--  vp9/vp9_cx_iface.c                       8
-rw-r--r--  vp9/vp9_dx_iface.c                       8
-rw-r--r--  vpx/src/vpx_encoder.c                    2
-rw-r--r--  vpx/vpx_codec.h                          6
-rw-r--r--  vpx_dsp/mips/sad_mmi.c                   2
-rw-r--r--  vpx_dsp/mips/sub_pixel_variance_msa.c    7
-rw-r--r--  vpx_dsp/mips/variance_mmi.c             72
-rw-r--r--  vpx_dsp/mips/variance_msa.c              5
-rw-r--r--  vpx_dsp/ppc/inv_txfm_vsx.c              18
-rw-r--r--  vpx_dsp/ppc/sad_vsx.c                    6
-rw-r--r--  vpx_dsp/ppc/types_vsx.h                  2
-rw-r--r--  vpx_dsp/ppc/variance_vsx.c               2
-rw-r--r--  vpx_dsp/prob.h                           2
-rw-r--r--  vpx_dsp/vpx_dsp_common.h                 4
-rw-r--r--  vpx_dsp/x86/highbd_variance_sse2.c       6
-rw-r--r--  vpx_ports/x86.h                          2
33 files changed, 120 insertions, 118 deletions
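
Every hunk below follows the same pattern: macro parameters (and negative constant definitions) are wrapped in parentheses so that operator precedence cannot change the expansion when an argument is itself an expression. A minimal sketch, not part of the patch, using a hypothetical complement macro in the style of vp8_complement/vpx_complement, shows the failure mode the parentheses prevent:

    /* Illustration only: OLD mirrors the pre-patch style, NEW the post-patch style. */
    #include <stdio.h>

    #define COMPLEMENT_OLD(x) (255 - x)   /* argument not parenthesized */
    #define COMPLEMENT_NEW(x) (255 - (x)) /* argument parenthesized */

    int main(void) {
      int a = 200, b = 50;
      /* OLD expands to 255 - 200 - 50 == 5 (wrong);
         NEW expands to 255 - (200 - 50) == 105 (intended). */
      printf("old=%d new=%d\n", COMPLEMENT_OLD(a - b), COMPLEMENT_NEW(a - b));
      return 0;
    }

The same reasoning applies to definitions such as PEEK_BACKWARD, INVALID_IDX, and NONE: writing (-1) keeps the value a single token group, so an expansion like 2 * NONE cannot be misread when the macro is later used inside larger expressions.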
diff --git a/examples/vpx_temporal_svc_encoder.c b/examples/vpx_temporal_svc_encoder.c
index ba71ca712..02144b474 100644
--- a/examples/vpx_temporal_svc_encoder.c
+++ b/examples/vpx_temporal_svc_encoder.c
@@ -30,7 +30,7 @@
#define ROI_MAP 0
-#define zero(Dest) memset(&Dest, 0, sizeof(Dest));
+#define zero(Dest) memset(&(Dest), 0, sizeof(Dest));
static const char *exec_name;
diff --git a/vp8/common/common.h b/vp8/common/common.h
index 8593e822b..2c30e8d6c 100644
--- a/vp8/common/common.h
+++ b/vp8/common/common.h
@@ -31,15 +31,15 @@ extern "C" {
/* Use this for variably-sized arrays. */
-#define vp8_copy_array(Dest, Src, N) \
- { \
- assert(sizeof(*Dest) == sizeof(*Src)); \
- memcpy(Dest, Src, N * sizeof(*Src)); \
+#define vp8_copy_array(Dest, Src, N) \
+ { \
+ assert(sizeof(*(Dest)) == sizeof(*(Src))); \
+ memcpy(Dest, Src, (N) * sizeof(*(Src))); \
}
-#define vp8_zero(Dest) memset(&Dest, 0, sizeof(Dest));
+#define vp8_zero(Dest) memset(&(Dest), 0, sizeof(Dest));
-#define vp8_zero_array(Dest, N) memset(Dest, 0, N * sizeof(*Dest));
+#define vp8_zero_array(Dest, N) memset(Dest, 0, (N) * sizeof(*(Dest)));
#ifdef __cplusplus
} // extern "C"
diff --git a/vp8/common/treecoder.h b/vp8/common/treecoder.h
index 00b1e7b24..d7d8d0ead 100644
--- a/vp8/common/treecoder.h
+++ b/vp8/common/treecoder.h
@@ -32,7 +32,7 @@ typedef const bool_coder_spec c_bool_coder_spec;
typedef const bool_writer c_bool_writer;
typedef const bool_reader c_bool_reader;
-#define vp8_complement(x) (255 - x)
+#define vp8_complement(x) (255 - (x))
/* We build coding trees compactly in arrays.
Each node of the tree is a pair of vp8_tree_indices.
diff --git a/vp8/decoder/onyxd_int.h b/vp8/decoder/onyxd_int.h
index 75286e430..cf2c066d9 100644
--- a/vp8/decoder/onyxd_int.h
+++ b/vp8/decoder/onyxd_int.h
@@ -136,8 +136,8 @@ int vp8_remove_decoder_instances(struct frame_buffers *fb);
#if CONFIG_DEBUG
#define CHECK_MEM_ERROR(lval, expr) \
do { \
- lval = (expr); \
- if (!lval) \
+ (lval) = (expr); \
+ if (!(lval)) \
vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR, \
"Failed to allocate " #lval " at %s:%d", __FILE__, \
__LINE__); \
@@ -145,8 +145,8 @@ int vp8_remove_decoder_instances(struct frame_buffers *fb);
#else
#define CHECK_MEM_ERROR(lval, expr) \
do { \
- lval = (expr); \
- if (!lval) \
+ (lval) = (expr); \
+ if (!(lval)) \
vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR, \
"Failed to allocate " #lval); \
} while (0)
diff --git a/vp8/encoder/lookahead.h b/vp8/encoder/lookahead.h
index 4e2766b1c..bf0401190 100644
--- a/vp8/encoder/lookahead.h
+++ b/vp8/encoder/lookahead.h
@@ -74,7 +74,7 @@ int vp8_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
struct lookahead_entry *vp8_lookahead_pop(struct lookahead_ctx *ctx, int drain);
#define PEEK_FORWARD 1
-#define PEEK_BACKWARD -1
+#define PEEK_BACKWARD (-1)
/**\brief Get a future source buffer to encode
*
* \param[in] ctx Pointer to the lookahead context
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index a30821ac1..bdc4bdc42 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -687,8 +687,8 @@ static void set_default_lf_deltas(VP8_COMP *cpi) {
/* Convenience macros for mapping speed and mode into a continuous
* range
*/
-#define GOOD(x) (x + 1)
-#define RT(x) (x + 7)
+#define GOOD(x) ((x) + 1)
+#define RT(x) ((x) + 7)
static int speed_map(int speed, const int *map) {
int res;
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index a4e3f2f79..a48c4d5f7 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -716,8 +716,8 @@ void vp8_set_speed_features(VP8_COMP *cpi);
#if CONFIG_DEBUG
#define CHECK_MEM_ERROR(lval, expr) \
do { \
- lval = (expr); \
- if (!lval) \
+ (lval) = (expr); \
+ if (!(lval)) \
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, \
"Failed to allocate " #lval " at %s:%d", __FILE__, \
__LINE__); \
@@ -725,8 +725,8 @@ void vp8_set_speed_features(VP8_COMP *cpi);
#else
#define CHECK_MEM_ERROR(lval, expr) \
do { \
- lval = (expr); \
- if (!lval) \
+ (lval) = (expr); \
+ if (!(lval)) \
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, \
"Failed to allocate " #lval); \
} while (0)
diff --git a/vp8/vp8_cx_iface.c b/vp8/vp8_cx_iface.c
index d01d2095f..c0e3ada0c 100644
--- a/vp8/vp8_cx_iface.c
+++ b/vp8/vp8_cx_iface.c
@@ -106,10 +106,10 @@ static vpx_codec_err_t update_error_state(
return VPX_CODEC_INVALID_PARAM; \
} while (0)
-#define RANGE_CHECK(p, memb, lo, hi) \
- do { \
- if (!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \
- ERROR(#memb " out of range [" #lo ".." #hi "]"); \
+#define RANGE_CHECK(p, memb, lo, hi) \
+ do { \
+ if (!(((p)->memb == (lo) || (p)->memb > (lo)) && (p)->memb <= (hi))) \
+ ERROR(#memb " out of range [" #lo ".." #hi "]"); \
} while (0)
#define RANGE_CHECK_HI(p, memb, hi) \
diff --git a/vp8/vp8_dx_iface.c b/vp8/vp8_dx_iface.c
index 7db77195b..f441ed46f 100644
--- a/vp8/vp8_dx_iface.c
+++ b/vp8/vp8_dx_iface.c
@@ -38,7 +38,7 @@ typedef vpx_codec_stream_info_t vp8_stream_info_t;
/* Structures for handling memory allocations */
typedef enum { VP8_SEG_ALG_PRIV = 256, VP8_SEG_MAX } mem_seg_id_t;
-#define NELEMENTS(x) ((int)(sizeof(x) / sizeof(x[0])))
+#define NELEMENTS(x) ((int)(sizeof(x) / sizeof((x)[0])))
struct vpx_codec_alg_priv {
vpx_codec_priv_t base;
diff --git a/vp9/common/vp9_alloccommon.h b/vp9/common/vp9_alloccommon.h
index 5faa4f2be..8900038ea 100644
--- a/vp9/common/vp9_alloccommon.h
+++ b/vp9/common/vp9_alloccommon.h
@@ -11,7 +11,7 @@
#ifndef VPX_VP9_COMMON_VP9_ALLOCCOMMON_H_
#define VPX_VP9_COMMON_VP9_ALLOCCOMMON_H_
-#define INVALID_IDX -1 // Invalid buffer index.
+#define INVALID_IDX (-1) // Invalid buffer index.
#ifdef __cplusplus
extern "C" {
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index e07a9f2d3..504342fdf 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -54,7 +54,7 @@ typedef struct {
// decoder implementation modules critically rely on the defined entry values
// specified herein. They should be refactored concurrently.
-#define NONE -1
+#define NONE (-1)
#define INTRA_FRAME 0
#define LAST_FRAME 1
#define GOLDEN_FRAME 2
diff --git a/vp9/common/vp9_common.h b/vp9/common/vp9_common.h
index ae8dad38e..e3c5535dd 100644
--- a/vp9/common/vp9_common.h
+++ b/vp9/common/vp9_common.h
@@ -33,14 +33,14 @@ extern "C" {
}
// Use this for variably-sized arrays.
-#define vp9_copy_array(dest, src, n) \
- { \
- assert(sizeof(*dest) == sizeof(*src)); \
- memcpy(dest, src, n * sizeof(*src)); \
+#define vp9_copy_array(dest, src, n) \
+ { \
+ assert(sizeof(*(dest)) == sizeof(*(src))); \
+ memcpy(dest, src, (n) * sizeof(*(src))); \
}
#define vp9_zero(dest) memset(&(dest), 0, sizeof(dest))
-#define vp9_zero_array(dest, n) memset(dest, 0, n * sizeof(*dest))
+#define vp9_zero_array(dest, n) memset(dest, 0, (n) * sizeof(*(dest)))
static INLINE int get_unsigned_bits(unsigned int num_values) {
return num_values > 0 ? get_msb(num_values) + 1 : 0;
@@ -49,8 +49,8 @@ static INLINE int get_unsigned_bits(unsigned int num_values) {
#if CONFIG_DEBUG
#define CHECK_MEM_ERROR(cm, lval, expr) \
do { \
- lval = (expr); \
- if (!lval) \
+ (lval) = (expr); \
+ if (!(lval)) \
vpx_internal_error(&(cm)->error, VPX_CODEC_MEM_ERROR, \
"Failed to allocate " #lval " at %s:%d", __FILE__, \
__LINE__); \
@@ -58,8 +58,8 @@ static INLINE int get_unsigned_bits(unsigned int num_values) {
#else
#define CHECK_MEM_ERROR(cm, lval, expr) \
do { \
- lval = (expr); \
- if (!lval) \
+ (lval) = (expr); \
+ if (!(lval)) \
vpx_internal_error(&(cm)->error, VPX_CODEC_MEM_ERROR, \
"Failed to allocate " #lval); \
} while (0)
diff --git a/vp9/common/vp9_mvref_common.h b/vp9/common/vp9_mvref_common.h
index ebe5fdad1..5db6772dc 100644
--- a/vp9/common/vp9_mvref_common.h
+++ b/vp9/common/vp9_mvref_common.h
@@ -263,10 +263,10 @@ static INLINE int_mv scale_mv(const MODE_INFO *mi, int ref,
mv_ref_list, Done) \
do { \
if (is_inter_block(mbmi)) { \
- if ((mbmi)->ref_frame[0] != ref_frame) \
+ if ((mbmi)->ref_frame[0] != (ref_frame)) \
ADD_MV_REF_LIST(scale_mv((mbmi), 0, ref_frame, ref_sign_bias), \
refmv_count, mv_ref_list, Done); \
- if (has_second_ref(mbmi) && (mbmi)->ref_frame[1] != ref_frame && \
+ if (has_second_ref(mbmi) && (mbmi)->ref_frame[1] != (ref_frame) && \
(mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) \
ADD_MV_REF_LIST(scale_mv((mbmi), 1, ref_frame, ref_sign_bias), \
refmv_count, mv_ref_list, Done); \
diff --git a/vp9/common/vp9_scale.h b/vp9/common/vp9_scale.h
index aaafdf867..2f3b60948 100644
--- a/vp9/common/vp9_scale.h
+++ b/vp9/common/vp9_scale.h
@@ -20,7 +20,7 @@ extern "C" {
#define REF_SCALE_SHIFT 14
#define REF_NO_SCALE (1 << REF_SCALE_SHIFT)
-#define REF_INVALID_SCALE -1
+#define REF_INVALID_SCALE (-1)
struct scale_factors {
int x_scale_fp; // horizontal fixed point scale factor
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index a68a0926a..276510204 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -56,7 +56,7 @@ static const int plane_rd_mult[REF_TYPES][PLANE_TYPES] = {
// 'num' can be negative, but 'shift' must be non-negative.
#define RIGHT_SHIFT_POSSIBLY_NEGATIVE(num, shift) \
- ((num) >= 0) ? (num) >> (shift) : -((-(num)) >> (shift))
+ (((num) >= 0) ? (num) >> (shift) : -((-(num)) >> (shift)))
int vp9_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
int ctx) {
diff --git a/vp9/encoder/vp9_firstpass.h b/vp9/encoder/vp9_firstpass.h
index 0807097ac..5d9a7fd0d 100644
--- a/vp9/encoder/vp9_firstpass.h
+++ b/vp9/encoder/vp9_firstpass.h
@@ -41,7 +41,7 @@ typedef struct {
} FIRSTPASS_MB_STATS;
#endif
-#define INVALID_ROW -1
+#define INVALID_ROW (-1)
#define MAX_ARF_LAYERS 6
diff --git a/vp9/encoder/vp9_rd.h b/vp9/encoder/vp9_rd.h
index 062ca3277..21f8ddd37 100644
--- a/vp9/encoder/vp9_rd.h
+++ b/vp9/encoder/vp9_rd.h
@@ -27,7 +27,7 @@ extern "C" {
#define RD_EPB_SHIFT 6
#define RDCOST(RM, DM, R, D) \
- (ROUND_POWER_OF_TWO(((int64_t)R) * (RM), VP9_PROB_COST_SHIFT) + (D << DM))
+ ROUND_POWER_OF_TWO(((int64_t)(R)) * (RM), VP9_PROB_COST_SHIFT) + ((D) << (DM))
#define QIDX_SKIP_THRESH 115
#define MV_COST_WEIGHT 108
@@ -101,8 +101,8 @@ typedef enum {
typedef struct RD_OPT {
// Thresh_mult is used to set a threshold for the rd score. A higher value
// means that we will accept the best mode so far more often. This number
- // is used in combination with the current block size, and thresh_freq_fact
- // to pick a threshold.
+ // is used in combination with the current block size, and thresh_freq_fact to
+ // pick a threshold.
int thresh_mult[MAX_MODES];
int thresh_mult_sub8x8[MAX_REFS];
diff --git a/vp9/vp9_cx_iface.c b/vp9/vp9_cx_iface.c
index 85f83a662..2baa21364 100644
--- a/vp9/vp9_cx_iface.c
+++ b/vp9/vp9_cx_iface.c
@@ -130,10 +130,10 @@ static vpx_codec_err_t update_error_state(
return VPX_CODEC_INVALID_PARAM; \
} while (0)
-#define RANGE_CHECK(p, memb, lo, hi) \
- do { \
- if (!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \
- ERROR(#memb " out of range [" #lo ".." #hi "]"); \
+#define RANGE_CHECK(p, memb, lo, hi) \
+ do { \
+ if (!(((p)->memb == (lo) || (p)->memb > (lo)) && (p)->memb <= (hi))) \
+ ERROR(#memb " out of range [" #lo ".." #hi "]"); \
} while (0)
#define RANGE_CHECK_HI(p, memb, hi) \
diff --git a/vp9/vp9_dx_iface.c b/vp9/vp9_dx_iface.c
index 6a4cb9acf..fa79f7aed 100644
--- a/vp9/vp9_dx_iface.c
+++ b/vp9/vp9_dx_iface.c
@@ -245,10 +245,10 @@ static void set_ppflags(const vpx_codec_alg_priv_t *ctx, vp9_ppflags_t *flags) {
return VPX_CODEC_INVALID_PARAM; \
} while (0)
-#define RANGE_CHECK(p, memb, lo, hi) \
- do { \
- if (!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \
- ERROR(#memb " out of range [" #lo ".." #hi "]"); \
+#define RANGE_CHECK(p, memb, lo, hi) \
+ do { \
+ if (!(((p)->memb == (lo) || (p)->memb > (lo)) && (p)->memb <= (hi))) \
+ ERROR(#memb " out of range [" #lo ".." #hi "]"); \
} while (0)
static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
diff --git a/vpx/src/vpx_encoder.c b/vpx/src/vpx_encoder.c
index ac1e3d061..3385c22c0 100644
--- a/vpx/src/vpx_encoder.c
+++ b/vpx/src/vpx_encoder.c
@@ -20,7 +20,7 @@
#include "vpx_config.h"
#include "vpx/internal/vpx_codec_internal.h"
-#define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var)
+#define SAVE_STATUS(ctx, var) ((ctx) ? ((ctx)->err = (var)) : (var))
static vpx_codec_alg_priv_t *get_alg_priv(vpx_codec_ctx_t *ctx) {
return (vpx_codec_alg_priv_t *)ctx->priv;
diff --git a/vpx/vpx_codec.h b/vpx/vpx_codec.h
index 0f8d7851e..6371a6ca2 100644
--- a/vpx/vpx_codec.h
+++ b/vpx/vpx_codec.h
@@ -241,11 +241,11 @@ typedef enum vpx_bit_depth {
*/
int vpx_codec_version(void);
#define VPX_VERSION_MAJOR(v) \
- ((v >> 16) & 0xff) /**< extract major from packed version */
+ (((v) >> 16) & 0xff) /**< extract major from packed version */
#define VPX_VERSION_MINOR(v) \
- ((v >> 8) & 0xff) /**< extract minor from packed version */
+ (((v) >> 8) & 0xff) /**< extract minor from packed version */
#define VPX_VERSION_PATCH(v) \
- ((v >> 0) & 0xff) /**< extract patch from packed version */
+ (((v) >> 0) & 0xff) /**< extract patch from packed version */
/*!\brief Return the version major number */
#define vpx_codec_version_major() ((vpx_codec_version() >> 16) & 0xff)
diff --git a/vpx_dsp/mips/sad_mmi.c b/vpx_dsp/mips/sad_mmi.c
index 33bd3fe7f..4368db5fd 100644
--- a/vpx_dsp/mips/sad_mmi.c
+++ b/vpx_dsp/mips/sad_mmi.c
@@ -341,7 +341,7 @@
const uint8_t *ref_array, int ref_stride, \
uint32_t *sad_array) { \
int i; \
- for (i = 0; i < k; ++i) \
+ for (i = 0; i < (k); ++i) \
sad_array[i] = \
vpx_sad##m##x##n##_mmi(src, src_stride, &ref_array[i], ref_stride); \
}
diff --git a/vpx_dsp/mips/sub_pixel_variance_msa.c b/vpx_dsp/mips/sub_pixel_variance_msa.c
index 6c2649d7e..572fcabfc 100644
--- a/vpx_dsp/mips/sub_pixel_variance_msa.c
+++ b/vpx_dsp/mips/sub_pixel_variance_msa.c
@@ -27,13 +27,14 @@ static const uint8_t bilinear_filters_msa[8][2] = {
HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m); \
DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \
\
- sub += res_l0_m + res_l1_m; \
+ (sub) += res_l0_m + res_l1_m; \
}
-#define VARIANCE_WxH(sse, diff, shift) sse - (((uint32_t)diff * diff) >> shift)
+#define VARIANCE_WxH(sse, diff, shift) \
+ (sse) - (((uint32_t)(diff) * (diff)) >> (shift))
#define VARIANCE_LARGE_WxH(sse, diff, shift) \
- sse - (((int64_t)diff * diff) >> shift)
+ (sse) - (((int64_t)(diff) * (diff)) >> (shift))
static uint32_t avg_sse_diff_4width_msa(const uint8_t *src_ptr,
int32_t src_stride,
diff --git a/vpx_dsp/mips/variance_mmi.c b/vpx_dsp/mips/variance_mmi.c
index 88908e01a..c1780c33a 100644
--- a/vpx_dsp/mips/variance_mmi.c
+++ b/vpx_dsp/mips/variance_mmi.c
@@ -992,19 +992,19 @@ static inline uint32_t vpx_mse8x(const uint8_t *src_ptr, int src_stride,
vpx_mse8xN(16);
vpx_mse8xN(8);
-#define SUBPIX_VAR(W, H) \
- uint32_t vpx_sub_pixel_variance##W##x##H##_mmi( \
- const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
- const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint8_t temp2[H * W]; \
- \
- var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, H + 1, \
- W, bilinear_filters[x_offset]); \
- var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[y_offset]); \
- \
- return vpx_variance##W##x##H##_mmi(temp2, W, ref_ptr, ref_stride, sse); \
+#define SUBPIX_VAR(W, H) \
+ uint32_t vpx_sub_pixel_variance##W##x##H##_mmi( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
+ uint16_t fdata3[((H) + 1) * (W)]; \
+ uint8_t temp2[(H) * (W)]; \
+ \
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, (H) + 1, \
+ W, bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ return vpx_variance##W##x##H##_mmi(temp2, W, ref_ptr, ref_stride, sse); \
}
SUBPIX_VAR(64, 64)
@@ -1087,9 +1087,9 @@ static inline void var_filter_block2d_bil_16x(const uint8_t *src_ptr,
uint32_t vpx_sub_pixel_variance16x##H##_mmi( \
const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
- uint8_t temp2[16 * H]; \
+ uint8_t temp2[16 * (H)]; \
var_filter_block2d_bil_16x(src_ptr, src_stride, x_offset, y_offset, temp2, \
- (H - 2) / 2); \
+ ((H)-2) / 2); \
\
return vpx_variance16x##H##_mmi(temp2, 16, ref_ptr, ref_stride, sse); \
}
@@ -1169,9 +1169,9 @@ static inline void var_filter_block2d_bil_8x(const uint8_t *src_ptr,
uint32_t vpx_sub_pixel_variance8x##H##_mmi( \
const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
- uint8_t temp2[8 * H]; \
+ uint8_t temp2[8 * (H)]; \
var_filter_block2d_bil_8x(src_ptr, src_stride, x_offset, y_offset, temp2, \
- (H - 2) / 2); \
+ ((H)-2) / 2); \
\
return vpx_variance8x##H##_mmi(temp2, 8, ref_ptr, ref_stride, sse); \
}
@@ -1247,9 +1247,9 @@ static inline void var_filter_block2d_bil_4x(const uint8_t *src_ptr,
uint32_t vpx_sub_pixel_variance4x##H##_mmi( \
const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
- uint8_t temp2[4 * H]; \
+ uint8_t temp2[4 * (H)]; \
var_filter_block2d_bil_4x(src_ptr, src_stride, x_offset, y_offset, temp2, \
- (H - 2) / 2); \
+ ((H)-2) / 2); \
\
return vpx_variance4x##H##_mmi(temp2, 4, ref_ptr, ref_stride, sse); \
}
@@ -1257,23 +1257,23 @@ static inline void var_filter_block2d_bil_4x(const uint8_t *src_ptr,
SUBPIX_VAR4XN(8)
SUBPIX_VAR4XN(4)
-#define SUBPIX_AVG_VAR(W, H) \
- uint32_t vpx_sub_pixel_avg_variance##W##x##H##_mmi( \
- const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
- const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, \
- const uint8_t *second_pred) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint8_t temp2[H * W]; \
- DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
- \
- var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, H + 1, \
- W, bilinear_filters[x_offset]); \
- var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[y_offset]); \
- \
- vpx_comp_avg_pred_c(temp3, second_pred, W, H, temp2, W); \
- \
- return vpx_variance##W##x##H##_mmi(temp3, W, ref_ptr, ref_stride, sse); \
+#define SUBPIX_AVG_VAR(W, H) \
+ uint32_t vpx_sub_pixel_avg_variance##W##x##H##_mmi( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, \
+ const uint8_t *second_pred) { \
+ uint16_t fdata3[((H) + 1) * (W)]; \
+ uint8_t temp2[(H) * (W)]; \
+ DECLARE_ALIGNED(16, uint8_t, temp3[(H) * (W)]); \
+ \
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, (H) + 1, \
+ W, bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ vpx_comp_avg_pred_c(temp3, second_pred, W, H, temp2, W); \
+ \
+ return vpx_variance##W##x##H##_mmi(temp3, W, ref_ptr, ref_stride, sse); \
}
SUBPIX_AVG_VAR(64, 64)
diff --git a/vpx_dsp/mips/variance_msa.c b/vpx_dsp/mips/variance_msa.c
index 49b2f9923..444b086a6 100644
--- a/vpx_dsp/mips/variance_msa.c
+++ b/vpx_dsp/mips/variance_msa.c
@@ -33,10 +33,11 @@
sub += res_l0_m + res_l1_m; \
}
-#define VARIANCE_WxH(sse, diff, shift) sse - (((uint32_t)diff * diff) >> shift)
+#define VARIANCE_WxH(sse, diff, shift) \
+ (sse) - (((uint32_t)(diff) * (diff)) >> (shift))
#define VARIANCE_LARGE_WxH(sse, diff, shift) \
- sse - (((int64_t)diff * diff) >> shift)
+ (sse) - (((int64_t)(diff) * (diff)) >> (shift))
static uint32_t sse_diff_4width_msa(const uint8_t *src_ptr, int32_t src_stride,
const uint8_t *ref_ptr, int32_t ref_stride,
diff --git a/vpx_dsp/ppc/inv_txfm_vsx.c b/vpx_dsp/ppc/inv_txfm_vsx.c
index 8d8fb4401..e99412eca 100644
--- a/vpx_dsp/ppc/inv_txfm_vsx.c
+++ b/vpx_dsp/ppc/inv_txfm_vsx.c
@@ -1074,15 +1074,15 @@ void vpx_idct16x16_256_add_vsx(const tran_low_t *input, uint8_t *dest,
PIXEL_ADD(in3, d_ul, add, shift6); \
vec_vsx_st(vec_packsu(d_uh, d_ul), (step)*stride + 16, dest);
-#define ADD_STORE_BLOCK(in, offset) \
- PIXEL_ADD_STORE32(in[0][0], in[1][0], in[2][0], in[3][0], offset + 0); \
- PIXEL_ADD_STORE32(in[0][1], in[1][1], in[2][1], in[3][1], offset + 1); \
- PIXEL_ADD_STORE32(in[0][2], in[1][2], in[2][2], in[3][2], offset + 2); \
- PIXEL_ADD_STORE32(in[0][3], in[1][3], in[2][3], in[3][3], offset + 3); \
- PIXEL_ADD_STORE32(in[0][4], in[1][4], in[2][4], in[3][4], offset + 4); \
- PIXEL_ADD_STORE32(in[0][5], in[1][5], in[2][5], in[3][5], offset + 5); \
- PIXEL_ADD_STORE32(in[0][6], in[1][6], in[2][6], in[3][6], offset + 6); \
- PIXEL_ADD_STORE32(in[0][7], in[1][7], in[2][7], in[3][7], offset + 7);
+#define ADD_STORE_BLOCK(in, offset) \
+ PIXEL_ADD_STORE32(in[0][0], in[1][0], in[2][0], in[3][0], (offset) + 0); \
+ PIXEL_ADD_STORE32(in[0][1], in[1][1], in[2][1], in[3][1], (offset) + 1); \
+ PIXEL_ADD_STORE32(in[0][2], in[1][2], in[2][2], in[3][2], (offset) + 2); \
+ PIXEL_ADD_STORE32(in[0][3], in[1][3], in[2][3], in[3][3], (offset) + 3); \
+ PIXEL_ADD_STORE32(in[0][4], in[1][4], in[2][4], in[3][4], (offset) + 4); \
+ PIXEL_ADD_STORE32(in[0][5], in[1][5], in[2][5], in[3][5], (offset) + 5); \
+ PIXEL_ADD_STORE32(in[0][6], in[1][6], in[2][6], in[3][6], (offset) + 6); \
+ PIXEL_ADD_STORE32(in[0][7], in[1][7], in[2][7], in[3][7], (offset) + 7);
void vpx_idct32x32_1024_add_vsx(const tran_low_t *input, uint8_t *dest,
int stride) {
diff --git a/vpx_dsp/ppc/sad_vsx.c b/vpx_dsp/ppc/sad_vsx.c
index cdee8f3d1..a08ae1241 100644
--- a/vpx_dsp/ppc/sad_vsx.c
+++ b/vpx_dsp/ppc/sad_vsx.c
@@ -115,7 +115,7 @@ SAD64(64);
unsigned int vpx_sad16x##height##_avg_vsx( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred) { \
- DECLARE_ALIGNED(16, uint8_t, comp_pred[16 * height]); \
+ DECLARE_ALIGNED(16, uint8_t, comp_pred[16 * (height)]); \
vpx_comp_avg_pred_vsx(comp_pred, second_pred, 16, height, ref, \
ref_stride); \
\
@@ -126,7 +126,7 @@ SAD64(64);
unsigned int vpx_sad32x##height##_avg_vsx( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred) { \
- DECLARE_ALIGNED(32, uint8_t, comp_pred[32 * height]); \
+ DECLARE_ALIGNED(32, uint8_t, comp_pred[32 * (height)]); \
vpx_comp_avg_pred_vsx(comp_pred, second_pred, 32, height, ref, \
ref_stride); \
\
@@ -137,7 +137,7 @@ SAD64(64);
unsigned int vpx_sad64x##height##_avg_vsx( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred) { \
- DECLARE_ALIGNED(64, uint8_t, comp_pred[64 * height]); \
+ DECLARE_ALIGNED(64, uint8_t, comp_pred[64 * (height)]); \
vpx_comp_avg_pred_vsx(comp_pred, second_pred, 64, height, ref, \
ref_stride); \
return vpx_sad64x##height##_vsx(src, src_stride, comp_pred, 64); \
diff --git a/vpx_dsp/ppc/types_vsx.h b/vpx_dsp/ppc/types_vsx.h
index 4cba7d365..b89116924 100644
--- a/vpx_dsp/ppc/types_vsx.h
+++ b/vpx_dsp/ppc/types_vsx.h
@@ -64,7 +64,7 @@ static const uint8x16_t xxpermdi3_perm = { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
#define unpack_to_s16_l(v) \
(int16x8_t) vec_mergel((uint8x16_t)v, vec_splat_u8(0))
#ifndef xxpermdi
-#define xxpermdi(a, b, c) vec_xxpermdi(b, a, ((c >> 1) | (c & 1) << 1) ^ 3)
+#define xxpermdi(a, b, c) vec_xxpermdi(b, a, (((c) >> 1) | ((c)&1) << 1) ^ 3)
#endif
#endif
diff --git a/vpx_dsp/ppc/variance_vsx.c b/vpx_dsp/ppc/variance_vsx.c
index 8926160c2..be9614a35 100644
--- a/vpx_dsp/ppc/variance_vsx.c
+++ b/vpx_dsp/ppc/variance_vsx.c
@@ -243,7 +243,7 @@ static INLINE void variance(const uint8_t *src_ptr, int src_stride,
uint32_t *sse) { \
int sum; \
variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, &sum); \
- return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / ((W) * (H))); \
}
#define VARIANCES(W, H) VAR(W, H)
diff --git a/vpx_dsp/prob.h b/vpx_dsp/prob.h
index 30d37c6a7..7a71c0041 100644
--- a/vpx_dsp/prob.h
+++ b/vpx_dsp/prob.h
@@ -32,7 +32,7 @@ typedef int8_t vpx_tree_index;
#define TREE_SIZE(leaf_count) (2 * (leaf_count)-2)
-#define vpx_complement(x) (255 - x)
+#define vpx_complement(x) (255 - (x))
#define MODE_MV_COUNT_SAT 20
diff --git a/vpx_dsp/vpx_dsp_common.h b/vpx_dsp/vpx_dsp_common.h
index 6782c9063..f1fcf2df5 100644
--- a/vpx_dsp/vpx_dsp_common.h
+++ b/vpx_dsp/vpx_dsp_common.h
@@ -25,8 +25,8 @@ extern "C" {
#define VPX_SWAP(type, a, b) \
do { \
type c = (b); \
- b = a; \
- a = c; \
+ (b) = a; \
+ (a) = c; \
} while (0)
#if CONFIG_VP9_HIGHBITDEPTH
diff --git a/vpx_dsp/x86/highbd_variance_sse2.c b/vpx_dsp/x86/highbd_variance_sse2.c
index cc4e260df..dd6cfbb2c 100644
--- a/vpx_dsp/x86/highbd_variance_sse2.c
+++ b/vpx_dsp/x86/highbd_variance_sse2.c
@@ -136,7 +136,7 @@ HIGH_GET_VAR(8);
highbd_8_variance_sse2( \
src, src_stride, ref, ref_stride, w, h, sse, &sum, \
vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
- return *sse - (uint32_t)(((int64_t)sum * sum) >> shift); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) >> (shift)); \
} \
\
uint32_t vpx_highbd_10_variance##w##x##h##_sse2( \
@@ -149,7 +149,7 @@ HIGH_GET_VAR(8);
highbd_10_variance_sse2( \
src, src_stride, ref, ref_stride, w, h, sse, &sum, \
vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
- var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) >> (shift)); \
return (var >= 0) ? (uint32_t)var : 0; \
} \
\
@@ -163,7 +163,7 @@ HIGH_GET_VAR(8);
highbd_12_variance_sse2( \
src, src_stride, ref, ref_stride, w, h, sse, &sum, \
vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
- var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) >> (shift)); \
return (var >= 0) ? (uint32_t)var : 0; \
}
diff --git a/vpx_ports/x86.h b/vpx_ports/x86.h
index 58eeb7b63..9b48a1f4c 100644
--- a/vpx_ports/x86.h
+++ b/vpx_ports/x86.h
@@ -161,7 +161,7 @@ static INLINE uint64_t xgetbv(void) {
#define HAS_AVX2 0x080
#define HAS_AVX512 0x100
#ifndef BIT
-#define BIT(n) (1u << n)
+#define BIT(n) (1u << (n))
#endif
static INLINE int x86_simd_caps(void) {