author     James Zern <jzern@google.com>    2021-12-02 13:11:56 -0800
committer  James Zern <jzern@google.com>    2021-12-02 16:53:20 -0800
commit     31b954debe62026f957e9a13354c8c75b12e537a (patch)
tree       e803b50c646bf50f23f7c3b863d08375739c2ae0
parent     13f984c2162d8392d3fd0ffb8666ee518f60665a (diff)
clear -Wextra-semi/-Wextra-semi-stmt warnings
Bug: chromium:1257449
Change-Id: Ia9aafccc09b611521d4a7aedfe3723393a840c62
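For context on what these flags catch (an illustrative sketch, not part of the change): clang's -Wextra-semi-stmt fires on the empty statement left behind when a macro definition already ends in a semicolon, which is why the edits below either drop the macro's trailing semicolon or wrap multi-statement bodies in do { ... } while (0).

/* Hypothetical example, assuming clang with -Wextra-semi-stmt enabled. */
#include <string.h>
#define zero_old(x) memset(&(x), 0, sizeof(x)); /* ';' baked into macro */
#define zero_new(x) memset(&(x), 0, sizeof(x))  /* call site supplies ';' */

void demo(void) {
  int buf[4];
  zero_old(buf); /* expands to "memset(...);;" -> extra ';' warning */
  zero_new(buf); /* expands to "memset(...);"  -> clean */
}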
-rwxr-xr-x  configure                                    2
-rw-r--r--  examples/postproc.c                          2
-rw-r--r--  examples/vpx_temporal_svc_encoder.c          2
-rw-r--r--  test/pp_filter_test.cc                       4
-rw-r--r--  test/set_roi.cc                              2
-rw-r--r--  test/vp8_fdct4x4_test.cc                     4
-rw-r--r--  test/yuv_temporal_filter_test.cc             8
-rw-r--r--  tools/tiny_ssim.c                           30
-rw-r--r--  vp8/common/blockd.h                          2
-rw-r--r--  vp8/common/common.h                         12
-rw-r--r--  vp8/encoder/encodeframe.c                   13
-rw-r--r--  vp8/encoder/encodemv.c                       7
-rw-r--r--  vp8/encoder/mcomp.c                         54
-rw-r--r--  vp8/encoder/onyx_if.c                        2
-rw-r--r--  vp9/common/vp9_common.h                      4
-rw-r--r--  vp9/encoder/vp9_mcomp.c                    194
-rw-r--r--  vpx/src/vpx_encoder.c                        2
-rw-r--r--  vpx_dsp/x86/highbd_convolve_avx2.c          12
-rw-r--r--  vpx_dsp/x86/highbd_variance_sse2.c          86
-rw-r--r--  vpx_dsp/x86/sad_avx2.c                      36
-rw-r--r--  vpx_dsp/x86/variance_sse2.c                 60
-rw-r--r--  vpx_dsp/x86/vpx_subpixel_4t_intrin_sse2.c   24
-rw-r--r--  vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c   12
-rw-r--r--  vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c  12
-rw-r--r--  vpx_ports/x86.h                              8
25 files changed, 307 insertions, 287 deletions
diff --git a/configure b/configure
index d39db6cb0..b68f9fd78 100755
--- a/configure
+++ b/configure
@@ -622,6 +622,8 @@ process_toolchain() {
check_add_cflags -Wall
check_add_cflags -Wdeclaration-after-statement
check_add_cflags -Wdisabled-optimization
+ check_add_cflags -Wextra-semi
+ check_add_cflags -Wextra-semi-stmt
check_add_cflags -Wfloat-conversion
check_add_cflags -Wformat=2
check_add_cflags -Wparentheses-equality
diff --git a/examples/postproc.c b/examples/postproc.c
index be999b429..b53c15ea1 100644
--- a/examples/postproc.c
+++ b/examples/postproc.c
@@ -109,7 +109,7 @@ int main(int argc, char **argv) {
0 };
if (vpx_codec_control(&codec, VP8_SET_POSTPROC, &pp))
die_codec(&codec, "Failed to turn on postproc.");
- };
+ }
// Decode the frame with 15ms deadline
if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 15000))
diff --git a/examples/vpx_temporal_svc_encoder.c b/examples/vpx_temporal_svc_encoder.c
index 47f30751e..bb761a411 100644
--- a/examples/vpx_temporal_svc_encoder.c
+++ b/examples/vpx_temporal_svc_encoder.c
@@ -30,7 +30,7 @@
#define ROI_MAP 0
-#define zero(Dest) memset(&(Dest), 0, sizeof(Dest));
+#define zero(Dest) memset(&(Dest), 0, sizeof(Dest))
static const char *exec_name;
diff --git a/test/pp_filter_test.cc b/test/pp_filter_test.cc
index a511ffbe9..775f7f36a 100644
--- a/test/pp_filter_test.cc
+++ b/test/pp_filter_test.cc
@@ -115,7 +115,7 @@ TEST_P(VpxPostProcDownAndAcrossMbRowTest, CheckFilterOutput) {
}
vpx_free(flimits_);
-};
+}
TEST_P(VpxPostProcDownAndAcrossMbRowTest, CheckCvsAssembly) {
// Size of the underlying data block that will be filtered.
@@ -214,7 +214,7 @@ TEST_P(VpxPostProcDownAndAcrossMbRowTest, DISABLED_Speed) {
PrintMedian("16x16");
vpx_free(flimits_);
-};
+}
class VpxMbPostProcAcrossIpTest
: public AbstractBench,
diff --git a/test/set_roi.cc b/test/set_roi.cc
index f63954752..167cf908f 100644
--- a/test/set_roi.cc
+++ b/test/set_roi.cc
@@ -161,6 +161,6 @@ TEST(VP8RoiMapTest, ParameterCheck) {
// Free allocated memory
if (cpi.segmentation_map) vpx_free(cpi.segmentation_map);
if (roi_map) vpx_free(roi_map);
-};
+}
} // namespace
diff --git a/test/vp8_fdct4x4_test.cc b/test/vp8_fdct4x4_test.cc
index d5ac25300..3e4305be7 100644
--- a/test/vp8_fdct4x4_test.cc
+++ b/test/vp8_fdct4x4_test.cc
@@ -148,7 +148,7 @@ TEST_P(FdctTest, SignBiasCheck) {
EXPECT_EQ(true, bias_acceptable)
<< "Error: 4x4 FDCT has a sign bias > 10% for input range [-15, 15]";
-};
+}
TEST_P(FdctTest, RoundTripErrorCheck) {
int max_error = 0;
@@ -181,7 +181,7 @@ TEST_P(FdctTest, RoundTripErrorCheck) {
EXPECT_GE(count_test_block, total_error)
<< "Error: FDCT/IDCT has average roundtrip error > 1 per block";
-};
+}
INSTANTIATE_TEST_SUITE_P(C, FdctTest, ::testing::Values(vp8_short_fdct4x4_c));
diff --git a/test/yuv_temporal_filter_test.cc b/test/yuv_temporal_filter_test.cc
index cfdc88d89..2bdcf4d86 100644
--- a/test/yuv_temporal_filter_test.cc
+++ b/test/yuv_temporal_filter_test.cc
@@ -674,8 +674,8 @@ TEST_P(YUVTemporalFilterTest, DISABLED_Speed) {
v_count); \
}
-WRAP_HIGHBD_FUNC(vp9_highbd_apply_temporal_filter_c, 10);
-WRAP_HIGHBD_FUNC(vp9_highbd_apply_temporal_filter_c, 12);
+WRAP_HIGHBD_FUNC(vp9_highbd_apply_temporal_filter_c, 10)
+WRAP_HIGHBD_FUNC(vp9_highbd_apply_temporal_filter_c, 12)
INSTANTIATE_TEST_SUITE_P(
C, YUVTemporalFilterTest,
@@ -683,8 +683,8 @@ INSTANTIATE_TEST_SUITE_P(
TemporalFilterWithBd(&wrap_vp9_highbd_apply_temporal_filter_c_10, 10),
TemporalFilterWithBd(&wrap_vp9_highbd_apply_temporal_filter_c_12, 12)));
#if HAVE_SSE4_1
-WRAP_HIGHBD_FUNC(vp9_highbd_apply_temporal_filter_sse4_1, 10);
-WRAP_HIGHBD_FUNC(vp9_highbd_apply_temporal_filter_sse4_1, 12);
+WRAP_HIGHBD_FUNC(vp9_highbd_apply_temporal_filter_sse4_1, 10)
+WRAP_HIGHBD_FUNC(vp9_highbd_apply_temporal_filter_sse4_1, 12)
INSTANTIATE_TEST_SUITE_P(
SSE4_1, YUVTemporalFilterTest,
diff --git a/tools/tiny_ssim.c b/tools/tiny_ssim.c
index ff4634ade..157797048 100644
--- a/tools/tiny_ssim.c
+++ b/tools/tiny_ssim.c
@@ -425,20 +425,24 @@ int main(int argc, char *argv[]) {
break;
}
#if CONFIG_VP9_HIGHBITDEPTH
-#define psnr_and_ssim(ssim, psnr, buf0, buf1, w, h) \
- if (bit_depth < 9) { \
- ssim = ssim2(buf0, buf1, w, w, w, h); \
- psnr = calc_plane_error(buf0, w, buf1, w, w, h); \
- } else { \
- ssim = highbd_ssim2(CONVERT_TO_BYTEPTR(buf0), CONVERT_TO_BYTEPTR(buf1), w, \
- w, w, h, bit_depth); \
- psnr = calc_plane_error16(CAST_TO_SHORTPTR(buf0), w, \
- CAST_TO_SHORTPTR(buf1), w, w, h); \
- }
+#define psnr_and_ssim(ssim, psnr, buf0, buf1, w, h) \
+ do { \
+ if (bit_depth < 9) { \
+ ssim = ssim2(buf0, buf1, w, w, w, h); \
+ psnr = calc_plane_error(buf0, w, buf1, w, w, h); \
+ } else { \
+ ssim = highbd_ssim2(CONVERT_TO_BYTEPTR(buf0), CONVERT_TO_BYTEPTR(buf1), \
+ w, w, w, h, bit_depth); \
+ psnr = calc_plane_error16(CAST_TO_SHORTPTR(buf0), w, \
+ CAST_TO_SHORTPTR(buf1), w, w, h); \
+ } \
+ } while (0)
#else
-#define psnr_and_ssim(ssim, psnr, buf0, buf1, w, h) \
- ssim = ssim2(buf0, buf1, w, w, w, h); \
- psnr = calc_plane_error(buf0, w, buf1, w, w, h);
+#define psnr_and_ssim(ssim, psnr, buf0, buf1, w, h) \
+ do { \
+ ssim = ssim2(buf0, buf1, w, w, w, h); \
+ psnr = calc_plane_error(buf0, w, buf1, w, w, h); \
+ } while (0)
#endif // CONFIG_VP9_HIGHBITDEPTH
if (n_frames == allocated_frames) {
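A note on why the do/while wrapper is used rather than only deleting semicolons (a sketch with a hypothetical caller, not code from this commit): the non-highbitdepth psnr_and_ssim expands to two statements, so an unbraced if/else around it would misparse without the wrapper.

/* With the old two-statement definition this fails to compile:
 * only the first statement binds to the if, and the else dangles. */
if (have_frames)
  psnr_and_ssim(ssim_y, psnr_y, buf0, buf1, w, h);
else
  return 0;
/* Wrapped in do { ... } while (0), the macro is one statement and
 * the if/else parses as written. */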
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index 02abe053c..405443449 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -58,7 +58,7 @@ typedef struct {
extern const unsigned char vp8_block2left[25];
extern const unsigned char vp8_block2above[25];
-#define VP8_COMBINEENTROPYCONTEXTS(Dest, A, B) Dest = (A) + (B);
+#define VP8_COMBINEENTROPYCONTEXTS(Dest, A, B) Dest = (A) + (B)
typedef enum { KEY_FRAME = 0, INTER_FRAME = 1 } FRAME_TYPE;
diff --git a/vp8/common/common.h b/vp8/common/common.h
index 2c30e8d6c..562569f9a 100644
--- a/vp8/common/common.h
+++ b/vp8/common/common.h
@@ -24,22 +24,22 @@ extern "C" {
/* Only need this for fixed-size arrays, for structs just assign. */
#define vp8_copy(Dest, Src) \
- { \
+ do { \
assert(sizeof(Dest) == sizeof(Src)); \
memcpy(Dest, Src, sizeof(Src)); \
- }
+ } while (0)
/* Use this for variably-sized arrays. */
#define vp8_copy_array(Dest, Src, N) \
- { \
+ do { \
assert(sizeof(*(Dest)) == sizeof(*(Src))); \
memcpy(Dest, Src, (N) * sizeof(*(Src))); \
- }
+ } while (0)
-#define vp8_zero(Dest) memset(&(Dest), 0, sizeof(Dest));
+#define vp8_zero(Dest) memset(&(Dest), 0, sizeof(Dest))
-#define vp8_zero_array(Dest, N) memset(Dest, 0, (N) * sizeof(*(Dest)));
+#define vp8_zero_array(Dest, N) memset(Dest, 0, (N) * sizeof(*(Dest)))
#ifdef __cplusplus
} // extern "C"
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 69271f1a7..4df35f6ed 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -639,7 +639,8 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi) {
vp8_zero(x->coef_counts);
vp8_zero(x->ymode_count);
- vp8_zero(x->uv_mode_count) x->prediction_error = 0;
+ vp8_zero(x->uv_mode_count);
+ x->prediction_error = 0;
x->intra_error = 0;
vp8_zero(x->count_mb_ref_frame_usage);
}
@@ -766,12 +767,12 @@ void vp8_encode_frame(VP8_COMP *cpi) {
for (mb_row = 0; mb_row < cm->mb_rows;
mb_row += (cpi->encoding_thread_count + 1)) {
- vp8_zero(cm->left_context)
+ vp8_zero(cm->left_context);
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
- tp = cpi->tok;
+ tp = cpi->tok;
#else
- tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
+ tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
#endif
encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
@@ -858,10 +859,10 @@ void vp8_encode_frame(VP8_COMP *cpi) {
/* for each macroblock row in image */
for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
- vp8_zero(cm->left_context)
+ vp8_zero(cm->left_context);
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
- tp = cpi->tok;
+ tp = cpi->tok;
#endif
encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
diff --git a/vp8/encoder/encodemv.c b/vp8/encoder/encodemv.c
index ff3896539..c88ea1653 100644
--- a/vp8/encoder/encodemv.c
+++ b/vp8/encoder/encodemv.c
@@ -205,8 +205,11 @@ static void write_component_probs(vp8_writer *const w,
(void)rc;
vp8_copy_array(Pnew, default_mvc, MVPcount);
- vp8_zero(is_short_ct) vp8_zero(sign_ct) vp8_zero(bit_ct) vp8_zero(short_ct)
- vp8_zero(short_bct)
+ vp8_zero(is_short_ct);
+ vp8_zero(sign_ct);
+ vp8_zero(bit_ct);
+ vp8_zero(short_ct);
+ vp8_zero(short_bct);
/* j=0 */
{
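The encodemv.c hunk above shows why the macro cleanup forces call-site edits: the old vp8_zero carried its own trailing semicolon, which is what made the unpunctuated chain vp8_zero(is_short_ct) vp8_zero(sign_ct) ... legal. A minimal sketch with the new definition (hypothetical counters, same macro shape as vp8/common/common.h):

#include <string.h>
#define vp8_zero(Dest) memset(&(Dest), 0, sizeof(Dest)) /* no trailing ';' */

void reset_mv_counts(void) {
  unsigned int sign_ct[2];
  unsigned int bit_ct[2][2];
  vp8_zero(sign_ct); /* each use now ends with its own semicolon */
  vp8_zero(bit_ct);
}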
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index 9e7f5c7ac..4ab6c7b3d 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -204,19 +204,21 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) {
/* returns distortion + motion vector cost */
#define ERR(r, c) (MVC(r, c) + DIST(r, c))
/* checks if (r,c) has better score than previous best */
-#define CHECK_BETTER(v, r, c) \
- IFMVCV(r, c, \
- { \
- thismse = DIST(r, c); \
- if ((v = (MVC(r, c) + thismse)) < besterr) { \
- besterr = v; \
- br = r; \
- bc = c; \
- *distortion = thismse; \
- *sse1 = sse; \
- } \
- }, \
- v = UINT_MAX;)
+#define CHECK_BETTER(v, r, c) \
+ do { \
+ IFMVCV(r, c, \
+ { \
+ thismse = DIST(r, c); \
+ if ((v = (MVC(r, c) + thismse)) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ }, \
+ v = UINT_MAX;) \
+ } while (0)
int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
@@ -800,13 +802,13 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
#define CHECK_BOUNDS(range) \
- { \
+ do { \
all_in = 1; \
all_in &= ((br - range) >= x->mv_row_min); \
all_in &= ((br + range) <= x->mv_row_max); \
all_in &= ((bc - range) >= x->mv_col_min); \
all_in &= ((bc + range) <= x->mv_col_max); \
- }
+ } while (0)
#define CHECK_POINT \
{ \
@@ -817,7 +819,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
#define CHECK_BETTER \
- { \
+ do { \
if (thissad < bestsad) { \
thissad += \
mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); \
@@ -826,7 +828,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
best_site = i; \
} \
} \
- }
+ } while (0)
static const MV next_chkpts[6][3] = {
{ { -2, 0 }, { -1, -2 }, { 1, -2 } }, { { -1, -2 }, { 1, -2 }, { 2, 0 } },
@@ -901,7 +903,7 @@ int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
#endif
/* hex search */
- CHECK_BOUNDS(2)
+ CHECK_BOUNDS(2);
if (all_in) {
for (i = 0; i < 6; ++i) {
@@ -910,7 +912,7 @@ int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) +
this_mv.as_mv.col;
thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
- CHECK_BETTER
+ CHECK_BETTER;
}
} else {
for (i = 0; i < 6; ++i) {
@@ -920,7 +922,7 @@ int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) +
this_mv.as_mv.col;
thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
- CHECK_BETTER
+ CHECK_BETTER;
}
}
@@ -934,7 +936,7 @@ int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
for (j = 1; j < hex_range; ++j) {
best_site = -1;
- CHECK_BOUNDS(2)
+ CHECK_BOUNDS(2);
if (all_in) {
for (i = 0; i < 3; ++i) {
@@ -943,7 +945,7 @@ int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
this_mv.as_mv.col;
thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
- CHECK_BETTER
+ CHECK_BETTER;
}
} else {
for (i = 0; i < 3; ++i) {
@@ -953,7 +955,7 @@ int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
this_mv.as_mv.col;
thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
- CHECK_BETTER
+ CHECK_BETTER;
}
}
@@ -975,7 +977,7 @@ int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
cal_neighbors:
for (j = 0; j < dia_range; ++j) {
best_site = -1;
- CHECK_BOUNDS(1)
+ CHECK_BOUNDS(1);
if (all_in) {
for (i = 0; i < 4; ++i) {
@@ -984,7 +986,7 @@ cal_neighbors:
this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
this_mv.as_mv.col;
thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
- CHECK_BETTER
+ CHECK_BETTER;
}
} else {
for (i = 0; i < 4; ++i) {
@@ -994,7 +996,7 @@ cal_neighbors:
this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
this_mv.as_mv.col;
thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
- CHECK_BETTER
+ CHECK_BETTER;
}
}
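A short note on the CHECK_BOUNDS / CHECK_BETTER call sites above (illustrative, simplified from the loops in this file): once the bodies are do { ... } while (0) blocks, an invocation plus semicolon is a single statement, so it behaves like a function call even in unbraced control flow.

/* Sketch: a one-statement macro is safe as the body of a bare loop. */
for (i = 0; i < 6; ++i)
  CHECK_BETTER; /* expands to do { ... } while (0); -- one statement */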
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 15c9d72f5..59bce951e 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -1023,7 +1023,7 @@ void vp8_set_speed_features(VP8_COMP *cpi) {
memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
- }; /* switch */
+ } /* switch */
/* Slow quant, dct and trellis not worthwhile for first pass
* so make sure they are always turned off.
diff --git a/vp9/common/vp9_common.h b/vp9/common/vp9_common.h
index e3c5535dd..3cec53bfd 100644
--- a/vp9/common/vp9_common.h
+++ b/vp9/common/vp9_common.h
@@ -27,10 +27,10 @@ extern "C" {
// Only need this for fixed-size arrays, for structs just assign.
#define vp9_copy(dest, src) \
- { \
+ do { \
assert(sizeof(dest) == sizeof(src)); \
memcpy(dest, src, sizeof(src)); \
- }
+ } while (0)
// Use this for variably-sized arrays.
#define vp9_copy_array(dest, src, n) \
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index ac29f36ec..cd6706420 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -159,59 +159,63 @@ static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) {
#if CONFIG_VP9_HIGHBITDEPTH
/* checks if (r, c) has better score than previous best */
-#define CHECK_BETTER(v, r, c) \
- if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
- int64_t tmpmse; \
- const MV mv = { r, c }; \
- const MV ref_mv = { rr, rc }; \
- if (second_pred == NULL) { \
- thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
- src_stride, &sse); \
- } else { \
- thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
- src_stride, &sse, second_pred); \
- } \
- tmpmse = thismse; \
- tmpmse += mv_err_cost(&mv, &ref_mv, mvjcost, mvcost, error_per_bit); \
- if (tmpmse >= INT_MAX) { \
- v = INT_MAX; \
- } else if ((v = (uint32_t)tmpmse) < besterr) { \
- besterr = v; \
- br = r; \
- bc = c; \
- *distortion = thismse; \
- *sse1 = sse; \
- } \
- } else { \
- v = INT_MAX; \
- }
+#define CHECK_BETTER(v, r, c) \
+ do { \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
+ int64_t tmpmse; \
+ const MV mv = { r, c }; \
+ const MV ref_mv = { rr, rc }; \
+ if (second_pred == NULL) { \
+ thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
+ src_stride, &sse); \
+ } else { \
+ thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
+ src_stride, &sse, second_pred); \
+ } \
+ tmpmse = thismse; \
+ tmpmse += mv_err_cost(&mv, &ref_mv, mvjcost, mvcost, error_per_bit); \
+ if (tmpmse >= INT_MAX) { \
+ v = INT_MAX; \
+ } else if ((v = (uint32_t)tmpmse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ } else { \
+ v = INT_MAX; \
+ } \
+ } while (0)
#else
/* checks if (r, c) has better score than previous best */
-#define CHECK_BETTER(v, r, c) \
- if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
- const MV mv = { r, c }; \
- const MV ref_mv = { rr, rc }; \
- if (second_pred == NULL) \
- thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
- src_stride, &sse); \
- else \
- thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
- src_stride, &sse, second_pred); \
- if ((v = mv_err_cost(&mv, &ref_mv, mvjcost, mvcost, error_per_bit) + \
- thismse) < besterr) { \
- besterr = v; \
- br = r; \
- bc = c; \
- *distortion = thismse; \
- *sse1 = sse; \
- } \
- } else { \
- v = INT_MAX; \
- }
+#define CHECK_BETTER(v, r, c) \
+ do { \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
+ const MV mv = { r, c }; \
+ const MV ref_mv = { rr, rc }; \
+ if (second_pred == NULL) \
+ thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
+ src_stride, &sse); \
+ else \
+ thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
+ src_stride, &sse, second_pred); \
+ if ((v = mv_err_cost(&mv, &ref_mv, mvjcost, mvcost, error_per_bit) + \
+ thismse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ } else { \
+ v = INT_MAX; \
+ } \
+ } while (0)
#endif
#define FIRST_LEVEL_CHECKS \
- { \
+ do { \
unsigned int left, right, up, down, diag; \
CHECK_BETTER(left, tr, tc - hstep); \
CHECK_BETTER(right, tr, tc + hstep); \
@@ -224,10 +228,10 @@ static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) {
case 2: CHECK_BETTER(diag, tr + hstep, tc - hstep); break; \
case 3: CHECK_BETTER(diag, tr + hstep, tc + hstep); break; \
} \
- }
+ } while (0)
#define SECOND_LEVEL_CHECKS \
- { \
+ do { \
int kr, kc; \
unsigned int second; \
if (tr != br && tc != bc) { \
@@ -256,7 +260,7 @@ static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) {
case 3: CHECK_BETTER(second, tr + kr, tc - hstep); break; \
} \
} \
- }
+ } while (0)
#define SETUP_SUBPEL_SEARCH \
const uint8_t *const z = x->plane[0].src.buf; \
@@ -290,7 +294,7 @@ static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) {
maxr = subpel_mv_limits.row_max; \
\
bestmv->row *= 8; \
- bestmv->col *= 8;
+ bestmv->col *= 8
static unsigned int setup_center_error(
const MACROBLOCKD *xd, const MV *bestmv, const MV *ref_mv,
@@ -678,48 +682,52 @@ static int accurate_sub_pel_search(
// TODO(yunqing): this part can be further refactored.
#if CONFIG_VP9_HIGHBITDEPTH
/* checks if (r, c) has better score than previous best */
-#define CHECK_BETTER1(v, r, c) \
- if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
- int64_t tmpmse; \
- const MV mv = { r, c }; \
- const MV ref_mv = { rr, rc }; \
- thismse = \
- accurate_sub_pel_search(xd, &mv, x->me_sf, kernel, vfp, z, src_stride, \
- y, y_stride, second_pred, w, h, &sse); \
- tmpmse = thismse; \
- tmpmse += mv_err_cost(&mv, &ref_mv, mvjcost, mvcost, error_per_bit); \
- if (tmpmse >= INT_MAX) { \
- v = INT_MAX; \
- } else if ((v = (uint32_t)tmpmse) < besterr) { \
- besterr = v; \
- br = r; \
- bc = c; \
- *distortion = thismse; \
- *sse1 = sse; \
- } \
- } else { \
- v = INT_MAX; \
- }
+#define CHECK_BETTER1(v, r, c) \
+ do { \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
+ int64_t tmpmse; \
+ const MV mv = { r, c }; \
+ const MV ref_mv = { rr, rc }; \
+ thismse = accurate_sub_pel_search(xd, &mv, x->me_sf, kernel, vfp, z, \
+ src_stride, y, y_stride, second_pred, \
+ w, h, &sse); \
+ tmpmse = thismse; \
+ tmpmse += mv_err_cost(&mv, &ref_mv, mvjcost, mvcost, error_per_bit); \
+ if (tmpmse >= INT_MAX) { \
+ v = INT_MAX; \
+ } else if ((v = (uint32_t)tmpmse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ } else { \
+ v = INT_MAX; \
+ } \
+ } while (0)
#else
/* checks if (r, c) has better score than previous best */
-#define CHECK_BETTER1(v, r, c) \
- if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
- const MV mv = { r, c }; \
- const MV ref_mv = { rr, rc }; \
- thismse = \
- accurate_sub_pel_search(xd, &mv, x->me_sf, kernel, vfp, z, src_stride, \
- y, y_stride, second_pred, w, h, &sse); \
- if ((v = mv_err_cost(&mv, &ref_mv, mvjcost, mvcost, error_per_bit) + \
- thismse) < besterr) { \
- besterr = v; \
- br = r; \
- bc = c; \
- *distortion = thismse; \
- *sse1 = sse; \
- } \
- } else { \
- v = INT_MAX; \
- }
+#define CHECK_BETTER1(v, r, c) \
+ do { \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
+ const MV mv = { r, c }; \
+ const MV ref_mv = { rr, rc }; \
+ thismse = accurate_sub_pel_search(xd, &mv, x->me_sf, kernel, vfp, z, \
+ src_stride, y, y_stride, second_pred, \
+ w, h, &sse); \
+ if ((v = mv_err_cost(&mv, &ref_mv, mvjcost, mvcost, error_per_bit) + \
+ thismse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ } else { \
+ v = INT_MAX; \
+ } \
+ } while (0)
#endif
@@ -2962,7 +2970,7 @@ int vp9_full_pixel_search(const VP9_COMP *const cpi, const MACROBLOCK *const x,
(void)sse; \
(void)thismse; \
(void)cost_list; \
- (void)use_accurate_subpel_search;
+ (void)use_accurate_subpel_search
// Return the maximum MV.
uint32_t vp9_return_max_sub_pixel_mv(
diff --git a/vpx/src/vpx_encoder.c b/vpx/src/vpx_encoder.c
index f636b54a3..846638fe5 100644
--- a/vpx/src/vpx_encoder.c
+++ b/vpx/src/vpx_encoder.c
@@ -173,7 +173,7 @@ vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
#include "vpx_ports/x86.h"
#define FLOATING_POINT_INIT() \
do { \
- unsigned short x87_orig_mode = x87_set_double_precision();
+ unsigned short x87_orig_mode = x87_set_double_precision()
#define FLOATING_POINT_RESTORE() \
x87_set_control_word(x87_orig_mode); \
} \
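These two macros are a matched pair: FLOATING_POINT_INIT() opens a do { block and saves the x87 control word while switching to double precision, and FLOATING_POINT_RESTORE() restores it and closes the block with while (0). With the semicolon moved out of the macro, a guarded region reads like ordinary statements (a hedged sketch; the work in the middle is hypothetical):

FLOATING_POINT_INIT();
res = encode_one_frame(); /* hypothetical work at x87 double precision */
FLOATING_POINT_RESTORE();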
diff --git a/vpx_dsp/x86/highbd_convolve_avx2.c b/vpx_dsp/x86/highbd_convolve_avx2.c
index 320962561..01a52ec8b 100644
--- a/vpx_dsp/x86/highbd_convolve_avx2.c
+++ b/vpx_dsp/x86/highbd_convolve_avx2.c
@@ -1465,10 +1465,10 @@ highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v2_sse2;
#define vpx_highbd_filter_block1d4_h4_avg_avx2 \
vpx_highbd_filter_block1d4_h8_avg_avx2
-HIGH_FUN_CONV_1D(horiz, x0_q4, x_step_q4, h, src, , avx2, 0);
+HIGH_FUN_CONV_1D(horiz, x0_q4, x_step_q4, h, src, , avx2, 0)
HIGH_FUN_CONV_1D(vert, y0_q4, y_step_q4, v,
- src - src_stride * (num_taps / 2 - 1), , avx2, 0);
-HIGH_FUN_CONV_2D(, avx2, 0);
+ src - src_stride * (num_taps / 2 - 1), , avx2, 0)
+HIGH_FUN_CONV_2D(, avx2, 0)
// From vpx_dsp/x86/vpx_high_subpixel_8t_sse2.asm.
highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h8_avg_sse2;
@@ -1487,9 +1487,9 @@ highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v2_avg_sse2;
#define vpx_highbd_filter_block1d4_v2_avg_avx2 \
vpx_highbd_filter_block1d4_v2_avg_sse2
-HIGH_FUN_CONV_1D(avg_horiz, x0_q4, x_step_q4, h, src, avg_, avx2, 1);
+HIGH_FUN_CONV_1D(avg_horiz, x0_q4, x_step_q4, h, src, avg_, avx2, 1)
HIGH_FUN_CONV_1D(avg_vert, y0_q4, y_step_q4, v,
- src - src_stride * (num_taps / 2 - 1), avg_, avx2, 1);
-HIGH_FUN_CONV_2D(avg_, avx2, 1);
+ src - src_stride * (num_taps / 2 - 1), avg_, avx2, 1)
+HIGH_FUN_CONV_2D(avg_, avx2, 1)
#undef HIGHBD_FUNC
diff --git a/vpx_dsp/x86/highbd_variance_sse2.c b/vpx_dsp/x86/highbd_variance_sse2.c
index dd6cfbb2c..7c8d79b09 100644
--- a/vpx_dsp/x86/highbd_variance_sse2.c
+++ b/vpx_dsp/x86/highbd_variance_sse2.c
@@ -121,8 +121,8 @@ static void highbd_12_variance_sse2(const uint16_t *src, int src_stride,
*sse = ROUND_POWER_OF_TWO(*sse, 8); \
}
-HIGH_GET_VAR(16);
-HIGH_GET_VAR(8);
+HIGH_GET_VAR(16)
+HIGH_GET_VAR(8)
#undef HIGH_GET_VAR
@@ -167,16 +167,16 @@ HIGH_GET_VAR(8);
return (var >= 0) ? (uint32_t)var : 0; \
}
-VAR_FN(64, 64, 16, 12);
-VAR_FN(64, 32, 16, 11);
-VAR_FN(32, 64, 16, 11);
-VAR_FN(32, 32, 16, 10);
-VAR_FN(32, 16, 16, 9);
-VAR_FN(16, 32, 16, 9);
-VAR_FN(16, 16, 16, 8);
-VAR_FN(16, 8, 8, 7);
-VAR_FN(8, 16, 8, 7);
-VAR_FN(8, 8, 8, 6);
+VAR_FN(64, 64, 16, 12)
+VAR_FN(64, 32, 16, 11)
+VAR_FN(32, 64, 16, 11)
+VAR_FN(32, 32, 16, 10)
+VAR_FN(32, 16, 16, 9)
+VAR_FN(16, 32, 16, 9)
+VAR_FN(16, 16, 16, 8)
+VAR_FN(16, 8, 8, 7)
+VAR_FN(8, 16, 8, 7)
+VAR_FN(8, 8, 8, 6)
#undef VAR_FN
@@ -255,10 +255,10 @@ unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t *src8, int src_stride,
const uint16_t *ref, ptrdiff_t ref_stride, int height, \
unsigned int *sse, void *unused0, void *unused);
#define DECLS(opt) \
- DECL(8, opt); \
+ DECL(8, opt) \
DECL(16, opt)
-DECLS(sse2);
+DECLS(sse2)
#undef DECLS
#undef DECL
@@ -383,20 +383,20 @@ DECLS(sse2);
return (var >= 0) ? (uint32_t)var : 0; \
}
-#define FNS(opt) \
- FN(64, 64, 16, 6, 6, opt, (int64_t)); \
- FN(64, 32, 16, 6, 5, opt, (int64_t)); \
- FN(32, 64, 16, 5, 6, opt, (int64_t)); \
- FN(32, 32, 16, 5, 5, opt, (int64_t)); \
- FN(32, 16, 16, 5, 4, opt, (int64_t)); \
- FN(16, 32, 16, 4, 5, opt, (int64_t)); \
- FN(16, 16, 16, 4, 4, opt, (int64_t)); \
- FN(16, 8, 16, 4, 3, opt, (int64_t)); \
- FN(8, 16, 8, 3, 4, opt, (int64_t)); \
- FN(8, 8, 8, 3, 3, opt, (int64_t)); \
- FN(8, 4, 8, 3, 2, opt, (int64_t));
-
-FNS(sse2);
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t)) \
+ FN(64, 32, 16, 6, 5, opt, (int64_t)) \
+ FN(32, 64, 16, 5, 6, opt, (int64_t)) \
+ FN(32, 32, 16, 5, 5, opt, (int64_t)) \
+ FN(32, 16, 16, 5, 4, opt, (int64_t)) \
+ FN(16, 32, 16, 4, 5, opt, (int64_t)) \
+ FN(16, 16, 16, 4, 4, opt, (int64_t)) \
+ FN(16, 8, 16, 4, 3, opt, (int64_t)) \
+ FN(8, 16, 8, 3, 4, opt, (int64_t)) \
+ FN(8, 8, 8, 3, 3, opt, (int64_t)) \
+ FN(8, 4, 8, 3, 2, opt, (int64_t))
+
+FNS(sse2)
#undef FNS
#undef FN
@@ -412,7 +412,7 @@ FNS(sse2);
DECL(16, opt1) \
DECL(8, opt1)
-DECLS(sse2);
+DECLS(sse2)
#undef DECL
#undef DECLS
@@ -542,20 +542,20 @@ DECLS(sse2);
return (var >= 0) ? (uint32_t)var : 0; \
}
-#define FNS(opt1) \
- FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
- FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
- FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
- FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
- FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
- FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
- FN(16, 16, 16, 4, 4, opt1, (int64_t)); \
- FN(16, 8, 16, 4, 3, opt1, (int64_t)); \
- FN(8, 16, 8, 4, 3, opt1, (int64_t)); \
- FN(8, 8, 8, 3, 3, opt1, (int64_t)); \
- FN(8, 4, 8, 3, 2, opt1, (int64_t));
-
-FNS(sse2);
+#define FNS(opt1) \
+ FN(64, 64, 16, 6, 6, opt1, (int64_t)) \
+ FN(64, 32, 16, 6, 5, opt1, (int64_t)) \
+ FN(32, 64, 16, 5, 6, opt1, (int64_t)) \
+ FN(32, 32, 16, 5, 5, opt1, (int64_t)) \
+ FN(32, 16, 16, 5, 4, opt1, (int64_t)) \
+ FN(16, 32, 16, 4, 5, opt1, (int64_t)) \
+ FN(16, 16, 16, 4, 4, opt1, (int64_t)) \
+ FN(16, 8, 16, 4, 3, opt1, (int64_t)) \
+ FN(8, 16, 8, 4, 3, opt1, (int64_t)) \
+ FN(8, 8, 8, 3, 3, opt1, (int64_t)) \
+ FN(8, 4, 8, 3, 2, opt1, (int64_t))
+
+FNS(sse2)
#undef FNS
#undef FN
diff --git a/vpx_dsp/x86/sad_avx2.c b/vpx_dsp/x86/sad_avx2.c
index d94413430..3b48acd51 100644
--- a/vpx_dsp/x86/sad_avx2.c
+++ b/vpx_dsp/x86/sad_avx2.c
@@ -71,17 +71,17 @@
return res; \
}
-#define FSAD64 \
- FSAD64_H(64); \
- FSAD64_H(32);
+#define FSAD64 \
+ FSAD64_H(64) \
+ FSAD64_H(32)
-#define FSAD32 \
- FSAD32_H(64); \
- FSAD32_H(32); \
- FSAD32_H(16);
+#define FSAD32 \
+ FSAD32_H(64) \
+ FSAD32_H(32) \
+ FSAD32_H(16)
-FSAD64;
-FSAD32;
+FSAD64
+FSAD32
#undef FSAD64
#undef FSAD32
@@ -160,17 +160,17 @@ FSAD32;
return res; \
}
-#define FSADAVG64 \
- FSADAVG64_H(64); \
- FSADAVG64_H(32);
+#define FSADAVG64 \
+ FSADAVG64_H(64) \
+ FSADAVG64_H(32)
-#define FSADAVG32 \
- FSADAVG32_H(64); \
- FSADAVG32_H(32); \
- FSADAVG32_H(16);
+#define FSADAVG32 \
+ FSADAVG32_H(64) \
+ FSADAVG32_H(32) \
+ FSADAVG32_H(16)
-FSADAVG64;
-FSADAVG32;
+FSADAVG64
+FSADAVG32
#undef FSADAVG64
#undef FSADAVG32
diff --git a/vpx_dsp/x86/variance_sse2.c b/vpx_dsp/x86/variance_sse2.c
index 67645c57a..a67c92aad 100644
--- a/vpx_dsp/x86/variance_sse2.c
+++ b/vpx_dsp/x86/variance_sse2.c
@@ -471,23 +471,23 @@ DECLS(ssse3, ssse3);
(unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2)); \
}
-#define FNS(opt1, opt2) \
- FN(64, 64, 16, 6, 6, opt1, (int64_t), (int64_t)); \
- FN(64, 32, 16, 6, 5, opt1, (int64_t), (int64_t)); \
- FN(32, 64, 16, 5, 6, opt1, (int64_t), (int64_t)); \
- FN(32, 32, 16, 5, 5, opt1, (int64_t), (int64_t)); \
- FN(32, 16, 16, 5, 4, opt1, (int64_t), (int64_t)); \
- FN(16, 32, 16, 4, 5, opt1, (int64_t), (int64_t)); \
- FN(16, 16, 16, 4, 4, opt1, (uint32_t), (int64_t)); \
- FN(16, 8, 16, 4, 3, opt1, (int32_t), (int32_t)); \
- FN(8, 16, 8, 3, 4, opt1, (int32_t), (int32_t)); \
- FN(8, 8, 8, 3, 3, opt1, (int32_t), (int32_t)); \
- FN(8, 4, 8, 3, 2, opt1, (int32_t), (int32_t)); \
- FN(4, 8, 4, 2, 3, opt1, (int32_t), (int32_t)); \
+#define FNS(opt1, opt2) \
+ FN(64, 64, 16, 6, 6, opt1, (int64_t), (int64_t)) \
+ FN(64, 32, 16, 6, 5, opt1, (int64_t), (int64_t)) \
+ FN(32, 64, 16, 5, 6, opt1, (int64_t), (int64_t)) \
+ FN(32, 32, 16, 5, 5, opt1, (int64_t), (int64_t)) \
+ FN(32, 16, 16, 5, 4, opt1, (int64_t), (int64_t)) \
+ FN(16, 32, 16, 4, 5, opt1, (int64_t), (int64_t)) \
+ FN(16, 16, 16, 4, 4, opt1, (uint32_t), (int64_t)) \
+ FN(16, 8, 16, 4, 3, opt1, (int32_t), (int32_t)) \
+ FN(8, 16, 8, 3, 4, opt1, (int32_t), (int32_t)) \
+ FN(8, 8, 8, 3, 3, opt1, (int32_t), (int32_t)) \
+ FN(8, 4, 8, 3, 2, opt1, (int32_t), (int32_t)) \
+ FN(4, 8, 4, 2, 3, opt1, (int32_t), (int32_t)) \
FN(4, 4, 4, 2, 2, opt1, (int32_t), (int32_t))
-FNS(sse2, sse2);
-FNS(ssse3, ssse3);
+FNS(sse2, sse2)
+FNS(ssse3, ssse3)
#undef FNS
#undef FN
@@ -543,23 +543,23 @@ DECLS(ssse3, ssse3);
(unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2)); \
}
-#define FNS(opt1, opt2) \
- FN(64, 64, 16, 6, 6, opt1, (int64_t), (int64_t)); \
- FN(64, 32, 16, 6, 5, opt1, (int64_t), (int64_t)); \
- FN(32, 64, 16, 5, 6, opt1, (int64_t), (int64_t)); \
- FN(32, 32, 16, 5, 5, opt1, (int64_t), (int64_t)); \
- FN(32, 16, 16, 5, 4, opt1, (int64_t), (int64_t)); \
- FN(16, 32, 16, 4, 5, opt1, (int64_t), (int64_t)); \
- FN(16, 16, 16, 4, 4, opt1, (uint32_t), (int64_t)); \
- FN(16, 8, 16, 4, 3, opt1, (uint32_t), (int32_t)); \
- FN(8, 16, 8, 3, 4, opt1, (uint32_t), (int32_t)); \
- FN(8, 8, 8, 3, 3, opt1, (uint32_t), (int32_t)); \
- FN(8, 4, 8, 3, 2, opt1, (uint32_t), (int32_t)); \
- FN(4, 8, 4, 2, 3, opt1, (uint32_t), (int32_t)); \
+#define FNS(opt1, opt2) \
+ FN(64, 64, 16, 6, 6, opt1, (int64_t), (int64_t)) \
+ FN(64, 32, 16, 6, 5, opt1, (int64_t), (int64_t)) \
+ FN(32, 64, 16, 5, 6, opt1, (int64_t), (int64_t)) \
+ FN(32, 32, 16, 5, 5, opt1, (int64_t), (int64_t)) \
+ FN(32, 16, 16, 5, 4, opt1, (int64_t), (int64_t)) \
+ FN(16, 32, 16, 4, 5, opt1, (int64_t), (int64_t)) \
+ FN(16, 16, 16, 4, 4, opt1, (uint32_t), (int64_t)) \
+ FN(16, 8, 16, 4, 3, opt1, (uint32_t), (int32_t)) \
+ FN(8, 16, 8, 3, 4, opt1, (uint32_t), (int32_t)) \
+ FN(8, 8, 8, 3, 3, opt1, (uint32_t), (int32_t)) \
+ FN(8, 4, 8, 3, 2, opt1, (uint32_t), (int32_t)) \
+ FN(4, 8, 4, 2, 3, opt1, (uint32_t), (int32_t)) \
FN(4, 4, 4, 2, 2, opt1, (uint32_t), (int32_t))
-FNS(sse2, sse);
-FNS(ssse3, ssse3);
+FNS(sse2, sse)
+FNS(ssse3, ssse3)
#undef FNS
#undef FN
diff --git a/vpx_dsp/x86/vpx_subpixel_4t_intrin_sse2.c b/vpx_dsp/x86/vpx_subpixel_4t_intrin_sse2.c
index 239179028..0cbd151dc 100644
--- a/vpx_dsp/x86/vpx_subpixel_4t_intrin_sse2.c
+++ b/vpx_dsp/x86/vpx_subpixel_4t_intrin_sse2.c
@@ -1040,12 +1040,12 @@ filter8_1dfunction vpx_filter_block1d4_h2_avg_sse2;
// const InterpKernel *filter, int x0_q4,
// int32_t x_step_q4, int y0_q4, int y_step_q4,
// int w, int h);
-FUN_CONV_1D(horiz, x0_q4, x_step_q4, h, src, , sse2, 0);
+FUN_CONV_1D(horiz, x0_q4, x_step_q4, h, src, , sse2, 0)
FUN_CONV_1D(vert, y0_q4, y_step_q4, v, src - (num_taps / 2 - 1) * src_stride, ,
- sse2, 0);
-FUN_CONV_1D(avg_horiz, x0_q4, x_step_q4, h, src, avg_, sse2, 1);
+ sse2, 0)
+FUN_CONV_1D(avg_horiz, x0_q4, x_step_q4, h, src, avg_, sse2, 1)
FUN_CONV_1D(avg_vert, y0_q4, y_step_q4, v,
- src - (num_taps / 2 - 1) * src_stride, avg_, sse2, 1);
+ src - (num_taps / 2 - 1) * src_stride, avg_, sse2, 1)
// void vpx_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
@@ -1057,8 +1057,8 @@ FUN_CONV_1D(avg_vert, y0_q4, y_step_q4, v,
// const InterpKernel *filter, int x0_q4,
// int32_t x_step_q4, int y0_q4, int y_step_q4,
// int w, int h);
-FUN_CONV_2D(, sse2, 0);
-FUN_CONV_2D(avg_, sse2, 1);
+FUN_CONV_2D(, sse2, 0)
+FUN_CONV_2D(avg_, sse2, 1)
#if CONFIG_VP9_HIGHBITDEPTH && VPX_ARCH_X86_64
// From vpx_dsp/x86/vpx_high_subpixel_8t_sse2.asm.
@@ -1139,12 +1139,12 @@ highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h2_avg_sse2;
// const int16_t *filter_y,
// int y_step_q4,
// int w, int h, int bd);
-HIGH_FUN_CONV_1D(horiz, x0_q4, x_step_q4, h, src, , sse2, 0);
+HIGH_FUN_CONV_1D(horiz, x0_q4, x_step_q4, h, src, , sse2, 0)
HIGH_FUN_CONV_1D(vert, y0_q4, y_step_q4, v,
- src - src_stride * (num_taps / 2 - 1), , sse2, 0);
-HIGH_FUN_CONV_1D(avg_horiz, x0_q4, x_step_q4, h, src, avg_, sse2, 1);
+ src - src_stride * (num_taps / 2 - 1), , sse2, 0)
+HIGH_FUN_CONV_1D(avg_horiz, x0_q4, x_step_q4, h, src, avg_, sse2, 1)
HIGH_FUN_CONV_1D(avg_vert, y0_q4, y_step_q4, v,
- src - src_stride * (num_taps / 2 - 1), avg_, sse2, 1);
+ src - src_stride * (num_taps / 2 - 1), avg_, sse2, 1)
// void vpx_highbd_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
@@ -1156,6 +1156,6 @@ HIGH_FUN_CONV_1D(avg_vert, y0_q4, y_step_q4, v,
// const InterpKernel *filter, int x0_q4,
// int32_t x_step_q4, int y0_q4,
// int y_step_q4, int w, int h, int bd);
-HIGH_FUN_CONV_2D(, sse2, 0);
-HIGH_FUN_CONV_2D(avg_, sse2, 1);
+HIGH_FUN_CONV_2D(, sse2, 0)
+HIGH_FUN_CONV_2D(avg_, sse2, 1)
#endif // CONFIG_VP9_HIGHBITDEPTH && VPX_ARCH_X86_64
diff --git a/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c b/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c
index 1eaa19bfc..6f2983a4b 100644
--- a/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c
+++ b/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c
@@ -969,12 +969,12 @@ filter8_1dfunction vpx_filter_block1d4_h2_avg_ssse3;
// const InterpKernel *filter, int x0_q4,
// int32_t x_step_q4, int y0_q4,
// int y_step_q4, int w, int h);
-FUN_CONV_1D(horiz, x0_q4, x_step_q4, h, src, , avx2, 0);
+FUN_CONV_1D(horiz, x0_q4, x_step_q4, h, src, , avx2, 0)
FUN_CONV_1D(vert, y0_q4, y_step_q4, v, src - src_stride * (num_taps / 2 - 1), ,
- avx2, 0);
-FUN_CONV_1D(avg_horiz, x0_q4, x_step_q4, h, src, avg_, avx2, 1);
+ avx2, 0)
+FUN_CONV_1D(avg_horiz, x0_q4, x_step_q4, h, src, avg_, avx2, 1)
FUN_CONV_1D(avg_vert, y0_q4, y_step_q4, v,
- src - src_stride * (num_taps / 2 - 1), avg_, avx2, 1);
+ src - src_stride * (num_taps / 2 - 1), avg_, avx2, 1)
// void vpx_convolve8_avx2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
@@ -986,6 +986,6 @@ FUN_CONV_1D(avg_vert, y0_q4, y_step_q4, v,
// const InterpKernel *filter, int x0_q4,
// int32_t x_step_q4, int y0_q4, int y_step_q4,
// int w, int h);
-FUN_CONV_2D(, avx2, 0);
-FUN_CONV_2D(avg_, avx2, 1);
+FUN_CONV_2D(, avx2, 0)
+FUN_CONV_2D(avg_, avx2, 1)
#endif // HAVE_AX2 && HAVE_SSSE3
diff --git a/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c b/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
index 77355a208..ed46d6245 100644
--- a/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
+++ b/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
@@ -731,12 +731,12 @@ filter8_1dfunction vpx_filter_block1d4_h2_avg_ssse3;
// const InterpKernel *filter, int x0_q4,
// int32_t x_step_q4, int y0_q4,
// int y_step_q4, int w, int h);
-FUN_CONV_1D(horiz, x0_q4, x_step_q4, h, src, , ssse3, 0);
+FUN_CONV_1D(horiz, x0_q4, x_step_q4, h, src, , ssse3, 0)
FUN_CONV_1D(vert, y0_q4, y_step_q4, v, src - src_stride * (num_taps / 2 - 1), ,
- ssse3, 0);
-FUN_CONV_1D(avg_horiz, x0_q4, x_step_q4, h, src, avg_, ssse3, 1);
+ ssse3, 0)
+FUN_CONV_1D(avg_horiz, x0_q4, x_step_q4, h, src, avg_, ssse3, 1)
FUN_CONV_1D(avg_vert, y0_q4, y_step_q4, v,
- src - src_stride * (num_taps / 2 - 1), avg_, ssse3, 1);
+ src - src_stride * (num_taps / 2 - 1), avg_, ssse3, 1)
static void filter_horiz_w8_ssse3(const uint8_t *const src,
const ptrdiff_t src_stride,
@@ -1083,5 +1083,5 @@ void vpx_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
// const InterpKernel *filter, int x0_q4,
// int32_t x_step_q4, int y0_q4, int y_step_q4,
// int w, int h);
-FUN_CONV_2D(, ssse3, 0);
-FUN_CONV_2D(avg_, ssse3, 1);
+FUN_CONV_2D(, ssse3, 0)
+FUN_CONV_2D(avg_, ssse3, 1)
diff --git a/vpx_ports/x86.h b/vpx_ports/x86.h
index 651ff6460..795fb2923 100644
--- a/vpx_ports/x86.h
+++ b/vpx_ports/x86.h
@@ -47,7 +47,7 @@ typedef enum {
#define cpuid(func, func2, ax, bx, cx, dx) \
__asm__ __volatile__("cpuid \n\t" \
: "=a"(ax), "=b"(bx), "=c"(cx), "=d"(dx) \
- : "a"(func), "c"(func2));
+ : "a"(func), "c"(func2))
#else
#define cpuid(func, func2, ax, bx, cx, dx) \
__asm__ __volatile__( \
@@ -55,7 +55,7 @@ typedef enum {
"cpuid \n\t" \
"xchg %%edi, %%ebx \n\t" \
: "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \
- : "a"(func), "c"(func2));
+ : "a"(func), "c"(func2))
#endif
#elif defined(__SUNPRO_C) || \
defined(__SUNPRO_CC) /* end __GNUC__ or __ANDROID__*/
@@ -67,7 +67,7 @@ typedef enum {
"movl %ebx, %edi \n\t" \
"xchg %rsi, %rbx \n\t" \
: "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \
- : "a"(func), "c"(func2));
+ : "a"(func), "c"(func2))
#else
#define cpuid(func, func2, ax, bx, cx, dx) \
asm volatile( \
@@ -76,7 +76,7 @@ typedef enum {
"movl %ebx, %edi \n\t" \
"popl %ebx \n\t" \
: "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \
- : "a"(func), "c"(func2));
+ : "a"(func), "c"(func2))
#endif
#else /* end __SUNPRO__ */
#if VPX_ARCH_X86_64
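With the asm statement's trailing semicolon removed from each cpuid variant, uses of the macro terminate at the call site like a function call. A hedged usage sketch (variable names hypothetical):

unsigned int eax, ebx, ecx, edx;
cpuid(0, 0, eax, ebx, ecx, edx); /* leaf 0: highest leaf and vendor id */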