author    Dmitry Kovalev <dkovalev@google.com>  2013-08-08 14:52:39 -0700
committer Dmitry Kovalev <dkovalev@google.com>  2013-08-08 21:12:34 -0700
commit    6a8ec3eac29d7937c7b8c0d45e272dd98aae8188
tree      4dbb0c5cfa3ea7829db5432c7b63468f88437ad6
parent    ee40e1a63739101ec7a6a3b3f6373a88888faae1
General code cleanup.
Removing redundant parentheses and curly braces. Combining declarations with initializations. Adding useful intermediate variables instead of recalculating expressions every time.

Change-Id: I00106f404afd60bfc189905b0fded881684f941a
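As a small illustration of the cleanup style (this is a hedged sketch, not code from the patch; the struct, function, and variable names below are hypothetical, loosely modeled on the vp9_clamp_mv_min_max changes in vp9/encoder/vp9_mcomp.c):

#include <stdio.h>

/* Hypothetical example, not from the patch: before the cleanup, code like
 * this might declare variables first and assign them later, wrap expressions
 * in redundant parentheses, and recompute ref->row >> 3 in several places.
 * After the cleanup, each declaration carries its initializer, the extra
 * parentheses and braces are gone, and the shifted value lives in a const
 * intermediate that is computed once. */
typedef struct { int row; int col; } mv_t;

static int mv_row_range(const mv_t *ref, int max_full_pel) {
  const int row_fp  = ref->row >> 3;                        /* computed once */
  const int row_min = row_fp - max_full_pel + ((ref->row & 7) ? 1 : 0);
  const int row_max = row_fp + max_full_pel;
  return row_max - row_min;                                 /* size of the search window */
}

int main(void) {
  const mv_t ref = { 37, -12 };
  printf("row search range: %d\n", mv_row_range(&ref, 1023));
  return 0;
}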
-rw-r--r--  vp9/common/vp9_entropy.c        2
-rw-r--r--  vp9/common/vp9_entropymode.c    6
-rw-r--r--  vp9/common/vp9_reconintra.c    48
-rw-r--r--  vp9/decoder/vp9_decodemv.c     24
-rw-r--r--  vp9/decoder/vp9_detokenize.c   11
-rw-r--r--  vp9/decoder/vp9_onyxd_if.c     41
-rw-r--r--  vp9/encoder/vp9_mcomp.c       150
7 files changed, 119 insertions(+), 163 deletions(-)
diff --git a/vp9/common/vp9_entropy.c b/vp9/common/vp9_entropy.c
index d8b20a553..21e0e0471 100644
--- a/vp9/common/vp9_entropy.c
+++ b/vp9/common/vp9_entropy.c
@@ -377,7 +377,7 @@ static const vp9_prob modelcoefprobs_pareto8[COEFPROB_MODELS][MODEL_NODES] = {
static void extend_model_to_full_distribution(vp9_prob p,
vp9_prob *tree_probs) {
- const int l = ((p - 1) / 2);
+ const int l = (p - 1) / 2;
const vp9_prob (*model)[MODEL_NODES] = modelcoefprobs_pareto8;
if (p & 1) {
vpx_memcpy(tree_probs + UNCONSTRAINED_NODES,
diff --git a/vp9/common/vp9_entropymode.c b/vp9/common/vp9_entropymode.c
index c84b9e393..7c232f05b 100644
--- a/vp9/common/vp9_entropymode.c
+++ b/vp9/common/vp9_entropymode.c
@@ -440,14 +440,12 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) {
fc->tx_probs.p8x8[i][j] = update_ct2(pre_fc->tx_probs.p8x8[i][j],
branch_ct_8x8p[j]);
- tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i],
- branch_ct_16x16p);
+ tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], branch_ct_16x16p);
for (j = 0; j < TX_SIZES - 2; ++j)
fc->tx_probs.p16x16[i][j] = update_ct2(pre_fc->tx_probs.p16x16[i][j],
branch_ct_16x16p[j]);
- tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i],
- branch_ct_32x32p);
+ tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], branch_ct_32x32p);
for (j = 0; j < TX_SIZES - 1; ++j)
fc->tx_probs.p32x32[i][j] = update_ct2(pre_fc->tx_probs.p32x32[i][j],
branch_ct_32x32p[j]);
diff --git a/vp9/common/vp9_reconintra.c b/vp9/common/vp9_reconintra.c
index f351224a7..6209e1815 100644
--- a/vp9/common/vp9_reconintra.c
+++ b/vp9/common/vp9_reconintra.c
@@ -51,18 +51,17 @@ static INLINE void d27_predictor(uint8_t *pred_ptr, ptrdiff_t stride, int bs,
uint8_t *above_row, uint8_t *left_col) {
int r, c;
// first column
- for (r = 0; r < bs - 1; ++r) {
- pred_ptr[r * stride] = ROUND_POWER_OF_TWO(left_col[r] +
- left_col[r + 1], 1);
- }
+ for (r = 0; r < bs - 1; ++r)
+ pred_ptr[r * stride] = ROUND_POWER_OF_TWO(left_col[r] + left_col[r + 1], 1);
+
pred_ptr[(bs - 1) * stride] = left_col[bs - 1];
pred_ptr++;
// second column
- for (r = 0; r < bs - 2; ++r) {
- pred_ptr[r * stride] = ROUND_POWER_OF_TWO(left_col[r] +
- left_col[r + 1] * 2 +
- left_col[r + 2], 2);
- }
+ for (r = 0; r < bs - 2; ++r)
+ pred_ptr[r * stride] = ROUND_POWER_OF_TWO(left_col[r] +
+ left_col[r + 1] * 2 +
+ left_col[r + 2], 2);
+
pred_ptr[(bs - 2) * stride] = ROUND_POWER_OF_TWO(left_col[bs - 2] +
left_col[bs - 1] * 3,
2);
@@ -70,15 +69,12 @@ static INLINE void d27_predictor(uint8_t *pred_ptr, ptrdiff_t stride, int bs,
pred_ptr++;
// rest of last row
- for (c = 0; c < bs - 2; ++c) {
+ for (c = 0; c < bs - 2; ++c)
pred_ptr[(bs - 1) * stride + c] = left_col[bs - 1];
- }
- for (r = bs - 2; r >= 0; --r) {
- for (c = 0; c < bs - 2; ++c) {
+ for (r = bs - 2; r >= 0; --r)
+ for (c = 0; c < bs - 2; ++c)
pred_ptr[r * stride + c] = pred_ptr[(r + 1) * stride + c - 2];
- }
- }
}
intra_pred_allsizes(d27)
@@ -86,16 +82,12 @@ static INLINE void d63_predictor(uint8_t *pred_ptr, ptrdiff_t stride, int bs,
uint8_t *above_row, uint8_t *left_col) {
int r, c;
for (r = 0; r < bs; ++r) {
- for (c = 0; c < bs; ++c) {
- if (r & 1) {
- pred_ptr[c] = ROUND_POWER_OF_TWO(above_row[r/2 + c] +
- above_row[r/2 + c + 1] * 2 +
- above_row[r/2 + c + 2], 2);
- } else {
- pred_ptr[c] = ROUND_POWER_OF_TWO(above_row[r/2 + c] +
- above_row[r/2+ c + 1], 1);
- }
- }
+ for (c = 0; c < bs; ++c)
+ pred_ptr[c] = r & 1 ? ROUND_POWER_OF_TWO(above_row[r/2 + c] +
+ above_row[r/2 + c + 1] * 2 +
+ above_row[r/2 + c + 2], 2)
+ : ROUND_POWER_OF_TWO(above_row[r/2 + c] +
+ above_row[r/2+ c + 1], 1);
pred_ptr += stride;
}
}
@@ -141,9 +133,9 @@ static INLINE void d117_predictor(uint8_t *pred_ptr, ptrdiff_t stride, int bs,
left_col[0] * 2 +
left_col[1], 2);
for (r = 3; r < bs; ++r)
- pred_ptr[(r-2) * stride] = ROUND_POWER_OF_TWO(left_col[r - 3] +
- left_col[r - 2] * 2 +
- left_col[r - 1], 2);
+ pred_ptr[(r - 2) * stride] = ROUND_POWER_OF_TWO(left_col[r - 3] +
+ left_col[r - 2] * 2 +
+ left_col[r - 1], 2);
// the rest of the block
for (r = 2; r < bs; ++r) {
for (c = 1; c < bs; c++)
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index 074b3e9ac..e5de8f65a 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -446,10 +446,12 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
int_mv nearest_second, nearby_second, best_mv_second;
uint8_t inter_mode_ctx;
MV_REFERENCE_FRAME ref0, ref1;
+ int is_compound;
read_ref_frames(pbi, r, mbmi->segment_id, mbmi->ref_frame);
ref0 = mbmi->ref_frame[0];
ref1 = mbmi->ref_frame[1];
+ is_compound = ref1 > INTRA_FRAME;
vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context,
ref0, mbmi->ref_mvs[ref0], cm->ref_frame_sign_bias,
@@ -474,7 +476,7 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
? read_switchable_filter_type(pbi, r)
: cm->mcomp_filter_type;
- if (ref1 > INTRA_FRAME) {
+ if (is_compound) {
vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context,
ref1, mbmi->ref_mvs[ref1], cm->ref_frame_sign_bias,
mi_row, mi_col);
@@ -500,7 +502,7 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest, &nearby, j, 0,
mi_row, mi_col);
- if (ref1 > 0)
+ if (is_compound)
vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest_second,
&nearby_second, j, 1,
mi_row, mi_col);
@@ -511,30 +513,30 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
read_mv(r, &blockmv.as_mv, &best_mv.as_mv, nmvc,
&cm->counts.mv, allow_hp);
- if (ref1 > 0)
+ if (is_compound)
read_mv(r, &secondmv.as_mv, &best_mv_second.as_mv, nmvc,
&cm->counts.mv, allow_hp);
break;
case NEARESTMV:
blockmv.as_int = nearest.as_int;
- if (ref1 > 0)
+ if (is_compound)
secondmv.as_int = nearest_second.as_int;
break;
case NEARMV:
blockmv.as_int = nearby.as_int;
- if (ref1 > 0)
+ if (is_compound)
secondmv.as_int = nearby_second.as_int;
break;
case ZEROMV:
blockmv.as_int = 0;
- if (ref1 > 0)
+ if (is_compound)
secondmv.as_int = 0;
break;
default:
assert(!"Invalid inter mode value");
}
mi->bmi[j].as_mv[0].as_int = blockmv.as_int;
- if (ref1 > 0)
+ if (is_compound)
mi->bmi[j].as_mv[1].as_int = secondmv.as_int;
if (num_4x4_h == 2)
@@ -553,7 +555,7 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
mv0->as_int = nearby.as_int;
clamp_mv2(&mv0->as_mv, xd);
- if (ref1 > 0) {
+ if (is_compound) {
mv1->as_int = nearby_second.as_int;
clamp_mv2(&mv1->as_mv, xd);
}
@@ -563,7 +565,7 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
mv0->as_int = nearest.as_int;
clamp_mv2(&mv0->as_mv, xd);
- if (ref1 > 0) {
+ if (is_compound) {
mv1->as_int = nearest_second.as_int;
clamp_mv2(&mv1->as_mv, xd);
}
@@ -571,13 +573,13 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
case ZEROMV:
mv0->as_int = 0;
- if (ref1 > 0)
+ if (is_compound)
mv1->as_int = 0;
break;
case NEWMV:
read_mv(r, &mv0->as_mv, &best_mv.as_mv, nmvc, &cm->counts.mv, allow_hp);
- if (ref1 > 0)
+ if (is_compound)
read_mv(r, &mv1->as_mv, &best_mv_second.as_mv, nmvc, &cm->counts.mv,
allow_hp);
break;
diff --git a/vp9/decoder/vp9_detokenize.c b/vp9/decoder/vp9_detokenize.c
index 002164307..916b7210e 100644
--- a/vp9/decoder/vp9_detokenize.c
+++ b/vp9/decoder/vp9_detokenize.c
@@ -105,31 +105,28 @@ static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd,
vp9_coeff_count_model *coef_counts = counts->coef[tx_size];
const int16_t *scan, *nb;
uint8_t token_cache[1024];
- const uint8_t * band_translate;
+ const uint8_t *band_translate;
switch (tx_size) {
default:
- case TX_4X4: {
+ case TX_4X4:
scan = get_scan_4x4(get_tx_type_4x4(type, xd, block_idx));
above_ec = A[0] != 0;
left_ec = L[0] != 0;
band_translate = vp9_coefband_trans_4x4;
break;
- }
- case TX_8X8: {
+ case TX_8X8:
scan = get_scan_8x8(get_tx_type_8x8(type, xd));
above_ec = !!*(uint16_t *)A;
left_ec = !!*(uint16_t *)L;
band_translate = vp9_coefband_trans_8x8plus;
break;
- }
- case TX_16X16: {
+ case TX_16X16:
scan = get_scan_16x16(get_tx_type_16x16(type, xd));
above_ec = !!*(uint32_t *)A;
left_ec = !!*(uint32_t *)L;
band_translate = vp9_coefband_trans_8x8plus;
break;
- }
case TX_32X32:
scan = vp9_default_scan_32x32;
above_ec = !!*(uint64_t *)A;
diff --git a/vp9/decoder/vp9_onyxd_if.c b/vp9/decoder/vp9_onyxd_if.c
index 5a01dd790..b701fae23 100644
--- a/vp9/decoder/vp9_onyxd_if.c
+++ b/vp9/decoder/vp9_onyxd_if.c
@@ -187,21 +187,21 @@ vpx_codec_err_t vp9_copy_reference_dec(VP9D_PTR ptr,
* later commit that adds VP9-specific controls for this functionality.
*/
if (ref_frame_flag == VP9_LAST_FLAG) {
- ref_fb_idx = pbi->common.ref_frame_map[0];
+ ref_fb_idx = cm->ref_frame_map[0];
} else {
- vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
"Invalid reference frame");
- return pbi->common.error.error_code;
+ return cm->error.error_code;
}
if (!equal_dimensions(&cm->yv12_fb[ref_fb_idx], sd)) {
- vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
"Incorrect buffer dimensions");
} else {
vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
}
- return pbi->common.error.error_code;
+ return cm->error.error_code;
}
@@ -261,22 +261,21 @@ int vp9_get_reference_dec(VP9D_PTR ptr, int index, YV12_BUFFER_CONFIG **fb) {
/* If any buffer updating is signaled it should be done here. */
static void swap_frame_buffers(VP9D_COMP *pbi) {
int ref_index = 0, mask;
+ VP9_COMMON *const cm = &pbi->common;
for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
- if (mask & 1) {
- ref_cnt_fb(pbi->common.fb_idx_ref_cnt,
- &pbi->common.ref_frame_map[ref_index],
- pbi->common.new_fb_idx);
- }
+ if (mask & 1)
+ ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->ref_frame_map[ref_index],
+ cm->new_fb_idx);
++ref_index;
}
- pbi->common.frame_to_show = &pbi->common.yv12_fb[pbi->common.new_fb_idx];
- pbi->common.fb_idx_ref_cnt[pbi->common.new_fb_idx]--;
+ cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
+ cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
- /* Invalidate these references until the next frame starts. */
+ // Invalidate these references until the next frame starts.
for (ref_index = 0; ref_index < 3; ref_index++)
- pbi->common.active_ref_idx[ref_index] = INT_MAX;
+ cm->active_ref_idx[ref_index] = INT_MAX;
}
int vp9_receive_compressed_data(VP9D_PTR ptr,
@@ -293,7 +292,7 @@ int vp9_receive_compressed_data(VP9D_PTR ptr,
if (ptr == 0)
return -1;
- pbi->common.error.error_code = VPX_CODEC_OK;
+ cm->error.error_code = VPX_CODEC_OK;
pbi->source = source;
pbi->source_sz = size;
@@ -314,8 +313,8 @@ int vp9_receive_compressed_data(VP9D_PTR ptr,
cm->new_fb_idx = get_free_fb(cm);
- if (setjmp(pbi->common.error.jmp)) {
- pbi->common.error.setjmp = 0;
+ if (setjmp(cm->error.jmp)) {
+ cm->error.setjmp = 0;
/* We do not know if the missing frame(s) was supposed to update
* any of the reference buffers, but we act conservative and
@@ -334,13 +333,13 @@ int vp9_receive_compressed_data(VP9D_PTR ptr,
return -1;
}
- pbi->common.error.setjmp = 1;
+ cm->error.setjmp = 1;
retcode = vp9_decode_frame(pbi, psource);
if (retcode < 0) {
- pbi->common.error.error_code = VPX_CODEC_ERROR;
- pbi->common.error.setjmp = 0;
+ cm->error.error_code = VPX_CODEC_ERROR;
+ cm->error.setjmp = 0;
if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0)
cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
return retcode;
@@ -403,7 +402,7 @@ int vp9_receive_compressed_data(VP9D_PTR ptr,
pbi->last_time_stamp = time_stamp;
pbi->source_sz = 0;
- pbi->common.error.setjmp = 0;
+ cm->error.setjmp = 0;
return retcode;
}
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index 96e66f076..ca02df881 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -8,26 +8,29 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <stdio.h>
#include <limits.h>
#include <math.h>
+#include <stdio.h>
-#include "vp9/encoder/vp9_onyx_int.h"
-#include "vp9/encoder/vp9_mcomp.h"
-#include "vpx_mem/vpx_mem.h"
#include "./vpx_config.h"
+
+#include "vpx_mem/vpx_mem.h"
+
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_common.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_mcomp.h"
+
// #define NEW_DIAMOND_SEARCH
void vp9_clamp_mv_min_max(MACROBLOCK *x, int_mv *ref_mv) {
- int col_min = (ref_mv->as_mv.col >> 3) - MAX_FULL_PEL_VAL +
+ const int col_min = (ref_mv->as_mv.col >> 3) - MAX_FULL_PEL_VAL +
((ref_mv->as_mv.col & 7) ? 1 : 0);
- int row_min = (ref_mv->as_mv.row >> 3) - MAX_FULL_PEL_VAL +
+ const int row_min = (ref_mv->as_mv.row >> 3) - MAX_FULL_PEL_VAL +
((ref_mv->as_mv.row & 7) ? 1 : 0);
- int col_max = (ref_mv->as_mv.col >> 3) + MAX_FULL_PEL_VAL;
- int row_max = (ref_mv->as_mv.row >> 3) + MAX_FULL_PEL_VAL;
+ const int col_max = (ref_mv->as_mv.col >> 3) + MAX_FULL_PEL_VAL;
+ const int row_max = (ref_mv->as_mv.row >> 3) + MAX_FULL_PEL_VAL;
/* Get intersection of UMV window and valid MV window to reduce # of checks in diamond search. */
if (x->mv_col_min < col_min)
@@ -323,8 +326,6 @@ int vp9_find_best_sub_pixel_iterative(MACROBLOCK *x,
int src_stride = x->plane[0].src.stride;
MACROBLOCKD *xd = &x->e_mbd;
- int rr, rc, br, bc, hstep;
- int tr, tc;
unsigned int besterr = INT_MAX;
unsigned int sse;
unsigned int whichdir;
@@ -332,30 +333,27 @@ int vp9_find_best_sub_pixel_iterative(MACROBLOCK *x,
unsigned int quarteriters = iters_per_step;
unsigned int eighthiters = iters_per_step;
int thismse;
- int maxc, minc, maxr, minr;
- int y_stride;
- int offset;
uint8_t *y = xd->plane[0].pre[0].buf +
(bestmv->as_mv.row) * xd->plane[0].pre[0].stride +
bestmv->as_mv.col;
- y_stride = xd->plane[0].pre[0].stride;
+ const int y_stride = xd->plane[0].pre[0].stride;
- rr = ref_mv->as_mv.row;
- rc = ref_mv->as_mv.col;
- br = bestmv->as_mv.row << 3;
- bc = bestmv->as_mv.col << 3;
- hstep = 4;
- minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << MV_MAX_BITS) - 1));
- maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << MV_MAX_BITS) - 1));
- minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << MV_MAX_BITS) - 1));
- maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << MV_MAX_BITS) - 1));
+ int rr = ref_mv->as_mv.row;
+ int rc = ref_mv->as_mv.col;
+ int br = bestmv->as_mv.row << 3;
+ int bc = bestmv->as_mv.col << 3;
+ int hstep = 4;
+ const int minc = MAX(x->mv_col_min << 3, ref_mv->as_mv.col - MV_MAX);
+ const int maxc = MIN(x->mv_col_max << 3, ref_mv->as_mv.col + MV_MAX);
+ const int minr = MAX(x->mv_row_min << 3, ref_mv->as_mv.row - MV_MAX);
+ const int maxr = MIN(x->mv_row_max << 3, ref_mv->as_mv.row + MV_MAX);
- tr = br;
- tc = bc;
+ int tr = br;
+ int tc = bc;
- offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
+ const int offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
// central mv
bestmv->as_mv.row <<= 3;
@@ -533,12 +531,10 @@ int vp9_find_best_sub_pixel_comp_iterative(MACROBLOCK *x,
unsigned int *sse1,
const uint8_t *second_pred,
int w, int h) {
- uint8_t *z = x->plane[0].src.buf;
- int src_stride = x->plane[0].src.stride;
- MACROBLOCKD *xd = &x->e_mbd;
+ uint8_t *const z = x->plane[0].src.buf;
+ const int src_stride = x->plane[0].src.stride;
+ MACROBLOCKD *const xd = &x->e_mbd;
- int rr, rc, br, bc, hstep;
- int tr, tc;
unsigned int besterr = INT_MAX;
unsigned int sse;
unsigned int whichdir;
@@ -546,35 +542,28 @@ int vp9_find_best_sub_pixel_comp_iterative(MACROBLOCK *x,
unsigned int quarteriters = iters_per_step;
unsigned int eighthiters = iters_per_step;
int thismse;
- int maxc, minc, maxr, minr;
- int y_stride;
- int offset;
DECLARE_ALIGNED_ARRAY(16, uint8_t, comp_pred, 64 * 64);
- uint8_t *y = xd->plane[0].pre[0].buf +
+ uint8_t *const y = xd->plane[0].pre[0].buf +
(bestmv->as_mv.row) * xd->plane[0].pre[0].stride +
bestmv->as_mv.col;
- y_stride = xd->plane[0].pre[0].stride;
+ const int y_stride = xd->plane[0].pre[0].stride;
- rr = ref_mv->as_mv.row;
- rc = ref_mv->as_mv.col;
- br = bestmv->as_mv.row << 3;
- bc = bestmv->as_mv.col << 3;
- hstep = 4;
- minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) -
- ((1 << MV_MAX_BITS) - 1));
- maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) +
- ((1 << MV_MAX_BITS) - 1));
- minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) -
- ((1 << MV_MAX_BITS) - 1));
- maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) +
- ((1 << MV_MAX_BITS) - 1));
+ int rr = ref_mv->as_mv.row;
+ int rc = ref_mv->as_mv.col;
+ int br = bestmv->as_mv.row << 3;
+ int bc = bestmv->as_mv.col << 3;
+ int hstep = 4;
+ const int minc = MAX(x->mv_col_min << 3, ref_mv->as_mv.col - MV_MAX);
+ const int maxc = MIN(x->mv_col_max << 3, ref_mv->as_mv.col + MV_MAX);
+ const int minr = MAX(x->mv_row_min << 3, ref_mv->as_mv.row - MV_MAX);
+ const int maxr = MIN(x->mv_row_max << 3, ref_mv->as_mv.row + MV_MAX);
- tr = br;
- tc = bc;
+ int tr = br;
+ int tc = bc;
- offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
+ const int offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
// central mv
bestmv->as_mv.row <<= 3;
@@ -753,9 +742,6 @@ int vp9_find_best_sub_pixel_comp_tree(MACROBLOCK *x,
#undef DIST
#undef IFMVCV
#undef CHECK_BETTER
-#undef MIN
-#undef MAX
-
#undef SP
#define CHECK_BOUNDS(range) \
@@ -1588,18 +1574,12 @@ int vp9_full_search_sad_c(MACROBLOCK *x, int_mv *ref_mv,
+ mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
sad_per_bit);
- // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
- if (col_min < x->mv_col_min)
- col_min = x->mv_col_min;
-
- if (col_max > x->mv_col_max)
- col_max = x->mv_col_max;
-
- if (row_min < x->mv_row_min)
- row_min = x->mv_row_min;
-
- if (row_max > x->mv_row_max)
- row_max = x->mv_row_max;
+ // Apply further limits to prevent us looking using vectors that stretch
+ // beyond the UMV border
+ col_min = MAX(col_min, x->mv_col_min);
+ col_max = MIN(col_max, x->mv_col_max);
+ row_min = MAX(row_min, x->mv_row_min);
+ row_max = MIN(row_max, x->mv_row_max);
for (r = row_min; r < row_max; r++) {
this_mv.as_mv.row = r;
@@ -1684,18 +1664,12 @@ int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv,
+ mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
sad_per_bit);
- // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
- if (col_min < x->mv_col_min)
- col_min = x->mv_col_min;
-
- if (col_max > x->mv_col_max)
- col_max = x->mv_col_max;
-
- if (row_min < x->mv_row_min)
- row_min = x->mv_row_min;
-
- if (row_max > x->mv_row_max)
- row_max = x->mv_row_max;
+ // Apply further limits to prevent us looking using vectors that stretch
+ // beyond the UMV border
+ col_min = MAX(col_min, x->mv_col_min);
+ col_max = MIN(col_max, x->mv_col_max);
+ row_min = MAX(row_min, x->mv_row_min);
+ row_max = MIN(row_max, x->mv_row_max);
for (r = row_min; r < row_max; r++) {
this_mv.as_mv.row = r;
@@ -1813,18 +1787,12 @@ int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv,
+ mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
sad_per_bit);
- // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
- if (col_min < x->mv_col_min)
- col_min = x->mv_col_min;
-
- if (col_max > x->mv_col_max)
- col_max = x->mv_col_max;
-
- if (row_min < x->mv_row_min)
- row_min = x->mv_row_min;
-
- if (row_max > x->mv_row_max)
- row_max = x->mv_row_max;
+ // Apply further limits to prevent us looking using vectors that stretch
+ // beyond the UMV border
+ col_min = MAX(col_min, x->mv_col_min);
+ col_max = MIN(col_max, x->mv_col_max);
+ row_min = MAX(row_min, x->mv_row_min);
+ row_max = MIN(row_max, x->mv_row_max);
for (r = row_min; r < row_max; r++) {
this_mv.as_mv.row = r;