Diffstat (limited to 'vp9')
-rw-r--r--  vp9/common/vp9_blockd.h        | 10
-rw-r--r--  vp9/common/vp9_pred_common.h   |  8
-rw-r--r--  vp9/decoder/vp9_decodemv.c     | 12
-rw-r--r--  vp9/decoder/vp9_decodframe.c   | 16
-rw-r--r--  vp9/encoder/vp9_bitstream.c    |  7
-rw-r--r--  vp9/encoder/vp9_encodeframe.c  | 44
-rw-r--r--  vp9/encoder/vp9_encodeintra.c  |  2
-rw-r--r--  vp9/encoder/vp9_encodemb.c     |  6
-rw-r--r--  vp9/encoder/vp9_encodemv.c     |  2
-rw-r--r--  vp9/encoder/vp9_firstpass.c    | 20
-rw-r--r--  vp9/encoder/vp9_mbgraph.c      |  4
-rw-r--r--  vp9/encoder/vp9_onyx_if.c      | 20
-rw-r--r--  vp9/encoder/vp9_rdopt.c        | 42
-rw-r--r--  vp9/encoder/vp9_segmentation.c |  2
14 files changed, 96 insertions, 99 deletions
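
A rename of this size can land incrementally because both spellings can refer to the
same enumerator while call sites are migrated, which is why unchanged context lines in
the hunks below may still read BLOCK_SIZE_SB8X8 and friends. A minimal sketch of that
aliasing pattern, for illustration only (the enumerator list, its ordering, and the
omission of the rectangular sizes are assumptions, not quoted from this change):

  /* Illustrative only: old and new names share one value during the transition. */
  typedef enum BLOCK_SIZE_TYPE {
    BLOCK_SIZE_AB4X4,   BLOCK_4X4   = BLOCK_SIZE_AB4X4,
    BLOCK_SIZE_SB8X8,   BLOCK_8X8   = BLOCK_SIZE_SB8X8,
    BLOCK_SIZE_SB8X16,  BLOCK_8X16  = BLOCK_SIZE_SB8X16,
    BLOCK_SIZE_SB16X8,  BLOCK_16X8  = BLOCK_SIZE_SB16X8,
    BLOCK_SIZE_MB16X16, BLOCK_16X16 = BLOCK_SIZE_MB16X16,
    BLOCK_SIZE_SB32X32, BLOCK_32X32 = BLOCK_SIZE_SB32X32,
    BLOCK_SIZE_SB64X64, BLOCK_64X64 = BLOCK_SIZE_SB64X64,
    BLOCK_SIZE_TYPES    /* abridged: rectangular sizes omitted */
  } BLOCK_SIZE_TYPE;

  /* Either spelling then selects the same branch, e.g.
     (bsize < BLOCK_16X16) == (bsize < BLOCK_SIZE_MB16X16). */
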
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index 3bdee4ba5..f68c5c6ea 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -317,7 +317,7 @@ static INLINE void update_partition_context(MACROBLOCKD *xd,
const int bsl = b_width_log2(sb_size), bs = (1 << bsl) / 2;
const int bwl = b_width_log2(sb_type);
const int bhl = b_height_log2(sb_type);
- const int boffset = b_width_log2(BLOCK_SIZE_SB64X64) - bsl;
+ const int boffset = b_width_log2(BLOCK_64X64) - bsl;
const char pcval0 = ~(0xe << boffset);
const char pcval1 = ~(0xf << boffset);
const char pcvalue[2] = {pcval0, pcval1};
@@ -335,7 +335,7 @@ static INLINE int partition_plane_context(MACROBLOCKD *xd,
BLOCK_SIZE_TYPE sb_type) {
int bsl = mi_width_log2(sb_type), bs = 1 << bsl;
int above = 0, left = 0, i;
- int boffset = mi_width_log2(BLOCK_SIZE_SB64X64) - bsl;
+ int boffset = mi_width_log2(BLOCK_64X64) - bsl;
assert(mi_width_log2(sb_type) == mi_height_log2(sb_type));
assert(bsl >= 0);
@@ -371,7 +371,7 @@ static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type,
is_inter_block(mbmi))
return DCT_DCT;
- return mode2txfm_map[mbmi->sb_type < BLOCK_SIZE_SB8X8 ?
+ return mode2txfm_map[mbmi->sb_type < BLOCK_8X8 ?
mi->bmi[ib].as_mode : mbmi->mode];
}
@@ -565,8 +565,8 @@ static INLINE void foreach_predicted_block_in_plane(
// size of the predictor to use.
int pred_w, pred_h;
- if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
- assert(bsize == BLOCK_SIZE_SB8X8);
+ if (xd->mode_info_context->mbmi.sb_type < BLOCK_8X8) {
+ assert(bsize == BLOCK_8X8);
pred_w = 0;
pred_h = 0;
} else {
diff --git a/vp9/common/vp9_pred_common.h b/vp9/common/vp9_pred_common.h
index e4b6575e3..238290b41 100644
--- a/vp9/common/vp9_pred_common.h
+++ b/vp9/common/vp9_pred_common.h
@@ -110,9 +110,9 @@ unsigned char vp9_get_pred_context_tx_size(const MACROBLOCKD *xd);
static const vp9_prob *get_tx_probs(BLOCK_SIZE_TYPE bsize, uint8_t context,
const struct tx_probs *tx_probs) {
- if (bsize < BLOCK_SIZE_MB16X16)
+ if (bsize < BLOCK_16X16)
return tx_probs->p8x8[context];
- else if (bsize < BLOCK_SIZE_SB32X32)
+ else if (bsize < BLOCK_32X32)
return tx_probs->p16x16[context];
else
return tx_probs->p32x32[context];
@@ -127,9 +127,9 @@ static const vp9_prob *get_tx_probs2(const MACROBLOCKD *xd,
static void update_tx_counts(BLOCK_SIZE_TYPE bsize, uint8_t context,
TX_SIZE tx_size, struct tx_counts *tx_counts) {
- if (bsize >= BLOCK_SIZE_SB32X32)
+ if (bsize >= BLOCK_32X32)
tx_counts->p32x32[context][tx_size]++;
- else if (bsize >= BLOCK_SIZE_MB16X16)
+ else if (bsize >= BLOCK_16X16)
tx_counts->p16x16[context][tx_size]++;
else
tx_counts->p8x8[context][tx_size]++;
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index aa1ebc53e..a3e2ad39d 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -47,9 +47,9 @@ static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd,
const uint8_t context = vp9_get_pred_context_tx_size(xd);
const vp9_prob *tx_probs = get_tx_probs(bsize, context, &cm->fc.tx_probs);
TX_SIZE tx_size = vp9_read(r, tx_probs[0]);
- if (tx_size != TX_4X4 && bsize >= BLOCK_SIZE_MB16X16) {
+ if (tx_size != TX_4X4 && bsize >= BLOCK_16X16) {
tx_size += vp9_read(r, tx_probs[1]);
- if (tx_size != TX_8X8 && bsize >= BLOCK_SIZE_SB32X32)
+ if (tx_size != TX_8X8 && bsize >= BLOCK_32X32)
tx_size += vp9_read(r, tx_probs[2]);
}
@@ -63,13 +63,13 @@ static TX_SIZE read_tx_size(VP9D_COMP *pbi, TX_MODE tx_mode,
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
- if (allow_select && tx_mode == TX_MODE_SELECT && bsize >= BLOCK_SIZE_SB8X8)
+ if (allow_select && tx_mode == TX_MODE_SELECT && bsize >= BLOCK_8X8)
return read_selected_tx_size(cm, xd, bsize, r);
- else if (tx_mode >= ALLOW_32X32 && bsize >= BLOCK_SIZE_SB32X32)
+ else if (tx_mode >= ALLOW_32X32 && bsize >= BLOCK_32X32)
return TX_32X32;
- else if (tx_mode >= ALLOW_16X16 && bsize >= BLOCK_SIZE_MB16X16)
+ else if (tx_mode >= ALLOW_16X16 && bsize >= BLOCK_16X16)
return TX_16X16;
- else if (tx_mode >= ALLOW_8X8 && bsize >= BLOCK_SIZE_SB8X8)
+ else if (tx_mode >= ALLOW_8X8 && bsize >= BLOCK_8X8)
return TX_8X8;
else
return TX_4X4;
diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c
index 2c3c7f053..de76d4ee0 100644
--- a/vp9/decoder/vp9_decodframe.c
+++ b/vp9/decoder/vp9_decodframe.c
@@ -140,8 +140,8 @@ static void decode_block_intra(int plane, int block, BLOCK_SIZE_TYPE bsize,
const int mode = plane == 0 ? mi->mbmi.mode
: mi->mbmi.uv_mode;
- if (plane == 0 && mi->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
- assert(bsize == BLOCK_SIZE_SB8X8);
+ if (plane == 0 && mi->mbmi.sb_type < BLOCK_8X8) {
+ assert(bsize == BLOCK_8X8);
b_mode = mi->bmi[raster_block].as_mode;
} else {
b_mode = mode;
@@ -225,7 +225,7 @@ static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col,
vp9_reader *r, BLOCK_SIZE_TYPE bsize) {
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
- const int less8x8 = bsize < BLOCK_SIZE_SB8X8;
+ const int less8x8 = bsize < BLOCK_8X8;
MB_MODE_INFO *mbmi;
if (less8x8)
@@ -236,7 +236,7 @@ static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col,
vp9_read_mode_info(pbi, mi_row, mi_col, r);
if (less8x8)
- bsize = BLOCK_SIZE_SB8X8;
+ bsize = BLOCK_8X8;
// Has to be called after set_offsets
mbmi = &xd->mode_info_context->mbmi;
@@ -282,7 +282,7 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
if (mi_row >= pc->mi_rows || mi_col >= pc->mi_cols)
return;
- if (bsize < BLOCK_SIZE_SB8X8) {
+ if (bsize < BLOCK_8X8) {
if (xd->ab_index != 0)
return;
} else {
@@ -334,8 +334,8 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
}
// update partition context
- if (bsize >= BLOCK_SIZE_SB8X8 &&
- (bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
+ if (bsize >= BLOCK_8X8 &&
+ (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) {
set_partition_seg_context(pc, xd, mi_row, mi_col);
update_partition_context(xd, subsize, bsize);
}
@@ -597,7 +597,7 @@ static void decode_tile(VP9D_COMP *pbi, vp9_reader *r) {
vpx_memset(pc->left_seg_context, 0, sizeof(pc->left_seg_context));
for (mi_col = pc->cur_tile_mi_col_start; mi_col < pc->cur_tile_mi_col_end;
mi_col += MI_BLOCK_SIZE) {
- decode_modes_sb(pbi, mi_row, mi_col, r, BLOCK_SIZE_SB64X64);
+ decode_modes_sb(pbi, mi_row, mi_col, r, BLOCK_64X64);
}
if (pbi->do_loopfilter_inline) {
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index 5e816cd3d..a866024e8 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -203,9 +203,9 @@ static void write_selected_tx_size(const VP9_COMP *cpi, TX_SIZE tx_size,
const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
const vp9_prob *tx_probs = get_tx_probs2(xd, &cpi->common.fc.tx_probs);
vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
- if (bsize >= BLOCK_SIZE_MB16X16 && tx_size != TX_4X4) {
+ if (bsize >= BLOCK_16X16 && tx_size != TX_4X4) {
vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
- if (bsize >= BLOCK_SIZE_SB32X32 && tx_size != TX_8X8)
+ if (bsize >= BLOCK_32X32 && tx_size != TX_8X8)
vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
}
}
@@ -693,8 +693,7 @@ static void write_modes(VP9_COMP *cpi, vp9_writer* const bc,
vp9_zero(c->left_seg_context);
for (mi_col = c->cur_tile_mi_col_start; mi_col < c->cur_tile_mi_col_end;
mi_col += MI_BLOCK_SIZE, m += MI_BLOCK_SIZE)
- write_modes_sb(cpi, m, bc, tok, tok_end, mi_row, mi_col,
- BLOCK_SIZE_SB64X64);
+ write_modes_sb(cpi, m, bc, tok, tok_end, mi_row, mi_col, BLOCK_64X64);
}
}
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 762a548fe..a8f01c56a 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -362,13 +362,13 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
}
// FIXME(rbultje) I'm pretty sure this should go to the end of this block
// (i.e. after the output_enabled)
- if (bsize < BLOCK_SIZE_SB32X32) {
- if (bsize < BLOCK_SIZE_MB16X16)
+ if (bsize < BLOCK_32X32) {
+ if (bsize < BLOCK_16X16)
ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];
}
- if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_SIZE_SB8X8) {
+ if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
*x->partition_info = ctx->partition_info;
mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
@@ -802,7 +802,7 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
- c1 = BLOCK_SIZE_AB4X4;
+ c1 = BLOCK_4X4;
if (bsize >= BLOCK_SIZE_SB8X8) {
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
@@ -1155,32 +1155,32 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
// values.
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
- fill_variance_tree(&vt.split[i].split[j], BLOCK_SIZE_MB16X16);
+ fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
}
- fill_variance_tree(&vt.split[i], BLOCK_SIZE_SB32X32);
+ fill_variance_tree(&vt.split[i], BLOCK_32X32);
}
- fill_variance_tree(&vt, BLOCK_SIZE_SB64X64);
+ fill_variance_tree(&vt, BLOCK_64X64);
// Now go through the entire structure, splitting every block size until
// we get to one that's got a variance lower than our threshold, or we
// hit 8x8.
- if (!set_vt_partitioning(cpi, &vt, m, BLOCK_SIZE_SB64X64, mi_row, mi_col,
+ if (!set_vt_partitioning(cpi, &vt, m, BLOCK_64X64, mi_row, mi_col,
4)) {
for (i = 0; i < 4; ++i) {
const int x32_idx = ((i & 1) << 2);
const int y32_idx = ((i >> 1) << 2);
- if (!set_vt_partitioning(cpi, &vt.split[i], m, BLOCK_SIZE_SB32X32,
+ if (!set_vt_partitioning(cpi, &vt.split[i], m, BLOCK_32X32,
(mi_row + y32_idx), (mi_col + x32_idx), 2)) {
for (j = 0; j < 4; ++j) {
const int x16_idx = ((j & 1) << 1);
const int y16_idx = ((j >> 1) << 1);
if (!set_vt_partitioning(cpi, &vt.split[i].split[j], m,
- BLOCK_SIZE_MB16X16,
+ BLOCK_16X16,
(mi_row + y32_idx + y16_idx),
(mi_col + x32_idx + x16_idx), 1)) {
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
- set_block_size(cm, m, BLOCK_SIZE_SB8X8, mis,
+ set_block_size(cm, m, BLOCK_8X8, mis,
(mi_row + y32_idx + y16_idx + y8_idx),
(mi_col + x32_idx + x16_idx + x8_idx));
}
@@ -1217,7 +1217,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
int64_t none_dist = INT_MAX;
int chosen_rate = INT_MAX;
int64_t chosen_dist = INT_MAX;
- BLOCK_SIZE_TYPE sub_subsize = BLOCK_SIZE_AB4X4;
+ BLOCK_SIZE_TYPE sub_subsize = BLOCK_4X4;
int splits_below = 0;
BLOCK_SIZE_TYPE bs_type = m->mbmi.sb_type;
@@ -2249,13 +2249,13 @@ static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
int n;
assert(bwl < bsl && bhl < bsl);
- if (bsize == BLOCK_SIZE_SB64X64) {
- subsize = BLOCK_SIZE_SB32X32;
- } else if (bsize == BLOCK_SIZE_SB32X32) {
- subsize = BLOCK_SIZE_MB16X16;
+ if (bsize == BLOCK_64X64) {
+ subsize = BLOCK_32X32;
+ } else if (bsize == BLOCK_32X32) {
+ subsize = BLOCK_16X16;
} else {
- assert(bsize == BLOCK_SIZE_MB16X16);
- subsize = BLOCK_SIZE_SB8X8;
+ assert(bsize == BLOCK_16X16);
+ subsize = BLOCK_8X8;
}
for (n = 0; n < 4; n++) {
@@ -2656,13 +2656,13 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
TX_SIZE sz = (cm->tx_mode == TX_MODE_SELECT) ? TX_32X32 : cm->tx_mode;
// The new intra coding scheme requires no change of transform size
if (is_inter_block(&mi->mbmi)) {
- if (sz == TX_32X32 && bsize < BLOCK_SIZE_SB32X32)
+ if (sz == TX_32X32 && bsize < BLOCK_32X32)
sz = TX_16X16;
- if (sz == TX_16X16 && bsize < BLOCK_SIZE_MB16X16)
+ if (sz == TX_16X16 && bsize < BLOCK_16X16)
sz = TX_8X8;
- if (sz == TX_8X8 && bsize < BLOCK_SIZE_SB8X8)
+ if (sz == TX_8X8 && bsize < BLOCK_8X8)
sz = TX_4X4;
- } else if (bsize >= BLOCK_SIZE_SB8X8) {
+ } else if (bsize >= BLOCK_8X8) {
sz = mbmi->txfm_size;
} else {
sz = TX_4X4;
diff --git a/vp9/encoder/vp9_encodeintra.c b/vp9/encoder/vp9_encodeintra.c
index d49e53258..edbd2d909 100644
--- a/vp9/encoder/vp9_encodeintra.c
+++ b/vp9/encoder/vp9_encodeintra.c
@@ -21,7 +21,7 @@ int vp9_encode_intra(VP9_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
x->skip_encode = 0;
mbmi->mode = DC_PRED;
mbmi->ref_frame[0] = INTRA_FRAME;
- mbmi->txfm_size = use_16x16_pred ? (mbmi->sb_type >= BLOCK_SIZE_MB16X16 ?
+ mbmi->txfm_size = use_16x16_pred ? (mbmi->sb_type >= BLOCK_16X16 ?
TX_16X16 : TX_8X8) : TX_4X4;
vp9_encode_intra_block_y(&cpi->common, x, mbmi->sb_type);
return vp9_get_mb_ss(x->plane[0].src_diff);
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index 9726927cf..40b0a4e5a 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -742,11 +742,11 @@ void encode_block_intra(int plane, int block, BLOCK_SIZE_TYPE bsize,
tx_type = get_tx_type_4x4(pd->plane_type, xd, block);
scan = get_scan_4x4(tx_type);
iscan = get_iscan_4x4(tx_type);
- if (mbmi->sb_type < BLOCK_SIZE_SB8X8 && plane == 0) {
+ if (mbmi->sb_type < BLOCK_8X8 && plane == 0)
mode = xd->mode_info_context->bmi[block].as_mode;
- } else {
+ else
mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
- }
+
xoff = 4 * (block & twmask);
yoff = 4 * (block >> twl);
dst = pd->dst.buf + yoff * pd->dst.stride + xoff;
diff --git a/vp9/encoder/vp9_encodemv.c b/vp9/encoder/vp9_encodemv.c
index 2f5e16ccf..1c6fa3a3d 100644
--- a/vp9/encoder/vp9_encodemv.c
+++ b/vp9/encoder/vp9_encodemv.c
@@ -478,7 +478,7 @@ void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x,
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
int idx, idy;
- if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
+ if (mbmi->sb_type < BLOCK_8X8) {
PARTITION_INFO *pi = x->partition_info;
for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index bad10c6eb..6ba2a4fc9 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -347,17 +347,17 @@ static void zz_motion_search(VP9_COMP *cpi, MACROBLOCK *x, YV12_BUFFER_CONFIG *r
xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset;
switch (xd->mode_info_context->mbmi.sb_type) {
- case BLOCK_SIZE_SB8X8:
+ case BLOCK_8X8:
vp9_mse8x8(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
(unsigned int *)(best_motion_err));
break;
- case BLOCK_SIZE_SB16X8:
+ case BLOCK_16X8:
vp9_mse16x8(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
(unsigned int *)(best_motion_err));
break;
- case BLOCK_SIZE_SB8X16:
+ case BLOCK_8X16:
vp9_mse8x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
(unsigned int *)(best_motion_err));
@@ -403,13 +403,13 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
// override the default variance function to use MSE
switch (xd->mode_info_context->mbmi.sb_type) {
- case BLOCK_SIZE_SB8X8:
+ case BLOCK_8X8:
v_fn_ptr.vf = vp9_mse8x8;
break;
- case BLOCK_SIZE_SB16X8:
+ case BLOCK_16X8:
v_fn_ptr.vf = vp9_mse16x8;
break;
- case BLOCK_SIZE_SB8X16:
+ case BLOCK_8X16:
v_fn_ptr.vf = vp9_mse8x16;
break;
default:
@@ -549,15 +549,15 @@ void vp9_first_pass(VP9_COMP *cpi) {
if (mb_col * 2 + 1 < cm->mi_cols) {
if (mb_row * 2 + 1 < cm->mi_rows) {
- xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_MB16X16;
+ xd->mode_info_context->mbmi.sb_type = BLOCK_16X16;
} else {
- xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB16X8;
+ xd->mode_info_context->mbmi.sb_type = BLOCK_16X8;
}
} else {
if (mb_row * 2 + 1 < cm->mi_rows) {
- xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB8X16;
+ xd->mode_info_context->mbmi.sb_type = BLOCK_8X16;
} else {
- xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB8X8;
+ xd->mode_info_context->mbmi.sb_type = BLOCK_8X8;
}
}
xd->mode_info_context->mbmi.ref_frame[0] = INTRA_FRAME;
diff --git a/vp9/encoder/vp9_mbgraph.c b/vp9/encoder/vp9_mbgraph.c
index 173e4ca8f..154d31af6 100644
--- a/vp9/encoder/vp9_mbgraph.c
+++ b/vp9/encoder/vp9_mbgraph.c
@@ -63,7 +63,7 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi,
}
vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv);
- vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
+ vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);
best_err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride,
INT_MAX);
@@ -255,7 +255,7 @@ static void update_mbgraph_frame_stats(VP9_COMP *cpi,
xd->plane[0].pre[0].stride = buf->y_stride;
xd->plane[1].dst.stride = buf->uv_stride;
xd->mode_info_context = &mi_local;
- mi_local.mbmi.sb_type = BLOCK_SIZE_MB16X16;
+ mi_local.mbmi.sb_type = BLOCK_16X16;
mi_local.mbmi.ref_frame[0] = LAST_FRAME;
mi_local.mbmi.ref_frame[1] = NONE;
diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index 57584526e..f2fa552f2 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -720,7 +720,7 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
sf->reduce_first_step_size = 0;
sf->auto_mv_step_size = 0;
sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
- sf->comp_inter_joint_search_thresh = BLOCK_SIZE_AB4X4;
+ sf->comp_inter_joint_search_thresh = BLOCK_4X4;
sf->adaptive_rd_thresh = 0;
sf->use_lastframe_partitioning = 0;
sf->tx_size_search_method = USE_FULL_RD;
@@ -750,8 +750,8 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
sf->use_uv_intra_rd_estimate = 0;
sf->using_small_partition_info = 0;
// Skip any mode not chosen at size < X for all sizes > X
- // Hence BLOCK_SIZE_SB64X64 (skip is off)
- sf->unused_mode_skip_lvl = BLOCK_SIZE_SB64X64;
+ // Hence BLOCK_64X64 (skip is off)
+ sf->unused_mode_skip_lvl = BLOCK_64X64;
#if CONFIG_MULTIPLE_ARF
// Switch segmentation off.
@@ -787,7 +787,7 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
cpi->common.show_frame == 0);
sf->disable_splitmv =
(MIN(cpi->common.width, cpi->common.height) >= 720)? 1 : 0;
- sf->unused_mode_skip_lvl = BLOCK_SIZE_SB32X32;
+ sf->unused_mode_skip_lvl = BLOCK_32X32;
sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH |
FLAG_SKIP_INTRA_BESTINTER |
FLAG_SKIP_COMP_BESTINTRA;
@@ -809,7 +809,7 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
sf->use_lastframe_partitioning = 1;
sf->adjust_partitioning_from_last_frame = 1;
sf->last_partitioning_redo_frequency = 3;
- sf->unused_mode_skip_lvl = BLOCK_SIZE_SB32X32;
+ sf->unused_mode_skip_lvl = BLOCK_32X32;
sf->tx_size_search_method = ((cpi->common.frame_type == KEY_FRAME ||
cpi->common.intra_only ||
cpi->common.show_frame == 0) ?
@@ -848,7 +848,7 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
if (speed == 4) {
sf->comp_inter_joint_search_thresh = BLOCK_SIZE_TYPES;
sf->use_one_partition_size_always = 1;
- sf->always_this_block_size = BLOCK_SIZE_MB16X16;
+ sf->always_this_block_size = BLOCK_16X16;
sf->tx_size_search_method = ((cpi->common.frame_type == KEY_FRAME ||
cpi->common.intra_only ||
cpi->common.show_frame == 0) ?
@@ -869,15 +869,15 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
/*
if (speed == 2) {
sf->first_step = 0;
- sf->comp_inter_joint_search_thresh = BLOCK_SIZE_SB8X8;
+ sf->comp_inter_joint_search_thresh = BLOCK_8X8;
sf->use_max_partition_size = 1;
- sf->max_partition_size = BLOCK_SIZE_MB16X16;
+ sf->max_partition_size = BLOCK_16X16;
}
if (speed == 3) {
sf->first_step = 0;
- sf->comp_inter_joint_search_thresh = BLOCK_SIZE_SB8X8;
+ sf->comp_inter_joint_search_thresh = BLOCK_8X8;
sf->use_min_partition_size = 1;
- sf->min_partition_size = BLOCK_SIZE_SB8X8;
+ sf->min_partition_size = BLOCK_8X8;
}
*/
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index b110cd985..61ced9baf 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -453,7 +453,7 @@ static void model_rd_for_sb_y_tx(VP9_COMP *cpi, BLOCK_SIZE_TYPE bsize,
int *out_rate_sum, int64_t *out_dist_sum,
int *out_skip) {
int t = 4, j, k;
- BLOCK_SIZE_TYPE bs = BLOCK_SIZE_AB4X4;
+ BLOCK_SIZE_TYPE bs = BLOCK_4X4;
struct macroblock_plane *const p = &x->plane[0];
struct macroblockd_plane *const pd = &xd->plane[0];
const int width = plane_block_width(bsize, pd);
@@ -838,7 +838,7 @@ static void choose_largest_txfm_size(VP9_COMP *cpi, MACROBLOCK *x,
int64_t ref_best_rd,
BLOCK_SIZE_TYPE bs) {
const TX_SIZE max_txfm_size = TX_32X32
- - (bs < BLOCK_SIZE_SB32X32) - (bs < BLOCK_SIZE_MB16X16);
+ - (bs < BLOCK_32X32) - (bs < BLOCK_16X16);
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
@@ -869,7 +869,7 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
int64_t tx_cache[TX_MODES],
BLOCK_SIZE_TYPE bs) {
const TX_SIZE max_tx_size = TX_32X32
- - (bs < BLOCK_SIZE_SB32X32) - (bs < BLOCK_SIZE_MB16X16);
+ - (bs < BLOCK_32X32) - (bs < BLOCK_16X16);
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
@@ -974,7 +974,7 @@ static void choose_txfm_size_from_modelrd(VP9_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE_TYPE bs,
int *model_used) {
const TX_SIZE max_txfm_size = TX_32X32
- - (bs < BLOCK_SIZE_SB32X32) - (bs < BLOCK_SIZE_MB16X16);
+ - (bs < BLOCK_32X32) - (bs < BLOCK_16X16);
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
@@ -1098,48 +1098,46 @@ static void super_block_yrd(VP9_COMP *cpi,
if (cpi->sf.tx_size_search_method == USE_LARGESTINTRA_MODELINTER &&
mbmi->ref_frame[0] > INTRA_FRAME) {
int model_used[TX_SIZES] = {1, 1, 1, 1};
- if (bs >= BLOCK_SIZE_SB32X32) {
- if (model_used[TX_32X32]) {
+ if (bs >= BLOCK_32X32) {
+ if (model_used[TX_32X32])
model_rd_for_sb_y_tx(cpi, bs, TX_32X32, x, xd,
&r[TX_32X32][0], &d[TX_32X32], &s[TX_32X32]);
- } else {
+ else
super_block_yrd_for_txfm(cm, x, &r[TX_32X32][0], &d[TX_32X32],
&s[TX_32X32], &sse[TX_32X32], INT64_MAX,
bs, TX_32X32);
- }
}
- if (bs >= BLOCK_SIZE_MB16X16) {
- if (model_used[TX_16X16]) {
+ if (bs >= BLOCK_16X16) {
+ if (model_used[TX_16X16])
model_rd_for_sb_y_tx(cpi, bs, TX_16X16, x, xd,
&r[TX_16X16][0], &d[TX_16X16], &s[TX_16X16]);
- } else {
+ else
super_block_yrd_for_txfm(cm, x, &r[TX_16X16][0], &d[TX_16X16],
&s[TX_16X16], &sse[TX_16X16], INT64_MAX,
bs, TX_16X16);
- }
}
- if (model_used[TX_8X8]) {
+ if (model_used[TX_8X8])
model_rd_for_sb_y_tx(cpi, bs, TX_8X8, x, xd,
&r[TX_8X8][0], &d[TX_8X8], &s[TX_8X8]);
- } else {
+ else
super_block_yrd_for_txfm(cm, x, &r[TX_8X8][0], &d[TX_8X8], &s[TX_8X8],
&sse[TX_8X8], INT64_MAX, bs, TX_8X8);
- }
- if (model_used[TX_4X4]) {
+
+ if (model_used[TX_4X4])
model_rd_for_sb_y_tx(cpi, bs, TX_4X4, x, xd,
&r[TX_4X4][0], &d[TX_4X4], &s[TX_4X4]);
- } else {
+ else
super_block_yrd_for_txfm(cm, x, &r[TX_4X4][0], &d[TX_4X4], &s[TX_4X4],
&sse[TX_4X4], INT64_MAX, bs, TX_4X4);
- }
+
choose_txfm_size_from_modelrd(cpi, x, r, rate, d, distortion, s,
skip, sse, ref_best_rd, bs, model_used);
} else {
- if (bs >= BLOCK_SIZE_SB32X32)
+ if (bs >= BLOCK_32X32)
super_block_yrd_for_txfm(cm, x, &r[TX_32X32][0], &d[TX_32X32],
&s[TX_32X32], &sse[TX_32X32], ref_best_rd,
bs, TX_32X32);
- if (bs >= BLOCK_SIZE_MB16X16)
+ if (bs >= BLOCK_16X16)
super_block_yrd_for_txfm(cm, x, &r[TX_16X16][0], &d[TX_16X16],
&s[TX_16X16], &sse[TX_16X16], ref_best_rd,
bs, TX_16X16);
@@ -3946,8 +3944,8 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
}
/* keep record of best txfm size */
- if (bsize < BLOCK_SIZE_SB32X32) {
- if (bsize < BLOCK_SIZE_MB16X16) {
+ if (bsize < BLOCK_32X32) {
+ if (bsize < BLOCK_16X16) {
if (this_mode == SPLITMV || this_mode == I4X4_PRED)
tx_cache[ALLOW_8X8] = tx_cache[ONLY_4X4];
tx_cache[ALLOW_16X16] = tx_cache[ALLOW_8X8];
diff --git a/vp9/encoder/vp9_segmentation.c b/vp9/encoder/vp9_segmentation.c
index e1fa72d0d..9564edc84 100644
--- a/vp9/encoder/vp9_segmentation.c
+++ b/vp9/encoder/vp9_segmentation.c
@@ -248,7 +248,7 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
mi_col += 8, mi += 8)
count_segs_sb(cpi, mi, no_pred_segcounts, temporal_predictor_count,
- t_unpred_seg_counts, mi_row, mi_col, BLOCK_SIZE_SB64X64);
+ t_unpred_seg_counts, mi_row, mi_col, BLOCK_64X64);
}
}