path: root/vp9/encoder/vp9_encodeframe.c
Diffstat (limited to 'vp9/encoder/vp9_encodeframe.c')
-rw-r--r--  vp9/encoder/vp9_encodeframe.c  375
1 file changed, 278 insertions(+), 97 deletions(-)
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 1ca3c2881..61a5022ec 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -1048,9 +1048,9 @@ static void rd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
(mi_col <= 1) || (mi_col >= (cm->mi_cols - 2));
if (!is_edge && (complexity > 128))
x->rdmult += ((x->rdmult * (complexity - 128)) / 256);
- } else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
+ } else if (aq_mode == CYCLIC_REFRESH_AQ) {
const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
+ : cm->last_frame_seg_map;
// If segment 1, use rdmult for that segment.
if (vp9_get_segment_id(cm, map, bsize, mi_row, mi_col))
x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
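
For context, the COMPLEXITY_AQ branch kept above scales x->rdmult up in proportion to how far the block complexity exceeds the neutral value 128, and only for blocks away from the frame edge. A minimal standalone sketch of that arithmetic (illustrative only, not part of this patch):

/* Sketch of the COMPLEXITY_AQ rdmult scaling shown above. With
 * complexity = 192 and rdmult = 1000 this returns
 * 1000 + 1000 * (192 - 128) / 256 = 1250, i.e. a 25% larger multiplier. */
static int complexity_scaled_rdmult(int rdmult, int complexity) {
  if (complexity > 128)
    rdmult += (rdmult * (complexity - 128)) / 256;
  return rdmult;
}
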
@@ -1076,8 +1076,7 @@ static void rd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
vp9_clear_system_state();
*totalrate = (int)round(*totalrate * rdmult_ratio);
}
- } else if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) ||
- (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)) {
+ } else if (aq_mode == COMPLEXITY_AQ || aq_mode == CYCLIC_REFRESH_AQ) {
x->rdmult = orig_rdmult;
}
}
@@ -1365,6 +1364,69 @@ static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
}
}
+static void constrain_copy_partitioning(VP9_COMP *const cpi,
+ const TileInfo *const tile,
+ MODE_INFO **mi_8x8,
+ MODE_INFO **prev_mi_8x8,
+ int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ const int mis = cm->mi_stride;
+ const int row8x8_remaining = tile->mi_row_end - mi_row;
+ const int col8x8_remaining = tile->mi_col_end - mi_col;
+ MODE_INFO *const mi_upper_left = cm->mi + mi_row * mis + mi_col;
+ const int bh = num_8x8_blocks_high_lookup[bsize];
+ const int bw = num_8x8_blocks_wide_lookup[bsize];
+ int block_row, block_col;
+
+ assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
+
+ // Check if the SB64 is entirely inside the image ("in image").
+ if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
+ (row8x8_remaining >= MI_BLOCK_SIZE)) {
+ for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
+ for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
+ const int index = block_row * mis + block_col;
+ MODE_INFO *prev_mi = prev_mi_8x8[index];
+ const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
+ // Use previous partition if block size is not larger than bsize.
+ if (prev_mi && sb_type <= bsize) {
+ int block_row2, block_col2;
+ for (block_row2 = 0; block_row2 < bh; ++block_row2) {
+ for (block_col2 = 0; block_col2 < bw; ++block_col2) {
+ const int index2 = (block_row + block_row2) * mis +
+ block_col + block_col2;
+ prev_mi = prev_mi_8x8[index2];
+ if (prev_mi) {
+ const ptrdiff_t offset = prev_mi - cm->prev_mi;
+ mi_8x8[index2] = cm->mi + offset;
+ mi_8x8[index2]->mbmi.sb_type = prev_mi->mbmi.sb_type;
+ }
+ }
+ }
+ } else {
+ // Otherwise, use fixed partition of size bsize.
+ mi_8x8[index] = mi_upper_left + index;
+ mi_8x8[index]->mbmi.sb_type = bsize;
+ }
+ }
+ }
+ } else {
+ // Else this is a partial SB64, copy previous partition.
+ for (block_row = 0; block_row < 8; ++block_row) {
+ for (block_col = 0; block_col < 8; ++block_col) {
+ MODE_INFO *const prev_mi = prev_mi_8x8[block_row * mis + block_col];
+ const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
+ if (prev_mi) {
+ const ptrdiff_t offset = prev_mi - cm->prev_mi;
+ mi_8x8[block_row * mis + block_col] = cm->mi + offset;
+ mi_8x8[block_row * mis + block_col]->mbmi.sb_type = sb_type;
+ }
+ }
+ }
+ }
+}
+
static void copy_partitioning(VP9_COMMON *cm, MODE_INFO **mi_8x8,
MODE_INFO **prev_mi_8x8) {
const int mis = cm->mi_stride;
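
The new constrain_copy_partitioning() above reuses the previous frame's partitioning for an in-image SB64 only where the previous block size does not exceed the given bsize cap (larger blocks fall back to a fixed bsize partition), and it carries each previous-frame MODE_INFO over by preserving its array offset. A minimal sketch of that pointer remapping (hypothetical helper name, assuming the surrounding vp9 types; not part of this patch):

/* Map an entry of the previous frame's MODE_INFO array to the slot with the
 * same offset in the current frame's array, mirroring the
 * "cm->mi + (prev_mi - cm->prev_mi)" pattern used above. */
static MODE_INFO *remap_prev_mode_info(MODE_INFO *mi_base,
                                       const MODE_INFO *prev_mi_base,
                                       const MODE_INFO *prev_mi_entry) {
  const ptrdiff_t offset = prev_mi_entry - prev_mi_base;
  return mi_base + offset;
}
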
@@ -1384,6 +1446,125 @@ static void copy_partitioning(VP9_COMMON *cm, MODE_INFO **mi_8x8,
}
}
+const struct {
+ int row;
+ int col;
+} coord_lookup[16] = {
+ // 32x32 index = 0
+ {0, 0}, {0, 2}, {2, 0}, {2, 2},
+ // 32x32 index = 1
+ {0, 4}, {0, 6}, {2, 4}, {2, 6},
+ // 32x32 index = 2
+ {4, 0}, {4, 2}, {6, 0}, {6, 2},
+ // 32x32 index = 3
+ {4, 4}, {4, 6}, {6, 4}, {6, 6},
+};
+
+static void set_source_var_based_partition(VP9_COMP *cpi,
+ const TileInfo *const tile,
+ MODE_INFO **mi_8x8,
+ int mi_row, int mi_col) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *x = &cpi->mb;
+ const int mis = cm->mi_stride;
+ int row8x8_remaining = tile->mi_row_end - mi_row;
+ int col8x8_remaining = tile->mi_col_end - mi_col;
+ int r, c;
+ MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
+
+ assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
+
+ // In-image SB64
+ if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
+ (row8x8_remaining >= MI_BLOCK_SIZE)) {
+ const int src_stride = x->plane[0].src.stride;
+ const int pre_stride = cpi->Last_Source->y_stride;
+ const uint8_t *src = x->plane[0].src.buf;
+ const int pre_offset = (mi_row * MI_SIZE) * pre_stride +
+ (mi_col * MI_SIZE);
+ const uint8_t *pre_src = cpi->Last_Source->y_buffer + pre_offset;
+ const int thr_32x32 = cpi->sf.source_var_thresh;
+ const int thr_64x64 = thr_32x32 << 1;
+ int i, j;
+ int index;
+ diff d32[4];
+ int use16x16 = 0;
+
+ for (i = 0; i < 4; i++) {
+ diff d16[4];
+
+ for (j = 0; j < 4; j++) {
+ int b_mi_row = coord_lookup[i * 4 + j].row;
+ int b_mi_col = coord_lookup[i * 4 + j].col;
+ int b_offset = b_mi_row * MI_SIZE * src_stride +
+ b_mi_col * MI_SIZE;
+
+ vp9_get_sse_sum_16x16(src + b_offset,
+ src_stride,
+ pre_src + b_offset,
+ pre_stride, &d16[j].sse, &d16[j].sum);
+
+ d16[j].var = d16[j].sse -
+ (((uint32_t)d16[j].sum * d16[j].sum) >> 8);
+
+ index = b_mi_row * mis + b_mi_col;
+ mi_8x8[index] = mi_upper_left + index;
+ mi_8x8[index]->mbmi.sb_type = BLOCK_16X16;
+
+ // TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
+ // size to further improve quality.
+ }
+
+ if (d16[0].var < thr_32x32 && d16[1].var < thr_32x32 &&
+ d16[2].var < thr_32x32 && d16[3].var < thr_32x32) {
+ d32[i].sse = d16[0].sse;
+ d32[i].sum = d16[0].sum;
+
+ for (j = 1; j < 4; j++) {
+ d32[i].sse += d16[j].sse;
+ d32[i].sum += d16[j].sum;
+ }
+
+ d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10);
+
+ index = coord_lookup[i*4].row * mis + coord_lookup[i*4].col;
+ mi_8x8[index] = mi_upper_left + index;
+ mi_8x8[index]->mbmi.sb_type = BLOCK_32X32;
+
+ if (!((cm->current_video_frame - 1) %
+ cpi->sf.search_type_check_frequency))
+ cpi->use_large_partition_rate += 1;
+ } else {
+ use16x16 = 1;
+ }
+ }
+
+ if (!use16x16) {
+ if (d32[0].var < thr_64x64 && d32[1].var < thr_64x64 &&
+ d32[2].var < thr_64x64 && d32[3].var < thr_64x64) {
+ mi_8x8[0] = mi_upper_left;
+ mi_8x8[0]->mbmi.sb_type = BLOCK_64X64;
+ }
+ }
+ } else { // partial in-image SB64
+ BLOCK_SIZE bsize = BLOCK_16X16;
+ int bh = num_8x8_blocks_high_lookup[bsize];
+ int bw = num_8x8_blocks_wide_lookup[bsize];
+
+ for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
+ for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
+ int index = r * mis + c;
+ // Find a partition size that fits
+ bsize = find_partition_size(bsize,
+ (row8x8_remaining - r),
+ (col8x8_remaining - c), &bh, &bw);
+ mi_8x8[index] = mi_upper_left + index;
+ mi_8x8[index]->mbmi.sb_type = bsize;
+ }
+ }
+ }
+}
+
static int sb_has_motion(const VP9_COMMON *cm, MODE_INFO **prev_mi_8x8) {
const int mis = cm->mi_stride;
int block_row, block_col;
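
The new source-variance partitioning above works on 16x16 blocks inside each fully in-image SB64: coord_lookup[] lists their mi offsets grouped by 32x32 quadrant, and each block's variance is derived from the SSE/sum pair returned by vp9_get_sse_sum_16x16() as var = sse - sum*sum/N (N = 256 pixels for a 16x16 block, hence >>8; N = 1024 for 32x32, hence >>10). If all four 16x16 variances in a quadrant fall below sf.source_var_thresh, the quadrant is merged to 32x32; if all four quadrants then stay below twice that threshold, the whole SB becomes 64x64. A minimal sketch of the two ingredients (illustrative only, assuming standard integer types; not part of this patch):

/* Equivalent arithmetic for coord_lookup[]: q selects the 32x32 quadrant,
 * k the 16x16 block inside it; each 16x16 block spans 2 mi (8x8) units. */
static void coord_from_index(int i, int *mi_row, int *mi_col) {
  const int q = i >> 2;  /* 0..3: 32x32 quadrant */
  const int k = i & 3;   /* 0..3: 16x16 block within the quadrant */
  *mi_row = (q >> 1) * 4 + (k >> 1) * 2;
  *mi_col = (q & 1) * 4 + (k & 1) * 2;
}

/* Block variance from an SSE/sum pair: sum of squared deviations,
 * sse - sum*sum/N. log2_pixels is 8 for a 16x16 block, 10 for 32x32. */
static int64_t block_variance(int64_t sse, int64_t sum, int log2_pixels) {
  return sse - ((sum * sum) >> log2_pixels);
}
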
@@ -2297,6 +2478,7 @@ static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
int mi_row, TOKENEXTRA **tp) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ SPEED_FEATURES *const sf = &cpi->sf;
int mi_col;
// Initialize the left context for the new SB row
@@ -2312,7 +2494,7 @@ static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
BLOCK_SIZE i;
MACROBLOCK *x = &cpi->mb;
- if (cpi->sf.adaptive_pred_interp_filter) {
+ if (sf->adaptive_pred_interp_filter) {
for (i = BLOCK_4X4; i < BLOCK_8X8; ++i) {
const int num_4x4_w = num_4x4_blocks_wide_lookup[i];
const int num_4x4_h = num_4x4_blocks_high_lookup[i];
@@ -2326,64 +2508,69 @@ static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
vp9_zero(cpi->mb.pred_mv);
- if ((cpi->sf.partition_search_type == SEARCH_PARTITION &&
- cpi->sf.use_lastframe_partitioning) ||
- cpi->sf.partition_search_type == FIXED_PARTITION ||
- cpi->sf.partition_search_type == VAR_BASED_PARTITION ||
- cpi->sf.partition_search_type == VAR_BASED_FIXED_PARTITION) {
+ if ((sf->partition_search_type == SEARCH_PARTITION &&
+ sf->use_lastframe_partitioning) ||
+ sf->partition_search_type == FIXED_PARTITION ||
+ sf->partition_search_type == VAR_BASED_PARTITION ||
+ sf->partition_search_type == VAR_BASED_FIXED_PARTITION) {
const int idx_str = cm->mi_stride * mi_row + mi_col;
MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
cpi->mb.source_variance = UINT_MAX;
- if (cpi->sf.partition_search_type == FIXED_PARTITION) {
+ if (sf->partition_search_type == FIXED_PARTITION) {
set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col,
- cpi->sf.always_this_block_size);
+ sf->always_this_block_size);
rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
- } else if (cpi->sf.partition_search_type == VAR_BASED_FIXED_PARTITION) {
+ } else if (sf->partition_search_type == VAR_BASED_FIXED_PARTITION) {
BLOCK_SIZE bsize;
set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
bsize = get_rd_var_based_fixed_partition(cpi, mi_row, mi_col);
set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, bsize);
rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
- } else if (cpi->sf.partition_search_type == VAR_BASED_PARTITION) {
+ } else if (sf->partition_search_type == VAR_BASED_PARTITION) {
choose_partitioning(cpi, tile, mi_row, mi_col);
rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
} else {
if ((cm->current_video_frame
- % cpi->sf.last_partitioning_redo_frequency) == 0
+ % sf->last_partitioning_redo_frequency) == 0
|| cm->prev_mi == 0
|| cm->show_frame == 0
|| cm->frame_type == KEY_FRAME
|| cpi->rc.is_src_frame_alt_ref
- || ((cpi->sf.use_lastframe_partitioning ==
+ || ((sf->use_lastframe_partitioning ==
LAST_FRAME_PARTITION_LOW_MOTION) &&
sb_has_motion(cm, prev_mi_8x8))) {
// If required set upper and lower partition size limits
- if (cpi->sf.auto_min_max_partition_size) {
+ if (sf->auto_min_max_partition_size) {
set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
rd_auto_partition_range(cpi, tile, mi_row, mi_col,
- &cpi->sf.min_partition_size,
- &cpi->sf.max_partition_size);
+ &sf->min_partition_size,
+ &sf->max_partition_size);
}
rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX);
} else {
- copy_partitioning(cm, mi_8x8, prev_mi_8x8);
+ if (sf->constrain_copy_partition &&
+ sb_has_motion(cm, prev_mi_8x8))
+ constrain_copy_partitioning(cpi, tile, mi_8x8, prev_mi_8x8,
+ mi_row, mi_col, BLOCK_16X16);
+ else
+ copy_partitioning(cm, mi_8x8, prev_mi_8x8);
rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
}
}
} else {
// If required set upper and lower partition size limits
- if (cpi->sf.auto_min_max_partition_size) {
+ if (sf->auto_min_max_partition_size) {
set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
rd_auto_partition_range(cpi, tile, mi_row, mi_col,
- &cpi->sf.min_partition_size,
- &cpi->sf.max_partition_size);
+ &sf->min_partition_size,
+ &sf->max_partition_size);
}
rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX);
@@ -2448,19 +2635,6 @@ static int check_dual_ref_flags(VP9_COMP *cpi) {
}
}
-static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) {
- int x, y;
-
- for (y = 0; y < ymbs; y++) {
- for (x = 0; x < xmbs; x++) {
- if (!mi_8x8[y * mis + x]->mbmi.skip)
- return 0;
- }
- }
-
- return 1;
-}
-
static void reset_skip_txfm_size(VP9_COMMON *cm, TX_SIZE txfm_max) {
int mi_row, mi_col;
const int mis = cm->mi_stride;
@@ -2911,12 +3085,7 @@ static void nonrd_use_partition(VP9_COMP *cpi,
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
- if (bsize >= BLOCK_8X8) {
- subsize = mi_8x8[0]->mbmi.sb_type;
- } else {
- subsize = BLOCK_4X4;
- }
-
+ subsize = (bsize >= BLOCK_8X8) ? mi_8x8[0]->mbmi.sb_type : BLOCK_4X4;
partition = partition_lookup[bsl][subsize];
switch (partition) {
@@ -3019,10 +3188,7 @@ static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
const int idx_str = cm->mi_stride * mi_row + mi_col;
MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
-
- BLOCK_SIZE bsize = cpi->sf.partition_search_type == FIXED_PARTITION ?
- cpi->sf.always_this_block_size :
- get_nonrd_var_based_fixed_partition(cpi, mi_row, mi_col);
+ BLOCK_SIZE bsize;
cpi->mb.source_variance = UINT_MAX;
vp9_zero(cpi->mb.pred_mv);
@@ -3034,8 +3200,17 @@ static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
1, &dummy_rate, &dummy_dist);
break;
+ case SOURCE_VAR_BASED_PARTITION:
+ set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
+ set_source_var_based_partition(cpi, tile, mi_8x8, mi_row, mi_col);
+ nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
+ 1, &dummy_rate, &dummy_dist);
+ break;
case VAR_BASED_FIXED_PARTITION:
case FIXED_PARTITION:
+ bsize = cpi->sf.partition_search_type == FIXED_PARTITION ?
+ cpi->sf.always_this_block_size :
+ get_nonrd_var_based_fixed_partition(cpi, mi_row, mi_col);
set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, bsize);
nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
1, &dummy_rate, &dummy_dist);
@@ -3058,53 +3233,42 @@ static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
// end RTC play code
static void encode_frame_internal(VP9_COMP *cpi) {
- int mi_row;
+ SPEED_FEATURES *const sf = &cpi->sf;
MACROBLOCK *const x = &cpi->mb;
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
-// fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
-// cpi->common.current_video_frame, cpi->common.show_frame,
-// cm->frame_type);
-
xd->mi = cm->mi_grid_visible;
xd->mi[0] = cm->mi;
vp9_zero(cm->counts);
vp9_zero(cpi->coef_counts);
vp9_zero(cpi->tx_stepdown_count);
+ vp9_zero(cpi->rd_comp_pred_diff);
+ vp9_zero(cpi->rd_filter_diff);
+ vp9_zero(cpi->rd_tx_select_diff);
+ vp9_zero(cpi->rd_tx_select_threshes);
- // Set frame level transform size use case
cm->tx_mode = select_tx_mode(cpi);
- cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0
- && cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
+ cpi->mb.e_mbd.lossless = cm->base_qindex == 0 &&
+ cm->y_dc_delta_q == 0 &&
+ cm->uv_dc_delta_q == 0 &&
+ cm->uv_ac_delta_q == 0;
switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless);
vp9_frame_init_quantizer(cpi);
vp9_initialize_rd_consts(cpi);
vp9_initialize_me_consts(cpi, cm->base_qindex);
-
- if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
- // Initialize encode frame context.
- init_encode_frame_mb_context(cpi);
-
- // Build a frame level activity map
- build_activity_map(cpi);
- }
-
- // Re-initialize encode frame context.
init_encode_frame_mb_context(cpi);
- vp9_zero(cpi->rd_comp_pred_diff);
- vp9_zero(cpi->rd_filter_diff);
- vp9_zero(cpi->rd_tx_select_diff);
- vp9_zero(cpi->rd_tx_select_threshes);
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
+ build_activity_map(cpi);
- set_prev_mi(cm);
+ cm->prev_mi = get_prev_mi(cm);
- if (cpi->sf.use_nonrd_pick_mode) {
+ if (sf->use_nonrd_pick_mode) {
// Initialize internal buffer pointers for rtc coding, where non-RD
// mode decision is used and hence no buffer pointer swap needed.
int i;
@@ -3119,6 +3283,29 @@ static void encode_frame_internal(VP9_COMP *cpi) {
p[i].eobs = ctx->eobs_pbuf[i][0];
}
vp9_zero(x->zcoeff_blk);
+
+ if (cpi->sf.partition_search_type == SOURCE_VAR_BASED_PARTITION &&
+ cm->current_video_frame > 0) {
+ int check_freq = cpi->sf.search_type_check_frequency;
+
+ if ((cm->current_video_frame - 1) % check_freq == 0) {
+ cpi->use_large_partition_rate = 0;
+ }
+
+ if ((cm->current_video_frame - 1) % check_freq == 1) {
+ const int mbs_in_b32x32 = 1 << ((b_width_log2_lookup[BLOCK_32X32] -
+ b_width_log2_lookup[BLOCK_16X16]) +
+ (b_height_log2_lookup[BLOCK_32X32] -
+ b_height_log2_lookup[BLOCK_16X16]));
+ cpi->use_large_partition_rate = cpi->use_large_partition_rate * 100 *
+ mbs_in_b32x32 / cm->MBs;
+ }
+
+ if ((cm->current_video_frame - 1) % check_freq >= 1) {
+ if (cpi->use_large_partition_rate < 15)
+ cpi->sf.partition_search_type = FIXED_PARTITION;
+ }
+ }
}
{
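
The SOURCE_VAR_BASED_PARTITION bookkeeping above samples how often the variance test produced 32x32-or-larger partitions and, one frame later, converts the count into a percentage of the frame's 16x16 macroblocks: mbs_in_b32x32 is 1 << (1 + 1) = 4, so the rate is count * 100 * 4 / cm->MBs. As a worked example with assumed numbers (not from the patch): for a 640x480 frame, cm->MBs = 40 * 30 = 1200, so 30 merged 32x32 blocks give 30 * 100 * 4 / 1200 = 10%, which is below the 15% cutoff and flips partition_search_type to FIXED_PARTITION until the next check.
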
@@ -3136,12 +3323,13 @@ static void encode_frame_internal(VP9_COMP *cpi) {
for (tile_col = 0; tile_col < tile_cols; tile_col++) {
TileInfo tile;
TOKENEXTRA *tp_old = tp;
+ int mi_row;
// For each row of SBs in the frame
vp9_tile_init(&tile, cm, tile_row, tile_col);
for (mi_row = tile.mi_row_start;
mi_row < tile.mi_row_end; mi_row += MI_BLOCK_SIZE) {
- if (cpi->sf.use_nonrd_pick_mode && cm->frame_type != KEY_FRAME)
+ if (sf->use_nonrd_pick_mode && cm->frame_type != KEY_FRAME)
encode_nonrd_sb_row(cpi, &tile, mi_row, &tp);
else
encode_rd_sb_row(cpi, &tile, mi_row, &tp);
@@ -3156,18 +3344,18 @@ static void encode_frame_internal(VP9_COMP *cpi) {
cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
}
- if (cpi->sf.skip_encode_sb) {
+ if (sf->skip_encode_sb) {
int j;
unsigned int intra_count = 0, inter_count = 0;
for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
intra_count += cm->counts.intra_inter[j][0];
inter_count += cm->counts.intra_inter[j][1];
}
- cpi->sf.skip_encode_frame = (intra_count << 2) < inter_count &&
- cm->frame_type != KEY_FRAME &&
- cm->show_frame;
+ sf->skip_encode_frame = (intra_count << 2) < inter_count &&
+ cm->frame_type != KEY_FRAME &&
+ cm->show_frame;
} else {
- cpi->sf.skip_encode_frame = 0;
+ sf->skip_encode_frame = 0;
}
#if 0
@@ -3201,33 +3389,31 @@ void vp9_encode_frame(VP9_COMP *cpi) {
if (cpi->sf.frame_parameter_update) {
int i;
- REFERENCE_MODE reference_mode;
- /*
- * This code does a single RD pass over the whole frame assuming
- * either compound, single or hybrid prediction as per whatever has
- * worked best for that type of frame in the past.
- * It also predicts whether another coding mode would have worked
- * better that this coding mode. If that is the case, it remembers
- * that for subsequent frames.
- * It does the same analysis for transform size selection also.
- */
+
+ // This code does a single RD pass over the whole frame assuming
+ // either compound, single or hybrid prediction as per whatever has
+ // worked best for that type of frame in the past.
+ // It also predicts whether another coding mode would have worked
+ // better than this coding mode. If that is the case, it remembers
+ // that for subsequent frames.
+ // It does the same analysis for transform size selection also.
const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
const int64_t *mode_thresh = cpi->rd_prediction_type_threshes[frame_type];
const int64_t *filter_thresh = cpi->rd_filter_threshes[frame_type];
/* prediction (compound, single or hybrid) mode selection */
- if (frame_type == 3 || !cm->allow_comp_inter_inter)
- reference_mode = SINGLE_REFERENCE;
+ if (frame_type == ALTREF_FRAME || !cm->allow_comp_inter_inter)
+ cm->reference_mode = SINGLE_REFERENCE;
else if (mode_thresh[COMPOUND_REFERENCE] > mode_thresh[SINGLE_REFERENCE] &&
mode_thresh[COMPOUND_REFERENCE] >
mode_thresh[REFERENCE_MODE_SELECT] &&
check_dual_ref_flags(cpi) &&
cpi->static_mb_pct == 100)
- reference_mode = COMPOUND_REFERENCE;
+ cm->reference_mode = COMPOUND_REFERENCE;
else if (mode_thresh[SINGLE_REFERENCE] > mode_thresh[REFERENCE_MODE_SELECT])
- reference_mode = SINGLE_REFERENCE;
+ cm->reference_mode = SINGLE_REFERENCE;
else
- reference_mode = REFERENCE_MODE_SELECT;
+ cm->reference_mode = REFERENCE_MODE_SELECT;
if (cm->interp_filter == SWITCHABLE) {
if (frame_type != ALTREF_FRAME &&
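
To make the reference-mode cascade above concrete, a worked example with assumed threshold values (purely illustrative, not from the patch): suppose mode_thresh[SINGLE_REFERENCE] = 1000, mode_thresh[COMPOUND_REFERENCE] = 1200 and mode_thresh[REFERENCE_MODE_SELECT] = 1100 for a non-ALTREF frame with compound inter allowed. With dual reference flags set and cpi->static_mb_pct == 100, the compound threshold beats both of the others, so cm->reference_mode becomes COMPOUND_REFERENCE. If static_mb_pct were lower, that branch fails and the next test compares 1000 against 1100; since SINGLE does not exceed SELECT, the frame falls through to REFERENCE_MODE_SELECT, leaving the single/compound choice to be made per block.
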
@@ -3243,9 +3429,6 @@ void vp9_encode_frame(VP9_COMP *cpi) {
}
}
- cpi->mb.e_mbd.lossless = cpi->oxcf.lossless;
- cm->reference_mode = reference_mode;
-
encode_frame_internal(cpi);
for (i = 0; i < REFERENCE_MODES; ++i) {
@@ -3324,10 +3507,8 @@ void vp9_encode_frame(VP9_COMP *cpi) {
}
}
} else {
- cpi->mb.e_mbd.lossless = cpi->oxcf.lossless;
cm->reference_mode = SINGLE_REFERENCE;
- // Force the usage of the BILINEAR interp_filter.
- cm->interp_filter = BILINEAR;
+ cm->interp_filter = SWITCHABLE;
encode_frame_internal(cpi);
}
}