Diffstat (limited to 'vp9/encoder')
-rw-r--r--  vp9/encoder/vp9_encodeframe.c         33
-rw-r--r--  vp9/encoder/vp9_encoder.h              6
-rw-r--r--  vp9/encoder/vp9_pickmode.c           329
-rw-r--r--  vp9/encoder/vp9_rdopt.c               12
-rw-r--r--  vp9/encoder/x86/vp9_error_sse2.asm    46
5 files changed, 382 insertions(+), 44 deletions(-)
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 10a62ef69..dfc88d8ed 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -482,9 +482,9 @@ void vp9_set_vbp_thresholds(VP9_COMP *cpi, int q) {
} else {
VP9_COMMON *const cm = &cpi->common;
const int is_key_frame = (cm->frame_type == KEY_FRAME);
- const int threshold_multiplier = is_key_frame ? 80 : 4;
+ const int threshold_multiplier = is_key_frame ? 20 : 1;
const int64_t threshold_base = (int64_t)(threshold_multiplier *
- vp9_convert_qindex_to_q(q, cm->bit_depth));
+ cpi->y_dequant[q][1]);
// TODO(marpan): Allow 4x4 partitions for inter-frames.
// use_4x4_partition = (variance4x4downsample[i2 + j] == 1);
@@ -492,21 +492,20 @@ void vp9_set_vbp_thresholds(VP9_COMP *cpi, int q) {
// if variance of 16x16 block is very high, so use larger threshold
// for 16x16 (threshold_bsize_min) in that case.
if (is_key_frame) {
- cpi->vbp_threshold = threshold_base >> 2;
- cpi->vbp_threshold_bsize_max = threshold_base;
- cpi->vbp_threshold_bsize_min = threshold_base << 2;
- cpi->vbp_threshold_16x16 = cpi->vbp_threshold;
+ cpi->vbp_threshold_64x64 = threshold_base;
+ cpi->vbp_threshold_32x32 = threshold_base >> 2;
+ cpi->vbp_threshold_16x16 = threshold_base >> 2;
+ cpi->vbp_threshold_8x8 = threshold_base << 2;
cpi->vbp_bsize_min = BLOCK_8X8;
} else {
- cpi->vbp_threshold = threshold_base;
+ cpi->vbp_threshold_32x32 = threshold_base;
if (cm->width <= 352 && cm->height <= 288) {
- cpi->vbp_threshold_bsize_max = threshold_base >> 2;
- cpi->vbp_threshold_bsize_min = threshold_base << 3;
+ cpi->vbp_threshold_64x64 = threshold_base >> 2;
+ cpi->vbp_threshold_16x16 = threshold_base << 3;
} else {
- cpi->vbp_threshold_bsize_max = threshold_base;
- cpi->vbp_threshold_bsize_min = threshold_base << cpi->oxcf.speed;
+ cpi->vbp_threshold_64x64 = threshold_base;
+ cpi->vbp_threshold_16x16 = threshold_base << cpi->oxcf.speed;
}
- cpi->vbp_threshold_16x16 = cpi->vbp_threshold_bsize_min;
cpi->vbp_bsize_min = BLOCK_16X16;
}
}
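
The new derivation ties one base threshold to the luma AC dequant step (cpi->y_dequant[q][1]) and shifts it per block size; the multiplier change (80 to 20 for key frames, 4 to 1 for inter) appears to compensate for the AC dequant step being roughly four times the real Q value, so the effective magnitudes stay comparable. A minimal standalone sketch under that reading; the helper name and the thr[] layout are illustrative, not part of the patch:

    #include <stdint.h>

    /* Hedged sketch of the per-block-size threshold derivation above.
     * ac_dequant stands in for cpi->y_dequant[q][1]; all names here are
     * illustrative, not the encoder's. thr[] holds 64x64, 32x32, 16x16,
     * 8x8 thresholds in that order. */
    static void derive_vbp_thresholds(int64_t ac_dequant, int is_key_frame,
                                      int width, int height, int speed,
                                      int64_t thr[4]) {
      const int64_t base = (is_key_frame ? 20 : 1) * ac_dequant;
      if (is_key_frame) {
        thr[0] = base;        /* 64x64: split readily                  */
        thr[1] = base >> 2;   /* 32x32                                 */
        thr[2] = base >> 2;   /* 16x16                                 */
        thr[3] = base << 2;   /* 8x8: high bar before going below 8x8  */
      } else {
        thr[1] = base;
        if (width <= 352 && height <= 288) {  /* CIF and smaller */
          thr[0] = base >> 2;
          thr[2] = base << 3;
        } else {
          thr[0] = base;
          thr[2] = base << speed;
        }
        thr[3] = INT64_MAX;   /* unused: inter frames stop at 16x16 */
      }
    }
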
@@ -692,7 +691,7 @@ static void choose_partitioning(VP9_COMP *cpi,
}
if (is_key_frame || (low_res &&
vt.split[i].split[j].part_variances.none.variance >
- (cpi->vbp_threshold << 1))) {
+ (cpi->vbp_threshold_32x32 << 1))) {
// Go down to 4x4 down-sampling for variance.
variance4x4downsample[i2 + j] = 1;
for (k = 0; k < 4; k++) {
@@ -757,7 +756,7 @@ static void choose_partitioning(VP9_COMP *cpi,
// If variance of this 32x32 block is above the threshold, force the block
// to split. This also forces a split on the upper (64x64) level.
get_variance(&vt.split[i].part_variances.none);
- if (vt.split[i].part_variances.none.variance > cpi->vbp_threshold) {
+ if (vt.split[i].part_variances.none.variance > cpi->vbp_threshold_32x32) {
force_split[i + 1] = 1;
force_split[0] = 1;
}
@@ -769,7 +768,7 @@ static void choose_partitioning(VP9_COMP *cpi,
// we get to one that's got a variance lower than our threshold.
if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
!set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col,
- cpi->vbp_threshold_bsize_max, BLOCK_16X16,
+ cpi->vbp_threshold_64x64, BLOCK_16X16,
force_split[0])) {
for (i = 0; i < 4; ++i) {
const int x32_idx = ((i & 1) << 2);
@@ -777,7 +776,7 @@ static void choose_partitioning(VP9_COMP *cpi,
const int i2 = i << 2;
if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32,
(mi_row + y32_idx), (mi_col + x32_idx),
- cpi->vbp_threshold,
+ cpi->vbp_threshold_32x32,
BLOCK_16X16, force_split[i + 1])) {
for (j = 0; j < 4; ++j) {
const int x16_idx = ((j & 1) << 1);
@@ -801,7 +800,7 @@ static void choose_partitioning(VP9_COMP *cpi,
BLOCK_8X8,
mi_row + y32_idx + y16_idx + y8_idx,
mi_col + x32_idx + x16_idx + x8_idx,
- cpi->vbp_threshold_bsize_min,
+ cpi->vbp_threshold_8x8,
BLOCK_8X8, 0)) {
set_block_size(cpi, xd,
(mi_row + y32_idx + y16_idx + y8_idx),
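
The three set_vt_partitioning() call sites above walk the variance tree top-down, each level consulting its own threshold. A simplified outline of that control flow; try_size() is a hypothetical stand-in that returns nonzero when the variance at this level is under the threshold and the block size is accepted:

    #include <stdint.h>

    /* Hedged outline of the descent above; try_size() is hypothetical. */
    extern int try_size(int block_size, int64_t threshold);

    static void partition_descend(const int64_t thr[4] /* 64,32,16,8 */) {
      int i, j;
      if (try_size(64, thr[0]))
        return;                      /* keep the whole 64x64        */
      for (i = 0; i < 4; ++i) {      /* four 32x32 quadrants        */
        if (try_size(32, thr[1]))
          continue;
        for (j = 0; j < 4; ++j) {    /* four 16x16 sub-blocks       */
          if (try_size(16, thr[2]))
            continue;
          /* key frames may test thr[3] and descend to 8x8 */
        }
      }
    }
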
diff --git a/vp9/encoder/vp9_encoder.h b/vp9/encoder/vp9_encoder.h
index 3c57c8613..ee231f7fd 100644
--- a/vp9/encoder/vp9_encoder.h
+++ b/vp9/encoder/vp9_encoder.h
@@ -460,10 +460,10 @@ typedef struct VP9_COMP {
int resize_pending;
// VAR_BASED_PARTITION thresholds
- int64_t vbp_threshold;
- int64_t vbp_threshold_bsize_min;
- int64_t vbp_threshold_bsize_max;
+ int64_t vbp_threshold_64x64;
+ int64_t vbp_threshold_32x32;
int64_t vbp_threshold_16x16;
+ int64_t vbp_threshold_8x8;
BLOCK_SIZE vbp_bsize_min;
// Multi-threading
diff --git a/vp9/encoder/vp9_pickmode.c b/vp9/encoder/vp9_pickmode.c
index f8a5e6ae7..ffa87779a 100644
--- a/vp9/encoder/vp9_pickmode.c
+++ b/vp9/encoder/vp9_pickmode.c
@@ -202,6 +202,248 @@ static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
return rv;
}
+static void block_variance(const uint8_t *src, int src_stride,
+ const uint8_t *ref, int ref_stride,
+ int w, int h, unsigned int *sse, int *sum,
+ int block_size, unsigned int *sse8x8,
+ int *sum8x8, unsigned int *var8x8) {
+ int i, j, k = 0;
+
+ *sse = 0;
+ *sum = 0;
+
+ for (i = 0; i < h; i += block_size) {
+ for (j = 0; j < w; j += block_size) {
+ vp9_get8x8var(src + src_stride * i + j, src_stride,
+ ref + ref_stride * i + j, ref_stride,
+ &sse8x8[k], &sum8x8[k]);
+ *sse += sse8x8[k];
+ *sum += sum8x8[k];
+ var8x8[k] = sse8x8[k] - (((unsigned int)sum8x8[k] * sum8x8[k]) >> 6);
+ k++;
+ }
+ }
+}
+
+static void calculate_variance(int bw, int bh, TX_SIZE tx_size,
+ unsigned int *sse_i, int *sum_i,
+ unsigned int *var_o, unsigned int *sse_o,
+ int *sum_o) {
+ const BLOCK_SIZE unit_size = txsize_to_bsize[tx_size];
+ const int nw = 1 << (bw - b_width_log2_lookup[unit_size]);
+ const int nh = 1 << (bh - b_height_log2_lookup[unit_size]);
+ int i, j, k = 0;
+
+ for (i = 0; i < nh; i += 2) {
+ for (j = 0; j < nw; j += 2) {
+ sse_o[k] = sse_i[i * nw + j] + sse_i[i * nw + j + 1] +
+ sse_i[(i + 1) * nw + j] + sse_i[(i + 1) * nw + j + 1];
+ sum_o[k] = sum_i[i * nw + j] + sum_i[i * nw + j + 1] +
+ sum_i[(i + 1) * nw + j] + sum_i[(i + 1) * nw + j + 1];
+ var_o[k] = sse_o[k] - (((unsigned int)sum_o[k] * sum_o[k]) >>
+ (b_width_log2_lookup[unit_size] +
+ b_height_log2_lookup[unit_size] + 6));
+ k++;
+ }
+ }
+}
+
+static void model_rd_for_sb_y_large(VP9_COMP *cpi, BLOCK_SIZE bsize,
+ MACROBLOCK *x, MACROBLOCKD *xd,
+ int *out_rate_sum, int64_t *out_dist_sum,
+ unsigned int *var_y, unsigned int *sse_y,
+ int mi_row, int mi_col, int *early_term) {
+ // Note our transform coeffs are 8 times an orthogonal transform.
+ // Hence quantizer step is also 8 times. To get effective quantizer
+ // we need to divide by 8 before sending to modeling function.
+ unsigned int sse;
+ int rate;
+ int64_t dist;
+ struct macroblock_plane *const p = &x->plane[0];
+ struct macroblockd_plane *const pd = &xd->plane[0];
+ const uint32_t dc_quant = pd->dequant[0];
+ const uint32_t ac_quant = pd->dequant[1];
+ const int64_t dc_thr = dc_quant * dc_quant >> 6;
+ const int64_t ac_thr = ac_quant * ac_quant >> 6;
+ unsigned int var;
+ int sum;
+ int skip_dc = 0;
+
+ const int bw = b_width_log2_lookup[bsize];
+ const int bh = b_height_log2_lookup[bsize];
+ const int num8x8 = 1 << (bw + bh - 2);
+ unsigned int sse8x8[64] = {0};
+ int sum8x8[64] = {0};
+ unsigned int var8x8[64] = {0};
+ TX_SIZE tx_size;
+ int i, k;
+
+ // Calculate the variance for the whole partition and also save each 8x8
+ // block's variance for the transform-skipping test that follows.
+ block_variance(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride,
+ 4 << bw, 4 << bh, &sse, &sum, 8, sse8x8, sum8x8, var8x8);
+ var = sse - (((int64_t)sum * sum) >> (bw + bh + 4));
+
+ *var_y = var;
+ *sse_y = sse;
+
+ if (cpi->common.tx_mode == TX_MODE_SELECT) {
+ if (sse > (var << 2))
+ tx_size = MIN(max_txsize_lookup[bsize],
+ tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
+ else
+ tx_size = TX_8X8;
+
+ if (cpi->sf.partition_search_type == VAR_BASED_PARTITION) {
+ if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
+ cyclic_refresh_segment_id_boosted(xd->mi[0].src_mi->mbmi.segment_id))
+ tx_size = TX_8X8;
+ else if (tx_size > TX_16X16)
+ tx_size = TX_16X16;
+ }
+ } else {
+ tx_size = MIN(max_txsize_lookup[bsize],
+ tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
+ }
+
+ assert(tx_size >= TX_8X8);
+ xd->mi[0].src_mi->mbmi.tx_size = tx_size;
+
+ // Check whether the partition block is skippable in the Y plane.
+ {
+ unsigned int sse16x16[16] = {0};
+ int sum16x16[16] = {0};
+ unsigned int var16x16[16] = {0};
+ const int num16x16 = num8x8 >> 2;
+
+ unsigned int sse32x32[4] = {0};
+ int sum32x32[4] = {0};
+ unsigned int var32x32[4] = {0};
+ const int num32x32 = num8x8 >> 4;
+
+ int ac_test = 1;
+ int dc_test = 1;
+ const int num = (tx_size == TX_8X8) ? num8x8 :
+ ((tx_size == TX_16X16) ? num16x16 : num32x32);
+ const unsigned int *sse_tx = (tx_size == TX_8X8) ? sse8x8 :
+ ((tx_size == TX_16X16) ? sse16x16 : sse32x32);
+ const unsigned int *var_tx = (tx_size == TX_8X8) ? var8x8 :
+ ((tx_size == TX_16X16) ? var16x16 : var32x32);
+
+ // Calculate variance if tx_size > TX_8X8
+ if (tx_size >= TX_16X16)
+ calculate_variance(bw, bh, TX_8X8, sse8x8, sum8x8, var16x16, sse16x16,
+ sum16x16);
+ if (tx_size == TX_32X32)
+ calculate_variance(bw, bh, TX_16X16, sse16x16, sum16x16, var32x32,
+ sse32x32, sum32x32);
+
+ // Skipping test
+ x->skip_txfm[0] = 0;
+ for (k = 0; k < num; k++)
+ // Check if all ac coefficients can be quantized to zero.
+ if (!(var_tx[k] < ac_thr || var == 0)) {
+ ac_test = 0;
+ break;
+ }
+
+ for (k = 0; k < num; k++)
+ // Check if dc coefficient can be quantized to zero.
+ if (!(sse_tx[k] - var_tx[k] < dc_thr || sse == var)) {
+ dc_test = 0;
+ break;
+ }
+
+ if (ac_test) {
+ x->skip_txfm[0] = 2;
+
+ if (dc_test)
+ x->skip_txfm[0] = 1;
+ } else if (dc_test) {
+ skip_dc = 1;
+ }
+ }
+
+ if (x->skip_txfm[0] == 1) {
+ int skip_uv[2] = {0};
+ unsigned int var_uv[2];
+ unsigned int sse_uv[2];
+
+ *out_rate_sum = 0;
+ *out_dist_sum = sse << 4;
+
+ // Transform skipping test in UV planes.
+ for (i = 1; i <= 2; i++) {
+ struct macroblock_plane *const p = &x->plane[i];
+ struct macroblockd_plane *const pd = &xd->plane[i];
+ const TX_SIZE uv_tx_size = get_uv_tx_size(&xd->mi[0].src_mi->mbmi, pd);
+ const BLOCK_SIZE unit_size = txsize_to_bsize[uv_tx_size];
+ const int sf = (bw - b_width_log2_lookup[unit_size]) +
+ (bh - b_height_log2_lookup[unit_size]);
+ const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
+ const uint32_t uv_dc_thr = pd->dequant[0] * pd->dequant[0] >> (6 - sf);
+ const uint32_t uv_ac_thr = pd->dequant[1] * pd->dequant[1] >> (6 - sf);
+ int j = i - 1;
+
+ vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, i);
+ var_uv[j] = cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride,
+ pd->dst.buf, pd->dst.stride, &sse_uv[j]);
+
+ if (var_uv[j] < uv_ac_thr || var_uv[j] == 0) {
+ if (sse_uv[j] - var_uv[j] < uv_dc_thr || sse_uv[j] == var_uv[j])
+ skip_uv[j] = 1;
+ }
+ }
+
+ // If the transforms in the YUV planes are all skippable, the mode search
+ // checks fewer inter modes and doesn't check intra modes.
+ if (skip_uv[0] & skip_uv[1]) {
+ *early_term = 1;
+ }
+
+ return;
+ }
+
+ if (!skip_dc) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
+ dc_quant >> (xd->bd - 5), &rate, &dist);
+ } else {
+ vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
+ dc_quant >> 3, &rate, &dist);
+ }
+#else
+ vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
+ dc_quant >> 3, &rate, &dist);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+
+ if (!skip_dc) {
+ *out_rate_sum = rate >> 1;
+ *out_dist_sum = dist << 3;
+ } else {
+ *out_rate_sum = 0;
+ *out_dist_sum = (sse - var) << 4;
+ }
+
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
+ ac_quant >> (xd->bd - 5), &rate, &dist);
+ } else {
+ vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
+ ac_quant >> 3, &rate, &dist);
+ }
+#else
+ vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
+ ac_quant >> 3, &rate, &dist);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+ *out_rate_sum += rate;
+ *out_dist_sum += dist << 4;
+}
+
static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize,
MACROBLOCK *x, MACROBLOCKD *xd,
int *out_rate_sum, int64_t *out_dist_sum,
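
In model_rd_for_sb_y_large() above, dc_thr and ac_thr are the squared dequant steps scaled by 1/64 (the >> 6): a transform block whose variance falls below ac_thr is taken as quantizing to all-zero AC coefficients, and sse - var below dc_thr likewise zeroes the DC coefficient. A small self-contained illustration; the dequant steps and block statistics are made up:

    #include <stdio.h>

    /* Hedged illustration of the zero-quantization test above. */
    int main(void) {
      const unsigned int dc_quant = 40, ac_quant = 32;
      const long long dc_thr = (long long)dc_quant * dc_quant >> 6; /* 25 */
      const long long ac_thr = (long long)ac_quant * ac_quant >> 6; /* 16 */
      /* One 8x8 block: sse and sum over its 64 pixels. */
      const unsigned int sse = 848;
      const int sum = -232;
      const unsigned int var = sse - (unsigned int)(sum * sum >> 6); /* 7 */
      printf("ac skippable: %d\n", var < ac_thr);        /* 1: 7 < 16    */
      printf("dc skippable: %d\n", sse - var < dc_thr);  /* 0: 841 >= 25 */
      return 0;
    }

In the patch, AC-only skippability maps to x->skip_txfm[0] == 2 (only the DC term is modeled), both tests passing maps to x->skip_txfm[0] == 1, and DC-only skippability sets skip_dc.
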
@@ -315,6 +557,20 @@ static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize,
*out_dist_sum += dist << 4;
}
+#if CONFIG_VP9_HIGHBITDEPTH
+static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, int64_t *dist,
+ int *skippable, int64_t *sse, int plane,
+ BLOCK_SIZE bsize, TX_SIZE tx_size) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ unsigned int var_y, sse_y;
+ (void)plane;
+ (void)tx_size;
+ model_rd_for_sb_y(cpi, bsize, x, xd, rate, dist, &var_y, &sse_y);
+ *sse = INT_MAX;
+ *skippable = 0;
+ return;
+}
+#else
static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, int64_t *dist,
int *skippable, int64_t *sse, int plane,
BLOCK_SIZE bsize, TX_SIZE tx_size) {
@@ -332,23 +588,9 @@ static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, int64_t *dist,
const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 0 :
xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
-#if CONFIG_VP9_HIGHBITDEPTH
- unsigned int var_y, sse_y;
- model_rd_for_sb_y(cpi, bsize, x, xd, rate, dist, &var_y, &sse_y);
- *sse = INT_MAX;
- *skippable = 0;
- return;
-#else
(void)cpi;
-#endif
-
vp9_subtract_plane(x, bsize, plane);
-
*skippable = 1;
- *rate = 0;
- *dist = 0;
- *sse = 0;
-
// Keep track of the row and column of the blocks we use so that we know
// if we are in the unrestricted motion border.
for (r = 0; r < max_blocks_high; r += block_step) {
@@ -360,11 +602,8 @@ static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, int64_t *dist,
tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
uint16_t *const eob = &p->eobs[block];
const int diff_stride = 4 * num_4x4_blocks_wide_lookup[bsize];
- int i, j;
const int16_t *src_diff;
- int64_t this_sse;
- txfrm_block_to_raster_xy(bsize, tx_size, block, &i, &j);
- src_diff = &p->src_diff[4 * (j * diff_stride + i)];
+ src_diff = &p->src_diff[(r * diff_stride + c) << 2];
switch (tx_size) {
case TX_32X32:
@@ -399,16 +638,36 @@ static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, int64_t *dist,
assert(0);
break;
}
+ *skippable &= (*eob == 0);
+ }
+ block += step;
+ }
+ }
- *dist += vp9_block_error(coeff, dqcoeff, step << 4, &this_sse) >> shift;
+ if (*skippable && *sse < INT64_MAX) {
+ *dist = (*sse << 6) >> shift;
+ *sse = *dist;
+ return;
+ }
+
+ block = 0;
+ *rate = 0;
+ *dist = 0;
+ *sse = (*sse << 6) >> shift;
+ for (r = 0; r < max_blocks_high; r += block_step) {
+ for (c = 0; c < num_4x4_w; c += block_step) {
+ if (c < max_blocks_wide) {
+ tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+ tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+ tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+ uint16_t *const eob = &p->eobs[block];
if (*eob == 1)
*rate += (int)abs(qcoeff[0]);
else if (*eob > 1)
*rate += (int)vp9_satd((const int16_t *)qcoeff, step << 4);
- *sse += (this_sse >> shift);
- *skippable &= (*eob == 0);
+ *dist += vp9_block_error_fp(coeff, dqcoeff, step << 4) >> shift;
}
block += step;
}
@@ -417,6 +676,7 @@ static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, int64_t *dist,
*rate <<= 8;
*rate *= 6;
}
+#endif
static void model_rd_for_sb_uv(VP9_COMP *cpi, BLOCK_SIZE bsize,
MACROBLOCK *x, MACROBLOCKD *xd,
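
The restructured (lowbitdepth) block_yrd now makes two passes: the first transforms and quantizes every sub-block while folding the eobs into *skippable, returning early with dist derived from sse when everything quantizes to zero; only otherwise does a second pass price the coefficients, charging |qcoeff[0]| for a DC-only block and a SATD-style sum otherwise. A hedged reference for that rate proxy; satd_ref() stands in for vp9_satd, which is applied here to quantized coefficients:

    #include <stdint.h>
    #include <stdlib.h>

    /* Hedged reference for the rate proxy in the second pass above. */
    static int satd_ref(const int16_t *qcoeff, int n) {
      int i, sum = 0;
      for (i = 0; i < n; ++i)
        sum += abs(qcoeff[i]);       /* sum of absolute quantized coeffs */
      return sum;
    }

    static int rate_proxy(const int16_t *qcoeff, int n, int eob) {
      if (eob == 1)
        return abs(qcoeff[0]);       /* DC-only block                 */
      if (eob > 1)
        return satd_ref(qcoeff, n);  /* general case                  */
      return 0;                      /* all-zero block costs nothing  */
    }

The function's closing *rate <<= 8; *rate *= 6; then rescales this proxy into the encoder's rate units.
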
@@ -624,7 +884,7 @@ static void estimate_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
int i, j;
int rate;
int64_t dist;
- int64_t this_sse;
+ int64_t this_sse = INT64_MAX;
int is_skippable;
txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);
@@ -799,6 +1059,7 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int ref_frame_skip_mask = 0;
int idx;
int best_pred_sad = INT_MAX;
+ int best_early_term = 0;
int ref_frame_cost[MAX_REF_FRAMES];
vp9_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd);
vp9_prob ref_single_p1 = vp9_get_pred_prob_single_ref_p1(cm, xd);
@@ -906,6 +1167,7 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
PREDICTION_MODE this_mode = ref_mode_set[idx].pred_mode;
int64_t this_sse;
int is_skippable;
+ int this_early_term = 0;
if (!(cpi->sf.inter_mode_mask[bsize] & (1 << this_mode)))
continue;
@@ -1066,14 +1328,25 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
} else {
mbmi->interp_filter = (filter_ref == SWITCHABLE) ? EIGHTTAP : filter_ref;
vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
- model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
- &var_y, &sse_y);
+
+ // For large partition blocks, do extra transform-skip testing, which can
+ // terminate the mode search early.
+ if (bsize > BLOCK_32X32 && xd->mi[0].src_mi->mbmi.segment_id != 1 &&
+ cm->base_qindex) {
+ model_rd_for_sb_y_large(cpi, bsize, x, xd, &this_rdc.rate,
+ &this_rdc.dist, &var_y, &sse_y, mi_row, mi_col,
+ &this_early_term);
+ } else {
+ model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
+ &var_y, &sse_y);
+ }
+
this_rdc.rate +=
cm->interp_filter == SWITCHABLE ?
vp9_get_switchable_rate(cpi, xd) : 0;
}
if (bsize <= BLOCK_16X16) {
+ this_sse = (int64_t)sse_y;
block_yrd(cpi, x, &this_rdc.rate, &this_rdc.dist, &is_skippable,
&this_sse, 0, bsize, mbmi->tx_size);
x->skip_txfm[0] = is_skippable;
@@ -1143,6 +1416,7 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
best_tx_size = mbmi->tx_size;
best_ref_frame = ref_frame;
best_mode_skip_txfm = x->skip_txfm[0];
+ best_early_term = this_early_term;
if (reuse_inter_pred) {
free_pred_buffer(best_pred);
@@ -1155,6 +1429,13 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (x->skip)
break;
+
+ // If the early termination flag is set and at least 2 modes have been
+ // checked, terminate the mode search.
+ if (best_early_term && idx > 0) {
+ x->skip = 1;
+ break;
+ }
}
mbmi->mode = best_mode;
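
The new early exit interacts with the mode loop as sketched below; score_mode(), x_skip and best_early_term are hypothetical stand-ins for the per-mode evaluation and the flags used above:

    /* Hedged outline of the loop-exit logic added above. */
    extern void score_mode(int idx);        /* hypothetical               */
    extern int x_skip, best_early_term;     /* stand-ins for x->skip etc. */

    static void mode_loop(int num_modes) {
      int idx;
      for (idx = 0; idx < num_modes; ++idx) {
        score_mode(idx);
        if (x_skip)
          break;                     /* pre-existing skip condition       */
        if (best_early_term && idx > 0) {
          x_skip = 1;                /* new: best mode is YUV-skippable   */
          break;
        }
      }
    }
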
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index e8c3f767e..166535b8b 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -292,6 +292,18 @@ int64_t vp9_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
return error;
}
+int64_t vp9_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
+ int block_size) {
+ int i;
+ int64_t error = 0;
+
+ for (i = 0; i < block_size; i++) {
+ const int diff = coeff[i] - dqcoeff[i];
+ error += diff * diff;
+ }
+
+ return error;
+}
#if CONFIG_VP9_HIGHBITDEPTH
int64_t vp9_highbd_block_error_c(const tran_low_t *coeff,
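
A quick usage sketch of the new helper with made-up coefficients; unlike vp9_block_error_c() it returns only the squared error, with no separate ssz output:

    #include <stdint.h>
    #include <stdio.h>

    int64_t vp9_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
                                 int block_size);

    int main(void) {
      /* Made-up 16-coefficient (4x4) example. */
      const int16_t coeff[16]   = { 120, -34, 5, 0, 9, 0, 0, 0,
                                    -2, 0, 0, 0, 0, 0, 0, 0 };
      const int16_t dqcoeff[16] = { 118, -32, 4, 0, 8, 0, 0, 0,
                                    0, 0, 0, 0, 0, 0, 0, 0 };
      /* diffs 2, -2, 1, 1, -2 -> 4 + 4 + 1 + 1 + 4 = 14 */
      printf("%lld\n", (long long)vp9_block_error_fp_c(coeff, dqcoeff, 16));
      return 0;
    }
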
diff --git a/vp9/encoder/x86/vp9_error_sse2.asm b/vp9/encoder/x86/vp9_error_sse2.asm
index 1126fdb61..318379777 100644
--- a/vp9/encoder/x86/vp9_error_sse2.asm
+++ b/vp9/encoder/x86/vp9_error_sse2.asm
@@ -72,3 +72,49 @@ cglobal block_error, 3, 3, 8, uqc, dqc, size, ssz
movd edx, m5
%endif
RET
+
+; Compute the sum of squared differences between two int16_t vectors.
+; int64_t vp9_block_error_fp(int16_t *coeff, int16_t *dqcoeff,
+; intptr_t block_size)
+
+INIT_XMM sse2
+cglobal block_error_fp, 3, 3, 8, uqc, dqc, size
+ pxor m4, m4 ; sse accumulator
+ pxor m5, m5 ; dedicated zero register
+ lea uqcq, [uqcq+sizeq*2]
+ lea dqcq, [dqcq+sizeq*2]
+ neg sizeq
+.loop:
+ mova m2, [uqcq+sizeq*2]
+ mova m0, [dqcq+sizeq*2]
+ mova m3, [uqcq+sizeq*2+mmsize]
+ mova m1, [dqcq+sizeq*2+mmsize]
+ psubw m0, m2
+ psubw m1, m3
+ ; individual errors are max. 15bit+sign, so squares are 30bit, and
+ ; thus the sum of 2 should fit in a 31bit integer (+ unused sign bit)
+ pmaddwd m0, m0
+ pmaddwd m1, m1
+ ; accumulate in 64bit
+ punpckldq m7, m0, m5
+ punpckhdq m0, m5
+ paddq m4, m7
+ punpckldq m7, m1, m5
+ paddq m4, m0
+ punpckhdq m1, m5
+ paddq m4, m7
+ paddq m4, m1
+ add sizeq, mmsize
+ jl .loop
+
+ ; accumulate horizontally and store in return value
+ movhlps m5, m4
+ paddq m4, m5
+%if ARCH_X86_64
+ movq rax, m4
+%else
+ pshufd m5, m4, 0x1
+ movd eax, m4
+ movd edx, m5
+%endif
+ RET
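
To sanity-check the SSE2 path against the C reference, a harness along these lines could be used. Assumptions: the RTCD-generated symbol name vp9_block_error_fp_sse2, GCC-style alignment attributes, and block sizes in multiples of 16, since each loop iteration consumes 16 int16_t per array and the mova loads require 16-byte alignment:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int64_t vp9_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
                                 int block_size);
    int64_t vp9_block_error_fp_sse2(const int16_t *coeff,
                                    const int16_t *dqcoeff, int block_size);

    /* 16-byte aligned buffers: the asm uses mova (aligned) loads. */
    static int16_t a[1024] __attribute__((aligned(16)));
    static int16_t b[1024] __attribute__((aligned(16)));

    int main(void) {
      int i, n;
      srand(0);
      for (i = 0; i < 1024; ++i) {
        a[i] = (int16_t)(rand() % 65535 - 32767);  /* 15 bit + sign, per */
        b[i] = (int16_t)(rand() % 65535 - 32767);  /* the comment above  */
      }
      for (n = 16; n <= 1024; n <<= 1) {
        const long long r_c = vp9_block_error_fp_c(a, b, n);
        const long long r_simd = vp9_block_error_fp_sse2(a, b, n);
        if (r_c != r_simd)
          printf("mismatch at n=%d: %lld vs %lld\n", n, r_c, r_simd);
      }
      return 0;
    }
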