author     Ronald S. Bultje <rbultje@google.com>    2013-01-08 10:29:22 -0800
committer  Ronald S. Bultje <rbultje@google.com>    2013-01-08 12:54:45 -0800
commit     4455036cfc3c6b7fb9d7b85af1982e7df3711a05 (patch)
tree       4294eb7d43141c3406d3f3f184b522a1b052fadd /vp9/encoder
parent     879cb7d96259a71eea0038452a00241650589084 (diff)
Merge superblocks (32x32) experiment.
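
This folds the 32x32 superblock experiment into the mainline encoder:
the CONFIG_SUPERBLOCKS guard is removed throughout vp9/encoder, the
code it protected becomes unconditional, compound guards such as
CONFIG_TX32X32 && CONFIG_SUPERBLOCKS collapse to CONFIG_TX32X32 (and
CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64 to CONFIG_SUPERBLOCKS64),
and the 16x16-only fallback paths (e.g. the x->thismb copy buffer) are
deleted. The recurring pattern, as in the pack_inter_mode_mvs() hunk
below (explanatory comments added here, not part of the source):

    /* Before: superblock sizing was compile-time optional. */
    #if CONFIG_SUPERBLOCKS
      const int mb_size = 1 << mi->sb_type;  /* 1, 2 or 4 MBs per side */
    #else
      const int mb_size = 1;                 /* 16x16-only build */
    #endif

    /* After: the guard and its fallback are gone. */
    const int mb_size = 1 << mi->sb_type;
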
Change-Id: I0df99742029834a85c4933652b0587cf5b6b2587
Diffstat (limited to 'vp9/encoder')
-rw-r--r--  vp9/encoder/vp9_bitstream.c      65
-rw-r--r--  vp9/encoder/vp9_block.h          17
-rw-r--r--  vp9/encoder/vp9_encodeframe.c   134
-rw-r--r--  vp9/encoder/vp9_encodemb.c        4
-rw-r--r--  vp9/encoder/vp9_encodemb.h        4
-rw-r--r--  vp9/encoder/vp9_firstpass.c       5
-rw-r--r--  vp9/encoder/vp9_mcomp.c          44
-rw-r--r--  vp9/encoder/vp9_onyx_if.c         8
-rw-r--r--  vp9/encoder/vp9_onyx_int.h       14
-rw-r--r--  vp9/encoder/vp9_quantize.c       16
-rw-r--r--  vp9/encoder/vp9_quantize.h        2
-rw-r--r--  vp9/encoder/vp9_ratectrl.c        8
-rw-r--r--  vp9/encoder/vp9_rdopt.c         219
-rw-r--r--  vp9/encoder/vp9_segmentation.c   11
-rw-r--r--  vp9/encoder/vp9_tokenize.c       28
-rw-r--r--  vp9/encoder/vp9_tokenize.h        6
-rw-r--r--  vp9/encoder/vp9_variance_c.c     25
17 files changed, 204 insertions, 406 deletions
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index a8fdc6626..e03651493 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -146,11 +146,9 @@ static void update_mbintra_mode_probs(VP9_COMP* const cpi,
bc, VP9_YMODES, vp9_ymode_encodings, vp9_ymode_tree,
Pnew, cm->fc.ymode_prob, bct, (unsigned int *)cpi->ymode_count
);
-#if CONFIG_SUPERBLOCKS
update_mode(bc, VP9_I32X32_MODES, vp9_sb_ymode_encodings,
vp9_sb_ymode_tree, Pnew, cm->fc.sb_ymode_prob, bct,
(unsigned int *)cpi->sb_ymode_count);
-#endif
}
}
@@ -318,7 +316,6 @@ static void kfwrite_ymode(vp9_writer *bc, int m, const vp9_prob *p) {
write_token(bc, vp9_kf_ymode_tree, p, vp9_kf_ymode_encodings + m);
}
-#if CONFIG_SUPERBLOCKS
static void write_sb_ymode(vp9_writer *bc, int m, const vp9_prob *p) {
write_token(bc, vp9_sb_ymode_tree, p, vp9_sb_ymode_encodings + m);
}
@@ -326,7 +323,6 @@ static void write_sb_ymode(vp9_writer *bc, int m, const vp9_prob *p) {
static void sb_kfwrite_ymode(vp9_writer *bc, int m, const vp9_prob *p) {
write_token(bc, vp9_uv_mode_tree, p, vp9_sb_kf_ymode_encodings + m);
}
-#endif
static void write_i8x8_mode(vp9_writer *bc, int m, const vp9_prob *p) {
write_token(bc, vp9_i8x8_mode_tree, p, vp9_i8x8_mode_encodings + m);
@@ -492,7 +488,6 @@ static void write_mv_ref
vp9_mv_ref_encoding_array - NEARESTMV + m);
}
-#if CONFIG_SUPERBLOCKS
static void write_sb_mv_ref(vp9_writer *bc, MB_PREDICTION_MODE m,
const vp9_prob *p) {
#if CONFIG_DEBUG
@@ -501,7 +496,6 @@ static void write_sb_mv_ref(vp9_writer *bc, MB_PREDICTION_MODE m,
write_token(bc, vp9_sb_mv_ref_tree, p,
vp9_sb_mv_ref_encoding_array - NEARESTMV + m);
}
-#endif
static void write_sub_mv_ref
(
@@ -703,11 +697,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
const MV_REFERENCE_FRAME rf = mi->ref_frame;
const MB_PREDICTION_MODE mode = mi->mode;
const int segment_id = mi->segment_id;
-#if CONFIG_SUPERBLOCKS
const int mb_size = 1 << mi->sb_type;
-#else
- const int mb_size = 1;
-#endif
int skip_coeff;
int mb_row = pc->mb_rows - mb_rows_left;
@@ -781,11 +771,9 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
#endif
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_MODE)) {
-#if CONFIG_SUPERBLOCKS
if (m->mbmi.sb_type)
write_sb_ymode(bc, mode, pc->fc.sb_ymode_prob);
else
-#endif
write_ymode(bc, mode, pc->fc.ymode_prob);
}
if (mode == B_PRED) {
@@ -835,12 +823,9 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
// Is the segment coding of mode enabled
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_MODE)) {
-#if CONFIG_SUPERBLOCKS
if (mi->sb_type) {
write_sb_mv_ref(bc, mode, mv_ref_p);
- } else
-#endif
- {
+ } else {
write_mv_ref(bc, mode, mv_ref_p);
}
vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
@@ -998,7 +983,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
vp9_write(bc, sz != TX_4X4, pc->prob_tx[0]);
if (sz != TX_4X4 && mode != I8X8_PRED && mode != SPLITMV) {
vp9_write(bc, sz != TX_8X8, pc->prob_tx[1]);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
if (mi->sb_type && sz != TX_8X8)
vp9_write(bc, sz != TX_16X16, pc->prob_tx[2]);
#endif
@@ -1027,11 +1012,7 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0) {
skip_coeff = 1;
} else {
-#if CONFIG_SUPERBLOCKS
const int nmbs = 1 << m->mbmi.sb_type;
-#else
- const int nmbs = 1;
-#endif
const int xmbs = MIN(nmbs, mb_cols_left);
const int ymbs = MIN(nmbs, mb_rows_left);
int x, y;
@@ -1047,13 +1028,10 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
vp9_get_pred_prob(c, xd, PRED_MBSKIP));
}
-#if CONFIG_SUPERBLOCKS
if (m->mbmi.sb_type) {
sb_kfwrite_ymode(bc, ym,
c->sb_kf_ymode_prob[c->kf_ymode_probs_index]);
- } else
-#endif
- {
+ } else {
kfwrite_ymode(bc, ym,
c->kf_ymode_prob[c->kf_ymode_probs_index]);
}
@@ -1111,7 +1089,7 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
vp9_write(bc, sz != TX_4X4, c->prob_tx[0]);
if (sz != TX_4X4 && ym <= TM_PRED) {
vp9_write(bc, sz != TX_8X8, c->prob_tx[1]);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
if (m->mbmi.sb_type && sz != TX_8X8)
vp9_write(bc, sz != TX_16X16, c->prob_tx[2]);
#endif
@@ -1155,7 +1133,7 @@ static void write_modes(VP9_COMP *cpi, vp9_writer* const bc) {
for (mb_row = 0; mb_row < c->mb_rows; mb_row += 4, m_ptr += 4 * mis) {
m = m_ptr;
for (mb_col = 0; mb_col < c->mb_cols; mb_col += 4, m += 4) {
-#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#if CONFIG_SUPERBLOCKS64
vp9_write(bc, m->mbmi.sb_type == BLOCK_SIZE_SB64X64, c->sb64_coded);
if (m->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
write_modes_b(cpi, m, bc, &tok, tok_end, mb_row, mb_col);
@@ -1166,23 +1144,18 @@ static void write_modes(VP9_COMP *cpi, vp9_writer* const bc) {
for (j = 0; j < 4; j++) {
const int x_idx_sb = (j & 1) << 1, y_idx_sb = j & 2;
-#if CONFIG_SUPERBLOCKS
MODE_INFO *sb_m = m + y_idx_sb * mis + x_idx_sb;
-#endif
if (mb_col + x_idx_sb >= c->mb_cols ||
mb_row + y_idx_sb >= c->mb_rows)
continue;
-#if CONFIG_SUPERBLOCKS
vp9_write(bc, sb_m->mbmi.sb_type, c->sb32_coded);
if (sb_m->mbmi.sb_type) {
assert(sb_m->mbmi.sb_type == BLOCK_SIZE_SB32X32);
write_modes_b(cpi, sb_m, bc, &tok, tok_end,
mb_row + y_idx_sb, mb_col + x_idx_sb);
- } else
-#endif
- {
+ } else {
// Process the 4 MBs in the order:
// top-left, top-right, bottom-left, bottom-right
for (i = 0; i < 4; i++) {
@@ -1195,9 +1168,7 @@ static void write_modes(VP9_COMP *cpi, vp9_writer* const bc) {
continue;
}
-#if CONFIG_SUPERBLOCKS
assert(mb_m->mbmi.sb_type == BLOCK_SIZE_MB16X16);
-#endif
write_modes_b(cpi, mb_m, bc, &tok, tok_end,
mb_row + y_idx, mb_col + x_idx);
}
@@ -1305,7 +1276,7 @@ static void build_coeff_contexts(VP9_COMP *cpi) {
cpi, hybrid_context_counters_16x16,
#endif
cpi->frame_hybrid_branch_ct_16x16, BLOCK_TYPES_16X16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
build_tree_distribution(cpi->frame_coef_probs_32x32,
cpi->coef_counts_32x32,
#ifdef ENTROPY_STATS
@@ -1489,7 +1460,7 @@ static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) {
BLOCK_TYPES_16X16);
}
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
if (cpi->common.txfm_mode > ALLOW_16X16) {
update_coef_probs_common(bc,
#ifdef ENTROPY_STATS
@@ -1535,13 +1506,11 @@ static void decide_kf_ymode_entropy(VP9_COMP *cpi) {
for (j = 0; j < VP9_YMODES; j++) {
cost += mode_cost[j] * cpi->ymode_count[j];
}
-#if CONFIG_SUPERBLOCKS
vp9_cost_tokens(mode_cost, cpi->common.sb_kf_ymode_prob[i],
vp9_sb_ymode_tree);
for (j = 0; j < VP9_I32X32_MODES; j++) {
cost += mode_cost[j] * cpi->sb_ymode_count[j];
}
-#endif
if (cost < bestcost) {
bestindex = i;
bestcost = cost;
@@ -1731,14 +1700,12 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
}
}
-#if CONFIG_SUPERBLOCKS
#if CONFIG_SUPERBLOCKS64
pc->sb64_coded = get_binary_prob(cpi->sb64_count[0], cpi->sb64_count[1]);
vp9_write_literal(&header_bc, pc->sb64_coded, 8);
#endif
pc->sb32_coded = get_binary_prob(cpi->sb32_count[0], cpi->sb32_count[1]);
vp9_write_literal(&header_bc, pc->sb32_coded, 8);
-#endif
{
if (pc->txfm_mode == TX_MODE_SELECT) {
@@ -1748,7 +1715,7 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
cpi->txfm_count_32x32p[TX_4X4] +
cpi->txfm_count_32x32p[TX_8X8] +
cpi->txfm_count_32x32p[TX_16X16] +
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
cpi->txfm_count_32x32p[TX_32X32] +
#endif
cpi->txfm_count_16x16p[TX_4X4] +
@@ -1760,12 +1727,12 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
cpi->txfm_count_16x16p[TX_8X8],
cpi->txfm_count_32x32p[TX_8X8] +
cpi->txfm_count_32x32p[TX_16X16] +
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
cpi->txfm_count_32x32p[TX_32X32] +
#endif
cpi->txfm_count_16x16p[TX_8X8] +
cpi->txfm_count_16x16p[TX_16X16]);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
pc->prob_tx[2] = get_prob(cpi->txfm_count_32x32p[TX_16X16],
cpi->txfm_count_32x32p[TX_16X16] +
cpi->txfm_count_32x32p[TX_32X32]);
@@ -1773,12 +1740,12 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
} else {
pc->prob_tx[0] = 128;
pc->prob_tx[1] = 128;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
pc->prob_tx[2] = 128;
#endif
}
vp9_write_literal(&header_bc, pc->txfm_mode <= 3 ? pc->txfm_mode : 3, 2);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
if (pc->txfm_mode > ALLOW_16X16) {
vp9_write_bit(&header_bc, pc->txfm_mode == TX_MODE_SELECT);
}
@@ -1786,7 +1753,7 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
if (pc->txfm_mode == TX_MODE_SELECT) {
vp9_write_literal(&header_bc, pc->prob_tx[0], 8);
vp9_write_literal(&header_bc, pc->prob_tx[1], 8);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
vp9_write_literal(&header_bc, pc->prob_tx[2], 8);
#endif
}
@@ -2009,13 +1976,11 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
cpi->common.fc.coef_probs_16x16);
vp9_copy(cpi->common.fc.pre_hybrid_coef_probs_16x16,
cpi->common.fc.hybrid_coef_probs_16x16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
vp9_copy(cpi->common.fc.pre_coef_probs_32x32,
cpi->common.fc.coef_probs_32x32);
#endif
-#if CONFIG_SUPERBLOCKS
vp9_copy(cpi->common.fc.pre_sb_ymode_prob, cpi->common.fc.sb_ymode_prob);
-#endif
vp9_copy(cpi->common.fc.pre_ymode_prob, cpi->common.fc.ymode_prob);
vp9_copy(cpi->common.fc.pre_uv_mode_prob, cpi->common.fc.uv_mode_prob);
vp9_copy(cpi->common.fc.pre_bmode_prob, cpi->common.fc.bmode_prob);
diff --git a/vp9/encoder/vp9_block.h b/vp9/encoder/vp9_block.h
index 94078970b..18e5f73ff 100644
--- a/vp9/encoder/vp9_block.h
+++ b/vp9/encoder/vp9_block.h
@@ -35,13 +35,13 @@ typedef struct block {
int16_t *zbin;
int16_t *zbin_8x8;
int16_t *zbin_16x16;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
int16_t *zbin_32x32;
#endif
int16_t *zrun_zbin_boost;
int16_t *zrun_zbin_boost_8x8;
int16_t *zrun_zbin_boost_16x16;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
int16_t *zrun_zbin_boost_32x32;
#endif
int16_t *round;
@@ -57,7 +57,7 @@ typedef struct block {
int eob_max_offset;
int eob_max_offset_8x8;
int eob_max_offset_16x16;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
int eob_max_offset_32x32;
#endif
} BLOCK;
@@ -92,7 +92,7 @@ typedef struct {
int64_t txfm_rd_diff[NB_TXFM_MODES];
} PICK_MODE_CONTEXT;
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
typedef struct superblock {
DECLARE_ALIGNED(16, int16_t, src_diff[32*32+16*16*2]);
DECLARE_ALIGNED(16, int16_t, coeff[32*32+16*16*2]);
@@ -102,16 +102,11 @@ typedef struct superblock {
typedef struct macroblock {
DECLARE_ALIGNED(16, int16_t, src_diff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
DECLARE_ALIGNED(16, int16_t, coeff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
-#if !CONFIG_SUPERBLOCKS
- DECLARE_ALIGNED(16, uint8_t, thismb[256]); // 16x16 Y
-
- unsigned char *thismb_ptr;
-#endif
// 16 Y blocks, 4 U blocks, 4 V blocks,
// 1 DC 2nd order block each with 16 entries
BLOCK block[25];
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
SUPERBLOCK sb_coeff_data;
#endif
@@ -183,13 +178,11 @@ typedef struct macroblock {
// Structure to hold context for each of the 4 MBs within a SB:
// when encoded as 4 independent MBs:
PICK_MODE_CONTEXT mb_context[4][4];
-#if CONFIG_SUPERBLOCKS
// when 4 MBs share coding parameters:
PICK_MODE_CONTEXT sb32_context[4];
#if CONFIG_SUPERBLOCKS64
PICK_MODE_CONTEXT sb64_context;
#endif // CONFIG_SUPERBLOCKS64
-#endif
void (*vp9_short_fdct4x4)(int16_t *input, int16_t *output, int pitch);
void (*vp9_short_fdct8x4)(int16_t *input, int16_t *output, int pitch);
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 509c426d8..ad27c6f39 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -285,10 +285,6 @@ static void build_activity_map(VP9_COMP *cpi) {
xd->left_available = (mb_col != 0);
recon_yoffset += 16;
#endif
-#if !CONFIG_SUPERBLOCKS
- // Copy current mb to a buffer
- vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
-#endif
// measure activity
mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);
@@ -444,20 +440,14 @@ static void update_state(VP9_COMP *cpi,
int mb_mode = mi->mbmi.mode;
int mb_mode_index = ctx->best_mode_index;
const int mis = cpi->common.mode_info_stride;
-#if CONFIG_SUPERBLOCKS
int mb_block_size = 1 << mi->mbmi.sb_type;
-#else
- int mb_block_size = 1;
-#endif
#if CONFIG_DEBUG
assert(mb_mode < MB_MODE_COUNT);
assert(mb_mode_index < MAX_MODES);
assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);
#endif
-#if CONFIG_SUPERBLOCKS
assert(mi->mbmi.sb_type == (block_size >> 5));
-#endif
// Restore the coding context of the MB to that that was in place
// when the mode was picked for it
@@ -471,7 +461,7 @@ static void update_state(VP9_COMP *cpi,
}
}
}
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
if (block_size == 16) {
ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
}
@@ -787,18 +777,11 @@ static void pick_mb_modes(VP9_COMP *cpi,
set_offsets(cpi, mb_row + y_idx, mb_col + x_idx, 16,
&recon_yoffset, &recon_uvoffset);
-#if !CONFIG_SUPERBLOCKS
- // Copy current MB to a work buffer
- vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
-#endif
-
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
mbmi = &xd->mode_info_context->mbmi;
-#if CONFIG_SUPERBLOCKS
mbmi->sb_type = BLOCK_SIZE_MB16X16;
-#endif
cpi->update_context = 0; // TODO Do we need this now??
@@ -869,7 +852,6 @@ static void pick_mb_modes(VP9_COMP *cpi,
sizeof(above_context));
}
-#if CONFIG_SUPERBLOCKS
static void pick_sb_modes(VP9_COMP *cpi,
int mb_row,
int mb_col,
@@ -943,7 +925,6 @@ static void pick_sb64_modes(VP9_COMP *cpi,
}
}
#endif // CONFIG_SUPERBLOCKS64
-#endif // CONFIG_SUPERBLOCKS
static void update_stats(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
@@ -1011,7 +992,6 @@ static void encode_sb(VP9_COMP *cpi,
MACROBLOCKD *const xd = &x->e_mbd;
int recon_yoffset, recon_uvoffset;
-#if CONFIG_SUPERBLOCKS
cpi->sb32_count[is_sb]++;
if (is_sb) {
set_offsets(cpi, mb_row, mb_col, 32, &recon_yoffset, &recon_uvoffset);
@@ -1028,9 +1008,7 @@ static void encode_sb(VP9_COMP *cpi,
if (mb_row < cm->mb_rows)
cpi->tplist[mb_row].stop = *tp;
}
- } else
-#endif
- {
+ } else {
int i;
for (i = 0; i < 4; i++) {
@@ -1046,11 +1024,6 @@ static void encode_sb(VP9_COMP *cpi,
xd->mb_index = i;
update_state(cpi, &x->mb_context[xd->sb_index][i], 16, output_enabled);
-#if !CONFIG_SUPERBLOCKS
- // Copy current MB to a work buffer
- vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
-#endif
-
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
@@ -1081,7 +1054,7 @@ static void encode_sb(VP9_COMP *cpi,
#endif
}
-#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#if CONFIG_SUPERBLOCKS64
static void encode_sb64(VP9_COMP *cpi,
int mb_row,
int mb_col,
@@ -1121,7 +1094,7 @@ static void encode_sb64(VP9_COMP *cpi,
}
}
}
-#endif // CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#endif // CONFIG_SUPERBLOCKS64
static void encode_sb_row(VP9_COMP *cpi,
int mb_row,
@@ -1141,20 +1114,18 @@ static void encode_sb_row(VP9_COMP *cpi,
int i;
int sb32_rate = 0, sb32_dist = 0;
int is_sb[4];
-#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#if CONFIG_SUPERBLOCKS64
int sb64_rate = INT_MAX, sb64_dist;
ENTROPY_CONTEXT_PLANES l[4], a[4];
TOKENEXTRA *tp_orig = *tp;
memcpy(&a, cm->above_context + mb_col, sizeof(a));
memcpy(&l, cm->left_context, sizeof(l));
-#endif // CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#endif // CONFIG_SUPERBLOCKS64
for (i = 0; i < 4; i++) {
const int x_idx = (i & 1) << 1, y_idx = i & 2;
int mb_rate = 0, mb_dist = 0;
-#if CONFIG_SUPERBLOCKS
int sb_rate = INT_MAX, sb_dist;
-#endif
if (mb_row + y_idx >= cm->mb_rows || mb_col + x_idx >= cm->mb_cols)
continue;
@@ -1163,11 +1134,8 @@ static void encode_sb_row(VP9_COMP *cpi,
pick_mb_modes(cpi, mb_row + y_idx, mb_col + x_idx,
tp, &mb_rate, &mb_dist);
-#if CONFIG_SUPERBLOCKS
mb_rate += vp9_cost_bit(cm->sb32_coded, 0);
-#endif
-#if CONFIG_SUPERBLOCKS
if (!(((cm->mb_cols & 1) && mb_col + x_idx == cm->mb_cols - 1) ||
((cm->mb_rows & 1) && mb_row + y_idx == cm->mb_rows - 1))) {
/* Pick a mode assuming that it applies to all 4 of the MBs in the SB */
@@ -1183,12 +1151,8 @@ static void encode_sb_row(VP9_COMP *cpi,
is_sb[i] = 1;
sb32_rate += sb_rate;
sb32_dist += sb_dist;
- } else
-#endif
- {
-#if CONFIG_SUPERBLOCKS
+ } else {
is_sb[i] = 0;
-#endif
sb32_rate += mb_rate;
sb32_dist += mb_dist;
}
@@ -1200,11 +1164,10 @@ static void encode_sb_row(VP9_COMP *cpi,
// instead of small->big) means we can use as threshold for small, which
// may enable breakouts if RD is not good enough (i.e. faster)
encode_sb(cpi, mb_row + y_idx, mb_col + x_idx,
- !(CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64),
- tp, is_sb[i]);
+ !CONFIG_SUPERBLOCKS64, tp, is_sb[i]);
}
-#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#if CONFIG_SUPERBLOCKS64
memcpy(cm->above_context + mb_col, &a, sizeof(a));
memcpy(cm->left_context, &l, sizeof(l));
sb32_rate += vp9_cost_bit(cm->sb64_coded, 0);
@@ -1227,11 +1190,11 @@ static void encode_sb_row(VP9_COMP *cpi,
*totalrate += sb32_rate;
}
-#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#if CONFIG_SUPERBLOCKS64
assert(tp_orig == *tp);
encode_sb64(cpi, mb_row, mb_col, tp, is_sb);
assert(tp_orig < *tp);
-#endif // CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#endif // CONFIG_SUPERBLOCKS64
}
}
@@ -1279,13 +1242,11 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) {
vp9_zero(cpi->sub_mv_ref_count)
vp9_zero(cpi->mbsplit_count)
vp9_zero(cpi->common.fc.mv_ref_ct)
-#if CONFIG_SUPERBLOCKS
vp9_zero(cpi->sb_ymode_count)
vp9_zero(cpi->sb32_count);
#if CONFIG_SUPERBLOCKS64
vp9_zero(cpi->sb64_count);
#endif // CONFIG_SUPERBLOCKS64
-#endif // CONFIG_SUPERBLOCKS
#if CONFIG_COMP_INTERINTRA_PRED
vp9_zero(cpi->interintra_count);
vp9_zero(cpi->interintra_select_count);
@@ -1362,7 +1323,7 @@ static void encode_frame_internal(VP9_COMP *cpi) {
vp9_zero(cpi->hybrid_coef_counts_8x8);
vp9_zero(cpi->coef_counts_16x16);
vp9_zero(cpi->hybrid_coef_counts_16x16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
vp9_zero(cpi->coef_counts_32x32);
#endif
#if CONFIG_NEW_MVREF
@@ -1462,7 +1423,6 @@ static void reset_skip_txfm_size_mb(VP9_COMP *cpi,
}
}
-#if CONFIG_SUPERBLOCKS
static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) {
int x, y;
@@ -1530,7 +1490,6 @@ static void reset_skip_txfm_size_sb64(VP9_COMP *cpi, MODE_INFO *mi,
}
}
#endif
-#endif
static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
VP9_COMMON *const cm = &cpi->common;
@@ -1541,33 +1500,28 @@ static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
for (mb_row = 0; mb_row < cm->mb_rows; mb_row += 4, mi_ptr += 4 * mis) {
mi = mi_ptr;
for (mb_col = 0; mb_col < cm->mb_cols; mb_col += 4, mi += 4) {
-#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#if CONFIG_SUPERBLOCKS64
if (mi->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
reset_skip_txfm_size_sb64(cpi, mi, mis, txfm_max,
cm->mb_rows - mb_row, cm->mb_cols - mb_col);
} else
-#endif // CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#endif // CONFIG_SUPERBLOCKS64
{
int i;
for (i = 0; i < 4; i++) {
const int x_idx_sb = (i & 1) << 1, y_idx_sb = i & 2;
-#if CONFIG_SUPERBLOCKS
MODE_INFO *sb_mi = mi + y_idx_sb * mis + x_idx_sb;
-#endif
if (mb_row + y_idx_sb >= cm->mb_rows ||
mb_col + x_idx_sb >= cm->mb_cols)
continue;
-#if CONFIG_SUPERBLOCKS
if (sb_mi->mbmi.sb_type) {
reset_skip_txfm_size_sb32(cpi, sb_mi, mis, txfm_max,
cm->mb_rows - mb_row - y_idx_sb,
cm->mb_cols - mb_col - x_idx_sb);
- } else
-#endif
- {
+ } else {
int m;
for (m = 0; m < 4; m++) {
@@ -1579,9 +1533,7 @@ static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
continue;
mb_mi = mi + y_idx * mis + x_idx;
-#if CONFIG_SUPERBLOCKS
assert(mb_mi->mbmi.sb_type == BLOCK_SIZE_MB16X16);
-#endif
reset_skip_txfm_size_mb(cpi, mb_mi, txfm_max);
}
}
@@ -1647,7 +1599,7 @@ void vp9_encode_frame(VP9_COMP *cpi) {
* keyframe's probabilities as an estimate of what the current keyframe's
* coefficient cost distributions may look like. */
if (frame_type == 0) {
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
txfm_type = ALLOW_32X32;
#else
txfm_type = ALLOW_16X16;
@@ -1682,7 +1634,7 @@ void vp9_encode_frame(VP9_COMP *cpi) {
} else
txfm_type = ALLOW_8X8;
#else
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32] >=
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
ALLOW_32X32 : TX_MODE_SELECT;
@@ -1742,7 +1694,7 @@ void vp9_encode_frame(VP9_COMP *cpi) {
const int count8x8_8x8p = cpi->txfm_count_8x8p[TX_8X8];
const int count16x16_16x16p = cpi->txfm_count_16x16p[TX_16X16];
const int count16x16_lp = cpi->txfm_count_32x32p[TX_16X16];
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
const int count32x32 = cpi->txfm_count_32x32p[TX_32X32];
#else
const int count32x32 = 0;
@@ -1756,13 +1708,13 @@ void vp9_encode_frame(VP9_COMP *cpi) {
count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
cpi->common.txfm_mode = ONLY_4X4;
reset_skip_txfm_size(cpi, TX_4X4);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
} else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
cpi->common.txfm_mode = ALLOW_32X32;
#endif
} else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
cpi->common.txfm_mode = ALLOW_16X16;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
reset_skip_txfm_size(cpi, TX_16X16);
#endif
}
@@ -1814,22 +1766,6 @@ void vp9_build_block_offsets(MACROBLOCK *x) {
vp9_build_block_doffsets(&x->e_mbd);
-#if !CONFIG_SUPERBLOCKS
- // y blocks
- x->thismb_ptr = &x->thismb[0];
- for (br = 0; br < 4; br++) {
- for (bc = 0; bc < 4; bc++) {
- BLOCK *this_block = &x->block[block];
- // this_block->base_src = &x->src.y_buffer;
- // this_block->src_stride = x->src.y_stride;
- // this_block->src = 4 * br * this_block->src_stride + 4 * bc;
- this_block->base_src = &x->thismb_ptr;
- this_block->src_stride = 16;
- this_block->src = 4 * br * 16 + 4 * bc;
- ++block;
- }
- }
-#else
for (br = 0; br < 4; br++) {
for (bc = 0; bc < 4; bc++) {
BLOCK *this_block = &x->block[block];
@@ -1842,7 +1778,6 @@ void vp9_build_block_offsets(MACROBLOCK *x) {
++block;
}
}
-#endif
// u blocks
for (br = 0; br < 2; br++) {
@@ -1896,12 +1831,11 @@ static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
}
#endif
-#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.sb_type) {
++cpi->sb_ymode_count[m];
- } else
-#endif
+ } else {
++cpi->ymode_count[m];
+ }
if (m != I8X8_PRED)
++cpi->y_uv_mode_count[m][uvm];
else {
@@ -1943,7 +1877,6 @@ static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
#endif
}
-#if CONFIG_SUPERBLOCKS
static void update_sb_skip_coeff_state(VP9_COMP *cpi,
ENTROPY_CONTEXT_PLANES ta[4],
ENTROPY_CONTEXT_PLANES tl[4],
@@ -2010,6 +1943,7 @@ static void update_sb64_skip_coeff_state(VP9_COMP *cpi,
int skip[16], int output_enabled) {
MACROBLOCK *const x = &cpi->mb;
+#if CONFIG_TX32X32
if (x->e_mbd.mode_info_context->mbmi.txfm_size == TX_32X32) {
TOKENEXTRA tokens[4][1024+512];
int n_tokens[4], n;
@@ -2057,7 +1991,9 @@ static void update_sb64_skip_coeff_state(VP9_COMP *cpi,
(*tp) += n_tokens[n];
}
}
- } else {
+ } else
+#endif // CONFIG_TX32X32
+ {
TOKENEXTRA tokens[16][16 * 25];
int n_tokens[16], n;
@@ -2113,7 +2049,6 @@ static void update_sb64_skip_coeff_state(VP9_COMP *cpi,
}
}
#endif // CONFIG_SUPERBLOCKS64
-#endif /* CONFIG_SUPERBLOCKS */
static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset,
@@ -2125,9 +2060,7 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
unsigned char ref_pred_flag;
-#if CONFIG_SUPERBLOCKS
assert(!xd->mode_info_context->mbmi.sb_type);
-#endif
#ifdef ENC_DEBUG
enc_debug = (cpi->common.current_video_frame == 46 &&
@@ -2375,7 +2308,6 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
}
}
-#if CONFIG_SUPERBLOCKS
static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset,
int output_enabled, int mb_row, int mb_col) {
@@ -2769,8 +2701,7 @@ static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
vp9_inverse_transform_sbuv_16x16(&x->e_mbd.sb_coeff_data);
vp9_inverse_transform_sby_32x32(&x->e_mbd.sb_coeff_data);
vp9_recon_sby_s_c(&x->e_mbd,
- dst + 32 * x_idx + 32 * y_idx * dst_y_stride,
- dst_y_stride);
+ dst + 32 * x_idx + 32 * y_idx * dst_y_stride);
vp9_recon_sbuv_s_c(&x->e_mbd,
udst + x_idx * 16 + y_idx * 16 * dst_uv_stride,
vdst + x_idx * 16 + y_idx * 16 * dst_uv_stride);
@@ -2867,9 +2798,15 @@ static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
if (output_enabled) {
if (cm->txfm_mode == TX_MODE_SELECT &&
!((cm->mb_no_coeff_skip &&
- ((mi->mbmi.txfm_size == TX_32X32 &&
+ (
+#if CONFIG_TX32X32
+ (mi->mbmi.txfm_size == TX_32X32 &&
skip[0] && skip[1] && skip[2] && skip[3]) ||
- (mi->mbmi.txfm_size != TX_32X32 &&
+#endif // CONFIG_TX32X32
+ (
+#if CONFIG_TX32X32
+ mi->mbmi.txfm_size != TX_32X32 &&
+#endif // CONFIG_TX32X32
skip[0] && skip[1] && skip[2] && skip[3] &&
skip[4] && skip[5] && skip[6] && skip[7] &&
skip[8] && skip[9] && skip[10] && skip[11] &&
@@ -2897,4 +2834,3 @@ static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
}
}
#endif // CONFIG_SUPERBLOCKS64
-#endif
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index 2ca146c3b..216a7fb95 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -108,7 +108,7 @@ void vp9_subtract_mby_s_c(int16_t *diff, const uint8_t *src, int src_stride,
}
}
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
void vp9_subtract_sby_s_c(int16_t *diff, const uint8_t *src, int src_stride,
const uint8_t *pred, int dst_stride) {
int r, c;
@@ -311,7 +311,7 @@ void vp9_transform_mb_16x16(MACROBLOCK *x) {
vp9_transform_mbuv_8x8(x);
}
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
void vp9_transform_sby_32x32(MACROBLOCK *x) {
SUPERBLOCK * const x_sb = &x->sb_coeff_data;
vp9_short_fdct32x32(x_sb->src_diff, x_sb->coeff, 64);
diff --git a/vp9/encoder/vp9_encodemb.h b/vp9/encoder/vp9_encodemb.h
index 75c3a8a8b..084d20ed9 100644
--- a/vp9/encoder/vp9_encodemb.h
+++ b/vp9/encoder/vp9_encodemb.h
@@ -46,7 +46,7 @@ void vp9_transform_mb_16x16(MACROBLOCK *mb);
void vp9_transform_mby_16x16(MACROBLOCK *x);
void vp9_optimize_mby_16x16(MACROBLOCK *x);
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
void vp9_transform_sby_32x32(MACROBLOCK *x);
void vp9_transform_sbuv_16x16(MACROBLOCK *x);
#endif
@@ -55,7 +55,6 @@ void vp9_fidct_mb(MACROBLOCK *x);
void vp9_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch);
-#if CONFIG_SUPERBLOCKS
void vp9_subtract_mbuv_s_c(int16_t *diff, const uint8_t *usrc,
const uint8_t *vsrc, int src_stride,
const uint8_t *upred,
@@ -71,6 +70,5 @@ void vp9_subtract_sbuv_s_c(int16_t *diff, const uint8_t *usrc,
const uint8_t *upred,
const uint8_t *vpred, int dst_stride);
#endif // CONFIG_TX32X32
-#endif // CONFIG_SUPERBLOCKS
#endif // VP9_ENCODER_VP9_ENCODEMB_H_
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index a14867292..44b140319 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -516,11 +516,6 @@ void vp9_first_pass(VP9_COMP *cpi) {
xd->dst.v_buffer = new_yv12->v_buffer + recon_uvoffset;
xd->left_available = (mb_col != 0);
-#if !CONFIG_SUPERBLOCKS
- // Copy current mb to a buffer
- vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
-#endif
-
// do intra 16x16 prediction
this_error = vp9_encode_intra(cpi, x, use_dc_pred);
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index 9769d6344..4694a92c6 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -267,31 +267,9 @@ int vp9_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int offset;
int usehp = xd->allow_high_precision_mv;
-#if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
- uint8_t *y0 = *(d->base_pre) + d->pre +
- (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
- uint8_t *y;
- int buf_r1, buf_r2, buf_c1, buf_c2;
-
- // Clamping to avoid out-of-range data access
- buf_r1 = ((bestmv->as_mv.row - VP9_INTERP_EXTEND) < x->mv_row_min) ?
- (bestmv->as_mv.row - x->mv_row_min) : VP9_INTERP_EXTEND - 1;
- buf_r2 = ((bestmv->as_mv.row + VP9_INTERP_EXTEND) > x->mv_row_max) ?
- (x->mv_row_max - bestmv->as_mv.row) : VP9_INTERP_EXTEND - 1;
- buf_c1 = ((bestmv->as_mv.col - VP9_INTERP_EXTEND) < x->mv_col_min) ?
- (bestmv->as_mv.col - x->mv_col_min) : VP9_INTERP_EXTEND - 1;
- buf_c2 = ((bestmv->as_mv.col + VP9_INTERP_EXTEND) > x->mv_col_max) ?
- (x->mv_col_max - bestmv->as_mv.col) : VP9_INTERP_EXTEND - 1;
- y_stride = 32;
-
- /* Copy to intermediate buffer before searching. */
- vfp->copymem(y0 - buf_c1 - d->pre_stride * buf_r1, d->pre_stride, xd->y_buf, y_stride, 16 + buf_r1 + buf_r2);
- y = xd->y_buf + y_stride * buf_r1 + buf_c1;
-#else
uint8_t *y = *(d->base_pre) + d->pre +
(bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
y_stride = d->pre_stride;
-#endif
rr = ref_mv->as_mv.row;
rc = ref_mv->as_mv.col;
@@ -463,20 +441,9 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
MACROBLOCKD *xd = &x->e_mbd;
int usehp = xd->allow_high_precision_mv;
-#if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
- uint8_t *y0 = *(d->base_pre) + d->pre +
- (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
- uint8_t *y;
-
- y_stride = 32;
- /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
- vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
- y = xd->y_buf + y_stride + 1;
-#else
uint8_t *y = *(d->base_pre) + d->pre +
(bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
y_stride = d->pre_stride;
-#endif
// central mv
bestmv->as_mv.row <<= 3;
@@ -943,20 +910,9 @@ int vp9_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int y_stride;
MACROBLOCKD *xd = &x->e_mbd;
-#if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
- uint8_t *y0 = *(d->base_pre) + d->pre +
- (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
- uint8_t *y;
-
- y_stride = 32;
- /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
- vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
- y = xd->y_buf + y_stride + 1;
-#else
uint8_t *y = *(d->base_pre) + d->pre +
(bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
y_stride = d->pre_stride;
-#endif
// central mv
bestmv->as_mv.row <<= 3;
diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index 0e4b47ddf..a56fcc0c0 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -1761,12 +1761,10 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
cm->prob_last_coded = 128;
cm->prob_gf_coded = 128;
cm->prob_intra_coded = 63;
-#if CONFIG_SUPERBLOCKS
cm->sb32_coded = 200;
#if CONFIG_SUPERBLOCKS64
cm->sb64_coded = 200;
#endif
-#endif
for (i = 0; i < COMP_PRED_CONTEXTS; i++)
cm->prob_comppred[i] = 128;
for (i = 0; i < TX_SIZE_MAX_SB - 1; i++)
@@ -1966,7 +1964,6 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
cpi->fn_ptr[BT].sdx4df = SDX4DF;
-#if CONFIG_SUPERBLOCKS
BFP(BLOCK_32X32, vp9_sad32x32, vp9_variance32x32, vp9_sub_pixel_variance32x32,
vp9_variance_halfpixvar32x32_h, vp9_variance_halfpixvar32x32_v,
vp9_variance_halfpixvar32x32_hv, vp9_sad32x32x3, vp9_sad32x32x8,
@@ -1978,7 +1975,6 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
vp9_variance_halfpixvar64x64_hv, vp9_sad64x64x3, vp9_sad64x64x8,
vp9_sad64x64x4d)
#endif
-#endif
BFP(BLOCK_16X16, vp9_sad16x16, vp9_variance16x16, vp9_sub_pixel_variance16x16,
vp9_variance_halfpixvar16x16_h, vp9_variance_halfpixvar16x16_v,
@@ -3655,14 +3651,12 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
vp9_copy(cpi->common.fc.coef_counts_16x16, cpi->coef_counts_16x16);
vp9_copy(cpi->common.fc.hybrid_coef_counts_16x16,
cpi->hybrid_coef_counts_16x16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
vp9_copy(cpi->common.fc.coef_counts_32x32, cpi->coef_counts_32x32);
#endif
vp9_adapt_coef_probs(&cpi->common);
if (cpi->common.frame_type != KEY_FRAME) {
-#if CONFIG_SUPERBLOCKS
vp9_copy(cpi->common.fc.sb_ymode_counts, cpi->sb_ymode_count);
-#endif
vp9_copy(cpi->common.fc.ymode_counts, cpi->ymode_count);
vp9_copy(cpi->common.fc.uv_mode_counts, cpi->y_uv_mode_count);
vp9_copy(cpi->common.fc.bmode_counts, cpi->bmode_count);
diff --git a/vp9/encoder/vp9_onyx_int.h b/vp9/encoder/vp9_onyx_int.h
index c9ee14425..dbe6e2bd6 100644
--- a/vp9/encoder/vp9_onyx_int.h
+++ b/vp9/encoder/vp9_onyx_int.h
@@ -100,13 +100,11 @@ typedef struct {
vp9_coeff_probs hybrid_coef_probs_8x8[BLOCK_TYPES_8X8];
vp9_coeff_probs coef_probs_16x16[BLOCK_TYPES_16X16];
vp9_coeff_probs hybrid_coef_probs_16x16[BLOCK_TYPES_16X16];
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
vp9_coeff_probs coef_probs_32x32[BLOCK_TYPES_32X32];
#endif
-#if CONFIG_SUPERBLOCKS
vp9_prob sb_ymode_prob[VP9_I32X32_MODES - 1];
-#endif
vp9_prob ymode_prob[VP9_YMODES - 1]; /* interframe intra mode probs */
vp9_prob uv_mode_prob[VP9_YMODES][VP9_UV_MODES - 1];
vp9_prob bmode_prob[VP9_NKF_BINTRAMODES - 1];
@@ -390,15 +388,11 @@ enum BlockSize {
BLOCK_4X4 = PARTITIONING_4X4,
BLOCK_16X16,
BLOCK_MAX_SEGMENTS,
-#if CONFIG_SUPERBLOCKS
BLOCK_32X32 = BLOCK_MAX_SEGMENTS,
#if CONFIG_SUPERBLOCKS64
BLOCK_64X64,
#endif // CONFIG_SUPERBLOCKS64
BLOCK_MAX_SB_SEGMENTS,
-#else // CONFIG_SUPERBLOCKS
- BLOCK_MAX_SB_SEGMENTS = BLOCK_MAX_SEGMENTS,
-#endif // CONFIG_SUPERBLOCKS
};
typedef struct VP9_COMP {
@@ -436,7 +430,7 @@ typedef struct VP9_COMP {
DECLARE_ALIGNED(16, short, zrun_zbin_boost_y2_16x16[QINDEX_RANGE][256]);
DECLARE_ALIGNED(16, short, zrun_zbin_boost_uv_16x16[QINDEX_RANGE][256]);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
DECLARE_ALIGNED(16, short, Y1zbin_32x32[QINDEX_RANGE][1024]);
DECLARE_ALIGNED(16, short, Y2zbin_32x32[QINDEX_RANGE][1024]);
DECLARE_ALIGNED(16, short, UVzbin_32x32[QINDEX_RANGE][1024]);
@@ -577,13 +571,11 @@ typedef struct VP9_COMP {
int cq_target_quality;
-#if CONFIG_SUPERBLOCKS
int sb32_count[2];
#if CONFIG_SUPERBLOCKS64
int sb64_count[2];
#endif
int sb_ymode_count [VP9_I32X32_MODES];
-#endif
int ymode_count[VP9_YMODES]; /* intra MB type cts this frame */
int bmode_count[VP9_NKF_BINTRAMODES];
int i8x8_mode_count[VP9_I8X8_MODES];
@@ -618,7 +610,7 @@ typedef struct VP9_COMP {
vp9_coeff_probs frame_hybrid_coef_probs_16x16[BLOCK_TYPES_16X16];
vp9_coeff_stats frame_hybrid_branch_ct_16x16[BLOCK_TYPES_16X16];
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
vp9_coeff_count coef_counts_32x32[BLOCK_TYPES_32X32];
vp9_coeff_probs frame_coef_probs_32x32[BLOCK_TYPES_32X32];
vp9_coeff_stats frame_branch_ct_32x32[BLOCK_TYPES_32X32];
diff --git a/vp9/encoder/vp9_quantize.c b/vp9/encoder/vp9_quantize.c
index 051bd6461..98396a1a7 100644
--- a/vp9/encoder/vp9_quantize.c
+++ b/vp9/encoder/vp9_quantize.c
@@ -379,7 +379,7 @@ void vp9_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
&d->eob, vp9_default_zig_zag1d_16x16, 1);
}
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
void vp9_quantize_sby_32x32(MACROBLOCK *x) {
x->e_mbd.block[0].eob = 0;
quantize(x->block[0].zrun_zbin_boost_32x32,
@@ -472,7 +472,7 @@ void vp9_init_quantizer(VP9_COMP *cpi) {
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
};
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
static const int zbin_boost_32x32[1024] = {
0, 0, 0, 8, 8, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28,
30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 48, 48, 48, 48, 48, 48,
@@ -569,7 +569,7 @@ void vp9_init_quantizer(VP9_COMP *cpi) {
((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
cpi->zrun_zbin_boost_y1_16x16[Q][0] =
((quant_val * zbin_boost_16x16[0]) + 64) >> 7;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
cpi->Y1zbin_32x32[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->zrun_zbin_boost_y1_32x32[Q][0] =
((quant_val * zbin_boost_32x32[0]) + 64) >> 7;
@@ -677,7 +677,7 @@ void vp9_init_quantizer(VP9_COMP *cpi) {
cpi->zrun_zbin_boost_uv_16x16[Q][i] =
((quant_val * zbin_boost_16x16[i]) + 64) >> 7;
}
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
// 32x32 structures. Same comment above applies.
for (i = 1; i < 1024; i++) {
int rc = vp9_default_zig_zag1d_32x32[i];
@@ -727,7 +727,7 @@ void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) {
x->block[i].zbin = cpi->Y1zbin[QIndex];
x->block[i].zbin_8x8 = cpi->Y1zbin_8x8[QIndex];
x->block[i].zbin_16x16 = cpi->Y1zbin_16x16[QIndex];
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
x->block[i].zbin_32x32 = cpi->Y1zbin_32x32[QIndex];
#endif
x->block[i].round = cpi->Y1round[QIndex];
@@ -735,7 +735,7 @@ void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) {
x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
x->block[i].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_y1_8x8[QIndex];
x->block[i].zrun_zbin_boost_16x16 = cpi->zrun_zbin_boost_y1_16x16[QIndex];
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
x->block[i].zrun_zbin_boost_32x32 = cpi->zrun_zbin_boost_y1_32x32[QIndex];
#endif
x->block[i].zbin_extra = (int16_t)zbin_extra;
@@ -748,7 +748,7 @@ void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) {
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
x->block[i].eob_max_offset_16x16 =
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
x->block[i].eob_max_offset_32x32 =
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
#endif
@@ -756,7 +756,7 @@ void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) {
x->block[i].eob_max_offset = 16;
x->block[i].eob_max_offset_8x8 = 64;
x->block[i].eob_max_offset_16x16 = 256;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
x->block[i].eob_max_offset_32x32 = 1024;
#endif
}
diff --git a/vp9/encoder/vp9_quantize.h b/vp9/encoder/vp9_quantize.h
index dbc3246b5..4ca3081db 100644
--- a/vp9/encoder/vp9_quantize.h
+++ b/vp9/encoder/vp9_quantize.h
@@ -78,7 +78,7 @@ void vp9_quantize_mb_16x16(MACROBLOCK *x);
extern prototype_quantize_block(vp9_quantize_quantb_16x16);
extern prototype_quantize_mb(vp9_quantize_mby_16x16);
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
void vp9_quantize_sby_32x32(MACROBLOCK *x);
void vp9_quantize_sbuv_16x16(MACROBLOCK *x);
#endif
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index ee87fba03..353ab979f 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -139,9 +139,7 @@ void vp9_save_coding_context(VP9_COMP *cpi) {
vp9_copy(cc->vp9_mode_contexts, cm->fc.vp9_mode_contexts);
vp9_copy(cc->ymode_prob, cm->fc.ymode_prob);
-#if CONFIG_SUPERBLOCKS
vp9_copy(cc->sb_ymode_prob, cm->fc.sb_ymode_prob);
-#endif
vp9_copy(cc->bmode_prob, cm->fc.bmode_prob);
vp9_copy(cc->uv_mode_prob, cm->fc.uv_mode_prob);
vp9_copy(cc->i8x8_mode_prob, cm->fc.i8x8_mode_prob);
@@ -175,7 +173,7 @@ void vp9_save_coding_context(VP9_COMP *cpi) {
vp9_copy(cc->hybrid_coef_probs_8x8, cm->fc.hybrid_coef_probs_8x8);
vp9_copy(cc->coef_probs_16x16, cm->fc.coef_probs_16x16);
vp9_copy(cc->hybrid_coef_probs_16x16, cm->fc.hybrid_coef_probs_16x16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
vp9_copy(cc->coef_probs_32x32, cm->fc.coef_probs_32x32);
#endif
vp9_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
@@ -200,9 +198,7 @@ void vp9_restore_coding_context(VP9_COMP *cpi) {
vp9_copy(cm->fc.vp9_mode_contexts, cc->vp9_mode_contexts);
vp9_copy(cm->fc.ymode_prob, cc->ymode_prob);
-#if CONFIG_SUPERBLOCKS
vp9_copy(cm->fc.sb_ymode_prob, cc->sb_ymode_prob);
-#endif
vp9_copy(cm->fc.bmode_prob, cc->bmode_prob);
vp9_copy(cm->fc.i8x8_mode_prob, cc->i8x8_mode_prob);
vp9_copy(cm->fc.uv_mode_prob, cc->uv_mode_prob);
@@ -237,7 +233,7 @@ void vp9_restore_coding_context(VP9_COMP *cpi) {
vp9_copy(cm->fc.hybrid_coef_probs_8x8, cc->hybrid_coef_probs_8x8);
vp9_copy(cm->fc.coef_probs_16x16, cc->coef_probs_16x16);
vp9_copy(cm->fc.hybrid_coef_probs_16x16, cc->hybrid_coef_probs_16x16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
vp9_copy(cm->fc.coef_probs_32x32, cc->coef_probs_32x32);
#endif
vp9_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob);
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 8e91d828f..c695c049a 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -385,7 +385,7 @@ void vp9_initialize_rd_consts(VP9_COMP *cpi, int QIndex) {
fill_token_costs(cpi->mb.hybrid_token_costs[TX_16X16],
cpi->common.fc.hybrid_coef_probs_16x16, BLOCK_TYPES_16X16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
fill_token_costs(cpi->mb.token_costs[TX_32X32],
cpi->common.fc.coef_probs_32x32, BLOCK_TYPES_32X32);
#endif
@@ -527,7 +527,7 @@ static int cost_coeffs(MACROBLOCK *mb,
int pt;
const int eob = b->eob;
MACROBLOCKD *xd = &mb->e_mbd;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
const int ib = (int)(b - xd->block);
#endif
int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
@@ -572,14 +572,14 @@ static int cost_coeffs(MACROBLOCK *mb,
scan = vp9_default_zig_zag1d_16x16;
band = vp9_coef_bands_16x16;
seg_eob = 256;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
if (type == PLANE_TYPE_UV) {
const int uv_idx = ib - 16;
qcoeff_ptr = xd->sb_coeff_data.qcoeff + 1024 + 64 * uv_idx;
}
#endif
break;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
case TX_32X32:
scan = vp9_default_zig_zag1d_32x32;
band = vp9_coef_bands_32x32;
@@ -853,7 +853,7 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
}
}
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
if (max_txfm_size == TX_32X32 &&
(cm->txfm_mode == ALLOW_32X32 ||
(cm->txfm_mode == TX_MODE_SELECT &&
@@ -863,7 +863,7 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
} else
#endif
if ( cm->txfm_mode == ALLOW_16X16 ||
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
(max_txfm_size == TX_16X16 && cm->txfm_mode == ALLOW_32X32) ||
#endif
(cm->txfm_mode == TX_MODE_SELECT &&
@@ -884,7 +884,7 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
txfm_cache[ONLY_4X4] = rd[TX_4X4][0];
txfm_cache[ALLOW_8X8] = rd[TX_8X8][0];
txfm_cache[ALLOW_16X16] = rd[TX_16X16][0];
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
txfm_cache[ALLOW_32X32] = rd[max_txfm_size][0];
if (max_txfm_size == TX_32X32 &&
rd[TX_32X32][1] < rd[TX_16X16][1] && rd[TX_32X32][1] < rd[TX_8X8][1] &&
@@ -925,7 +925,6 @@ static void copy_predictor(uint8_t *dst, const uint8_t *predictor) {
d[12] = p[12];
}
-#if CONFIG_SUPERBLOCKS
#if CONFIG_TX32X32
static int rdcost_sby_32x32(MACROBLOCK *x, int backup) {
MACROBLOCKD * const xd = &x->e_mbd;
@@ -1075,6 +1074,7 @@ static void super_block_yrd(VP9_COMP *cpi,
xd->left_context = orig_left;
}
+#if CONFIG_SUPERBLOCKS64
static void super_block_64_yrd(VP9_COMP *cpi,
MACROBLOCK *x, int *rate, int *distortion,
int *skip,
@@ -1172,7 +1172,7 @@ static void super_block_64_yrd(VP9_COMP *cpi,
xd->above_context = orig_above;
xd->left_context = orig_left;
}
-#endif
+#endif // CONFIG_SUPERBLOCKS64
static void copy_predictor_8x8(uint8_t *dst, const uint8_t *predictor) {
const unsigned int *p = (const unsigned int *)predictor;
@@ -1426,7 +1426,6 @@ static int64_t rd_pick_intra4x4mby_modes(VP9_COMP *cpi, MACROBLOCK *mb, int *Rat
return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
}
-#if CONFIG_SUPERBLOCKS
static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi,
MACROBLOCK *x,
int *rate,
@@ -1508,7 +1507,6 @@ static int64_t rd_pick_intra_sb64y_mode(VP9_COMP *cpi,
return best_rd;
}
#endif // CONFIG_SUPERBLOCKS64
-#endif
static int64_t rd_pick_intra16x16mby_mode(VP9_COMP *cpi,
MACROBLOCK *x,
@@ -1882,7 +1880,6 @@ static int64_t rd_inter16x16_uv_8x8(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
-#if CONFIG_SUPERBLOCKS
#if CONFIG_TX32X32
static int rd_cost_sbuv_16x16(MACROBLOCK *x, int backup) {
int b;
@@ -1939,56 +1936,56 @@ static int64_t rd_inter32x32_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
usrc, vsrc, src_uv_stride,
udst, vdst, dst_uv_stride);
rd_inter32x32_uv_16x16(x, rate, distortion, skip, 1);
- } else {
+ } else
#endif
- int n, r = 0, d = 0;
- int skippable = 1;
- ENTROPY_CONTEXT_PLANES t_above[2], t_left[2];
- ENTROPY_CONTEXT_PLANES *ta = xd->above_context;
- ENTROPY_CONTEXT_PLANES *tl = xd->left_context;
+ {
+ int n, r = 0, d = 0;
+ int skippable = 1;
+ ENTROPY_CONTEXT_PLANES t_above[2], t_left[2];
+ ENTROPY_CONTEXT_PLANES *ta = xd->above_context;
+ ENTROPY_CONTEXT_PLANES *tl = xd->left_context;
- memcpy(t_above, xd->above_context, sizeof(t_above));
- memcpy(t_left, xd->left_context, sizeof(t_left));
+ memcpy(t_above, xd->above_context, sizeof(t_above));
+ memcpy(t_left, xd->left_context, sizeof(t_left));
- for (n = 0; n < 4; n++) {
- int x_idx = n & 1, y_idx = n >> 1;
- int d_tmp, s_tmp, r_tmp;
-
- xd->above_context = ta + x_idx;
- xd->left_context = tl + y_idx;
- vp9_subtract_mbuv_s_c(x->src_diff,
- usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
- vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
- src_uv_stride,
- udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
- vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
- dst_uv_stride);
-
- if (mbmi->txfm_size == TX_4X4) {
- rd_inter16x16_uv_4x4(cpi, x, &r_tmp, &d_tmp, fullpixel, &s_tmp, 0);
- } else {
- rd_inter16x16_uv_8x8(cpi, x, &r_tmp, &d_tmp, fullpixel, &s_tmp, 0);
- }
+ for (n = 0; n < 4; n++) {
+ int x_idx = n & 1, y_idx = n >> 1;
+ int d_tmp, s_tmp, r_tmp;
- r += r_tmp;
- d += d_tmp;
- skippable = skippable && s_tmp;
- }
+ xd->above_context = ta + x_idx;
+ xd->left_context = tl + y_idx;
+ vp9_subtract_mbuv_s_c(x->src_diff,
+ usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
+ vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
+ src_uv_stride,
+ udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
+ vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
+ dst_uv_stride);
- *rate = r;
- *distortion = d;
- *skip = skippable;
- xd->left_context = tl;
- xd->above_context = ta;
- memcpy(xd->above_context, t_above, sizeof(t_above));
- memcpy(xd->left_context, t_left, sizeof(t_left));
-#if CONFIG_TX32X32
+ if (mbmi->txfm_size == TX_4X4) {
+ rd_inter16x16_uv_4x4(cpi, x, &r_tmp, &d_tmp, fullpixel, &s_tmp, 0);
+ } else {
+ rd_inter16x16_uv_8x8(cpi, x, &r_tmp, &d_tmp, fullpixel, &s_tmp, 0);
+ }
+
+ r += r_tmp;
+ d += d_tmp;
+ skippable = skippable && s_tmp;
+ }
+
+ *rate = r;
+ *distortion = d;
+ *skip = skippable;
+ xd->left_context = tl;
+ xd->above_context = ta;
+ memcpy(xd->above_context, t_above, sizeof(t_above));
+ memcpy(xd->left_context, t_left, sizeof(t_left));
}
-#endif
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
+#if CONFIG_SUPERBLOCKS64
static void super_block_64_uvrd(MACROBLOCK *x, int *rate,
int *distortion, int *skip);
static int64_t rd_inter64x64_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
@@ -1996,7 +1993,7 @@ static int64_t rd_inter64x64_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
super_block_64_uvrd(x, rate, distortion, skip);
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
-#endif
+#endif // CONFIG_SUPERBLOCKS64
static int64_t rd_inter4x4_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int *skip, int fullpixel) {
@@ -2130,7 +2127,6 @@ static void rd_pick_intra_mbuv_mode_8x8(VP9_COMP *cpi,
mbmi->uv_mode = mode_selected;
}
-#if CONFIG_SUPERBLOCKS
// TODO(rbultje) very similar to rd_inter32x32_uv(), merge?
static void super_block_uvrd(MACROBLOCK *x,
int *rate,
@@ -2148,57 +2144,57 @@ static void super_block_uvrd(MACROBLOCK *x,
usrc, vsrc, src_uv_stride,
udst, vdst, dst_uv_stride);
rd_inter32x32_uv_16x16(x, rate, distortion, skippable, 1);
- } else {
+ } else
#endif
- int d = 0, r = 0, n, s = 1;
- ENTROPY_CONTEXT_PLANES t_above[2], t_left[2];
- ENTROPY_CONTEXT_PLANES *ta_orig = xd->above_context;
- ENTROPY_CONTEXT_PLANES *tl_orig = xd->left_context;
+ {
+ int d = 0, r = 0, n, s = 1;
+ ENTROPY_CONTEXT_PLANES t_above[2], t_left[2];
+ ENTROPY_CONTEXT_PLANES *ta_orig = xd->above_context;
+ ENTROPY_CONTEXT_PLANES *tl_orig = xd->left_context;
- memcpy(t_above, xd->above_context, sizeof(t_above));
- memcpy(t_left, xd->left_context, sizeof(t_left));
+ memcpy(t_above, xd->above_context, sizeof(t_above));
+ memcpy(t_left, xd->left_context, sizeof(t_left));
- for (n = 0; n < 4; n++) {
- int x_idx = n & 1, y_idx = n >> 1;
+ for (n = 0; n < 4; n++) {
+ int x_idx = n & 1, y_idx = n >> 1;
- vp9_subtract_mbuv_s_c(x->src_diff,
- usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
- vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
- src_uv_stride,
- udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
- vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
- dst_uv_stride);
- if (mbmi->txfm_size == TX_4X4) {
- vp9_transform_mbuv_4x4(x);
- vp9_quantize_mbuv_4x4(x);
- s &= vp9_mbuv_is_skippable_4x4(xd);
- } else {
- vp9_transform_mbuv_8x8(x);
- vp9_quantize_mbuv_8x8(x);
- s &= vp9_mbuv_is_skippable_8x8(xd);
- }
+ vp9_subtract_mbuv_s_c(x->src_diff,
+ usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
+ vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
+ src_uv_stride,
+ udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
+ vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
+ dst_uv_stride);
+ if (mbmi->txfm_size == TX_4X4) {
+ vp9_transform_mbuv_4x4(x);
+ vp9_quantize_mbuv_4x4(x);
+ s &= vp9_mbuv_is_skippable_4x4(xd);
+ } else {
+ vp9_transform_mbuv_8x8(x);
+ vp9_quantize_mbuv_8x8(x);
+ s &= vp9_mbuv_is_skippable_8x8(xd);
+ }
- d += vp9_mbuverror(x) >> 2;
- xd->above_context = t_above + x_idx;
- xd->left_context = t_left + y_idx;
- if (mbmi->txfm_size == TX_4X4) {
- r += rd_cost_mbuv_4x4(x, 0);
- } else {
- r += rd_cost_mbuv_8x8(x, 0);
+ d += vp9_mbuverror(x) >> 2;
+ xd->above_context = t_above + x_idx;
+ xd->left_context = t_left + y_idx;
+ if (mbmi->txfm_size == TX_4X4) {
+ r += rd_cost_mbuv_4x4(x, 0);
+ } else {
+ r += rd_cost_mbuv_8x8(x, 0);
+ }
}
- }
- xd->above_context = ta_orig;
- xd->left_context = tl_orig;
+ xd->above_context = ta_orig;
+ xd->left_context = tl_orig;
- *distortion = d;
- *rate = r;
- *skippable = s;
-#if CONFIG_TX32X32
+ *distortion = d;
+ *rate = r;
+ *skippable = s;
}
-#endif
}
+#if CONFIG_SUPERBLOCKS64
static void super_block_64_uvrd(MACROBLOCK *x,
int *rate,
int *distortion,
@@ -2239,8 +2235,9 @@ static void super_block_64_uvrd(MACROBLOCK *x,
d += d_tmp;
s = s && s_tmp;
}
- } else {
+ } else
#endif
+ {
for (n = 0; n < 16; n++) {
int x_idx = n & 3, y_idx = n >> 2;
@@ -2270,9 +2267,7 @@ static void super_block_64_uvrd(MACROBLOCK *x,
r += rd_cost_mbuv_8x8(x, 0);
}
}
-#if CONFIG_TX32X32
}
-#endif
*distortion = d;
*rate = r;
@@ -2281,6 +2276,7 @@ static void super_block_64_uvrd(MACROBLOCK *x,
xd->left_context = tl_orig;
xd->above_context = ta_orig;
}
+#endif // CONFIG_SUPERBLOCKS64
static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi,
MACROBLOCK *x,
@@ -2357,7 +2353,6 @@ static int64_t rd_pick_intra_sb64uv_mode(VP9_COMP *cpi,
return best_rd;
}
#endif // CONFIG_SUPERBLOCKS64
-#endif
int vp9_cost_mv_ref(VP9_COMP *cpi,
MB_PREDICTION_MODE m,
@@ -3651,7 +3646,6 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
}
#endif
-#if CONFIG_SUPERBLOCKS
#if CONFIG_SUPERBLOCKS64
if (block_size == BLOCK_64X64) {
vp9_build_inter64x64_predictors_sb(xd,
@@ -3669,9 +3663,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
xd->dst.v_buffer,
xd->dst.y_stride,
xd->dst.uv_stride);
- } else
-#endif // CONFIG_SUPERBLOCKS
- {
+ } else {
assert(block_size == BLOCK_16X16);
vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
if (is_comp_pred)
@@ -3693,7 +3685,6 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (threshold < x->encode_breakout)
threshold = x->encode_breakout;
-#if CONFIG_SUPERBLOCKS
#if CONFIG_SUPERBLOCKS64
if (block_size == BLOCK_64X64) {
var = vp9_variance64x64(*(b->base_src), b->src_stride,
@@ -3703,9 +3694,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (block_size == BLOCK_32X32) {
var = vp9_variance32x32(*(b->base_src), b->src_stride,
xd->dst.y_buffer, xd->dst.y_stride, &sse);
- } else
-#endif // CONFIG_SUPERBLOCK
- {
+ } else {
assert(block_size == BLOCK_16X16);
var = vp9_variance16x16(*(b->base_src), b->src_stride,
xd->predictor, 16, &sse);
@@ -3720,7 +3709,6 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
// Check u and v to make sure skip is ok
int sse2;
-#if CONFIG_SUPERBLOCKS
#if CONFIG_SUPERBLOCKS64
if (block_size == BLOCK_64X64) {
unsigned int sse2u, sse2v;
@@ -3738,9 +3726,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
var = vp9_variance16x16(x->src.v_buffer, x->src.uv_stride,
xd->dst.v_buffer, xd->dst.uv_stride, &sse2v);
sse2 = sse2u + sse2v;
- } else
-#endif // CONFIG_SUPERBLOCKS
- {
+ } else {
assert(block_size == BLOCK_16X16);
sse2 = vp9_uvsse(x);
}
@@ -3773,7 +3759,6 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
}
if (!x->skip) {
-#if CONFIG_SUPERBLOCKS
#if CONFIG_SUPERBLOCKS64
if (block_size == BLOCK_64X64) {
int skippable_y, skippable_uv;
@@ -3807,9 +3792,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
*rate2 += *rate_uv;
*distortion += *distortion_uv;
*skippable = skippable_y && skippable_uv;
- } else
-#endif // CONFIG_SUPERBLOCKS
- {
+ } else {
assert(block_size == BLOCK_16X16);
vp9_build_1st_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
@@ -4652,7 +4635,6 @@ end:
best_pred_diff, best_txfm_diff);
}
-#if CONFIG_SUPERBLOCKS
void vp9_rd_pick_intra_mode_sb32(VP9_COMP *cpi, MACROBLOCK *x,
int *returnrate,
int *returndist) {
@@ -4711,8 +4693,7 @@ void vp9_rd_pick_intra_mode_sb64(VP9_COMP *cpi, MACROBLOCK *x,
*returndist = dist_y + (dist_uv >> 2);
}
}
-#endif
-#endif
+#endif // CONFIG_SUPERBLOCKS64
void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x,
int *returnrate, int *returndist) {
@@ -4870,7 +4851,6 @@ void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x,
*returndist = dist;
}
-#if CONFIG_SUPERBLOCKS
static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
int recon_yoffset, int recon_uvoffset,
int *returnrate,
@@ -4920,7 +4900,7 @@ static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
#if CONFIG_TX32X32
int rate_uv_16x16 = 0, rate_uv_tokenonly_16x16 = 0;
int dist_uv_16x16 = 0, uv_skip_16x16 = 0;
- MB_PREDICTION_MODE mode_uv_16x16;
+ MB_PREDICTION_MODE mode_uv_16x16 = NEARESTMV;
#endif
x->skip = 0;
@@ -5474,7 +5454,6 @@ int64_t vp9_rd_pick_inter_mode_sb64(VP9_COMP *cpi, MACROBLOCK *x,
returnrate, returndistortion, BLOCK_64X64);
}
#endif // CONFIG_SUPERBLOCKS64
-#endif
void vp9_pick_mode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
int recon_yoffset,
diff --git a/vp9/encoder/vp9_segmentation.c b/vp9/encoder/vp9_segmentation.c
index 19529fcbe..2ef7be2a3 100644
--- a/vp9/encoder/vp9_segmentation.c
+++ b/vp9/encoder/vp9_segmentation.c
@@ -221,7 +221,7 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
for (mb_row = 0; mb_row < cm->mb_rows; mb_row += 4, mi_ptr += 4 * mis) {
mi = mi_ptr;
for (mb_col = 0; mb_col < cm->mb_cols; mb_col += 4, mi += 4) {
-#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#if CONFIG_SUPERBLOCKS64
if (mi->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts, 4, mb_row, mb_col);
@@ -230,23 +230,18 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
{
for (i = 0; i < 4; i++) {
int x_idx = (i & 1) << 1, y_idx = i & 2;
-#if CONFIG_SUPERBLOCKS
MODE_INFO *sb_mi = mi + y_idx * mis + x_idx;
-#endif
if (mb_col + x_idx >= cm->mb_cols ||
mb_row + y_idx >= cm->mb_rows) {
continue;
}
-#if CONFIG_SUPERBLOCKS
if (sb_mi->mbmi.sb_type) {
assert(sb_mi->mbmi.sb_type == BLOCK_SIZE_SB32X32);
count_segs(cpi, sb_mi, no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts, 2, mb_row + y_idx, mb_col + x_idx);
- } else
-#endif
- {
+ } else {
int j;
for (j = 0; j < 4; j++) {
@@ -258,9 +253,7 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
continue;
}
-#if CONFIG_SUPERBLOCKS
assert(mb_mi->mbmi.sb_type == BLOCK_SIZE_MB16X16);
-#endif
count_segs(cpi, mb_mi, no_pred_segcounts,
temporal_predictor_count, t_unpred_seg_counts,
1, mb_row + y_idx_mb, mb_col + x_idx_mb);
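In vp9_choose_segmap_coding_method(), the segment-map statistics are gathered by walking the mode-info grid in 64x64 steps and descending into 32x32 quadrants and 16x16 macroblocks according to each block's sb_type; the edits above only strip the guards around the 32x32 branch. A simplified, self-contained sketch of that traversal, where count() is a hypothetical stand-in for count_segs() and a flat sb_type grid replaces the MODE_INFO array:

/* Illustrative stand-ins for BLOCK_SIZE_MB16X16/SB32X32/SB64X64. */
enum { UNIT_MB16X16 = 0, UNIT_SB32X32 = 1, UNIT_SB64X64 = 2 };

/* grid[] holds one sb_type per macroblock; count() receives the anchor
 * macroblock coordinates and the unit size in macroblocks (1, 2 or 4). */
static void visit_segments(const int *grid, int mb_rows, int mb_cols,
                           void (*count)(int mb_row, int mb_col, int size)) {
  int mb_row, mb_col, i, j;
  for (mb_row = 0; mb_row < mb_rows; mb_row += 4) {
    for (mb_col = 0; mb_col < mb_cols; mb_col += 4) {
      if (grid[mb_row * mb_cols + mb_col] == UNIT_SB64X64) {
        count(mb_row, mb_col, 4);                /* one 64x64 region */
        continue;
      }
      for (i = 0; i < 4; i++) {                  /* 32x32 quadrants */
        const int x = (i & 1) << 1, y = i & 2;
        if (mb_col + x >= mb_cols || mb_row + y >= mb_rows)
          continue;
        if (grid[(mb_row + y) * mb_cols + mb_col + x] == UNIT_SB32X32) {
          count(mb_row + y, mb_col + x, 2);
        } else {
          for (j = 0; j < 4; j++) {              /* 16x16 macroblocks */
            const int xm = x + (j & 1), ym = y + (j >> 1);
            if (mb_col + xm >= mb_cols || mb_row + ym >= mb_rows)
              continue;
            count(mb_row + ym, mb_col + xm, 1);
          }
        }
      }
    }
  }
}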
diff --git a/vp9/encoder/vp9_tokenize.c b/vp9/encoder/vp9_tokenize.c
index 7a364b3e8..dda81c838 100644
--- a/vp9/encoder/vp9_tokenize.c
+++ b/vp9/encoder/vp9_tokenize.c
@@ -141,7 +141,7 @@ static void tokenize_b(VP9_COMP *cpi,
vp9_block2left[tx_size][ib];
ENTROPY_CONTEXT a_ec = *a, l_ec = *l;
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
ENTROPY_CONTEXT *const a1 = (ENTROPY_CONTEXT *)(&xd->above_context[1]) +
vp9_block2above[tx_size][ib];
ENTROPY_CONTEXT *const l1 = (ENTROPY_CONTEXT *)(&xd->left_context[1]) +
@@ -195,7 +195,7 @@ static void tokenize_b(VP9_COMP *cpi,
if (type != PLANE_TYPE_UV) {
a_ec = (a[0] + a[1] + a[2] + a[3]) != 0;
l_ec = (l[0] + l[1] + l[2] + l[3]) != 0;
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
} else {
a_ec = (a[0] + a[1] + a1[0] + a1[1]) != 0;
l_ec = (l[0] + l[1] + l1[0] + l1[1]) != 0;
@@ -212,14 +212,14 @@ static void tokenize_b(VP9_COMP *cpi,
counts = cpi->coef_counts_16x16;
probs = cpi->common.fc.coef_probs_16x16;
}
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
if (type == PLANE_TYPE_UV) {
int uv_idx = (ib - 16) >> 2;
qcoeff_ptr = xd->sb_coeff_data.qcoeff + 1024 + 256 * uv_idx;
}
#endif
break;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
case TX_32X32:
#if CONFIG_CNVCONTEXT
a_ec = a[0] + a[1] + a[2] + a[3] +
@@ -294,13 +294,13 @@ static void tokenize_b(VP9_COMP *cpi,
if (type != PLANE_TYPE_UV) {
a[1] = a[2] = a[3] = a_ec;
l[1] = l[2] = l[3] = l_ec;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
} else {
a1[0] = a1[1] = a[1] = a_ec;
l1[0] = l1[1] = l[1] = l_ec;
#endif
}
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
} else if (tx_size == TX_32X32) {
a[1] = a[2] = a[3] = a_ec;
l[1] = l[2] = l[3] = l_ec;
@@ -378,7 +378,7 @@ static int mb_is_skippable_16x16(MACROBLOCKD *xd) {
return (vp9_mby_is_skippable_16x16(xd) & vp9_mbuv_is_skippable_8x8(xd));
}
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
int vp9_sby_is_skippable_32x32(MACROBLOCKD *xd) {
int skip = 1;
skip &= !xd->block[0].eob;
@@ -768,7 +768,7 @@ static __inline void stuff_b(VP9_COMP *cpi,
ENTROPY_CONTEXT *const l = (ENTROPY_CONTEXT *)xd->left_context +
vp9_block2left[tx_size][ib];
ENTROPY_CONTEXT a_ec = *a, l_ec = *l;
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
ENTROPY_CONTEXT *const a1 = (ENTROPY_CONTEXT *)(&xd->above_context[1]) +
vp9_block2above[tx_size][ib];
ENTROPY_CONTEXT *const l1 = (ENTROPY_CONTEXT *)(&xd->left_context[1]) +
@@ -808,7 +808,7 @@ static __inline void stuff_b(VP9_COMP *cpi,
if (type != PLANE_TYPE_UV) {
a_ec = (a[0] + a[1] + a[2] + a[3]) != 0;
l_ec = (l[0] + l[1] + l[2] + l[3]) != 0;
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
} else {
a_ec = (a[0] + a[1] + a1[0] + a1[1]) != 0;
l_ec = (l[0] + l[1] + l1[0] + l1[1]) != 0;
@@ -824,7 +824,7 @@ static __inline void stuff_b(VP9_COMP *cpi,
probs = cpi->common.fc.coef_probs_16x16;
}
break;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
case TX_32X32:
#if CONFIG_CNVCONTEXT
a_ec = a[0] + a[1] + a[2] + a[3] +
@@ -857,13 +857,13 @@ static __inline void stuff_b(VP9_COMP *cpi,
if (type != PLANE_TYPE_UV) {
a[1] = a[2] = a[3] = 0;
l[1] = l[2] = l[3] = 0;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
} else {
a1[0] = a1[1] = a[1] = a_ec;
l1[0] = l1[1] = l[1] = l_ec;
#endif
}
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
} else if (tx_size == TX_32X32) {
a[1] = a[2] = a[3] = a_ec;
l[1] = l[2] = l[3] = l_ec;
@@ -983,7 +983,7 @@ void vp9_stuff_mb(VP9_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) {
}
}
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
static void stuff_sb_32x32(VP9_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run) {
int b;
@@ -1005,7 +1005,7 @@ void vp9_stuff_sb(VP9_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) {
}
#endif
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
void vp9_fix_contexts_sb(MACROBLOCKD *xd) {
vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * 2);
vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * 2);
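Every tokenize_b()/stuff_b() hunk above keeps the same context-merging idiom: for a 32x32 transform (and for 16x16 chroma inside a superblock) the per-4x4 above/left entropy contexts from both context rows are folded into a single nonzero flag before coding, then written back across the blocks the transform covers. A reduced sketch of the merge for the above contexts, with a simplified context type:

#include <stdint.h>

typedef int8_t ENTROPY_CONTEXT;  /* simplified stand-in for the real type */

/* Fold the eight 4x4 above-contexts covered by a 32x32 transform into
 * one nonzero flag.  a points at the first context row, a1 at the
 * second row (stored in xd->above_context[1] in the code above). */
static int merge_above_ctx_32x32(const ENTROPY_CONTEXT *a,
                                 const ENTROPY_CONTEXT *a1) {
  return (a[0] + a[1] + a[2] + a[3] +
          a1[0] + a1[1] + a1[2] + a1[3]) != 0;
}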
diff --git a/vp9/encoder/vp9_tokenize.h b/vp9/encoder/vp9_tokenize.h
index dffd294dd..9bc756e82 100644
--- a/vp9/encoder/vp9_tokenize.h
+++ b/vp9/encoder/vp9_tokenize.h
@@ -36,7 +36,7 @@ extern int vp9_mbuv_is_skippable_4x4(MACROBLOCKD *xd);
extern int vp9_mby_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block);
extern int vp9_mbuv_is_skippable_8x8(MACROBLOCKD *xd);
extern int vp9_mby_is_skippable_16x16(MACROBLOCKD *xd);
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
extern int vp9_sby_is_skippable_32x32(MACROBLOCKD *xd);
extern int vp9_sbuv_is_skippable_16x16(MACROBLOCKD *xd);
#endif
@@ -50,12 +50,12 @@ extern void vp9_tokenize_sb(struct VP9_COMP *cpi, MACROBLOCKD *xd,
extern void vp9_stuff_mb(struct VP9_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run);
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
extern void vp9_stuff_sb(struct VP9_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run);
#endif
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
extern void vp9_fix_contexts_sb(MACROBLOCKD *xd);
#endif
#ifdef ENTROPY_STATS
diff --git a/vp9/encoder/vp9_variance_c.c b/vp9/encoder/vp9_variance_c.c
index 9060d4c74..4eee6ae56 100644
--- a/vp9/encoder/vp9_variance_c.c
+++ b/vp9/encoder/vp9_variance_c.c
@@ -24,7 +24,7 @@ unsigned int vp9_get_mb_ss_c(const int16_t *src_ptr) {
return sum;
}
-#if CONFIG_SUPERBLOCKS
+#if CONFIG_SUPERBLOCKS64
unsigned int vp9_variance64x64_c(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr,
@@ -37,6 +37,7 @@ unsigned int vp9_variance64x64_c(const uint8_t *src_ptr,
*sse = var;
return (var - (((int64_t)avg * avg) >> 12));
}
+#endif // CONFIG_SUPERBLOCKS64
unsigned int vp9_variance32x32_c(const uint8_t *src_ptr,
int source_stride,
@@ -50,7 +51,6 @@ unsigned int vp9_variance32x32_c(const uint8_t *src_ptr,
*sse = var;
return (var - (((int64_t)avg * avg) >> 10));
}
-#endif
unsigned int vp9_variance16x16_c(const uint8_t *src_ptr,
int source_stride,
@@ -197,7 +197,7 @@ unsigned int vp9_sub_pixel_variance16x16_c(const uint8_t *src_ptr,
return vp9_variance16x16_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
}
-#if CONFIG_SUPERBLOCKS
+#if CONFIG_SUPERBLOCKS64
unsigned int vp9_sub_pixel_variance64x64_c(const uint8_t *src_ptr,
int src_pixels_per_line,
int xoffset,
@@ -218,6 +218,7 @@ unsigned int vp9_sub_pixel_variance64x64_c(const uint8_t *src_ptr,
return vp9_variance64x64_c(temp2, 64, dst_ptr, dst_pixels_per_line, sse);
}
+#endif // CONFIG_SUPERBLOCKS64
unsigned int vp9_sub_pixel_variance32x32_c(const uint8_t *src_ptr,
int src_pixels_per_line,
@@ -238,7 +239,6 @@ unsigned int vp9_sub_pixel_variance32x32_c(const uint8_t *src_ptr,
return vp9_variance32x32_c(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
}
-#endif
unsigned int vp9_variance_halfpixvar16x16_h_c(const uint8_t *src_ptr,
int source_stride,
@@ -249,7 +249,6 @@ unsigned int vp9_variance_halfpixvar16x16_h_c(const uint8_t *src_ptr,
ref_ptr, recon_stride, sse);
}
-#if CONFIG_SUPERBLOCKS
unsigned int vp9_variance_halfpixvar32x32_h_c(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr,
@@ -259,6 +258,7 @@ unsigned int vp9_variance_halfpixvar32x32_h_c(const uint8_t *src_ptr,
ref_ptr, recon_stride, sse);
}
+#if CONFIG_SUPERBLOCKS64
unsigned int vp9_variance_halfpixvar64x64_h_c(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr,
@@ -267,7 +267,7 @@ unsigned int vp9_variance_halfpixvar64x64_h_c(const uint8_t *src_ptr,
return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 8, 0,
ref_ptr, recon_stride, sse);
}
-#endif
+#endif // CONFIG_SUPERBLOCKS64
unsigned int vp9_variance_halfpixvar16x16_v_c(const uint8_t *src_ptr,
@@ -279,7 +279,6 @@ unsigned int vp9_variance_halfpixvar16x16_v_c(const uint8_t *src_ptr,
ref_ptr, recon_stride, sse);
}
-#if CONFIG_SUPERBLOCKS
unsigned int vp9_variance_halfpixvar32x32_v_c(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr,
@@ -289,6 +288,7 @@ unsigned int vp9_variance_halfpixvar32x32_v_c(const uint8_t *src_ptr,
ref_ptr, recon_stride, sse);
}
+#if CONFIG_SUPERBLOCKS64
unsigned int vp9_variance_halfpixvar64x64_v_c(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr,
@@ -297,7 +297,8 @@ unsigned int vp9_variance_halfpixvar64x64_v_c(const uint8_t *src_ptr,
return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 0, 8,
ref_ptr, recon_stride, sse);
}
-#endif
+#endif // CONFIG_SUPERBLOCKS64
+
unsigned int vp9_variance_halfpixvar16x16_hv_c(const uint8_t *src_ptr,
int source_stride,
@@ -308,7 +309,6 @@ unsigned int vp9_variance_halfpixvar16x16_hv_c(const uint8_t *src_ptr,
ref_ptr, recon_stride, sse);
}
-#if CONFIG_SUPERBLOCKS
unsigned int vp9_variance_halfpixvar32x32_hv_c(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr,
@@ -318,6 +318,7 @@ unsigned int vp9_variance_halfpixvar32x32_hv_c(const uint8_t *src_ptr,
ref_ptr, recon_stride, sse);
}
+#if CONFIG_SUPERBLOCKS64
unsigned int vp9_variance_halfpixvar64x64_hv_c(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr,
@@ -326,7 +327,7 @@ unsigned int vp9_variance_halfpixvar64x64_hv_c(const uint8_t *src_ptr,
return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 8, 8,
ref_ptr, recon_stride, sse);
}
-#endif
+#endif // CONFIG_SUPERBLOCKS64
unsigned int vp9_sub_pixel_mse16x16_c(const uint8_t *src_ptr,
int src_pixels_per_line,
@@ -341,7 +342,6 @@ unsigned int vp9_sub_pixel_mse16x16_c(const uint8_t *src_ptr,
return *sse;
}
-#if CONFIG_SUPERBLOCKS
unsigned int vp9_sub_pixel_mse32x32_c(const uint8_t *src_ptr,
int src_pixels_per_line,
int xoffset,
@@ -355,6 +355,7 @@ unsigned int vp9_sub_pixel_mse32x32_c(const uint8_t *src_ptr,
return *sse;
}
+#if CONFIG_SUPERBLOCKS64
unsigned int vp9_sub_pixel_mse64x64_c(const uint8_t *src_ptr,
int src_pixels_per_line,
int xoffset,
@@ -367,7 +368,7 @@ unsigned int vp9_sub_pixel_mse64x64_c(const uint8_t *src_ptr,
dst_pixels_per_line, sse);
return *sse;
}
-#endif
+#endif // CONFIG_SUPERBLOCKS64
unsigned int vp9_sub_pixel_variance16x8_c(const uint8_t *src_ptr,
int src_pixels_per_line,
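For reference, every variance kernel being un-guarded in this file computes variance = sse - sum^2 / (w * h), with the divide hard-coded as a shift: >> 12 for 64x64 (4096 pixels) in vp9_variance64x64_c and >> 10 for 32x32 (1024 pixels) in vp9_variance32x32_c. A generic, self-contained sketch of that arithmetic under a hypothetical name:

#include <stdint.h>

/* Variance of a w x h block: sse minus the squared pixel sum divided
 * by the pixel count.  variance_shift must be log2(w * h), matching
 * the hard-coded >> 12 and >> 10 above. */
static unsigned int block_variance(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   int w, int h, int variance_shift,
                                   unsigned int *sse) {
  int64_t sum = 0;
  uint64_t sse64 = 0;
  int r, c;
  for (r = 0; r < h; r++) {
    for (c = 0; c < w; c++) {
      const int diff = src[c] - ref[c];
      sum += diff;
      sse64 += (uint64_t)(diff * diff);
    }
    src += src_stride;
    ref += ref_stride;
  }
  *sse = (unsigned int)sse64;
  return (unsigned int)(sse64 - (uint64_t)((sum * sum) >> variance_shift));
}

Calling it with w = h = 64 and variance_shift = 12 reproduces the vp9_variance64x64_c result; the sub-pixel and half-pixel variants above differ only in that they filter the source before this step.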