Diffstat (limited to 'vp8/encoder')
-rw-r--r--  vp8/encoder/bitstream.c     98
-rw-r--r--  vp8/encoder/encodeframe.c  178
-rw-r--r--  vp8/encoder/onyx_if.c       16
-rw-r--r--  vp8/encoder/onyx_int.h       4
4 files changed, 260 insertions, 36 deletions
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 284025d4e..7aad15081 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -20,7 +20,9 @@
#include "vp8/common/pragmas.h"
#include "vpx_mem/vpx_mem.h"
#include "bitstream.h"
-
+#if CONFIG_SEGMENTATION
+static int segment_cost = 0;
+#endif
const int vp8cx_base_skip_false_prob[128] =
{
255, 255, 255, 255, 255, 255, 255, 255,
@@ -819,24 +821,39 @@ static void write_mb_features(vp8_writer *w, const MB_MODE_INFO *mi, const MACRO
case 0:
vp8_write(w, 0, x->mb_segment_tree_probs[0]);
vp8_write(w, 0, x->mb_segment_tree_probs[1]);
+#if CONFIG_SEGMENTATION
+ segment_cost += vp8_cost_zero(x->mb_segment_tree_probs[0]) + vp8_cost_zero(x->mb_segment_tree_probs[1]);
+#endif
break;
case 1:
vp8_write(w, 0, x->mb_segment_tree_probs[0]);
vp8_write(w, 1, x->mb_segment_tree_probs[1]);
+#if CONFIG_SEGMENTATION
+ segment_cost += vp8_cost_zero(x->mb_segment_tree_probs[0]) + vp8_cost_one(x->mb_segment_tree_probs[1]);
+#endif
break;
case 2:
vp8_write(w, 1, x->mb_segment_tree_probs[0]);
vp8_write(w, 0, x->mb_segment_tree_probs[2]);
+#if CONFIG_SEGMENTATION
+ segment_cost += vp8_cost_one(x->mb_segment_tree_probs[0]) + vp8_cost_zero(x->mb_segment_tree_probs[2]);
+#endif
break;
case 3:
vp8_write(w, 1, x->mb_segment_tree_probs[0]);
vp8_write(w, 1, x->mb_segment_tree_probs[2]);
+#if CONFIG_SEGMENTATION
+ segment_cost += vp8_cost_one(x->mb_segment_tree_probs[0]) + vp8_cost_one(x->mb_segment_tree_probs[2]);
+#endif
break;
// TRAP.. This should not happen
default:
vp8_write(w, 0, x->mb_segment_tree_probs[0]);
vp8_write(w, 0, x->mb_segment_tree_probs[1]);
+#if CONFIG_SEGMENTATION
+ segment_cost += vp8_cost_zero(x->mb_segment_tree_probs[0]) + vp8_cost_zero(x->mb_segment_tree_probs[1]);
+#endif
break;
}
}
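
The hunk above extends write_mb_features() so that, besides writing the segment id with the usual two-level binary tree, it accumulates the entropy cost of each written symbol into segment_cost. Below is a minimal sketch of that tree in a compact form, assuming the vp8_write()/vp8_cost_zero()/vp8_cost_one() helpers already used in this file; the function name and table-driven shape are illustrative only, not part of the patch.

/* Segment id 0..3 coded as two binary decisions: the first bit selects the
 * half {0,1} vs {2,3} with probs[0]; the second bit selects within that half
 * with probs[1] (ids 0/1) or probs[2] (ids 2/3). */
static int write_segment_id_sketch(vp8_writer *w, const vp8_prob probs[3],
                                   int segment_id)
{
    const int first  = segment_id >> 1;   /* 0 => {0,1}, 1 => {2,3}   */
    const int second = segment_id & 1;    /* position within the half */
    int cost = 0;

    vp8_write(w, first, probs[0]);
    cost += first ? vp8_cost_one(probs[0]) : vp8_cost_zero(probs[0]);

    vp8_write(w, second, probs[first ? 2 : 1]);
    cost += second ? vp8_cost_one(probs[first ? 2 : 1])
                   : vp8_cost_zero(probs[first ? 2 : 1]);

    return cost;   /* 1/256-bit units, as accumulated into segment_cost */
}
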
@@ -848,7 +865,13 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
VP8_COMMON *const pc = & cpi->common;
vp8_writer *const w = & cpi->bc;
const MV_CONTEXT *mvc = pc->fc.mvc;
-
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+#if CONFIG_SEGMENTATION
+ int left_id, above_id;
+ int i;
+ int sum;
+ int index = 0;
+#endif
const int *const rfct = cpi->count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
@@ -905,7 +928,9 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
update_mbintra_mode_probs(cpi);
vp8_write_mvprobs(cpi);
-
+#if CONFIG_SEGMENTATION
+ vp8_write_bit(w, (xd->temporal_update) ? 1:0);
+#endif
while (++mb_row < pc->mb_rows)
{
int mb_col = -1;
@@ -916,7 +941,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
const MV_REFERENCE_FRAME rf = mi->ref_frame;
const MB_PREDICTION_MODE mode = mi->mode;
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ //MACROBLOCKD *xd = &cpi->mb.e_mbd;
// Distance of Mb to the various image edges.
// These specified to 8th pel as they are always compared to MV values that are in 1/8th pel units
@@ -924,13 +949,46 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
xd->mb_to_top_edge = -((mb_row * 16)) << 3;
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
-
+ xd->up_available = (mb_row != 0);
+ xd->left_available = (mb_col != 0);
#ifdef ENTROPY_STATS
active_section = 9;
#endif
if (cpi->mb.e_mbd.update_mb_segmentation_map)
+ {
+#if CONFIG_SEGMENTATION
+ if (xd->temporal_update)
+ {
+ sum = 0;
+ if (mb_col != 0)
+ sum += (m-1)->mbmi.segment_flag;
+ if (mb_row != 0)
+ sum += (m-pc->mb_cols)->mbmi.segment_flag;
+
+ if (m->mbmi.segment_flag == 0)
+ {
+ vp8_write(w,0,xd->mb_segment_tree_probs[3+sum]);
+ segment_cost += vp8_cost_zero(xd->mb_segment_tree_probs[3+sum]);
+ }
+ else
+ {
+ vp8_write(w,1,xd->mb_segment_tree_probs[3+sum]);
+ segment_cost += vp8_cost_one(xd->mb_segment_tree_probs[3+sum]);
+ write_mb_features(w, mi, &cpi->mb.e_mbd);
+ cpi->segmentation_map[index] = mi->segment_id;
+ }
+ }
+ else
+ {
+ write_mb_features(w, mi, &cpi->mb.e_mbd);
+ cpi->segmentation_map[index] = mi->segment_id;
+ }
+ index++;
+#else
write_mb_features(w, mi, &cpi->mb.e_mbd);
+#endif
+ }
if (pc->mb_no_coeff_skip)
vp8_encode_bool(w, m->mbmi.mb_skip_coeff, prob_skip_false);
@@ -1059,7 +1117,11 @@ static void write_kfmodes(VP8_COMP *cpi)
const VP8_COMMON *const c = & cpi->common;
/* const */
MODE_INFO *m = c->mi;
-
+#if CONFIG_SEGMENTATION
+ int left_id, above_id;
+ int i;
+ int index = 0;
+#endif
int mb_row = -1;
int prob_skip_false = 0;
@@ -1084,9 +1146,22 @@ static void write_kfmodes(VP8_COMP *cpi)
while (++mb_col < c->mb_cols)
{
const int ym = m->mbmi.mode;
-
+#if CONFIG_SEGMENTATION
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ xd->up_available = (mb_row != 0);
+ xd->left_available = (mb_col != 0);
+#endif
if (cpi->mb.e_mbd.update_mb_segmentation_map)
+ {
+#if CONFIG_SEGMENTATION
+
+ write_mb_features(bc, &m->mbmi, &cpi->mb.e_mbd);
+ cpi->segmentation_map[index] = m->mbmi.segment_id;
+ index++;
+#else
write_mb_features(bc, &m->mbmi, &cpi->mb.e_mbd);
+#endif
+ }
if (c->mb_no_coeff_skip)
vp8_encode_bool(bc, m->mbmi.mb_skip_coeff, prob_skip_false);
@@ -1412,6 +1487,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
else
vp8_start_encode(bc, cx_data);
+ xd->update_mb_segmentation_map = 1;
// Signal whether or not Segmentation is enabled
vp8_write_bit(bc, (xd->segmentation_enabled) ? 1 : 0);
@@ -1462,8 +1538,12 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
if (xd->update_mb_segmentation_map)
{
+ #if CONFIG_SEGMENTATION
// Write the probs used to decode the segment id for each macro block.
+ for (i = 0; i < MB_FEATURE_TREE_PROBS+3; i++)
+#else
for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
+#endif
{
int Data = xd->mb_segment_tree_probs[i];
@@ -1633,7 +1713,9 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
active_section = 1;
#endif
}
-
+#if CONFIG_SEGMENTATION
+ //printf("%d\n",segment_cost);
+#endif
vp8_stop_encode(bc);
oh.first_partition_length_in_bytes = cpi->bc.pos;
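
With temporal updating enabled, pack_inter_mode_mvs() no longer writes every macroblock's segment id outright: it first codes a per-MB segment_flag whose probability is chosen from a context formed by the left and above neighbours, and only falls back to the explicit id tree (write_mb_features) when the flag says the id changed. A minimal sketch of that flag coding, reusing the field and probability names from the patch; the helper function itself is hypothetical.

/* Context = number of neighbouring MBs whose id changed (0, 1 or 2);
 * it selects one of the three extra probabilities at indices 3..5. */
static void code_segment_flag_sketch(vp8_writer *w, MACROBLOCKD *xd,
                                     const MODE_INFO *m,
                                     int mb_row, int mb_col, int mb_cols)
{
    int sum = 0;

    if (mb_col != 0)
        sum += (m - 1)->mbmi.segment_flag;          /* left neighbour  */
    if (mb_row != 0)
        sum += (m - mb_cols)->mbmi.segment_flag;    /* above neighbour */

    /* 0: id unchanged from the previous map; 1: changed, so the explicit
     * segment id tree follows. */
    vp8_write(w, m->mbmi.segment_flag, xd->mb_segment_tree_probs[3 + sum]);
}
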
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index c725cd023..56af2d16b 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -30,6 +30,7 @@
#include "vp8/common/subpixel.h"
#include "vpx_ports/vpx_timer.h"
+
#if CONFIG_RUNTIME_CPU_DETECT
#define RTCD(x) &cpi->common.rtcd.x
#define IF_RTCD(x) (x)
@@ -37,6 +38,13 @@
#define RTCD(x) NULL
#define IF_RTCD(x) NULL
#endif
+
+#if CONFIG_SEGMENTATION
+#define SEEK_SEGID 12
+#define SEEK_SAMEID 4
+#define SEEK_DIFFID 7
+#endif
+
extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
@@ -320,7 +328,10 @@ void encode_mb_row(VP8_COMP *cpi,
int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
int map_index = (mb_row * cpi->common.mb_cols);
-
+#if CONFIG_SEGMENTATION
+ int left_id, above_id;
+ int sum;
+#endif
#if CONFIG_MULTITHREAD
const int nsync = cpi->mt_sync_range;
const int rightmost_col = cm->mb_cols - 1;
@@ -331,7 +342,6 @@ void encode_mb_row(VP8_COMP *cpi,
else
last_row_current_mb_col = &rightmost_col;
#endif
-
// reset above block coeffs
xd->above_context = cm->above_context;
@@ -409,6 +419,7 @@ void encode_mb_row(VP8_COMP *cpi,
xd->mode_info_context->mbmi.segment_id = 0;
vp8cx_mb_init_quantizer(cpi, x);
+
}
else
xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
@@ -477,6 +488,11 @@ void encode_mb_row(VP8_COMP *cpi,
x->mb_activity_ptr++;
x->mb_norm_activity_ptr++;
+ if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
+ xd->mode_info_context->mbmi.segment_id = 0;
+ else
+ xd->mode_info_context->mbmi.segment_id = 1;
+
if(cm->frame_type != INTRA_FRAME)
{
if (xd->mode_info_context->mbmi.mode != B_PRED)
@@ -504,9 +520,42 @@ void encode_mb_row(VP8_COMP *cpi,
recon_yoffset += 16;
recon_uvoffset += 8;
- // Keep track of segment useage
- segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
+#if CONFIG_SEGMENTATION
+ //cpi->segmentation_map[mb_row * cm->mb_cols + mb_col] = xd->mbmi.segment_id;
+ if (cm->frame_type == KEY_FRAME)
+ {
+ segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
+ }
+ else
+ {
+ sum = 0;
+ if (mb_col != 0)
+ sum += (xd->mode_info_context-1)->mbmi.segment_flag;
+ if (mb_row != 0)
+ sum += (xd->mode_info_context-cm->mb_cols)->mbmi.segment_flag;
+
+ if (xd->mode_info_context->mbmi.segment_id == cpi->segmentation_map[(mb_row*cm->mb_cols) + mb_col])
+ xd->mode_info_context->mbmi.segment_flag = 0;
+ else
+ xd->mode_info_context->mbmi.segment_flag = 1;
+ if (xd->mode_info_context->mbmi.segment_flag == 0)
+ {
+ segment_counts[SEEK_SAMEID + sum]++;
+ segment_counts[10]++;
+ }
+ else
+ {
+ segment_counts[SEEK_DIFFID + sum]++;
+ segment_counts[11]++;
+ //calculate individual segment ids
+ segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
+ }
+ }
+ segment_counts[SEEK_SEGID + xd->mode_info_context->mbmi.segment_id] ++;
+#else
+ segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
+#endif
// skip to next mb
xd->mode_info_context++;
x->partition_info++;
@@ -530,7 +579,6 @@ void encode_mb_row(VP8_COMP *cpi,
// this is to account for the border
xd->mode_info_context++;
x->partition_info++;
-
#if CONFIG_MULTITHREAD
if ((cpi->b_multi_threaded != 0) && (mb_row == cm->mb_rows - 1))
{
@@ -538,7 +586,6 @@ void encode_mb_row(VP8_COMP *cpi,
}
#endif
}
-
void vp8_encode_frame(VP8_COMP *cpi)
{
int mb_row;
@@ -547,7 +594,13 @@ void vp8_encode_frame(VP8_COMP *cpi)
MACROBLOCKD *const xd = & x->e_mbd;
TOKENEXTRA *tp = cpi->tok;
+#if CONFIG_SEGMENTATION
+ int segment_counts[MAX_MB_SEGMENTS + SEEK_SEGID];
+ int prob[3];
+ int new_cost, original_cost;
+#else
int segment_counts[MAX_MB_SEGMENTS];
+#endif
int totalrate;
// Functions setup for all frame types so we can use MC in AltRef
@@ -765,41 +818,126 @@ void vp8_encode_frame(VP8_COMP *cpi)
}
-
// Work out the segment probabilites if segmentation is enabled
if (xd->segmentation_enabled)
{
int tot_count;
- int i;
+ int i,j;
+ int count1,count2,count3,count4;
// Set to defaults
vpx_memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs));
+#if CONFIG_SEGMENTATION
+
+ tot_count = segment_counts[12] + segment_counts[13] + segment_counts[14] + segment_counts[15];
+ count1 = segment_counts[12] + segment_counts[13];
+ count2 = segment_counts[14] + segment_counts[15];
+
+ if (tot_count)
+ prob[0] = (count1 * 255) / tot_count;
+
+ if (count1 > 0)
+ prob[1] = (segment_counts[12] * 255) /count1;
+
+ if (count2 > 0)
+ prob[2] = (segment_counts[14] * 255) /count2;
+
+ if (cm->frame_type != KEY_FRAME)
+ {
+ tot_count = segment_counts[4] + segment_counts[7];
+ if (tot_count)
+ xd->mb_segment_tree_probs[3] = (segment_counts[4] * 255)/tot_count;
+
+ tot_count = segment_counts[5] + segment_counts[8];
+ if (tot_count)
+ xd->mb_segment_tree_probs[4] = (segment_counts[5] * 255)/tot_count;
+
+ tot_count = segment_counts[6] + segment_counts[9];
+ if (tot_count)
+ xd->mb_segment_tree_probs[5] = (segment_counts[6] * 255)/tot_count;
+ }
tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];
+ count3 = segment_counts[0] + segment_counts[1];
+ count4 = segment_counts[2] + segment_counts[3];
if (tot_count)
+ xd->mb_segment_tree_probs[0] = (count3 * 255) / tot_count;
+
+ if (count3 > 0)
+ xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) /count3;
+
+ if (count4 > 0)
+ xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) /count4;
+
+ for (i = 0; i < MB_FEATURE_TREE_PROBS+3; i++)
{
- xd->mb_segment_tree_probs[0] = ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;
+ if (xd->mb_segment_tree_probs[i] == 0)
+ xd->mb_segment_tree_probs[i] = 1;
+ }
- tot_count = segment_counts[0] + segment_counts[1];
+ original_cost = count1 * vp8_cost_zero(prob[0]) + count2 * vp8_cost_one(prob[0]);
- if (tot_count > 0)
- {
- xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;
- }
+ if (count1 > 0)
+ original_cost += segment_counts[12] * vp8_cost_zero(prob[1]) + segment_counts[13] * vp8_cost_one(prob[1]);
+
+ if (count2 > 0)
+ original_cost += segment_counts[14] * vp8_cost_zero(prob[2]) + segment_counts[15] * vp8_cost_one(prob[2]) ;
- tot_count = segment_counts[2] + segment_counts[3];
+ new_cost = 0;
- if (tot_count > 0)
- xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
+ if (cm->frame_type != KEY_FRAME)
+ {
+ new_cost = segment_counts[4] * vp8_cost_zero(xd->mb_segment_tree_probs[3]) + segment_counts[7] * vp8_cost_one(xd->mb_segment_tree_probs[3]);
+
+ new_cost += segment_counts[5] * vp8_cost_zero(xd->mb_segment_tree_probs[4]) + segment_counts[8] * vp8_cost_one(xd->mb_segment_tree_probs[4]);
+
+ new_cost += segment_counts[6] * vp8_cost_zero(xd->mb_segment_tree_probs[5]) + segment_counts[9] * vp8_cost_one (xd->mb_segment_tree_probs[5]);
+ }
- // Zero probabilities not allowed
- for (i = 0; i < MB_FEATURE_TREE_PROBS; i ++)
+ if (tot_count > 0)
+ new_cost += count3 * vp8_cost_zero(xd->mb_segment_tree_probs[0]) + count4 * vp8_cost_one(xd->mb_segment_tree_probs[0]);
+
+ if (count3 > 0)
+ new_cost += segment_counts[0] * vp8_cost_zero(xd->mb_segment_tree_probs[1]) + segment_counts[1] * vp8_cost_one(xd->mb_segment_tree_probs[1]);
+
+ if (count4 > 0)
+ new_cost += segment_counts[2] * vp8_cost_zero(xd->mb_segment_tree_probs[2]) + segment_counts[3] * vp8_cost_one(xd->mb_segment_tree_probs[2]) ;
+
+ if (new_cost < original_cost)
+ xd->temporal_update = 1;
+ else
+ {
+ xd->temporal_update = 0;
+ xd->mb_segment_tree_probs[0] = prob[0];
+ xd->mb_segment_tree_probs[1] = prob[1];
+ xd->mb_segment_tree_probs[2] = prob[2];
+ }
+#else
+ tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];
+ count1 = segment_counts[0] + segment_counts[1];
+ count2 = segment_counts[2] + segment_counts[3];
+
+ if (tot_count)
+ xd->mb_segment_tree_probs[0] = (count1 * 255) / tot_count;
+
+ if (count1 > 0)
+ xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) /count1;
+
+ if (count2 > 0)
+ xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) /count2;
+
+#endif
+ // Zero probabilities not allowed
+#if CONFIG_SEGMENTATION
+ for (i = 0; i < MB_FEATURE_TREE_PROBS+3; i++)
+#else
+ for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
+#endif
{
if (xd->mb_segment_tree_probs[i] == 0)
xd->mb_segment_tree_probs[i] = 1;
}
- }
}
// 256 rate units to the bit
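
The block above is where the encoder chooses between the two ways of coding the segment map: original_cost estimates the bits needed to code every macroblock's id directly from the whole-frame tallies (indices 12..15), while new_cost estimates the temporally predicted scheme (context-coded change flags plus explicit ids only for the macroblocks whose id changed), and the cheaper scheme sets xd->temporal_update. A compact sketch of that comparison, assuming the segment_counts layout used in encode_mb_row(); the function and parameter names are illustrative only. Costs are in 1/256-bit units, as returned by vp8_cost_zero()/vp8_cost_one().

static int prefer_temporal_update_sketch(const int counts[16],
                                         const vp8_prob direct_probs[3],
                                         const vp8_prob flag_probs[3],
                                         const vp8_prob changed_id_probs[3])
{
    int direct_cost, pred_cost;

    /* Direct map: two-level tree over ids 0..3 for every MB
     * (counts[12..15] hold the whole-frame per-id totals). */
    direct_cost  = (counts[12] + counts[13]) * vp8_cost_zero(direct_probs[0])
                 + (counts[14] + counts[15]) * vp8_cost_one(direct_probs[0]);
    direct_cost += counts[12] * vp8_cost_zero(direct_probs[1])
                 + counts[13] * vp8_cost_one(direct_probs[1]);
    direct_cost += counts[14] * vp8_cost_zero(direct_probs[2])
                 + counts[15] * vp8_cost_one(direct_probs[2]);

    /* Temporal prediction: one context-coded flag per MB
     * (counts[4..6] unchanged / counts[7..9] changed, per context) ... */
    pred_cost  = counts[4] * vp8_cost_zero(flag_probs[0])
               + counts[7] * vp8_cost_one(flag_probs[0]);
    pred_cost += counts[5] * vp8_cost_zero(flag_probs[1])
               + counts[8] * vp8_cost_one(flag_probs[1]);
    pred_cost += counts[6] * vp8_cost_zero(flag_probs[2])
               + counts[9] * vp8_cost_one(flag_probs[2]);

    /* ... plus the id tree for the MBs that did change (counts[0..3]). */
    pred_cost += (counts[0] + counts[1]) * vp8_cost_zero(changed_id_probs[0])
               + (counts[2] + counts[3]) * vp8_cost_one(changed_id_probs[0]);
    pred_cost += counts[0] * vp8_cost_zero(changed_id_probs[1])
               + counts[1] * vp8_cost_one(changed_id_probs[1]);
    pred_cost += counts[2] * vp8_cost_zero(changed_id_probs[2])
               + counts[3] * vp8_cost_one(changed_id_probs[2]);

    return pred_cost < direct_cost;   /* 1 => use the temporal update */
}
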
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 6e57b1c8a..bc1f6e881 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -408,7 +408,6 @@ static void set_segmentation_map(VP8_PTR ptr, unsigned char *segmentation_map)
// Copy in the new segmentation map
vpx_memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols));
-
// Signal that the map should be updated.
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
@@ -434,12 +433,10 @@ static void set_segment_data(VP8_PTR ptr, signed char *feature_data, unsigned ch
static void segmentation_test_function(VP8_PTR ptr)
{
VP8_COMP *cpi = (VP8_COMP *)(ptr);
-
unsigned char *seg_map;
signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
-
+ CHECK_MEM_ERROR(seg_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
// Create a temporary map for segmentation data.
- CHECK_MEM_ERROR(seg_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
// MB loop to set local segmentation map
/*for ( i = 0; i < cpi->common.mb_rows; i++ )
@@ -499,7 +496,7 @@ static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
// Create a temporary map for segmentation data.
- CHECK_MEM_ERROR(seg_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
+ CHECK_MEM_ERROR(seg_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
cpi->cyclic_refresh_q = Q;
@@ -1925,7 +1922,7 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
CHECK_MEM_ERROR(cpi->lf_ref_frame, vpx_calloc((cpi->common.mb_rows+2) * (cpi->common.mb_cols+2), sizeof(int)));
// Create the encoder segmentation map and set all entries to 0
- CHECK_MEM_ERROR(cpi->segmentation_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
+ CHECK_MEM_ERROR(cpi->segmentation_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
vpx_memset(cpi->active_map , 1, (cpi->common.mb_rows * cpi->common.mb_cols));
cpi->active_map_enabled = 0;
@@ -1961,13 +1958,12 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
cpi->cyclic_refresh_q = 32;
if (cpi->cyclic_refresh_mode_enabled)
- {
CHECK_MEM_ERROR(cpi->cyclic_refresh_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
- }
else
cpi->cyclic_refresh_map = (signed char *) NULL;
// Test function for segmentation
+
//segmentation_test_function((VP8_PTR) cpi);
#ifdef ENTROPY_STATS
@@ -3280,6 +3276,10 @@ static void encode_frame_to_data_rate
// Test code for segmentation of gf/arf (0,0)
//segmentation_test_function((VP8_PTR) cpi);
+#if CONFIG_SEGMENTATION
+ cpi->mb.e_mbd.segmentation_enabled = 1;
+ cpi->mb.e_mbd.update_mb_segmentation_map = 1;
+#endif
#if CONFIG_REALTIME_ONLY
if(cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME)
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index 663786004..d0e3e5782 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -196,7 +196,11 @@ typedef struct
typedef struct
{
MACROBLOCK mb;
+#if CONFIG_SEGMENTATION
+ int segment_counts[MAX_MB_SEGMENTS + 8];
+#else
int segment_counts[MAX_MB_SEGMENTS];
+#endif
int totalrate;
} MB_ROW_COMP;
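
For reference, the enlarged segment_counts[] vector packs several tallies into one array. The layout below is inferred from how encode_mb_row() and vp8_encode_frame() index it (with MAX_MB_SEGMENTS == 4, SEEK_SAMEID == 4, SEEK_DIFFID == 7, SEEK_SEGID == 12); the enumerator names are purely illustrative and do not appear in the patch.

enum segment_count_index_sketch
{
    SEG_CHANGED_ID_BASE = 0,   /* [0..3]  ids coded explicitly (all MBs on key frames) */
    SEG_SAME_CTX_BASE   = 4,   /* [4..6]  "id unchanged" flag, neighbour context 0..2  */
    SEG_DIFF_CTX_BASE   = 7,   /* [7..9]  "id changed" flag, neighbour context 0..2    */
    SEG_TOTAL_SAME      = 10,  /* all MBs whose id matched the previous map            */
    SEG_TOTAL_DIFF      = 11,  /* all MBs whose id differed                            */
    SEG_ALL_ID_BASE     = 12   /* [12..15] whole-frame per-id totals (direct-map cost) */
};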