author     Suman Sunkara <sunkaras@google.com>    2010-08-31 20:43:14 -0400
committer  Suman Sunkara <sunkaras@google.com>    2010-09-13 10:01:21 -0400
commit     be7e4e854c82bd9e8b5db0586873ea19156cfeef (patch)
tree       d6b242603f1fd2452107dc1a18cf93831e8a9cc2
parent     f4020e2338a1786b1db0f67075ceb7d9c01be6a3 (diff)
Delta updates to segmentation map using left and above contexts.
- Updates by making use of spatial correlation.
- Checks whether the segment_id is the same as the above or left context and encodes only the update to the map instead of coding each segment_id individually.

Change-Id: Ib861df97e8aa2b37516219eeddcdbaf552b6a249
-rwxr-xr-x  configure                    1
-rw-r--r--  vp8/common/blockd.h          9
-rw-r--r--  vp8/decoder/decodemv.c      45
-rw-r--r--  vp8/decoder/decodframe.c     8
-rw-r--r--  vp8/decoder/demode.c        50
-rw-r--r--  vp8/decoder/onyxd_if.c       8
-rw-r--r--  vp8/decoder/onyxd_int.h      1
-rw-r--r--  vp8/encoder/bitstream.c    165
-rw-r--r--  vp8/encoder/encodeframe.c  116
-rw-r--r--  vp8/encoder/ethreading.c     2
-rw-r--r--  vp8/encoder/onyx_if.c       61
-rw-r--r--  vp8/encoder/onyx_int.h       4
12 files changed, 410 insertions, 60 deletions
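
For orientation before the per-file diffs, here is a minimal standalone sketch of the left/above delta coding that the commit message describes and that the decodemv.c, demode.c and bitstream.c hunks implement with vp8_read()/vp8_write() and mb_segment_tree_probs[]. Plain 0/1 flags stand in for the boolean coder, and every name in the sketch (seg_bits, encode_segment_delta, decode_segment_delta) is illustrative rather than part of libvpx.

/*
 * Standalone sketch (not part of the patch) of the left/above delta coding.
 * The real code drives vp8_write()/vp8_read() with the probabilities in
 * mb_segment_tree_probs[]; here plain 0/1 flags stand in for the boolean
 * coder so the control flow can be read on its own.
 */
#include <stdio.h>

#define MAX_MB_SEGMENTS 4

typedef struct {
    int bits[1 + MAX_MB_SEGMENTS]; /* coded decisions, in order */
    int n;                         /* number of decisions used  */
} seg_bits;

/* Encoder side: emit the decisions that identify seg_id given its neighbours. */
static void encode_segment_delta(int seg_id, int left_id, int above_id,
                                 seg_bits *out)
{
    int i;
    out->n = 0;

    if (seg_id == left_id || seg_id == above_id) {
        out->bits[out->n++] = 0;  /* "same as a neighbour" */
        /* the second decision only carries information when the neighbours differ */
        out->bits[out->n++] = (left_id != above_id && seg_id == above_id);
    } else {
        out->bits[out->n++] = 1;  /* "explicit segment id" */
        for (i = 0; i < MAX_MB_SEGMENTS; i++)
            if (i != left_id && i != above_id)
                out->bits[out->n++] = (seg_id != i); /* 0 marks the chosen id */
    }
}

/* Decoder side: mirror of the encoder, consuming the same decisions. */
static int decode_segment_delta(const seg_bits *in, int left_id, int above_id)
{
    int pos = 0, i, seg_id = 0; /* segment 0 by default, as in the patch */

    if (in->bits[pos++] == 0)
        return in->bits[pos] ? above_id : left_id;

    for (i = 0; i < MAX_MB_SEGMENTS; i++)
        if (i != left_id && i != above_id && in->bits[pos++] == 0)
            seg_id = i;

    return seg_id;
}

int main(void)
{
    int left, above, id, errors = 0;

    /* round-trip every (left, above, id) combination */
    for (left = 0; left < MAX_MB_SEGMENTS; left++)
        for (above = 0; above < MAX_MB_SEGMENTS; above++)
            for (id = 0; id < MAX_MB_SEGMENTS; id++) {
                seg_bits b;
                encode_segment_delta(id, left, above, &b);
                if (decode_segment_delta(&b, left, above) != id)
                    errors++;
            }

    printf("round-trip mismatches: %d\n", errors);
    return 0;
}
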
diff --git a/configure b/configure
index 8fa56fe1a..210c69b8b 100755
--- a/configure
+++ b/configure
@@ -204,6 +204,7 @@ HAVE_LIST="
sys_mman_h
"
EXPERIMENT_LIST="
+ segmentation
"
CONFIG_LIST="
external_build
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index 84ed53ad2..da8c0e1d8 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -31,8 +31,8 @@ void vpx_log(const char *format, ...);
#define UCONTEXT 1
#define VCONTEXT 2
#define Y2CONTEXT 3
-
#define MB_FEATURE_TREE_PROBS 3
+
#define MAX_MB_SEGMENTS 4
#define MAX_REF_LF_DELTAS 4
@@ -255,8 +255,11 @@ typedef struct
// Per frame flags that define which MB level features (such as quantizer or loop filter level)
// are enabled and when enabled the proabilities used to decode the per MB flags in MB_MODE_INFO
- vp8_prob mb_segment_tree_probs[MB_FEATURE_TREE_PROBS]; // Probability Tree used to code Segment number
-
+#if CONFIG_SEGMENTATION
+ vp8_prob mb_segment_tree_probs[MB_FEATURE_TREE_PROBS + 3]; // Probability Tree used to code Segment number
+#else
+ vp8_prob mb_segment_tree_probs[MB_FEATURE_TREE_PROBS];
+#endif
signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; // Segment parameters
// mode_based Loop filter adjustment
diff --git a/vp8/decoder/decodemv.c b/vp8/decoder/decodemv.c
index 6035f3e6a..de85e8f6d 100644
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -120,7 +120,10 @@ void vp8_decode_mode_mvs(VP8D_COMP *pbi)
MV_CONTEXT *const mvc = pc->fc.mvc;
int mb_row = -1;
-
+#if CONFIG_SEGMENTATION
+ int left_id, above_id;
+ int i;
+#endif
vp8_prob prob_intra;
vp8_prob prob_last;
vp8_prob prob_gf;
@@ -171,7 +174,6 @@ void vp8_decode_mode_mvs(VP8D_COMP *pbi)
MACROBLOCKD *xd = &pbi->mb;
vp8dx_bool_decoder_fill(bc);
-
// Distance of Mb to the various image edges.
// These specified to 8th pel as they are always compared to MV values that are in 1/8th pel units
xd->mb_to_left_edge = -((mb_col * 16) << 3);
@@ -181,7 +183,44 @@ void vp8_decode_mode_mvs(VP8D_COMP *pbi)
// If required read in new segmentation data for this MB
if (pbi->mb.update_mb_segmentation_map)
- vp8_read_mb_features(bc, mbmi, &pbi->mb);
+ {
+#if CONFIG_SEGMENTATION
+ xd->up_available = (mb_row != 0);
+ xd->left_available = (mb_col != 0);
+
+ if(xd->left_available)
+ left_id = (mi-1)->mbmi.segment_id;
+ else
+ left_id = 0;
+
+ if(xd->up_available)
+ above_id = (mi-pc->mb_cols)->mbmi.segment_id;
+ else
+ above_id = 0;
+
+ if (vp8_read(bc, xd->mb_segment_tree_probs[0]))
+ {
+ for(i = 0; i < MAX_MB_SEGMENTS; i++)
+ {
+ if((left_id != i) && (above_id != i))
+ {
+ if (vp8_read(bc, xd->mb_segment_tree_probs[2+i]) == 0)
+ mbmi->segment_id = i;
+ }
+ }
+ }
+ else
+ {
+ if (vp8_read(bc, xd->mb_segment_tree_probs[1]))
+ mbmi->segment_id = above_id;
+ else
+ mbmi->segment_id = left_id;
+
+ }
+#else
+ vp8_read_mb_features(bc, &mi->mbmi, &pbi->mb);
+#endif
+ }
// Read the macroblock coeff skip flag if this feature is in use, else default to 0
if (pc->mb_no_coeff_skip)
diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c
index 4edf4f60d..a26056e35 100644
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -545,7 +545,7 @@ static void init_frame(VP8D_COMP *pbi)
}
}
- xd->left_context = pc->left_context;
+ xd->left_context = &pc->left_context;
xd->mode_info_context = pc->mi;
xd->frame_type = pc->frame_type;
xd->mbmi.mode = DC_PRED;
@@ -560,7 +560,6 @@ int vp8_decode_frame(VP8D_COMP *pbi)
const unsigned char *data = (const unsigned char *)pbi->Source;
const unsigned char *const data_end = data + pbi->source_sz;
int first_partition_length_in_bytes;
-
int mb_row;
int i, j, k, l;
const int *const mb_feature_data_bits = vp8_mb_feature_data_bits;
@@ -669,9 +668,12 @@ int vp8_decode_frame(VP8D_COMP *pbi)
{
// Which macro block level features are enabled
vpx_memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
-
+#if CONFIG_SEGMENTATION
// Read the probs used to decode the segment id for each macro block.
+ for (i = 0; i < MB_FEATURE_TREE_PROBS+3; i++)
+#else
for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
+#endif
{
// If not explicitly set value is defaulted to 255 by memset above
if (vp8_read_bit(bc))
diff --git a/vp8/decoder/demode.c b/vp8/decoder/demode.c
index fd05e6db5..0d2e95b8b 100644
--- a/vp8/decoder/demode.c
+++ b/vp8/decoder/demode.c
@@ -64,7 +64,10 @@ void vp8_kfread_modes(VP8D_COMP *pbi)
MODE_INFO *m = cp->mi;
const int ms = cp->mode_info_stride;
-
+#if CONFIG_SEGMENTATION
+ int left_id,above_id;
+ int i;
+#endif
int mb_row = -1;
vp8_prob prob_skip_false = 0;
@@ -78,16 +81,57 @@ void vp8_kfread_modes(VP8D_COMP *pbi)
while (++mb_col < cp->mb_cols)
{
MB_PREDICTION_MODE y_mode;
-
vp8dx_bool_decoder_fill(bc);
+
// Read the Macroblock segmentation map if it is being updated explicitly this frame (reset to 0 above by default)
// By default on a key frame reset all MBs to segment 0
m->mbmi.segment_id = 0;
if (pbi->mb.update_mb_segmentation_map)
+ {
+
+#if CONFIG_SEGMENTATION
+ MACROBLOCKD *xd = &pbi->mb;
+ xd->up_available = (mb_row != 0);
+ xd->left_available = (mb_col != 0);
+
+ if(xd->left_available)
+ left_id = (m-1)->mbmi.segment_id;
+ else
+ left_id = 0;
+
+ if(xd->up_available)
+ above_id = (m-cp->mb_cols)->mbmi.segment_id;
+ else
+ above_id = 0;
+
+ if (vp8_read(bc, xd->mb_segment_tree_probs[0]))
+ {
+ for(i = 0; i < MAX_MB_SEGMENTS; i++)
+ {
+ if((left_id != i) && (above_id != i))
+ {
+ if (vp8_read(bc, xd->mb_segment_tree_probs[2+i]) == 0)
+ {
+ m->mbmi.segment_id = i;
+ }
+ }
+ }
+ }
+ else
+ {
+ if (vp8_read(bc, xd->mb_segment_tree_probs[1]))
+ m->mbmi.segment_id = above_id;
+ else
+ m->mbmi.segment_id = left_id;
+
+ }
+#else
vp8_read_mb_features(bc, &m->mbmi, &pbi->mb);
+#endif
+ }
- // Read the macroblock coeff skip flag if this feature is in use, else default to 0
+ // Read the macroblock coeff skip flag if this feature is in use, else default to 0
if (cp->mb_no_coeff_skip)
m->mbmi.mb_skip_coeff = vp8_read(bc, prob_skip_false);
else
diff --git a/vp8/decoder/onyxd_if.c b/vp8/decoder/onyxd_if.c
index 6875585f0..c87f3afd5 100644
--- a/vp8/decoder/onyxd_if.c
+++ b/vp8/decoder/onyxd_if.c
@@ -85,7 +85,6 @@ void vp8dx_initialize()
}
}
-
VP8D_PTR vp8dx_create_decompressor(VP8D_CONFIG *oxcf)
{
VP8D_COMP *pbi = vpx_memalign(32, sizeof(VP8D_COMP));
@@ -132,7 +131,6 @@ VP8D_PTR vp8dx_create_decompressor(VP8D_CONFIG *oxcf)
return (VP8D_PTR) pbi;
}
-
void vp8dx_remove_decompressor(VP8D_PTR ptr)
{
VP8D_COMP *pbi = (VP8D_COMP *) ptr;
@@ -306,9 +304,11 @@ int vp8dx_receive_compressed_data(VP8D_PTR ptr, unsigned long size, const unsign
vp8_yv12_extend_frame_borders_ptr(cm->frame_to_show);
-#if 0
+
// DEBUG code
- //vp8_recon_write_yuv_frame("recon.yuv", cm->frame_to_show);
+#if 0
+ vp8_recon_write_yuv_frame("recon.yuv", cm->frame_to_show);
+
if (cm->current_video_frame <= 5)
write_dx_frame_to_file(cm->frame_to_show, cm->current_video_frame);
#endif
diff --git a/vp8/decoder/onyxd_int.h b/vp8/decoder/onyxd_int.h
index fa4fa48e4..218c23943 100644
--- a/vp8/decoder/onyxd_int.h
+++ b/vp8/decoder/onyxd_int.h
@@ -94,7 +94,6 @@ typedef struct VP8Decompressor
int current_mb_col_main;
int decoding_thread_count;
int allocated_decoding_thread_count;
-
// variable for threading
DECLARE_ALIGNED(16, MACROBLOCKD, lpfmb);
#if CONFIG_MULTITHREAD
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index e468f40f0..e336753b4 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -19,7 +19,9 @@
#include "pragmas.h"
#include "vpx_mem/vpx_mem.h"
#include "bitstream.h"
-
+#if CONFIG_SEGMENTATION
+static int segment_cost = 0;
+#endif
const int vp8cx_base_skip_false_prob[128] =
{
255, 255, 255, 255, 255, 255, 255, 255,
@@ -826,24 +828,29 @@ static void write_mb_features(vp8_writer *w, const MB_MODE_INFO *mi, const MACRO
case 0:
vp8_write(w, 0, x->mb_segment_tree_probs[0]);
vp8_write(w, 0, x->mb_segment_tree_probs[1]);
+ segment_cost += vp8_cost_zero(x->mb_segment_tree_probs[0]) + vp8_cost_zero(x->mb_segment_tree_probs[1]);
break;
case 1:
vp8_write(w, 0, x->mb_segment_tree_probs[0]);
vp8_write(w, 1, x->mb_segment_tree_probs[1]);
+ segment_cost += vp8_cost_zero(x->mb_segment_tree_probs[0]) + vp8_cost_one(x->mb_segment_tree_probs[1]);
break;
case 2:
vp8_write(w, 1, x->mb_segment_tree_probs[0]);
vp8_write(w, 0, x->mb_segment_tree_probs[2]);
+ segment_cost += vp8_cost_one(x->mb_segment_tree_probs[0]) + vp8_cost_zero(x->mb_segment_tree_probs[2]);
break;
case 3:
vp8_write(w, 1, x->mb_segment_tree_probs[0]);
vp8_write(w, 1, x->mb_segment_tree_probs[2]);
+ segment_cost += vp8_cost_one(x->mb_segment_tree_probs[0]) + vp8_cost_one(x->mb_segment_tree_probs[2]);
break;
// TRAP.. This should not happen
default:
vp8_write(w, 0, x->mb_segment_tree_probs[0]);
vp8_write(w, 0, x->mb_segment_tree_probs[1]);
+ segment_cost += vp8_cost_zero(x->mb_segment_tree_probs[0]) + vp8_cost_zero(x->mb_segment_tree_probs[1]);
break;
}
}
@@ -855,7 +862,10 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
VP8_COMMON *const pc = & cpi->common;
vp8_writer *const w = & cpi->bc;
const MV_CONTEXT *mvc = pc->fc.mvc;
-
+#if CONFIG_SEGMENTATION
+ int left_id, above_id;
+ int i;
+#endif
const int *const rfct = cpi->count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
@@ -929,13 +939,74 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
xd->mb_to_top_edge = -((mb_row * 16)) << 3;
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
-
+ xd->up_available = (mb_row != 0);
+ xd->left_available = (mb_col != 0);
#ifdef ENTROPY_STATS
active_section = 9;
#endif
if (cpi->mb.e_mbd.update_mb_segmentation_map)
+ {
+#if CONFIG_SEGMENTATION
+ if(xd->left_available)
+ left_id = (m-1)->mbmi.segment_id;
+ else
+ left_id = 0;
+
+ if(xd->up_available)
+ above_id = (m-pc->mb_cols)->mbmi.segment_id;
+ else
+ above_id = 0;
+
+ if ((m->mbmi.segment_id == left_id) || (m->mbmi.segment_id == above_id))
+ {
+ vp8_write(w, 0, xd->mb_segment_tree_probs[0]);
+ segment_cost += vp8_cost_zero(xd->mb_segment_tree_probs[0]);
+
+ if (left_id != above_id)
+ {
+ if(m->mbmi.segment_id == left_id)
+ {
+ vp8_write(w, 0, xd->mb_segment_tree_probs[1]);
+ segment_cost += vp8_cost_zero(xd->mb_segment_tree_probs[1]);
+ }
+ else
+ {
+ vp8_write(w, 1, xd->mb_segment_tree_probs[1]);
+ segment_cost += vp8_cost_one(xd->mb_segment_tree_probs[1]);
+ }
+ }
+ else
+ {
+ vp8_write(w, 0, xd->mb_segment_tree_probs[1]);
+ segment_cost += vp8_cost_zero(xd->mb_segment_tree_probs[1]);
+ }
+ }
+ else
+ {
+ vp8_write(w, 1, xd->mb_segment_tree_probs[0]);
+ segment_cost += vp8_cost_one(xd->mb_segment_tree_probs[0]);
+ for(i = 0; i < MAX_MB_SEGMENTS; i++)
+ {
+ if((left_id != i) && (above_id != i))
+ {
+ if(m->mbmi.segment_id == i)
+ {
+ vp8_write(w, 0, xd->mb_segment_tree_probs[2+i]);
+ segment_cost += vp8_cost_zero(xd->mb_segment_tree_probs[2+i]);
+ }
+ else
+ {
+ vp8_write(w, 1, xd->mb_segment_tree_probs[2+i]);
+ segment_cost += vp8_cost_one(xd->mb_segment_tree_probs[2+i]);
+ }
+ }
+ }
+ }
+#else
write_mb_features(w, mi, &cpi->mb.e_mbd);
+#endif
+ }
if (pc->mb_no_coeff_skip)
vp8_encode_bool(w, m->mbmi.mb_skip_coeff, prob_skip_false);
@@ -1061,7 +1132,10 @@ static void write_kfmodes(VP8_COMP *cpi)
const VP8_COMMON *const c = & cpi->common;
/* const */
MODE_INFO *m = c->mi;
-
+#if CONFIG_SEGMENTATION
+ int left_id, above_id;
+ int i;
+#endif
int mb_row = -1;
int prob_skip_false = 0;
@@ -1086,9 +1160,74 @@ static void write_kfmodes(VP8_COMP *cpi)
while (++mb_col < c->mb_cols)
{
const int ym = m->mbmi.mode;
-
+#if CONFIG_SEGMENTATION
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ xd->up_available = (mb_row != 0);
+ xd->left_available = (mb_col != 0);
+#endif
if (cpi->mb.e_mbd.update_mb_segmentation_map)
- write_mb_features(bc, &m->mbmi, &cpi->mb.e_mbd);
+ {
+#if CONFIG_SEGMENTATION
+ if(xd->left_available)
+ left_id = (m-1)->mbmi.segment_id;
+ else
+ left_id = 0;
+
+ if(xd->up_available)
+ above_id = (m-c->mb_cols)->mbmi.segment_id;
+ else
+ above_id = 0;
+
+ if ((m->mbmi.segment_id == left_id) || (m->mbmi.segment_id == above_id))
+ {
+ vp8_write(bc, 0, xd->mb_segment_tree_probs[0]);
+ segment_cost += vp8_cost_zero(xd->mb_segment_tree_probs[0]);
+ if (left_id != above_id)
+ {
+ if(m->mbmi.segment_id == left_id)
+ {
+ vp8_write(bc, 0, xd->mb_segment_tree_probs[1]);
+ segment_cost += vp8_cost_zero(xd->mb_segment_tree_probs[1]);
+ }
+ else
+ {
+ vp8_write(bc, 1, xd->mb_segment_tree_probs[1]);
+ segment_cost += vp8_cost_one(xd->mb_segment_tree_probs[1]);
+ }
+ }
+ else
+ {
+ vp8_write(bc, 0, xd->mb_segment_tree_probs[1]);
+ segment_cost += vp8_cost_zero(xd->mb_segment_tree_probs[1]);
+ }
+ }
+ else
+ {
+ vp8_write(bc, 1, xd->mb_segment_tree_probs[0]);
+ segment_cost += vp8_cost_one(xd->mb_segment_tree_probs[0]);
+
+ for(i = 0; i < MAX_MB_SEGMENTS; i++)
+ {
+ if((left_id != i) && (above_id != i))
+ {
+ if(m->mbmi.segment_id == i)
+ {
+ vp8_write(bc, 0, xd->mb_segment_tree_probs[2+i]);
+ segment_cost += vp8_cost_zero(xd->mb_segment_tree_probs[2+i]);
+ }
+ else
+ {
+ vp8_write(bc, 1, xd->mb_segment_tree_probs[2+i]);
+ segment_cost += vp8_cost_one(xd->mb_segment_tree_probs[2+i]);
+ }
+
+ }
+ }
+ }
+#else
+ write_mb_features(bc, &m->mbmi, &cpi->mb.e_mbd);
+#endif
+ }
if (c->mb_no_coeff_skip)
vp8_encode_bool(bc, m->mbmi.mb_skip_coeff, prob_skip_false);
@@ -1405,8 +1544,10 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
}
else
vp8_start_encode(bc, cx_data);
-
-
+//#if CONFIG_SEGMENTATION
+ //xd->segmentation_enabled =1;
+ xd->update_mb_segmentation_map = 1;
+//#endif
// Signal whether or not Segmentation is enabled
vp8_write_bit(bc, (xd->segmentation_enabled) ? 1 : 0);
@@ -1456,8 +1597,12 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
if (xd->update_mb_segmentation_map)
{
+ #if CONFIG_SEGMENTATION
// Write the probs used to decode the segment id for each macro block.
+ for (i = 0; i < MB_FEATURE_TREE_PROBS+3; i++)
+#else
for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
+#endif
{
int Data = xd->mb_segment_tree_probs[i];
@@ -1621,7 +1766,9 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
active_section = 1;
#endif
}
-
+#if CONFIG_SEGMENTATION
+ //printf("\nseg_cost is %d\n",segment_cost);
+#endif
vp8_stop_encode(bc);
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index a4e377220..adc85096b 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -258,8 +258,9 @@ void encode_mb_row(VP8_COMP *cpi,
int recon_y_stride = cm->last_frame.y_stride;
int recon_uv_stride = cm->last_frame.uv_stride;
int seg_map_index = (mb_row * cpi->common.mb_cols);
-
-
+#if CONFIG_SEGMENTATION
+ int left_id, above_id;
+#endif
// reset above block coeffs
xd->above_context[Y1CONTEXT] = cm->above_context[Y1CONTEXT];
xd->above_context[UCONTEXT ] = cm->above_context[UCONTEXT ];
@@ -305,6 +306,7 @@ void encode_mb_row(VP8_COMP *cpi,
xd->mbmi.segment_id = 0;
vp8cx_mb_init_quantizer(cpi, x);
+
}
else
xd->mbmi.segment_id = 0; // Set to Segment 0 by default
@@ -382,9 +384,48 @@ void encode_mb_row(VP8_COMP *cpi,
recon_yoffset += 16;
recon_uvoffset += 8;
- // Keep track of segment useage
- segment_counts[xd->mbmi.segment_id] ++;
+#if CONFIG_SEGMENTATION
+ if(xd->left_available)
+ left_id = cpi->segmentation_map[seg_map_index+mb_col-1];
+ else
+ left_id = 0;
+
+ if(xd->up_available)
+ above_id = cpi->segmentation_map[seg_map_index+mb_col-cpi->common.mb_cols];
+ else
+ above_id = 0;
+
+ if ((xd->mbmi.segment_id == left_id) || (xd->mbmi.segment_id == above_id))
+ {
+ segment_counts[8]++;
+ if (left_id != above_id)
+ {
+ if(xd->mbmi.segment_id == left_id)
+ segment_counts[10]++;
+ else
+ segment_counts[11]++;
+ }
+ else
+ segment_counts[10]++;
+ }
+ else
+ {
+ segment_counts[9]++;
+ for(i = 0; i < MAX_MB_SEGMENTS; i++)
+ {
+ if((left_id != i) && (above_id != i))
+ {
+ if(xd->mbmi.segment_id == i)
+ segment_counts[i]++;
+ else
+ segment_counts[MAX_MB_SEGMENTS + i]++;
+ }
+ }
+ }
+#else
+ segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
+#endif
// skip to next mb
xd->mode_info_context++;
@@ -406,10 +447,6 @@ void encode_mb_row(VP8_COMP *cpi,
xd->mode_info_context++;
}
-
-
-
-
void vp8_encode_frame(VP8_COMP *cpi)
{
int mb_row;
@@ -419,7 +456,11 @@ void vp8_encode_frame(VP8_COMP *cpi)
int i;
TOKENEXTRA *tp = cpi->tok;
+#if CONFIG_SEGMENTATION
+ int segment_counts[MAX_MB_SEGMENTS + 8];
+#else
int segment_counts[MAX_MB_SEGMENTS];
+#endif
int totalrate;
if (cm->frame_type != KEY_FRAME)
@@ -447,7 +488,6 @@ void vp8_encode_frame(VP8_COMP *cpi)
//cpi->prob_intra_coded = 255;
//}
-
xd->gf_active_ptr = (signed char *)cm->gf_active_flags; // Point to base of GF active flags data structure
x->vector_range = 32;
@@ -467,7 +507,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
#if 0
// Experimental code
- cpi->frame_distortion = 0;
+ cpi->frame_distortion = 0;
cpi->last_mb_distortion = 0;
#endif
@@ -687,32 +727,64 @@ void vp8_encode_frame(VP8_COMP *cpi)
// Set to defaults
vpx_memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs));
+#if CONFIG_SEGMENTATION
+ tot_count = segment_counts[8] + segment_counts[9];
+
+ if (tot_count)
+ xd->mb_segment_tree_probs[0] = (segment_counts[8] * 255) / tot_count;
+
+ tot_count = segment_counts[10] + segment_counts[11];
+
+ if (tot_count > 0)
+ xd->mb_segment_tree_probs[1] = (segment_counts[10] * 255) / tot_count;
+
+ tot_count = segment_counts[0] + segment_counts[4] ;
+
+ if (tot_count > 0)
+ xd->mb_segment_tree_probs[2] = (segment_counts[0] * 255) / tot_count;
+
+ tot_count = segment_counts[1] + segment_counts[5];
+
+ if (tot_count > 0)
+ xd->mb_segment_tree_probs[3] = (segment_counts[1] * 255) / tot_count;
+ tot_count = segment_counts[2] + segment_counts[6];
+
+ if (tot_count > 0)
+ xd->mb_segment_tree_probs[4] = (segment_counts[2] * 255) / tot_count;
+
+ tot_count = segment_counts[3] + segment_counts[7];
+
+ if (tot_count > 0)
+ xd->mb_segment_tree_probs[5] = (segment_counts[3] * 255) / tot_count;
+
+#else
tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];
if (tot_count)
- {
xd->mb_segment_tree_probs[0] = ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;
- tot_count = segment_counts[0] + segment_counts[1];
+ tot_count = segment_counts[0] + segment_counts[1];
- if (tot_count > 0)
- {
- xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;
- }
+ if (tot_count > 0)
+ xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;
- tot_count = segment_counts[2] + segment_counts[3];
+ tot_count = segment_counts[2] + segment_counts[3];
- if (tot_count > 0)
- xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
+ if (tot_count > 0)
+ xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
- // Zero probabilities not allowed
- for (i = 0; i < MB_FEATURE_TREE_PROBS; i ++)
+#endif
+ // Zero probabilities not allowed
+#if CONFIG_SEGMENTATION
+ for (i = 0; i < MB_FEATURE_TREE_PROBS+3; i++)
+#else
+ for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
+#endif
{
if (xd->mb_segment_tree_probs[i] == 0)
xd->mb_segment_tree_probs[i] = 1;
}
- }
}
// 256 rate units to the bit
diff --git a/vp8/encoder/ethreading.c b/vp8/encoder/ethreading.c
index a0b50d2a1..116c5cd20 100644
--- a/vp8/encoder/ethreading.c
+++ b/vp8/encoder/ethreading.c
@@ -86,7 +86,7 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
int seg_map_index = (mb_row * cm->mb_cols);
-
+
while (mb_col > (*last_row_current_mb_col - 1) && *last_row_current_mb_col != cm->mb_cols - 1)
{
x86_pause_hint();
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 56516fcab..bab5f364a 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -252,7 +252,6 @@ static void set_segmentation_map(VP8_PTR ptr, unsigned char *segmentation_map)
// Copy in the new segmentation map
vpx_memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols));
-
// Signal that the map should be updated.
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
@@ -278,12 +277,27 @@ static void set_segment_data(VP8_PTR ptr, signed char *feature_data, unsigned ch
static void segmentation_test_function(VP8_PTR ptr)
{
VP8_COMP *cpi = (VP8_COMP *)(ptr);
-
+#if CONFIG_SEGMENTATION
+ int i,j;
+#endif
unsigned char *seg_map;
signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
-
+ CHECK_MEM_ERROR(seg_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
// Create a temporary map for segmentation data.
- CHECK_MEM_ERROR(seg_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
+#if CONFIG_SEGMENTATION
+ // MB loop to set local segmentation map
+ for (i = 0; i < cpi->common.mb_rows; i++ )
+ {
+ for (j = 0; j < cpi->common.mb_cols; j++ )
+ {
+ if (j >= cpi->common.mb_cols/4 && j < (cpi->common.mb_cols*3)/4 )
+ seg_map[(i*cpi->common.mb_cols) + j] = 2;
+ else
+ seg_map[(i*cpi->common.mb_cols) + j] = 0;
+ }
+ }
+
+#endif
// MB loop to set local segmentation map
/*for ( i = 0; i < cpi->common.mb_rows; i++ )
@@ -344,7 +358,7 @@ static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
// Create a temporary map for segmentation data.
- CHECK_MEM_ERROR(seg_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
+ CHECK_MEM_ERROR(seg_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
cpi->cyclic_refresh_q = Q;
@@ -1924,7 +1938,7 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
VP8_COMP *cpi;
VP8_PTR ptr;
} ctx;
-
+
VP8_COMP *cpi;
VP8_COMMON *cm;
@@ -1987,7 +2001,7 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
// Create the encoder segmentation map and set all entries to 0
- CHECK_MEM_ERROR(cpi->segmentation_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
+ CHECK_MEM_ERROR(cpi->segmentation_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
vpx_memset(cpi->active_map , 1, (cpi->common.mb_rows * cpi->common.mb_cols));
cpi->active_map_enabled = 0;
@@ -2026,13 +2040,12 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
cpi->cyclic_refresh_q = 32;
if (cpi->cyclic_refresh_mode_enabled)
- {
CHECK_MEM_ERROR(cpi->cyclic_refresh_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
- }
else
cpi->cyclic_refresh_map = (signed char *) NULL;
// Test function for segmentation
+
//segmentation_test_function((VP8_PTR) cpi);
// Loop filter mode / ref deltas test function
@@ -3529,11 +3542,16 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, unsigned long *size, unsign
int drop_mark75 = drop_mark * 2 / 3;
int drop_mark50 = drop_mark / 4;
int drop_mark25 = drop_mark / 8;
-
+#if CONFIG_SEGMENTATION
+ int i;
+#endif
// Clear down mmx registers to allow floating point in what follows
vp8_clear_system_state();
// Test code for segmentation of gf/arf (0,0)
+#if CONFIG_SEGMENTATION
+ segmentation_test_function((VP8_PTR) cpi);
+#endif
//segmentation_test_function((VP8_PTR) cpi);
// For an alt ref frame in 2 pass we skip the call to the second pass function that sets the target bandwidth
@@ -4039,7 +4057,28 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, unsigned long *size, unsign
vp8_setup_key_frame(cpi);
// transform / motion compensation build reconstruction frame
-
+#if CONFIG_SEGMENTATION
+ // MB loop to set local segmentation map
+ for (i = 0; i < cpi->common.mb_rows; i++ )
+ {
+ int j = (cpi->common.mb_cols/4);
+ int k = (cpi->common.mb_cols*3)/4;
+ if((cm->current_video_frame%2 == 0 && i<cpi->common.mb_rows/2)||(cm->current_video_frame%2 == 1 && i>cpi->common.mb_rows/2))
+ {
+ cpi->segmentation_map[(i*cpi->common.mb_cols) + j] = 2;
+ cpi->segmentation_map[(i*cpi->common.mb_cols) + j-1] = 2;
+ cpi->segmentation_map[(i*cpi->common.mb_cols) + k] = 2;
+ cpi->segmentation_map[(i*cpi->common.mb_cols) + k+1] = 2;
+ }
+ else if((cm->current_video_frame%2 == 1 && i<cpi->common.mb_rows/2)||(cm->current_video_frame%2 == 0 && i>cpi->common.mb_rows/2))
+ {
+ cpi->segmentation_map[(i*cpi->common.mb_cols) + j] = 0;
+ cpi->segmentation_map[(i*cpi->common.mb_cols) + j-1] = 0;
+ cpi->segmentation_map[(i*cpi->common.mb_cols) + k] = 0;
+ cpi->segmentation_map[(i*cpi->common.mb_cols) + k+1] = 0;
+ }
+ }
+#endif
vp8_encode_frame(cpi);
cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
cpi->projected_frame_size = (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index 55076b091..ec1774c81 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -187,7 +187,11 @@ typedef struct
MACROBLOCK mb;
int mb_row;
TOKENEXTRA *tp;
+#if CONFIG_SEGMENTATION
+ int segment_counts[MAX_MB_SEGMENTS + 8];
+#else
int segment_counts[MAX_MB_SEGMENTS];
+#endif
int totalrate;
int current_mb_col;
} MB_ROW_COMP;
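
For reference, the encodeframe.c hunk above turns the gathered segment_counts[] into the six probabilities that drive the delta coding. The sketch below, which is not part of the patch, reproduces that arithmetic in isolation under the same index layout (counts 0-7 for the explicit-segment branches, 8/9 for neighbour-vs-explicit, 10/11 for left-vs-above); prob_from_counts() and build_segment_tree_probs() are hypothetical helper names, not functions in libvpx.

/*
 * Companion sketch (not in the patch): derive mb_segment_tree_probs[] from
 * the extended segment_counts[] array gathered in encode_mb_row().
 *   counts[0..3]  explicit coding chose segment i   (a 0 was written for i)
 *   counts[4..7]  explicit coding rejected segment i (a 1 was written for i)
 *   counts[8]     MBs that matched the left or above neighbour
 *   counts[9]     MBs that needed an explicit segment id
 *   counts[10]    matches that picked the left neighbour
 *   counts[11]    matches that picked the above neighbour
 */
#include <stdio.h>

#define MAX_MB_SEGMENTS 4

static unsigned char prob_from_counts(int zero_hits, int one_hits)
{
    int tot = zero_hits + one_hits;
    int p = tot ? (zero_hits * 255) / tot : 255; /* 255 is the memset default */
    return (unsigned char)(p ? p : 1);           /* zero probabilities not allowed */
}

static void build_segment_tree_probs(const int counts[12], unsigned char probs[6])
{
    int i;
    probs[0] = prob_from_counts(counts[8], counts[9]);   /* neighbour vs explicit */
    probs[1] = prob_from_counts(counts[10], counts[11]); /* left vs above         */
    for (i = 0; i < MAX_MB_SEGMENTS; i++)                /* explicit segment i    */
        probs[2 + i] = prob_from_counts(counts[i], counts[4 + i]);
}

int main(void)
{
    /* example frame: 80% of MBs match a neighbour, 3 of 4 matches pick the left */
    int counts[12] = { 5, 3, 2, 0, 10, 12, 13, 15, 80, 20, 60, 20 };
    unsigned char probs[6];
    int i;

    build_segment_tree_probs(counts, probs);
    for (i = 0; i < 6; i++)
        printf("mb_segment_tree_probs[%d] = %d\n", i, probs[i]);
    return 0;
}
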