summary refs log tree commit diff
path: root/vp8
diff options
context:
space:
mode:
authorJohn Koleszar <jkoleszar@google.com>2011-05-20 00:05:16 -0400
committerJohn Koleszar <jkoleszar@google.com>2011-05-20 00:05:16 -0400
commit27331e1377465aad1d97f9796dca56542ba389af (patch)
treee752f334e7c90cc48dbc81cdce81554bdba65791 /vp8
parente05fd0fc36ef7e17be6dc87a7ff2ae72a5fa2473 (diff)
parent1f3f18443db5820157448515579c14efd19eea1b (diff)
downloadlibvpx-27331e1377465aad1d97f9796dca56542ba389af.tar
libvpx-27331e1377465aad1d97f9796dca56542ba389af.tar.gz
libvpx-27331e1377465aad1d97f9796dca56542ba389af.tar.bz2
libvpx-27331e1377465aad1d97f9796dca56542ba389af.zip
Merge remote branch 'internal/upstream' into HEAD
Diffstat (limited to 'vp8')
-rw-r--r--vp8/common/alloccommon.c23
-rw-r--r--vp8/common/generic/systemdependent.c2
-rw-r--r--vp8/common/onyxc_int.h2
-rw-r--r--vp8/common/onyxd.h1
-rw-r--r--vp8/common/x86/recon_sse2.asm59
-rw-r--r--vp8/common/x86/recon_wrapper_sse2.c18
-rw-r--r--vp8/decoder/decodemv.c23
-rw-r--r--vp8/decoder/decodframe.c219
-rw-r--r--vp8/decoder/ec_types.h49
-rw-r--r--vp8/decoder/error_concealment.c613
-rw-r--r--vp8/decoder/error_concealment.h41
-rw-r--r--vp8/decoder/onyxd_if.c49
-rw-r--r--vp8/decoder/onyxd_int.h10
-rw-r--r--vp8/encoder/encodeframe.c376
-rw-r--r--vp8/encoder/encodeintra.c3
-rw-r--r--vp8/encoder/encodeintra.h1
-rw-r--r--vp8/encoder/generic/csystemdependent.c2
-rw-r--r--vp8/encoder/onyx_if.c61
-rw-r--r--vp8/encoder/onyx_int.h4
-rw-r--r--vp8/encoder/pickinter.c29
-rw-r--r--vp8/encoder/quantize.c418
-rw-r--r--vp8/encoder/quantize.h7
-rw-r--r--vp8/encoder/rdopt.c54
-rw-r--r--vp8/encoder/variance.h2
-rw-r--r--vp8/encoder/x86/x86_csystemdependent.c4
-rw-r--r--vp8/vp8_dx_iface.c8
-rw-r--r--vp8/vp8cx.mk6
-rw-r--r--vp8/vp8dx.mk3
28 files changed, 1545 insertions(+), 542 deletions(-)
diff --git a/vp8/common/alloccommon.c b/vp8/common/alloccommon.c
index c457312f4..4f9d6f6d0 100644
--- a/vp8/common/alloccommon.c
+++ b/vp8/common/alloccommon.c
@@ -27,6 +27,9 @@ static void update_mode_info_border(MODE_INFO *mi, int rows, int cols)
for (i = 0; i < rows; i++)
{
+ /* TODO(holmer): Bug? This updates the last element of each row
+ * rather than the border element!
+ */
vpx_memset(&mi[i*cols-1], 0, sizeof(MODE_INFO));
}
}
@@ -43,9 +46,11 @@ void vp8_de_alloc_frame_buffers(VP8_COMMON *oci)
vpx_free(oci->above_context);
vpx_free(oci->mip);
+ vpx_free(oci->prev_mip);
oci->above_context = 0;
oci->mip = 0;
+ oci->prev_mip = 0;
}
@@ -110,6 +115,21 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height)
oci->mi = oci->mip + oci->mode_info_stride + 1;
+ /* allocate memory for last frame MODE_INFO array */
+#if CONFIG_ERROR_CONCEALMENT
+ oci->prev_mip = vpx_calloc((oci->mb_cols + 1) * (oci->mb_rows + 1), sizeof(MODE_INFO));
+
+ if (!oci->prev_mip)
+ {
+ vp8_de_alloc_frame_buffers(oci);
+ return 1;
+ }
+
+ oci->prev_mi = oci->prev_mip + oci->mode_info_stride + 1;
+#else
+ oci->prev_mip = NULL;
+ oci->prev_mi = NULL;
+#endif
oci->above_context = vpx_calloc(sizeof(ENTROPY_CONTEXT_PLANES) * oci->mb_cols, 1);
@@ -120,6 +140,9 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height)
}
update_mode_info_border(oci->mi, oci->mb_rows, oci->mb_cols);
+#if CONFIG_ERROR_CONCEALMENT
+ update_mode_info_border(oci->prev_mi, oci->mb_rows, oci->mb_cols);
+#endif
return 0;
}
diff --git a/vp8/common/generic/systemdependent.c b/vp8/common/generic/systemdependent.c
index c7fbb3e09..d9d439cf8 100644
--- a/vp8/common/generic/systemdependent.c
+++ b/vp8/common/generic/systemdependent.c
@@ -113,7 +113,7 @@ void vp8_machine_specific_config(VP8_COMMON *ctx)
rtcd->loopfilter.simple_mb_h = vp8_loop_filter_mbhs_c;
rtcd->loopfilter.simple_b_h = vp8_loop_filter_bhs_c;
-#if CONFIG_POSTPROC || (CONFIG_VP8_ENCODER && CONFIG_PSNR)
+#if CONFIG_POSTPROC || (CONFIG_VP8_ENCODER && CONFIG_INTERNAL_STATS)
rtcd->postproc.down = vp8_mbpost_proc_down_c;
rtcd->postproc.across = vp8_mbpost_proc_across_ip_c;
rtcd->postproc.downacross = vp8_post_proc_down_and_across_c;
diff --git a/vp8/common/onyxc_int.h b/vp8/common/onyxc_int.h
index 0565127e1..c992f473a 100644
--- a/vp8/common/onyxc_int.h
+++ b/vp8/common/onyxc_int.h
@@ -140,6 +140,8 @@ typedef struct VP8Common
MODE_INFO *mip; /* Base of allocated array */
MODE_INFO *mi; /* Corresponds to upper left visible macroblock */
+ MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */
+ MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */
INTERPOLATIONFILTERTYPE mcomp_filter_type;
diff --git a/vp8/common/onyxd.h b/vp8/common/onyxd.h
index e53bc3138..140dc5728 100644
--- a/vp8/common/onyxd.h
+++ b/vp8/common/onyxd.h
@@ -31,6 +31,7 @@ extern "C"
int Version;
int postprocess;
int max_threads;
+ int error_concealment;
} VP8D_CONFIG;
typedef enum
{
diff --git a/vp8/common/x86/recon_sse2.asm b/vp8/common/x86/recon_sse2.asm
index 86c421a9a..00b74387c 100644
--- a/vp8/common/x86/recon_sse2.asm
+++ b/vp8/common/x86/recon_sse2.asm
@@ -584,23 +584,35 @@ sym(vp8_intra_pred_uv_ve_mmx):
; unsigned char *src,
; int src_stride,
; )
-global sym(vp8_intra_pred_uv_ho_mmx2)
-sym(vp8_intra_pred_uv_ho_mmx2):
+%macro vp8_intra_pred_uv_ho 1
+global sym(vp8_intra_pred_uv_ho_%1)
+sym(vp8_intra_pred_uv_ho_%1):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 4
push rsi
push rdi
+%ifidn %1, ssse3
+ push rbx
+%endif
; end prolog
; read from left and write out
+%ifidn %1, mmx2
mov edx, 4
+%endif
mov rsi, arg(2) ;src;
movsxd rax, dword ptr arg(3) ;src_stride;
mov rdi, arg(0) ;dst;
movsxd rcx, dword ptr arg(1) ;dst_stride
+%ifidn %1, ssse3
+ lea rbx, [rax*3]
+ lea rdx, [rcx*3]
+ movdqa xmm2, [GLOBAL(dc_00001111)]
+%endif
dec rsi
-vp8_intra_pred_uv_ho_mmx2_loop:
+%ifidn %1, mmx2
+vp8_intra_pred_uv_ho_%1_loop:
movd mm0, [rsi]
movd mm1, [rsi+rax]
punpcklbw mm0, mm0
@@ -612,14 +624,49 @@ vp8_intra_pred_uv_ho_mmx2_loop:
lea rsi, [rsi+rax*2]
lea rdi, [rdi+rcx*2]
dec edx
- jnz vp8_intra_pred_uv_ho_mmx2_loop
+ jnz vp8_intra_pred_uv_ho_%1_loop
+%else
+ movd xmm0, [rsi]
+ movd xmm3, [rsi+rax]
+ movd xmm1, [rsi+rax*2]
+ movd xmm4, [rsi+rbx]
+ punpcklbw xmm0, xmm3
+ punpcklbw xmm1, xmm4
+ pshufb xmm0, xmm2
+ pshufb xmm1, xmm2
+ movq [rdi ], xmm0
+ movhps [rdi+rcx], xmm0
+ movq [rdi+rcx*2], xmm1
+ movhps [rdi+rdx], xmm1
+ lea rsi, [rsi+rax*4]
+ lea rdi, [rdi+rcx*4]
+ movd xmm0, [rsi]
+ movd xmm3, [rsi+rax]
+ movd xmm1, [rsi+rax*2]
+ movd xmm4, [rsi+rbx]
+ punpcklbw xmm0, xmm3
+ punpcklbw xmm1, xmm4
+ pshufb xmm0, xmm2
+ pshufb xmm1, xmm2
+ movq [rdi ], xmm0
+ movhps [rdi+rcx], xmm0
+ movq [rdi+rcx*2], xmm1
+ movhps [rdi+rdx], xmm1
+%endif
; begin epilog
+%ifidn %1, ssse3
+ pop rbx
+%endif
pop rdi
pop rsi
UNSHADOW_ARGS
pop rbp
ret
+%endmacro
+
+vp8_intra_pred_uv_ho mmx2
+vp8_intra_pred_uv_ho ssse3
SECTION_RODATA
dc_128:
@@ -629,3 +676,7 @@ dc_4:
align 16
dc_1024:
times 8 dw 0x400
+align 16
+dc_00001111:
+ times 8 db 0
+ times 8 db 1
diff --git a/vp8/common/x86/recon_wrapper_sse2.c b/vp8/common/x86/recon_wrapper_sse2.c
index 86b4da2c2..cb7b69c08 100644
--- a/vp8/common/x86/recon_wrapper_sse2.c
+++ b/vp8/common/x86/recon_wrapper_sse2.c
@@ -23,6 +23,7 @@ extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_dctop_mmx2);
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_dcleft_mmx2);
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_dc128_mmx);
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_ho_mmx2);
+extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_ho_ssse3);
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_ve_mmx);
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_tm_sse2);
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_tm_ssse3);
@@ -31,7 +32,8 @@ static void vp8_build_intra_predictors_mbuv_x86(MACROBLOCKD *x,
unsigned char *dst_u,
unsigned char *dst_v,
int dst_stride,
- build_intra_predictors_mbuv_fn_t tm_func)
+ build_intra_predictors_mbuv_fn_t tm_func,
+ build_intra_predictors_mbuv_fn_t ho_func)
{
int mode = x->mode_info_context->mbmi.uv_mode;
build_intra_predictors_mbuv_fn_t fn;
@@ -39,7 +41,7 @@ static void vp8_build_intra_predictors_mbuv_x86(MACROBLOCKD *x,
switch (mode) {
case V_PRED: fn = vp8_intra_pred_uv_ve_mmx; break;
- case H_PRED: fn = vp8_intra_pred_uv_ho_mmx2; break;
+ case H_PRED: fn = ho_func; break;
case TM_PRED: fn = tm_func; break;
case DC_PRED:
if (x->up_available) {
@@ -65,26 +67,30 @@ void vp8_build_intra_predictors_mbuv_sse2(MACROBLOCKD *x)
{
vp8_build_intra_predictors_mbuv_x86(x, &x->predictor[256],
&x->predictor[320], 8,
- vp8_intra_pred_uv_tm_sse2);
+ vp8_intra_pred_uv_tm_sse2,
+ vp8_intra_pred_uv_ho_mmx2);
}
void vp8_build_intra_predictors_mbuv_ssse3(MACROBLOCKD *x)
{
vp8_build_intra_predictors_mbuv_x86(x, &x->predictor[256],
&x->predictor[320], 8,
- vp8_intra_pred_uv_tm_ssse3);
+ vp8_intra_pred_uv_tm_ssse3,
+ vp8_intra_pred_uv_ho_ssse3);
}
void vp8_build_intra_predictors_mbuv_s_sse2(MACROBLOCKD *x)
{
vp8_build_intra_predictors_mbuv_x86(x, x->dst.u_buffer,
x->dst.v_buffer, x->dst.uv_stride,
- vp8_intra_pred_uv_tm_sse2);
+ vp8_intra_pred_uv_tm_sse2,
+ vp8_intra_pred_uv_ho_mmx2);
}
void vp8_build_intra_predictors_mbuv_s_ssse3(MACROBLOCKD *x)
{
vp8_build_intra_predictors_mbuv_x86(x, x->dst.u_buffer,
x->dst.v_buffer, x->dst.uv_stride,
- vp8_intra_pred_uv_tm_ssse3);
+ vp8_intra_pred_uv_tm_ssse3,
+ vp8_intra_pred_uv_ho_ssse3);
}
diff --git a/vp8/decoder/decodemv.c b/vp8/decoder/decodemv.c
index 39a39e160..a97193f0f 100644
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -246,6 +246,12 @@ static void mb_mode_mv_init(VP8D_COMP *pbi)
MACROBLOCKD *const xd = & pbi->mb;
#endif
+#if CONFIG_ERROR_CONCEALMENT
+ /* Default is that no macroblock is corrupt, therefore we initialize
+ * mvs_corrupt_from_mb to something very big, which we can be sure is
+ * outside the frame. */
+ pbi->mvs_corrupt_from_mb = UINT_MAX;
+#endif
pbi->prob_skip_false = 0;
if (pbi->common.mb_no_coeff_skip)
pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8);
@@ -285,6 +291,7 @@ static void mb_mode_mv_init(VP8D_COMP *pbi)
}
}
+
static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
int mb_row, int mb_col)
{
@@ -445,7 +452,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
do {
mi->bmi[ *fill_offset] = bmi;
- fill_offset++;
+ fill_offset++;
}while (--fill_count);
}
@@ -566,12 +573,26 @@ void vp8_decode_mode_mvs(VP8D_COMP *pbi)
while (++mb_col < pbi->common.mb_cols)
{
+ int mb_num = mb_row * pbi->common.mb_cols + mb_col;
/*read_mb_modes_mv(pbi, xd->mode_info_context, &xd->mode_info_context->mbmi, mb_row, mb_col);*/
if(pbi->common.frame_type == KEY_FRAME)
vp8_kfread_modes(pbi, mi, mb_row, mb_col);
else
read_mb_modes_mv(pbi, mi, &mi->mbmi, mb_row, mb_col);
+#if CONFIG_ERROR_CONCEALMENT
+ /* look for corruption. set mvs_corrupt_from_mb to the current
+ * mb_num if the frame is corrupt from this macroblock. */
+ if (vp8dx_bool_error(&pbi->bc) && mb_num < pbi->mvs_corrupt_from_mb)
+ {
+ pbi->mvs_corrupt_from_mb = mb_num;
+ /* no need to continue since the partition is corrupt from
+ * here on.
+ */
+ return;
+ }
+#endif
+
mi++; /* next macroblock */
}
diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c
index 90de58ed6..ee065451c 100644
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -27,6 +27,9 @@
#include "decodemv.h"
#include "vp8/common/extend.h"
+#if CONFIG_ERROR_CONCEALMENT
+#include "error_concealment.h"
+#endif
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/idct.h"
#include "dequantize.h"
@@ -176,7 +179,8 @@ void clamp_mvs(MACROBLOCKD *xd)
}
-static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
+static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
+ unsigned int mb_idx)
{
int eobtotal = 0;
int i, do_clamp = xd->mode_info_context->mbmi.need_to_clamp_mvs;
@@ -233,6 +237,19 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
vp8_build_inter_predictors_mb(xd);
}
+#if CONFIG_ERROR_CONCEALMENT
+ if (pbi->ec_enabled &&
+ (mb_idx > pbi->mvs_corrupt_from_mb ||
+ vp8dx_bool_error(xd->current_bc)))
+ {
+ /* MB with corrupt residuals or corrupt mode/motion vectors.
+ * Better to use the predictor as reconstruction.
+ */
+ vp8_conceal_corrupt_mb(xd);
+ return;
+ }
+#endif
+
/* dequantization and idct */
if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
{
@@ -355,6 +372,32 @@ decode_mb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mb_row, MACROBLOCKD *xd)
for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
{
+ /* Distance of Mb to the various image edges.
+ * These are specified to 8th pel as they are always compared to values
+ * that are in 1/8th pel units
+ */
+ xd->mb_to_left_edge = -((mb_col * 16) << 3);
+ xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
+
+#if CONFIG_ERROR_CONCEALMENT
+ if (pbi->ec_enabled &&
+ xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME &&
+ vp8dx_bool_error(xd->current_bc))
+ {
+ /* We have an intra block with corrupt coefficients, better to
+ * conceal with an inter block. Interpolate MVs from neighboring MBs
+ *
+ * Note that for the first mb with corrupt residual in a frame,
+ * we might not discover that before decoding the residual. That
+ * happens after this check, and therefore no inter concealment will
+ * be done.
+ */
+ vp8_interpolate_motion(xd,
+ mb_row, mb_col,
+ pc->mb_rows, pc->mb_cols,
+ pc->mode_info_stride);
+ }
+#endif
if (xd->mode_info_context->mbmi.mode == SPLITMV || xd->mode_info_context->mbmi.mode == B_PRED)
{
@@ -365,12 +408,6 @@ decode_mb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mb_row, MACROBLOCKD *xd)
}
}
- /* Distance of Mb to the various image edges.
- * These are specified to 8th pel as they are always compared to values that are in 1/8th pel units
- */
- xd->mb_to_left_edge = -((mb_col * 16) << 3);
- xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
-
xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
@@ -403,7 +440,7 @@ decode_mb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mb_row, MACROBLOCKD *xd)
else
pbi->debugoutput =0;
*/
- decode_macroblock(pbi, xd);
+ decode_macroblock(pbi, xd, mb_row * pc->mb_cols + mb_col);
/* check if the boolean decoder has suffered an error */
xd->corrupted |= vp8dx_bool_error(xd->current_bc);
@@ -477,8 +514,8 @@ static void setup_token_decoder(VP8D_COMP *pbi,
partition_size = user_data_end - partition;
}
- if (partition + partition_size > user_data_end
- || partition + partition_size < partition)
+ if (!pbi->ec_enabled && (partition + partition_size > user_data_end
+ || partition + partition_size < partition))
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt partition "
"%d length", i + 1);
@@ -592,63 +629,105 @@ int vp8_decode_frame(VP8D_COMP *pbi)
pc->yv12_fb[pc->new_fb_idx].corrupted = 0;
if (data_end - data < 3)
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Truncated packet");
- pc->frame_type = (FRAME_TYPE)(data[0] & 1);
- pc->version = (data[0] >> 1) & 7;
- pc->show_frame = (data[0] >> 4) & 1;
- first_partition_length_in_bytes =
- (data[0] | (data[1] << 8) | (data[2] << 16)) >> 5;
- data += 3;
-
- if (data + first_partition_length_in_bytes > data_end
- || data + first_partition_length_in_bytes < data)
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Truncated packet or corrupt partition 0 length");
- vp8_setup_version(pc);
-
- if (pc->frame_type == KEY_FRAME)
{
- const int Width = pc->Width;
- const int Height = pc->Height;
-
- /* vet via sync code */
- if (data[0] != 0x9d || data[1] != 0x01 || data[2] != 0x2a)
- vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM,
- "Invalid frame sync code");
-
- pc->Width = (data[3] | (data[4] << 8)) & 0x3fff;
- pc->horiz_scale = data[4] >> 6;
- pc->Height = (data[5] | (data[6] << 8)) & 0x3fff;
- pc->vert_scale = data[6] >> 6;
- data += 7;
-
- if (Width != pc->Width || Height != pc->Height)
+ if (pbi->ec_enabled)
{
- int prev_mb_rows = pc->mb_rows;
+ /* Declare the missing frame as an inter frame since it will
+ be handled as an inter frame when we have estimated its
+ motion vectors. */
+ pc->frame_type = INTER_FRAME;
+ pc->version = 0;
+ pc->show_frame = 1;
+ first_partition_length_in_bytes = 0;
+ }
+ else
+ {
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet");
+ }
+ }
+ else
+ {
+ pc->frame_type = (FRAME_TYPE)(data[0] & 1);
+ pc->version = (data[0] >> 1) & 7;
+ pc->show_frame = (data[0] >> 4) & 1;
+ first_partition_length_in_bytes =
+ (data[0] | (data[1] << 8) | (data[2] << 16)) >> 5;
+ data += 3;
+
+ if (!pbi->ec_enabled && (data + first_partition_length_in_bytes > data_end
+ || data + first_partition_length_in_bytes < data))
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt partition 0 length");
+ vp8_setup_version(pc);
- if (pc->Width <= 0)
+ if (pc->frame_type == KEY_FRAME)
+ {
+ const int Width = pc->Width;
+ const int Height = pc->Height;
+
+ /* vet via sync code */
+ /* When error concealment is enabled we should only check the sync
+ * code if we have enough bits available
+ */
+ if (!pbi->ec_enabled || data + 3 < data_end)
{
- pc->Width = Width;
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Invalid frame width");
+ if (data[0] != 0x9d || data[1] != 0x01 || data[2] != 0x2a)
+ vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid frame sync code");
}
- if (pc->Height <= 0)
+ /* If error concealment is enabled we should only parse the new size
+ * if we have enough data. Otherwise we will end up with the wrong
+ * size.
+ */
+ if (!pbi->ec_enabled || data + 6 < data_end)
{
- pc->Height = Height;
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Invalid frame height");
+ pc->Width = (data[3] | (data[4] << 8)) & 0x3fff;
+ pc->horiz_scale = data[4] >> 6;
+ pc->Height = (data[5] | (data[6] << 8)) & 0x3fff;
+ pc->vert_scale = data[6] >> 6;
}
+ data += 7;
+
+ if (Width != pc->Width || Height != pc->Height)
+ {
+ int prev_mb_rows = pc->mb_rows;
+
+ if (pc->Width <= 0)
+ {
+ pc->Width = Width;
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid frame width");
+ }
+
+ if (pc->Height <= 0)
+ {
+ pc->Height = Height;
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid frame height");
+ }
- if (vp8_alloc_frame_buffers(pc, pc->Width, pc->Height))
- vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
- "Failed to allocate frame buffers");
+ if (vp8_alloc_frame_buffers(pc, pc->Width, pc->Height))
+ vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate frame buffers");
+
+#if CONFIG_ERROR_CONCEALMENT
+ pbi->overlaps = NULL;
+ if (pbi->ec_enabled)
+ {
+ if (vp8_alloc_overlap_lists(pbi))
+ vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate overlap lists "
+ "for error concealment");
+ }
+#endif
#if CONFIG_MULTITHREAD
- if (pbi->b_multithreaded_rd)
- vp8mt_alloc_temp_buffers(pbi, pc->Width, prev_mb_rows);
+ if (pbi->b_multithreaded_rd)
+ vp8mt_alloc_temp_buffers(pbi, pc->Width, prev_mb_rows);
#endif
+ }
}
}
@@ -793,7 +872,20 @@ int vp8_decode_frame(VP8D_COMP *pbi)
{
/* Should the GF or ARF be updated from the current frame */
pc->refresh_golden_frame = vp8_read_bit(bc);
+#if CONFIG_ERROR_CONCEALMENT
+ /* Assume we shouldn't refresh golden if the bit is missing */
+ xd->corrupted |= vp8dx_bool_error(bc);
+ if (pbi->ec_enabled && xd->corrupted)
+ pc->refresh_golden_frame = 0;
+#endif
+
pc->refresh_alt_ref_frame = vp8_read_bit(bc);
+#if CONFIG_ERROR_CONCEALMENT
+ /* Assume we shouldn't refresh altref if the bit is missing */
+ xd->corrupted |= vp8dx_bool_error(bc);
+ if (pbi->ec_enabled && xd->corrupted)
+ pc->refresh_alt_ref_frame = 0;
+#endif
/* Buffer to buffer copy flags. */
pc->copy_buffer_to_gf = 0;
@@ -818,6 +910,13 @@ int vp8_decode_frame(VP8D_COMP *pbi)
pc->refresh_last_frame = pc->frame_type == KEY_FRAME || vp8_read_bit(bc);
+#if CONFIG_ERROR_CONCEALMENT
+ /* Assume we should refresh the last frame if the bit is missing */
+ xd->corrupted |= vp8dx_bool_error(bc);
+ if (pbi->ec_enabled && xd->corrupted)
+ pc->refresh_last_frame = 1;
+#endif
+
if (0)
{
FILE *z = fopen("decodestats.stt", "a");
@@ -877,6 +976,16 @@ int vp8_decode_frame(VP8D_COMP *pbi)
vp8_decode_mode_mvs(pbi);
+#if CONFIG_ERROR_CONCEALMENT
+ if (pbi->ec_enabled &&
+ pbi->mvs_corrupt_from_mb < (unsigned int)pc->mb_cols * pc->mb_rows)
+ {
+ /* Motion vectors are missing in this frame. We will try to estimate
+ * them and then continue decoding the frame as usual */
+ vp8_estimate_missing_mvs(pbi);
+ }
+#endif
+
vpx_memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
vpx_memcpy(&xd->block[0].bmi, &xd->mode_info_context->bmi[0], sizeof(B_MODE_INFO));
diff --git a/vp8/decoder/ec_types.h b/vp8/decoder/ec_types.h
new file mode 100644
index 000000000..a4f8c78b3
--- /dev/null
+++ b/vp8/decoder/ec_types.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP8_DEC_EC_TYPES_H
+#define VP8_DEC_EC_TYPES_H
+
+#define MAX_OVERLAPS 16
+
+/* The area (pixel area in Q6) the block pointed to by bmi overlaps
+ * another block with.
+ */
+typedef struct
+{
+ int overlap;
+ B_MODE_INFO *bmi;
+} OVERLAP_NODE;
+
+/* Structure to keep track of overlapping blocks on a block level. */
+typedef struct
+{
+ /* TODO(holmer): This array should be exchanged for a linked list */
+ OVERLAP_NODE overlaps[MAX_OVERLAPS];
+} B_OVERLAP;
+
+/* Structure used to hold all the overlaps of a macroblock. The overlaps of a
+ * macroblock is further divided into block overlaps.
+ */
+typedef struct
+{
+ B_OVERLAP overlaps[16];
+} MB_OVERLAP;
+
+/* Structure for keeping track of motion vectors and which reference frame they
+ * refer to. Used for motion vector interpolation.
+ */
+typedef struct
+{
+ MV mv;
+ MV_REFERENCE_FRAME ref_frame;
+} EC_BLOCK;
+
+#endif /* VP8_DEC_EC_TYPES_H */
diff --git a/vp8/decoder/error_concealment.c b/vp8/decoder/error_concealment.c
new file mode 100644
index 000000000..dcb5c86a5
--- /dev/null
+++ b/vp8/decoder/error_concealment.c
@@ -0,0 +1,613 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "error_concealment.h"
+#include "onyxd_int.h"
+#include "decodemv.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/common/recon.h"
+#include "vp8/common/findnearmv.h"
+
+#include <assert.h>
+
+#define MIN(x,y) (((x)<(y))?(x):(y))
+#define MAX(x,y) (((x)>(y))?(x):(y))
+
+#define FLOOR(x,q) ((x) & -(1 << (q)))
+
+#define NUM_NEIGHBORS 20
+
+typedef struct ec_position
+{
+ int row;
+ int col;
+} EC_POS;
+
+/*
+ * Regenerate the table in Matlab with:
+ * x = meshgrid((1:4), (1:4));
+ * y = meshgrid((1:4), (1:4))';
+ * W = round((1./(sqrt(x.^2 + y.^2))*2^7));
+ * W(1,1) = 0;
+ */
+static const int weights_q7[5][5] = {
+ { 0, 128, 64, 43, 32 },
+ {128, 91, 57, 40, 31 },
+ { 64, 57, 45, 36, 29 },
+ { 43, 40, 36, 30, 26 },
+ { 32, 31, 29, 26, 23 }
+};
+
+int vp8_alloc_overlap_lists(VP8D_COMP *pbi)
+{
+ if (pbi->overlaps != NULL)
+ {
+ vpx_free(pbi->overlaps);
+ pbi->overlaps = NULL;
+ }
+ pbi->overlaps = vpx_calloc(pbi->common.mb_rows * pbi->common.mb_cols,
+ sizeof(MB_OVERLAP));
+ if (pbi->overlaps == NULL)
+ return -1;
+ vpx_memset(pbi->overlaps, 0,
+ sizeof(MB_OVERLAP) * pbi->common.mb_rows * pbi->common.mb_cols);
+ return 0;
+}
+
+void vp8_de_alloc_overlap_lists(VP8D_COMP *pbi)
+{
+ vpx_free(pbi->overlaps);
+ pbi->overlaps = NULL;
+}
+
+/* Inserts a new overlap area value to the list of overlaps of a block */
+static void assign_overlap(OVERLAP_NODE* overlaps,
+ B_MODE_INFO *bmi,
+ int overlap)
+{
+ int i;
+ if (overlap <= 0)
+ return;
+ /* Find and assign to the next empty overlap node in the list of overlaps.
+ * Empty is defined as bmi == NULL */
+ for (i = 0; i < MAX_OVERLAPS; i++)
+ {
+ if (overlaps[i].bmi == NULL)
+ {
+ overlaps[i].bmi = bmi;
+ overlaps[i].overlap = overlap;
+ break;
+ }
+ }
+}
+
+/* Calculates the overlap area between two 4x4 squares, where the first
+ * square has its upper-left corner at (b1_row, b1_col) and the second
+ * square has its upper-left corner at (b2_row, b2_col). Doesn't
+ * properly handle squares which do not overlap.
+ */
+static int block_overlap(int b1_row, int b1_col, int b2_row, int b2_col)
+{
+ const int int_top = MAX(b1_row, b2_row); // top
+ const int int_left = MAX(b1_col, b2_col); // left
+ /* Since each block is 4x4 pixels, adding 4 (Q3) to the left/top edge
+ * gives us the right/bottom edge.
+ */
+ const int int_right = MIN(b1_col + (4<<3), b2_col + (4<<3)); // right
+ const int int_bottom = MIN(b1_row + (4<<3), b2_row + (4<<3)); // bottom
+ return (int_bottom - int_top) * (int_right - int_left);
+}
+
+/* Calculates the overlap area for all blocks in a macroblock at position
+ * (mb_row, mb_col) in macroblocks, which are being overlapped by a given
+ * overlapping block at position (new_row, new_col) (in pixels, Q3). The
+ * first block being overlapped in the macroblock has position (first_blk_row,
+ * first_blk_col) in blocks relative the upper-left corner of the image.
+ */
+static void calculate_overlaps_mb(B_OVERLAP *b_overlaps, B_MODE_INFO *bmi,
+ int new_row, int new_col,
+ int mb_row, int mb_col,
+ int first_blk_row, int first_blk_col)
+{
+ /* Find the blocks within this MB (defined by mb_row, mb_col) which are
+ * overlapped by bmi and calculate and assign overlap for each of those
+ * blocks. */
+
+ /* Block coordinates relative the upper-left block */
+ const int rel_ol_blk_row = first_blk_row - mb_row * 4;
+ const int rel_ol_blk_col = first_blk_col - mb_col * 4;
+ /* If the block partly overlaps any previous MB, these coordinates
+ * can be < 0. We don't want to access blocks in previous MBs.
+ */
+ const int blk_idx = MAX(rel_ol_blk_row,0) * 4 + MAX(rel_ol_blk_col,0);
+ /* Upper left overlapping block */
+ B_OVERLAP *b_ol_ul = &(b_overlaps[blk_idx]);
+
+ /* Calculate and assign overlaps for all blocks in this MB
+ * which the motion compensated block overlaps
+ */
+ /* Avoid calculating overlaps for blocks in later MBs */
+ int end_row = MIN(4 + mb_row * 4 - first_blk_row, 2);
+ int end_col = MIN(4 + mb_col * 4 - first_blk_col, 2);
+ int row, col;
+
+ /* Check if new_row and new_col are evenly divisible by 4 (Q3),
+ * and if so we shouldn't check neighboring blocks
+ */
+ if (new_row >= 0 && (new_row & 0x1F) == 0)
+ end_row = 1;
+ if (new_col >= 0 && (new_col & 0x1F) == 0)
+ end_col = 1;
+
+ /* Check if the overlapping block partly overlaps a previous MB
+ * and if so, we're overlapping fewer blocks in this MB.
+ */
+ if (new_row < (mb_row*16)<<3)
+ end_row = 1;
+ if (new_col < (mb_col*16)<<3)
+ end_col = 1;
+
+ for (row = 0; row < end_row; ++row)
+ {
+ for (col = 0; col < end_col; ++col)
+ {
+ /* input in Q3, result in Q6 */
+ const int overlap = block_overlap(new_row, new_col,
+ (((first_blk_row + row) *
+ 4) << 3),
+ (((first_blk_col + col) *
+ 4) << 3));
+ assign_overlap(b_ol_ul[row * 4 + col].overlaps, bmi, overlap);
+ }
+ }
+}
+
+void vp8_calculate_overlaps(MB_OVERLAP *overlap_ul,
+ int mb_rows, int mb_cols,
+ B_MODE_INFO *bmi,
+ int b_row, int b_col)
+{
+ MB_OVERLAP *mb_overlap;
+ int row, col, rel_row, rel_col;
+ int new_row, new_col;
+ int end_row, end_col;
+ int overlap_b_row, overlap_b_col;
+ int overlap_mb_row, overlap_mb_col;
+
+ /* mb subpixel position */
+ row = (4 * b_row) << 3; /* Q3 */
+ col = (4 * b_col) << 3; /* Q3 */
+
+ /* reverse compensate for motion */
+ new_row = row - bmi->mv.as_mv.row;
+ new_col = col - bmi->mv.as_mv.col;
+
+ if (new_row >= ((16*mb_rows) << 3) || new_col >= ((16*mb_cols) << 3))
+ {
+ /* the new block ended up outside the frame */
+ return;
+ }
+
+ if (new_row <= (-4 << 3) || new_col <= (-4 << 3))
+ {
+ /* outside the frame */
+ return;
+ }
+ /* overlapping block's position in blocks */
+ overlap_b_row = FLOOR(new_row / 4, 3) >> 3;
+ overlap_b_col = FLOOR(new_col / 4, 3) >> 3;
+
+ /* overlapping block's MB position in MBs
+ * operations are done in Q3
+ */
+ overlap_mb_row = FLOOR((overlap_b_row << 3) / 4, 3) >> 3;
+ overlap_mb_col = FLOOR((overlap_b_col << 3) / 4, 3) >> 3;
+
+ end_row = MIN(mb_rows - overlap_mb_row, 2);
+ end_col = MIN(mb_cols - overlap_mb_col, 2);
+
+ /* Don't calculate overlap for MBs we don't overlap */
+ /* Check if the new block row starts at the last block row of the MB */
+ if (abs(new_row - ((16*overlap_mb_row) << 3)) < ((3*4) << 3))
+ end_row = 1;
+ /* Check if the new block col starts at the last block col of the MB */
+ if (abs(new_col - ((16*overlap_mb_col) << 3)) < ((3*4) << 3))
+ end_col = 1;
+
+ /* find the MB(s) this block is overlapping */
+ for (rel_row = 0; rel_row < end_row; ++rel_row)
+ {
+ for (rel_col = 0; rel_col < end_col; ++rel_col)
+ {
+ if (overlap_mb_row + rel_row < 0 ||
+ overlap_mb_col + rel_col < 0)
+ continue;
+ mb_overlap = overlap_ul + (overlap_mb_row + rel_row) * mb_cols +
+ overlap_mb_col + rel_col;
+
+ calculate_overlaps_mb(mb_overlap->overlaps, bmi,
+ new_row, new_col,
+ overlap_mb_row + rel_row,
+ overlap_mb_col + rel_col,
+ overlap_b_row + rel_row,
+ overlap_b_col + rel_col);
+ }
+ }
+}
+
+/* Estimates a motion vector given the overlapping blocks' motion vectors.
+ * Filters out all overlapping blocks which do not refer to the correct
+ * reference frame type.
+ */
+static void estimate_mv(const OVERLAP_NODE *overlaps, B_MODE_INFO *bmi)
+{
+ int i;
+ int overlap_sum = 0;
+ int row_acc = 0;
+ int col_acc = 0;
+
+ bmi->mv.as_int = 0;
+ for (i=0; i < MAX_OVERLAPS; ++i)
+ {
+ if (overlaps[i].bmi == NULL)
+ break;
+ col_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.col;
+ row_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.row;
+ overlap_sum += overlaps[i].overlap;
+ }
+ if (overlap_sum > 0)
+ {
+ /* Q9 / Q6 = Q3 */
+ bmi->mv.as_mv.col = col_acc / overlap_sum;
+ bmi->mv.as_mv.row = row_acc / overlap_sum;
+ bmi->mode = NEW4X4;
+ }
+ else
+ {
+ bmi->mv.as_mv.col = 0;
+ bmi->mv.as_mv.row = 0;
+ bmi->mode = NEW4X4;
+ }
+}
+
+/* Estimates all motion vectors for a macroblock given the lists of
+ * overlaps for each block. Decides whether or not the MVs must be clamped.
+ */
+static void estimate_mb_mvs(const B_OVERLAP *block_overlaps,
+ MODE_INFO *mi,
+ int mb_to_left_edge,
+ int mb_to_right_edge,
+ int mb_to_top_edge,
+ int mb_to_bottom_edge)
+{
+ int i;
+ int non_zero_count = 0;
+ MV * const filtered_mv = &(mi->mbmi.mv.as_mv);
+ B_MODE_INFO * const bmi = mi->bmi;
+ filtered_mv->col = 0;
+ filtered_mv->row = 0;
+ for (i = 0; i < 16; ++i)
+ {
+ /* Estimate vectors for all blocks which are overlapped by this type */
+ /* Interpolate/extrapolate the rest of the block's MVs */
+ estimate_mv(block_overlaps[i].overlaps, &(bmi[i]));
+ mi->mbmi.need_to_clamp_mvs = vp8_check_mv_bounds(&bmi[i].mv,
+ mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ if (bmi[i].mv.as_int != 0)
+ {
+ ++non_zero_count;
+ filtered_mv->col += bmi[i].mv.as_mv.col;
+ filtered_mv->row += bmi[i].mv.as_mv.row;
+ }
+ }
+ if (non_zero_count > 0)
+ {
+ filtered_mv->col /= non_zero_count;
+ filtered_mv->row /= non_zero_count;
+ }
+}
+
+static void calc_prev_mb_overlaps(MB_OVERLAP *overlaps, MODE_INFO *prev_mi,
+ int mb_row, int mb_col,
+ int mb_rows, int mb_cols)
+{
+ int sub_row;
+ int sub_col;
+ for (sub_row = 0; sub_row < 4; ++sub_row)
+ {
+ for (sub_col = 0; sub_col < 4; ++sub_col)
+ {
+ vp8_calculate_overlaps(
+ overlaps, mb_rows, mb_cols,
+ &(prev_mi->bmi[sub_row * 4 + sub_col]),
+ 4 * mb_row + sub_row,
+ 4 * mb_col + sub_col);
+ }
+ }
+}
+
+/* Estimate all missing motion vectors. First computes the block overlaps
+ * from the previous frame's MVs, then derives MVs for each corrupt MB. */
+static void estimate_missing_mvs(MB_OVERLAP *overlaps,
+ MODE_INFO *mi, MODE_INFO *prev_mi,
+ int mb_rows, int mb_cols,
+ unsigned int first_corrupt)
+{
+ int mb_row, mb_col;
+ vpx_memset(overlaps, 0, sizeof(MB_OVERLAP) * mb_rows * mb_cols);
+ /* First calculate the overlaps for all blocks */
+ for (mb_row = 0; mb_row < mb_rows; ++mb_row)
+ {
+ for (mb_col = 0; mb_col < mb_cols; ++mb_col)
+ {
+ /* We're only able to use blocks referring to the last frame
+ * when extrapolating new vectors.
+ */
+ if (prev_mi->mbmi.ref_frame == LAST_FRAME)
+ {
+ calc_prev_mb_overlaps(overlaps, prev_mi,
+ mb_row, mb_col,
+ mb_rows, mb_cols);
+ }
+ ++prev_mi;
+ }
+ ++prev_mi;
+ }
+
+ mb_row = first_corrupt / mb_cols;
+ mb_col = first_corrupt - mb_row * mb_cols;
+ mi += mb_row*(mb_cols + 1) + mb_col;
+ /* Go through all macroblocks in the current image with missing MVs
+ * and calculate new MVs using the overlaps.
+ */
+ for (; mb_row < mb_rows; ++mb_row)
+ {
+ int mb_to_top_edge = -((mb_row * 16)) << 3;
+ int mb_to_bottom_edge = ((mb_rows - 1 - mb_row) * 16) << 3;
+ for (; mb_col < mb_cols; ++mb_col)
+ {
+ int mb_to_left_edge = -((mb_col * 16) << 3);
+ int mb_to_right_edge = ((mb_cols - 1 - mb_col) * 16) << 3;
+ const B_OVERLAP *block_overlaps =
+ overlaps[mb_row*mb_cols + mb_col].overlaps;
+ mi->mbmi.ref_frame = LAST_FRAME;
+ mi->mbmi.mode = SPLITMV;
+ mi->mbmi.uv_mode = DC_PRED;
+ mi->mbmi.partitioning = 3;
+ estimate_mb_mvs(block_overlaps,
+ mi,
+ mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ ++mi;
+ }
+ mb_col = 0;
+ ++mi;
+ }
+}
+
+void vp8_estimate_missing_mvs(VP8D_COMP *pbi)
+{
+ VP8_COMMON * const pc = &pbi->common;
+ estimate_missing_mvs(pbi->overlaps,
+ pc->mi, pc->prev_mi,
+ pc->mb_rows, pc->mb_cols,
+ pbi->mvs_corrupt_from_mb);
+}
+
+static void assign_neighbor(EC_BLOCK *neighbor, MODE_INFO *mi, int block_idx)
+{
+ assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);
+ neighbor->ref_frame = mi->mbmi.ref_frame;
+ neighbor->mv = mi->bmi[block_idx].mv.as_mv;
+}
+
+/* Finds the neighboring blocks of a macroblock. In the general case
+ * 20 blocks are found. If a fewer number of blocks are found due to
+ * image boundaries, those positions in the EC_BLOCK array are left "empty".
+ * The neighbors are enumerated with the upper-left neighbor as the first
+ * element, the second element refers to the neighbor to right of the previous
+ * neighbor, and so on. The last element refers to the neighbor below the first
+ * neighbor.
+ */
+static void find_neighboring_blocks(MODE_INFO *mi,
+ EC_BLOCK *neighbors,
+ int mb_row, int mb_col,
+ int mb_rows, int mb_cols,
+ int mi_stride)
+{
+ int i = 0;
+ int j;
+ if (mb_row > 0)
+ {
+ /* upper left */
+ if (mb_col > 0)
+ assign_neighbor(&neighbors[i], mi - mi_stride - 1, 15);
+ ++i;
+ /* above */
+ for (j = 12; j < 16; ++j, ++i)
+ assign_neighbor(&neighbors[i], mi - mi_stride, j);
+ }
+ else
+ i += 5;
+ if (mb_col < mb_cols - 1)
+ {
+ /* upper right */
+ if (mb_row > 0)
+ assign_neighbor(&neighbors[i], mi - mi_stride + 1, 12);
+ ++i;
+ /* right */
+ for (j = 0; j <= 12; j += 4, ++i)
+ assign_neighbor(&neighbors[i], mi + 1, j);
+ }
+ else
+ i += 5;
+ if (mb_row < mb_rows - 1)
+ {
+ /* lower right */
+ if (mb_col < mb_cols - 1)
+ assign_neighbor(&neighbors[i], mi + mi_stride + 1, 0);
+ ++i;
+ /* below */
+ for (j = 0; j < 4; ++j, ++i)
+ assign_neighbor(&neighbors[i], mi + mi_stride, j);
+ }
+ else
+ i += 5;
+ if (mb_col > 0)
+ {
+ /* lower left */
+ if (mb_row < mb_rows - 1)
+ assign_neighbor(&neighbors[i], mi + mi_stride - 1, 4);
+ ++i;
+ /* left */
+ for (j = 3; j < 16; j += 4, ++i)
+ {
+ assign_neighbor(&neighbors[i], mi - 1, j);
+ }
+ }
+ else
+ i += 5;
+ assert(i == 20);
+}
+
+/* Calculates which reference frame type is dominating among the neighbors */
+static MV_REFERENCE_FRAME dominant_ref_frame(EC_BLOCK *neighbors)
+{
+ /* Default to referring to "skip" */
+ MV_REFERENCE_FRAME dom_ref_frame = LAST_FRAME;
+ int max_ref_frame_cnt = 0;
+ int ref_frame_cnt[MAX_REF_FRAMES] = {0};
+ int i;
+ /* Count neighboring reference frames */
+ for (i = 0; i < NUM_NEIGHBORS; ++i)
+ {
+ if (neighbors[i].ref_frame < MAX_REF_FRAMES &&
+ neighbors[i].ref_frame != INTRA_FRAME)
+ ++ref_frame_cnt[neighbors[i].ref_frame];
+ }
+ /* Find maximum */
+ for (i = 0; i < MAX_REF_FRAMES; ++i)
+ {
+ if (ref_frame_cnt[i] > max_ref_frame_cnt)
+ {
+ dom_ref_frame = i;
+ max_ref_frame_cnt = ref_frame_cnt[i];
+ }
+ }
+ return dom_ref_frame;
+}
+
+/* Interpolates all motion vectors for a macroblock from the neighboring blocks'
+ * motion vectors.
+ */
+static void interpolate_mvs(MACROBLOCKD *mb,
+ EC_BLOCK *neighbors,
+ MV_REFERENCE_FRAME dom_ref_frame)
+{
+ int row, col, i;
+ MODE_INFO * const mi = mb->mode_info_context;
+    /* Table with the position of the neighboring blocks relative to the position
+ * of the upper left block of the current MB. Starting with the upper left
+ * neighbor and going to the right.
+ */
+ const EC_POS neigh_pos[NUM_NEIGHBORS] = {
+ {-1,-1}, {-1,0}, {-1,1}, {-1,2}, {-1,3},
+ {-1,4}, {0,4}, {1,4}, {2,4}, {3,4},
+ {4,4}, {4,3}, {4,2}, {4,1}, {4,0},
+ {4,-1}, {3,-1}, {2,-1}, {1,-1}, {0,-1}
+ };
+ for (row = 0; row < 4; ++row)
+ {
+ for (col = 0; col < 4; ++col)
+ {
+ int w_sum = 0;
+ int mv_row_sum = 0;
+ int mv_col_sum = 0;
+ int_mv * const mv = &(mi->bmi[row*4 + col].mv);
+ for (i = 0; i < NUM_NEIGHBORS; ++i)
+ {
+ /* Calculate the weighted sum of neighboring MVs referring
+ * to the dominant frame type.
+ */
+ const int w = weights_q7[abs(row - neigh_pos[i].row)]
+ [abs(col - neigh_pos[i].col)];
+ if (neighbors[i].ref_frame != dom_ref_frame)
+ continue;
+ w_sum += w;
+ /* Q7 * Q3 = Q10 */
+ mv_row_sum += w*neighbors[i].mv.row;
+ mv_col_sum += w*neighbors[i].mv.col;
+ }
+ if (w_sum > 0)
+ {
+ /* Avoid division by zero.
+ * Normalize with the sum of the coefficients
+ * Q3 = Q10 / Q7
+ */
+ mv->as_mv.row = mv_row_sum / w_sum;
+ mv->as_mv.col = mv_col_sum / w_sum;
+ mi->bmi[row*4 + col].mode = NEW4X4;
+ mi->mbmi.need_to_clamp_mvs = vp8_check_mv_bounds(mv,
+ mb->mb_to_left_edge,
+ mb->mb_to_right_edge,
+ mb->mb_to_top_edge,
+ mb->mb_to_bottom_edge);
+ }
+ }
+ }
+}
+
+void vp8_interpolate_motion(MACROBLOCKD *mb,
+ int mb_row, int mb_col,
+ int mb_rows, int mb_cols,
+ int mi_stride)
+{
+ /* Find relevant neighboring blocks */
+ EC_BLOCK neighbors[NUM_NEIGHBORS];
+ MV_REFERENCE_FRAME dom_ref_frame;
+ int i;
+ /* Initialize the array. MAX_REF_FRAMES is interpreted as "doesn't exist" */
+ for (i = 0; i < NUM_NEIGHBORS; ++i)
+ {
+ neighbors[i].ref_frame = MAX_REF_FRAMES;
+ neighbors[i].mv.row = neighbors[i].mv.col = 0;
+ }
+ find_neighboring_blocks(mb->mode_info_context,
+ neighbors,
+ mb_row, mb_col,
+ mb_rows, mb_cols,
+ mb->mode_info_stride);
+ /* Determine the dominant block type */
+ dom_ref_frame = dominant_ref_frame(neighbors);
+ /* Interpolate MVs for the missing blocks
+ * from the dominating MVs */
+ interpolate_mvs(mb, neighbors, dom_ref_frame);
+
+ mb->mode_info_context->mbmi.ref_frame = dom_ref_frame;
+ mb->mode_info_context->mbmi.mode = SPLITMV;
+ mb->mode_info_context->mbmi.uv_mode = DC_PRED;
+ mb->mode_info_context->mbmi.partitioning = 3;
+}
+
+void vp8_conceal_corrupt_mb(MACROBLOCKD *xd)
+{
+    /* This macroblock has corrupt residual; use the motion compensated
+ image (predictor) for concealment */
+ vp8_recon_copy16x16(xd->predictor, 16, xd->dst.y_buffer, xd->dst.y_stride);
+ vp8_recon_copy8x8(xd->predictor + 256, 8,
+ xd->dst.u_buffer, xd->dst.uv_stride);
+ vp8_recon_copy8x8(xd->predictor + 320, 8,
+ xd->dst.v_buffer, xd->dst.uv_stride);
+}
diff --git a/vp8/decoder/error_concealment.h b/vp8/decoder/error_concealment.h
new file mode 100644
index 000000000..65ae9d9be
--- /dev/null
+++ b/vp8/decoder/error_concealment.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef ERROR_CONCEALMENT_H
+#define ERROR_CONCEALMENT_H
+
+#include "onyxd_int.h"
+#include "ec_types.h"
+
+/* Allocate memory for the overlap lists */
+int vp8_alloc_overlap_lists(VP8D_COMP *pbi);
+
+/* Deallocate the overlap lists */
+void vp8_de_alloc_overlap_lists(VP8D_COMP *pbi);
+
+/* Estimate all missing motion vectors. */
+void vp8_estimate_missing_mvs(VP8D_COMP *pbi);
+
+/* Functions for spatial MV interpolation */
+
+/* Interpolates all motion vectors for a macroblock mb at position
+ * (mb_row, mb_col). */
+void vp8_interpolate_motion(MACROBLOCKD *mb,
+ int mb_row, int mb_col,
+ int mb_rows, int mb_cols,
+ int mi_stride);
+
+/* Conceal a macroblock with corrupt residual.
+ * Copies the prediction signal to the reconstructed image.
+ */
+void vp8_conceal_corrupt_mb(MACROBLOCKD *xd);
+
+#endif
diff --git a/vp8/decoder/onyxd_if.c b/vp8/decoder/onyxd_if.c
index 1744354e3..5c8e09521 100644
--- a/vp8/decoder/onyxd_if.c
+++ b/vp8/decoder/onyxd_if.c
@@ -30,6 +30,9 @@
#include "vp8/common/systemdependent.h"
#include "vpx_ports/vpx_timer.h"
#include "detokenize.h"
+#if CONFIG_ERROR_CONCEALMENT
+#include "error_concealment.h"
+#endif
#if ARCH_ARM
#include "vpx_ports/arm.h"
#endif
@@ -95,6 +98,13 @@ VP8D_PTR vp8dx_create_decompressor(VP8D_CONFIG *oxcf)
}
pbi->common.error.setjmp = 0;
+
+#if CONFIG_ERROR_CONCEALMENT
+ pbi->ec_enabled = oxcf->error_concealment;
+#else
+ pbi->ec_enabled = 0;
+#endif
+
return (VP8D_PTR) pbi;
}
@@ -115,6 +125,9 @@ void vp8dx_remove_decompressor(VP8D_PTR ptr)
vp8mt_de_alloc_temp_buffers(pbi, pbi->common.mb_rows);
vp8_decoder_remove_threads(pbi);
#endif
+#if CONFIG_ERROR_CONCEALMENT
+ vp8_de_alloc_overlap_lists(pbi);
+#endif
vp8_remove_common(&pbi->common);
vpx_free(pbi);
}
@@ -274,11 +287,17 @@ int vp8dx_receive_compressed_data(VP8D_PTR ptr, unsigned long size, const unsign
*/
cm->yv12_fb[cm->lst_fb_idx].corrupted = 1;
- /* Signal that we have no frame to show. */
- cm->show_frame = 0;
+ /* If error concealment is disabled we won't signal missing frames to
+ * the decoder.
+ */
+ if (!pbi->ec_enabled)
+ {
+ /* Signal that we have no frame to show. */
+ cm->show_frame = 0;
- /* Nothing more to do. */
- return 0;
+ /* Nothing more to do. */
+ return 0;
+ }
}
@@ -393,6 +412,28 @@ int vp8dx_receive_compressed_data(VP8D_PTR ptr, unsigned long size, const unsign
vp8_clear_system_state();
+#if CONFIG_ERROR_CONCEALMENT
+ /* swap the mode infos to storage for future error concealment */
+ if (pbi->ec_enabled && pbi->common.prev_mi)
+ {
+ const MODE_INFO* tmp = pbi->common.prev_mi;
+ int row, col;
+ pbi->common.prev_mi = pbi->common.mi;
+ pbi->common.mi = tmp;
+
+ /* Propagate the segment_ids to the next frame */
+ for (row = 0; row < pbi->common.mb_rows; ++row)
+ {
+ for (col = 0; col < pbi->common.mb_cols; ++col)
+ {
+ const int i = row*pbi->common.mode_info_stride + col;
+ pbi->common.mi[i].mbmi.segment_id =
+ pbi->common.prev_mi[i].mbmi.segment_id;
+ }
+ }
+ }
+#endif
+
/*vp8_print_modes_and_motion_vectors( cm->mi, cm->mb_rows,cm->mb_cols, cm->current_video_frame);*/
if (cm->show_frame)
diff --git a/vp8/decoder/onyxd_int.h b/vp8/decoder/onyxd_int.h
index df489e9cb..ec1701079 100644
--- a/vp8/decoder/onyxd_int.h
+++ b/vp8/decoder/onyxd_int.h
@@ -17,6 +17,9 @@
#include "vp8/common/onyxc_int.h"
#include "vp8/common/threading.h"
#include "dequantize.h"
+#if CONFIG_ERROR_CONCEALMENT
+#include "ec_types.h"
+#endif
typedef struct
{
@@ -128,6 +131,13 @@ typedef struct VP8Decompressor
vp8_prob prob_gf;
vp8_prob prob_skip_false;
+#if CONFIG_ERROR_CONCEALMENT
+ MB_OVERLAP *overlaps;
+ /* the mb num from which modes and mvs (first partition) are corrupt */
+ unsigned int mvs_corrupt_from_mb;
+#endif
+ int ec_enabled;
+
} VP8D_COMP;
int vp8_decode_frame(VP8D_COMP *cpi);
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index b5d8bc21c..4885e6fef 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -68,370 +68,6 @@ unsigned int uv_modes[4] = {0, 0, 0, 0};
unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#endif
-static const int qrounding_factors[129] =
-{
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48
-};
-
-static const int qzbin_factors[129] =
-{
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80
-};
-
-static const int qrounding_factors_y2[129] =
-{
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48
-};
-
-static const int qzbin_factors_y2[129] =
-{
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80
-};
-
-#define EXACT_QUANT
-#ifdef EXACT_QUANT
-static void vp8cx_invert_quant(int improved_quant, short *quant,
- unsigned char *shift, short d)
-{
- if(improved_quant)
- {
- unsigned t;
- int l;
- t = d;
- for(l = 0; t > 1; l++)
- t>>=1;
- t = 1 + (1<<(16+l))/d;
- *quant = (short)(t - (1<<16));
- *shift = l;
- }
- else
- {
- *quant = (1 << 16) / d;
- *shift = 0;
- }
-}
-
-void vp8cx_init_quantizer(VP8_COMP *cpi)
-{
- int i;
- int quant_val;
- int Q;
-
- int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
-
- for (Q = 0; Q < QINDEX_RANGE; Q++)
- {
- // dc values
- quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
- cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
- vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
- cpi->Y1quant_shift[Q] + 0, quant_val);
- cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.Y1dequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
- cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
- vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
- cpi->Y2quant_shift[Q] + 0, quant_val);
- cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
- cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
- cpi->common.Y2dequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
- cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
- vp8cx_invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
- cpi->UVquant_shift[Q] + 0, quant_val);
- cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
- cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.UVdequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- // all the ac values = ;
- for (i = 1; i < 16; i++)
- {
- int rc = vp8_default_zig_zag1d[i];
-
- quant_val = vp8_ac_yquant(Q);
- cpi->Y1quant_fast[Q][rc] = (1 << 16) / quant_val;
- vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + rc,
- cpi->Y1quant_shift[Q] + rc, quant_val);
- cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.Y1dequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
- quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
- cpi->Y2quant_fast[Q][rc] = (1 << 16) / quant_val;
- vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + rc,
- cpi->Y2quant_shift[Q] + rc, quant_val);
- cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
- cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
- cpi->common.Y2dequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
- quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
- cpi->UVquant_fast[Q][rc] = (1 << 16) / quant_val;
- vp8cx_invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + rc,
- cpi->UVquant_shift[Q] + rc, quant_val);
- cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.UVdequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
- }
- }
-}
-#else
-void vp8cx_init_quantizer(VP8_COMP *cpi)
-{
- int i;
- int quant_val;
- int Q;
-
- int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
-
- for (Q = 0; Q < QINDEX_RANGE; Q++)
- {
- // dc values
- quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
- cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
- cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.Y1dequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
- cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
- cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
- cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
- cpi->common.Y2dequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
- cpi->UVquant[Q][0] = (1 << 16) / quant_val;
- cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
- cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.UVdequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- // all the ac values = ;
- for (i = 1; i < 16; i++)
- {
- int rc = vp8_default_zig_zag1d[i];
-
- quant_val = vp8_ac_yquant(Q);
- cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
- cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.Y1dequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
- quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
- cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
- cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
- cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
- cpi->common.Y2dequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
- quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
- cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
- cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.UVdequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
- }
- }
-}
-#endif
-void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
-{
- int i;
- int QIndex;
- MACROBLOCKD *xd = &x->e_mbd;
- int zbin_extra;
-
- // Select the baseline MB Q index.
- if (xd->segmentation_enabled)
- {
- // Abs Value
- if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
-
- QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
- // Delta Value
- else
- {
- QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
- QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; // Clamp to valid range
- }
- }
- else
- QIndex = cpi->common.base_qindex;
-
- // Y
- zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
- ( cpi->zbin_over_quant +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
-
- for (i = 0; i < 16; i++)
- {
- x->block[i].quant = cpi->Y1quant[QIndex];
- x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
- x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
- x->block[i].zbin = cpi->Y1zbin[QIndex];
- x->block[i].round = cpi->Y1round[QIndex];
- x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
- x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
- x->block[i].zbin_extra = (short)zbin_extra;
- }
-
- // UV
- zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
- ( cpi->zbin_over_quant +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
-
- for (i = 16; i < 24; i++)
- {
- x->block[i].quant = cpi->UVquant[QIndex];
- x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
- x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
- x->block[i].zbin = cpi->UVzbin[QIndex];
- x->block[i].round = cpi->UVround[QIndex];
- x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
- x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
- x->block[i].zbin_extra = (short)zbin_extra;
- }
-
- // Y2
- zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
- ( (cpi->zbin_over_quant / 2) +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
-
- x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
- x->block[24].quant = cpi->Y2quant[QIndex];
- x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
- x->block[24].zbin = cpi->Y2zbin[QIndex];
- x->block[24].round = cpi->Y2round[QIndex];
- x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
- x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
- x->block[24].zbin_extra = (short)zbin_extra;
-
- /* save this macroblock QIndex for vp8_update_zbin_extra() */
- x->q_index = QIndex;
-}
-void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
-{
- int i;
- int QIndex = x->q_index;
- int zbin_extra;
-
- // Y
- zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
- ( cpi->zbin_over_quant +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
- for (i = 0; i < 16; i++)
- {
- x->block[i].zbin_extra = (short)zbin_extra;
- }
-
- // UV
- zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
- ( cpi->zbin_over_quant +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
-
- for (i = 16; i < 24; i++)
- {
- x->block[i].zbin_extra = (short)zbin_extra;
- }
-
- // Y2
- zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
- ( (cpi->zbin_over_quant / 2) +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
-
- x->block[24].zbin_extra = (short)zbin_extra;
-}
-
-void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
-{
- // Clear Zbin mode boost for default case
- cpi->zbin_mode_boost = 0;
-
- // MB level quantizer setup
- vp8cx_mb_init_quantizer(cpi, &cpi->mb);
-}
-
/* activity_avg must be positive, or flat regions could get a zero weight
* (infinite lambda), which confounds analysis.
@@ -452,7 +88,7 @@ static const unsigned char VP8_VAR_OFFS[16]=
// Original activity measure from Tim T's code.
-unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
+static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
{
unsigned int act;
unsigned int sse;
@@ -482,7 +118,7 @@ unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
}
// Stub for alternative experimental activity measures.
-unsigned int alt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
+static unsigned int alt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
{
unsigned int mb_activity = VP8_ACTIVITY_AVG_MIN;
@@ -500,7 +136,7 @@ unsigned int alt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
// Measure the activity of the current macroblock
// What we measure here is TBD so abstracted to this function
-unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
+static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
{
unsigned int mb_activity;
@@ -519,7 +155,7 @@ unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
}
// Calculate an "average" mb activity value for the frame
-void calc_av_activity( VP8_COMP *cpi, INT64 activity_sum )
+static void calc_av_activity( VP8_COMP *cpi, INT64 activity_sum )
{
// Simple mean for now
cpi->activity_avg = (unsigned int)(activity_sum/cpi->common.MBs);
@@ -529,7 +165,7 @@ void calc_av_activity( VP8_COMP *cpi, INT64 activity_sum )
#define OUTPUT_NORM_ACT_STATS 0
// Calculate a normalized activity value for each mb
-void calc_norm_activity( VP8_COMP *cpi, MACROBLOCK *x )
+static void calc_norm_activity( VP8_COMP *cpi, MACROBLOCK *x )
{
VP8_COMMON *const cm = & cpi->common;
int mb_row, mb_col;
@@ -593,7 +229,7 @@ void calc_norm_activity( VP8_COMP *cpi, MACROBLOCK *x )
// Loop through all MBs. Note activity of each, average activity and
// calculate a normalized activity for each
-void build_activity_map( VP8_COMP *cpi )
+static void build_activity_map( VP8_COMP *cpi )
{
MACROBLOCK *const x = & cpi->mb;
VP8_COMMON *const cm = & cpi->common;
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index 5d52c9fc4..6f7e66371 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -21,9 +21,6 @@
#include "vp8/common/g_common.h"
#include "encodeintra.h"
-#define intra4x4ibias_rate 128
-#define intra4x4pbias_rate 256
-
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
diff --git a/vp8/encoder/encodeintra.h b/vp8/encoder/encodeintra.h
index 40930bc42..8159097c6 100644
--- a/vp8/encoder/encodeintra.h
+++ b/vp8/encoder/encodeintra.h
@@ -17,6 +17,5 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *, MACROBLOCK *mb);
void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode);
-void vp8_encode_intra4x4block_rd(const VP8_ENCODER_RTCD *, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode);
#endif
diff --git a/vp8/encoder/generic/csystemdependent.c b/vp8/encoder/generic/csystemdependent.c
index d127ed114..35d2d5332 100644
--- a/vp8/encoder/generic/csystemdependent.c
+++ b/vp8/encoder/generic/csystemdependent.c
@@ -100,7 +100,7 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi)
// Pure C:
vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
cpi->rtcd.variance.ssimpf_8x8 = ssim_parms_8x8_c;
cpi->rtcd.variance.ssimpf = ssim_parms_c;
#endif
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 1d2894aca..ecebdf9d3 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -75,7 +75,7 @@ static void set_default_lf_deltas(VP8_COMP *cpi);
extern const int vp8_gf_interval_table[101];
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
#include "math.h"
extern double vp8_calc_ssim
@@ -1298,7 +1298,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
}
- if (cpi->sf.optimize_coefficients == 1)
+ if (cpi->sf.optimize_coefficients == 1 && cpi->pass!=1)
cpi->mb.optimize = 1;
else
cpi->mb.optimize = 0;
@@ -1982,8 +1982,8 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
cpi->source_alt_ref_active = FALSE;
cpi->common.refresh_alt_ref_frame = 0;
- cpi->b_calculate_psnr = CONFIG_PSNR;
-#if CONFIG_PSNR
+ cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
+#if CONFIG_INTERNAL_STATS
cpi->b_calculate_ssimg = 0;
cpi->count = 0;
@@ -2202,7 +2202,7 @@ void vp8_remove_compressor(VP8_PTR *ptr)
print_mode_context();
#endif
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
if (cpi->pass != 1)
{
@@ -2718,45 +2718,6 @@ static void resize_key_frame(VP8_COMP *cpi)
}
-static void set_quantizer(VP8_COMP *cpi, int Q)
-{
- VP8_COMMON *cm = &cpi->common;
- MACROBLOCKD *mbd = &cpi->mb.e_mbd;
- int update = 0;
- int new_delta_q;
- cm->base_qindex = Q;
-
- /* if any of the delta_q values are changing update flag has to be set */
- /* currently only y2dc_delta_q may change */
-
- cm->y1dc_delta_q = 0;
- cm->y2ac_delta_q = 0;
- cm->uvdc_delta_q = 0;
- cm->uvac_delta_q = 0;
-
- if (Q < 4)
- {
- new_delta_q = 4-Q;
- }
- else
- new_delta_q = 0;
-
- update |= cm->y2dc_delta_q != new_delta_q;
- cm->y2dc_delta_q = new_delta_q;
-
-
- // Set Segment specific quatizers
- mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0];
- mbd->segment_feature_data[MB_LVL_ALT_Q][1] = cpi->segment_feature_data[MB_LVL_ALT_Q][1];
- mbd->segment_feature_data[MB_LVL_ALT_Q][2] = cpi->segment_feature_data[MB_LVL_ALT_Q][2];
- mbd->segment_feature_data[MB_LVL_ALT_Q][3] = cpi->segment_feature_data[MB_LVL_ALT_Q][3];
-
- /* quantizer has to be reinitialized for any delta_q changes */
- if(update)
- vp8cx_init_quantizer(cpi);
-
-}
-
static void update_alt_ref_frame_and_stats(VP8_COMP *cpi)
{
VP8_COMMON *cm = &cpi->common;
@@ -3105,7 +3066,7 @@ static void Pass1Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest,
(void) size;
(void) dest;
(void) frame_flags;
- set_quantizer(cpi, 26);
+ vp8_set_quantizer(cpi, 26);
scale_and_extend_source(cpi->un_scaled_source, cpi);
vp8_first_pass(cpi);
@@ -3502,7 +3463,7 @@ static void encode_frame_to_data_rate
cm->current_video_frame++;
cpi->frames_since_key++;
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
cpi->count ++;
#endif
@@ -3769,7 +3730,7 @@ static void encode_frame_to_data_rate
Q = 127;
*/
- set_quantizer(cpi, Q);
+ vp8_set_quantizer(cpi, Q);
this_q = Q;
// setup skip prob for costing in mode/mv decision
@@ -4114,7 +4075,7 @@ static void encode_frame_to_data_rate
{
vp8_restore_coding_context(cpi);
loop_count++;
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
cpi->tot_recode_hits++;
#endif
}
@@ -4388,7 +4349,7 @@ static void encode_frame_to_data_rate
}
}
-#if 0 && CONFIG_PSNR
+#if 0 && CONFIG_INTERNAL_STATS
{
FILE *f = fopen("tmp.stt", "a");
@@ -4958,7 +4919,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
generate_psnr_packet(cpi);
}
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
if (cpi->pass != 1)
{
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index 16ee2ed21..f63b351c6 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -244,7 +244,7 @@ enum
BLOCK_MAX_SEGMENTS
};
-typedef struct
+typedef struct VP8_COMP
{
DECLARE_ALIGNED(16, short, Y1quant[QINDEX_RANGE][16]);
@@ -624,7 +624,7 @@ typedef struct
int fixed_divide[512];
#endif
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
int count;
double total_y;
double total_u;
diff --git a/vp8/encoder/pickinter.c b/vp8/encoder/pickinter.c
index 1785edb1c..c2524921d 100644
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -426,24 +426,29 @@ void vp8_pick_intra_mbuv_mode(MACROBLOCK *mb)
}
-static void vp8_update_mvcount(VP8_COMP *cpi, MACROBLOCKD *xd, int_mv *best_ref_mv)
+static void update_mvcount(VP8_COMP *cpi, MACROBLOCKD *xd, int_mv *best_ref_mv)
{
- /* Split MV modes currently not supported when RD is nopt enabled, therefore, only need to modify MVcount in NEWMV mode. */
+    /* Split MV modes currently not supported when RD is not enabled,
+ * therefore, only need to modify MVcount in NEWMV mode. */
if (xd->mode_info_context->mbmi.mode == NEWMV)
{
- cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv->as_mv.col) >> 1)]++;
+ cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row -
+ best_ref_mv->as_mv.row) >> 1)]++;
+ cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col -
+ best_ref_mv->as_mv.col) >> 1)]++;
}
}
-void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra)
+void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
+ int recon_uvoffset, int *returnrate,
+ int *returndistortion, int *returnintra)
{
BLOCK *b = &x->block[0];
BLOCKD *d = &x->e_mbd.block[0];
MACROBLOCKD *xd = &x->e_mbd;
B_MODE_INFO best_bmodes[16];
MB_MODE_INFO best_mbmode;
- PARTITION_INFO best_partition;
+
int_mv best_ref_mv;
int_mv mode_mv[MB_MODE_COUNT];
MB_PREDICTION_MODE this_mode;
@@ -878,9 +883,8 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int re
*returndistortion = distortion2;
best_rd = this_rd;
vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
- vpx_memcpy(&best_partition, x->partition_info, sizeof(PARTITION_INFO));
- if (this_mode == B_PRED || this_mode == SPLITMV)
+ if (this_mode == B_PRED)
for (i = 0; i < 16; i++)
{
vpx_memcpy(&best_bmodes[i], &x->e_mbd.block[i].bmi, sizeof(B_MODE_INFO));
@@ -952,7 +956,6 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int re
best_mbmode.partitioning = 0;
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
- vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
for (i = 0; i < 16; i++)
{
@@ -963,12 +966,10 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int re
return;
}
-
// macroblock modes
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
- vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
- if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED || x->e_mbd.mode_info_context->mbmi.mode == SPLITMV)
+ if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
for (i = 0; i < 16; i++)
{
vpx_memcpy(&x->e_mbd.block[i].bmi, &best_bmodes[i], sizeof(B_MODE_INFO));
@@ -979,7 +980,5 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int re
vp8_set_mbmode_and_mvs(x, x->e_mbd.mode_info_context->mbmi.mode, &best_bmodes[0].mv);
}
- x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
-
- vp8_update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
+ update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
}
diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c
index 86ed267fb..49e8e1b9b 100644
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -12,8 +12,9 @@
#include <math.h>
#include "vpx_mem/vpx_mem.h"
+#include "onyx_int.h"
#include "quantize.h"
-#include "vp8/common/entropy.h"
+#include "vp8/common/quant_common.h"
#define EXACT_QUANT
@@ -299,3 +300,418 @@ void vp8_quantize_mbuv(MACROBLOCK *x)
for (i = 16; i < 24; i++)
x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}
+
+
+static const int qrounding_factors[129] =
+{
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48
+};
+
+
+static const int qzbin_factors[129] =
+{
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80
+};
+
+
+static const int qrounding_factors_y2[129] =
+{
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48
+};
+
+
+static const int qzbin_factors_y2[129] =
+{
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80
+};
+
+
+#define EXACT_QUANT
+#ifdef EXACT_QUANT
+static void invert_quant(int improved_quant, short *quant,
+ unsigned char *shift, short d)
+{
+ if(improved_quant)
+ {
+ unsigned t;
+ int l;
+ t = d;
+ for(l = 0; t > 1; l++)
+ t>>=1;
+ t = 1 + (1<<(16+l))/d;
+ *quant = (short)(t - (1<<16));
+ *shift = l;
+ }
+ else
+ {
+ *quant = (1 << 16) / d;
+ *shift = 0;
+ }
+}
+
+
+void vp8cx_init_quantizer(VP8_COMP *cpi)
+{
+ int i;
+ int quant_val;
+ int Q;
+
+ int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
+
+ for (Q = 0; Q < QINDEX_RANGE; Q++)
+ {
+ // dc values
+ quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
+ cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
+ cpi->Y1quant_shift[Q] + 0, quant_val);
+ cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
+ cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
+ cpi->Y2quant_shift[Q] + 0, quant_val);
+ cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
+ cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
+ cpi->UVquant_shift[Q] + 0, quant_val);
+ cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
+ cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+        // all the ac values
+ for (i = 1; i < 16; i++)
+ {
+ int rc = vp8_default_zig_zag1d[i];
+
+ quant_val = vp8_ac_yquant(Q);
+ cpi->Y1quant_fast[Q][rc] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + rc,
+ cpi->Y1quant_shift[Q] + rc, quant_val);
+ cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+ quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
+ cpi->Y2quant_fast[Q][rc] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + rc,
+ cpi->Y2quant_shift[Q] + rc, quant_val);
+ cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+ quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
+ cpi->UVquant_fast[Q][rc] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + rc,
+ cpi->UVquant_shift[Q] + rc, quant_val);
+ cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+ }
+ }
+}
+#else
+void vp8cx_init_quantizer(VP8_COMP *cpi)
+{
+ int i;
+ int quant_val;
+ int Q;
+
+ int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
+
+ for (Q = 0; Q < QINDEX_RANGE; Q++)
+ {
+ // dc values
+ quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
+ cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
+ cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
+ cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
+ cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
+ cpi->UVquant[Q][0] = (1 << 16) / quant_val;
+ cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
+ cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+        // all the ac values
+ for (i = 1; i < 16; i++)
+ {
+ int rc = vp8_default_zig_zag1d[i];
+
+ quant_val = vp8_ac_yquant(Q);
+ cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
+ cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+ quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
+ cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
+ cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+ quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
+ cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
+ cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+ }
+ }
+}
+#endif
+
+
+void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
+{
+ int i;
+ int QIndex;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int zbin_extra;
+
+ // Select the baseline MB Q index.
+ if (xd->segmentation_enabled)
+ {
+ // Abs Value
+ if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
+
+ QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
+ // Delta Value
+ else
+ {
+ QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
+ QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; // Clamp to valid range
+ }
+ }
+ else
+ QIndex = cpi->common.base_qindex;
+
+ // Y
+ zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
+ ( cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ for (i = 0; i < 16; i++)
+ {
+ x->block[i].quant = cpi->Y1quant[QIndex];
+ x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
+ x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
+ x->block[i].zbin = cpi->Y1zbin[QIndex];
+ x->block[i].round = cpi->Y1round[QIndex];
+ x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
+ x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // UV
+ zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
+ ( cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ for (i = 16; i < 24; i++)
+ {
+ x->block[i].quant = cpi->UVquant[QIndex];
+ x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
+ x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
+ x->block[i].zbin = cpi->UVzbin[QIndex];
+ x->block[i].round = cpi->UVround[QIndex];
+ x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
+ x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // Y2
+ zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
+ ( (cpi->zbin_over_quant / 2) +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
+ x->block[24].quant = cpi->Y2quant[QIndex];
+ x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
+ x->block[24].zbin = cpi->Y2zbin[QIndex];
+ x->block[24].round = cpi->Y2round[QIndex];
+ x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
+ x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
+ x->block[24].zbin_extra = (short)zbin_extra;
+
+ /* save this macroblock QIndex for vp8_update_zbin_extra() */
+ x->q_index = QIndex;
+}
+
+
+void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
+{
+ int i;
+ int QIndex = x->q_index;
+ int zbin_extra;
+
+ // Y
+ zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
+ ( cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+ for (i = 0; i < 16; i++)
+ {
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // UV
+ zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
+ ( cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ for (i = 16; i < 24; i++)
+ {
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // Y2
+ zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
+ ( (cpi->zbin_over_quant / 2) +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ x->block[24].zbin_extra = (short)zbin_extra;
+}
+
+
+void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
+{
+ // Clear Zbin mode boost for default case
+ cpi->zbin_mode_boost = 0;
+
+ // MB level quantizer setup
+ vp8cx_mb_init_quantizer(cpi, &cpi->mb);
+}
+
+
+void vp8_set_quantizer(struct VP8_COMP *cpi, int Q)
+{
+ VP8_COMMON *cm = &cpi->common;
+ MACROBLOCKD *mbd = &cpi->mb.e_mbd;
+ int update = 0;
+ int new_delta_q;
+ cm->base_qindex = Q;
+
+ /* if any of the delta_q values are changing update flag has to be set */
+ /* currently only y2dc_delta_q may change */
+
+ cm->y1dc_delta_q = 0;
+ cm->y2ac_delta_q = 0;
+ cm->uvdc_delta_q = 0;
+ cm->uvac_delta_q = 0;
+
+ if (Q < 4)
+ {
+ new_delta_q = 4-Q;
+ }
+ else
+ new_delta_q = 0;
+
+ update |= cm->y2dc_delta_q != new_delta_q;
+ cm->y2dc_delta_q = new_delta_q;
+
+
+    // Set Segment specific quantizers
+ mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0];
+ mbd->segment_feature_data[MB_LVL_ALT_Q][1] = cpi->segment_feature_data[MB_LVL_ALT_Q][1];
+ mbd->segment_feature_data[MB_LVL_ALT_Q][2] = cpi->segment_feature_data[MB_LVL_ALT_Q][2];
+ mbd->segment_feature_data[MB_LVL_ALT_Q][3] = cpi->segment_feature_data[MB_LVL_ALT_Q][3];
+
+ /* quantizer has to be reinitialized for any delta_q changes */
+ if(update)
+ vp8cx_init_quantizer(cpi);
+
+}
diff --git a/vp8/encoder/quantize.h b/vp8/encoder/quantize.h
index e4c32a570..d9a041071 100644
--- a/vp8/encoder/quantize.h
+++ b/vp8/encoder/quantize.h
@@ -55,4 +55,11 @@ extern void vp8_quantize_mb(MACROBLOCK *x);
extern void vp8_quantize_mbuv(MACROBLOCK *x);
extern void vp8_quantize_mby(MACROBLOCK *x);
+struct VP8_COMP;
+extern void vp8_set_quantizer(struct VP8_COMP *cpi, int Q);
+extern void vp8cx_frame_init_quantizer(struct VP8_COMP *cpi);
+extern void vp8_update_zbin_extra(struct VP8_COMP *cpi, MACROBLOCK *x);
+extern void vp8cx_mb_init_quantizer(struct VP8_COMP *cpi, MACROBLOCK *x);
+extern void vp8cx_init_quantizer(struct VP8_COMP *cpi);
+
#endif
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index aa8202b2e..48070ab49 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -994,9 +994,11 @@ static int labels2mode(
cost = x->inter_bmode_costs[ m];
}
- d->bmi.mode = m;
d->bmi.mv.as_int = this_mv->as_int;
+ x->partition_info->bmi[i].mode = m;
+ x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
+
}
while (++i < 16);
@@ -1340,8 +1342,8 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
{
BLOCKD *bd = &x->e_mbd.block[i];
- bsi->mvs[i].as_mv = bd->bmi.mv.as_mv;
- bsi->modes[i] = bd->bmi.mode;
+ bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
+ bsi->modes[i] = x->partition_info->bmi[i].mode;
bsi->eobs[i] = bd->eob;
}
}
@@ -1471,7 +1473,6 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
BLOCKD *bd = &x->e_mbd.block[i];
bd->bmi.mv.as_mv = bsi.mvs[i].as_mv;
- bd->bmi.mode = bsi.modes[i];
bd->eob = bsi.eobs[i];
}
@@ -1489,9 +1490,13 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
j = vp8_mbsplit_offset[bsi.segment_num][i];
- x->partition_info->bmi[i].mode = x->e_mbd.block[j].bmi.mode;
- x->partition_info->bmi[i].mv.as_mv = x->e_mbd.block[j].bmi.mv.as_mv;
+ x->partition_info->bmi[i].mode = bsi.modes[j];
+ x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
}
+ /*
+ * used to set x->e_mbd.mode_info_context->mbmi.mv.as_int
+ */
+ x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int;
return bsi.segment_rd;
}
@@ -1751,25 +1756,29 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffse
}
}
-static void vp8_rd_update_mvcount(VP8_COMP *cpi, MACROBLOCKD *xd, int_mv *best_ref_mv)
+static void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv)
{
- int i;
-
- if (xd->mode_info_context->mbmi.mode == SPLITMV)
+ if (x->e_mbd.mode_info_context->mbmi.mode == SPLITMV)
{
- for (i = 0; i < 16; i++)
+ int i;
+
+ for (i = 0; i < x->partition_info->count; i++)
{
- if (xd->block[i].bmi.mode == NEW4X4)
+ if (x->partition_info->bmi[i].mode == NEW4X4)
{
- cpi->MVcount[0][mv_max+((xd->block[i].bmi.mv.as_mv.row - best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max+((xd->block[i].bmi.mv.as_mv.col - best_ref_mv->as_mv.col) >> 1)]++;
+ cpi->MVcount[0][mv_max+((x->partition_info->bmi[i].mv.as_mv.row
+ - best_ref_mv->as_mv.row) >> 1)]++;
+ cpi->MVcount[1][mv_max+((x->partition_info->bmi[i].mv.as_mv.col
+ - best_ref_mv->as_mv.col) >> 1)]++;
}
}
}
- else if (xd->mode_info_context->mbmi.mode == NEWMV)
+ else if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV)
{
- cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv->as_mv.col) >> 1)]++;
+ cpi->MVcount[0][mv_max+((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row
+ - best_ref_mv->as_mv.row) >> 1)]++;
+ cpi->MVcount[1][mv_max+((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col
+ - best_ref_mv->as_mv.col) >> 1)]++;
}
}
@@ -2479,14 +2488,19 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// macroblock modes
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
- vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
for (i = 0; i < 16; i++)
{
vpx_memcpy(&x->e_mbd.block[i].bmi, &best_bmodes[i], sizeof(B_MODE_INFO));
}
- x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
+ if (best_mbmode.mode == SPLITMV)
+ {
+ vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
+ x->e_mbd.mode_info_context->mbmi.mv.as_int =
+ x->partition_info->bmi[15].mv.as_int;
+ }
+
+ rd_update_mvcount(cpi, x, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
- vp8_rd_update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
}
diff --git a/vp8/encoder/variance.h b/vp8/encoder/variance.h
index bf17ea8b6..da5a5364a 100644
--- a/vp8/encoder/variance.h
+++ b/vp8/encoder/variance.h
@@ -391,7 +391,7 @@ typedef struct
vp8_sad_multi_d_fn_t sad8x8x4d;
vp8_sad_multi_d_fn_t sad4x4x4d;
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
vp8_ssimpf_fn_t ssimpf_8x8;
vp8_ssimpf_fn_t ssimpf;
#endif
diff --git a/vp8/encoder/x86/x86_csystemdependent.c b/vp8/encoder/x86/x86_csystemdependent.c
index f65ef8a5f..378b14066 100644
--- a/vp8/encoder/x86/x86_csystemdependent.c
+++ b/vp8/encoder/x86/x86_csystemdependent.c
@@ -112,7 +112,7 @@ static void subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch)
#endif
#if HAVE_SSSE3
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
#if ARCH_X86_64
typedef void ssimpf
(
@@ -287,7 +287,7 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_ssse3;
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
#if ARCH_X86_64
cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_sse3;
cpi->rtcd.variance.ssimpf = vp8_ssim_parms_16x16_sse3;
diff --git a/vp8/vp8_dx_iface.c b/vp8/vp8_dx_iface.c
index cca17c23d..e0f4c0a96 100644
--- a/vp8/vp8_dx_iface.c
+++ b/vp8/vp8_dx_iface.c
@@ -19,6 +19,8 @@
#include "decoder/onyxd_int.h"
#define VP8_CAP_POSTPROC (CONFIG_POSTPROC ? VPX_CODEC_CAP_POSTPROC : 0)
+#define VP8_CAP_ERROR_CONCEALMENT (CONFIG_ERROR_CONCEALMENT ? \
+ VPX_CODEC_CAP_ERROR_CONCEALMENT : 0)
typedef vpx_codec_stream_info_t vp8_stream_info_t;
@@ -364,6 +366,8 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
oxcf.Version = 9;
oxcf.postprocess = 0;
oxcf.max_threads = ctx->cfg.threads;
+ oxcf.error_concealment =
+ (ctx->base.init_flags & VPX_CODEC_USE_ERROR_CONCEALMENT);
optr = vp8dx_create_decompressor(&oxcf);
@@ -719,7 +723,7 @@ CODEC_INTERFACE(vpx_codec_vp8_dx) =
{
"WebM Project VP8 Decoder" VERSION_STRING,
VPX_CODEC_INTERNAL_ABI_VERSION,
- VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC,
+ VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC | VP8_CAP_ERROR_CONCEALMENT,
/* vpx_codec_caps_t caps; */
vp8_init, /* vpx_codec_init_fn_t init; */
vp8_destroy, /* vpx_codec_destroy_fn_t destroy; */
@@ -749,7 +753,7 @@ vpx_codec_iface_t vpx_codec_vp8_algo =
{
"WebM Project VP8 Decoder (Deprecated API)" VERSION_STRING,
VPX_CODEC_INTERNAL_ABI_VERSION,
- VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC,
+ VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC | VP8_CAP_ERROR_CONCEALMENT,
/* vpx_codec_caps_t caps; */
vp8_init, /* vpx_codec_init_fn_t init; */
vp8_destroy, /* vpx_codec_destroy_fn_t destroy; */
diff --git a/vp8/vp8cx.mk b/vp8/vp8cx.mk
index c17837164..d46d99df6 100644
--- a/vp8/vp8cx.mk
+++ b/vp8/vp8cx.mk
@@ -77,12 +77,12 @@ VP8_CX_SRCS-yes += encoder/rdopt.c
VP8_CX_SRCS-yes += encoder/sad_c.c
VP8_CX_SRCS-yes += encoder/segmentation.c
VP8_CX_SRCS-yes += encoder/segmentation.h
-VP8_CX_SRCS-$(CONFIG_PSNR) += encoder/ssim.c
+VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/ssim.c
VP8_CX_SRCS-yes += encoder/tokenize.c
VP8_CX_SRCS-yes += encoder/treewriter.c
VP8_CX_SRCS-yes += encoder/variance_c.c
-VP8_CX_SRCS-$(CONFIG_PSNR) += common/postproc.h
-VP8_CX_SRCS-$(CONFIG_PSNR) += common/postproc.c
+VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.h
+VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.c
VP8_CX_SRCS-yes += encoder/temporal_filter.c
VP8_CX_SRCS-yes += encoder/temporal_filter.h
diff --git a/vp8/vp8dx.mk b/vp8/vp8dx.mk
index 564967191..85d6f513e 100644
--- a/vp8/vp8dx.mk
+++ b/vp8/vp8dx.mk
@@ -53,6 +53,9 @@ VP8_DX_SRCS-yes += decoder/decodemv.c
VP8_DX_SRCS-yes += decoder/decodframe.c
VP8_DX_SRCS-yes += decoder/dequantize.c
VP8_DX_SRCS-yes += decoder/detokenize.c
+VP8_DX_SRCS-$(CONFIG_ERROR_CONCEALMENT) += decoder/ec_types.h
+VP8_DX_SRCS-$(CONFIG_ERROR_CONCEALMENT) += decoder/error_concealment.h
+VP8_DX_SRCS-$(CONFIG_ERROR_CONCEALMENT) += decoder/error_concealment.c
VP8_DX_SRCS-yes += decoder/generic/dsystemdependent.c
VP8_DX_SRCS-yes += decoder/dboolhuff.h
VP8_DX_SRCS-yes += decoder/decodemv.h