Diffstat (limited to 'vp8')
-rw-r--r--  vp8/common/alloccommon.c                |  23
-rw-r--r--  vp8/common/arm/neon/iwalsh_neon.asm     |  62
-rw-r--r--  vp8/common/generic/systemdependent.c    |   2
-rw-r--r--  vp8/common/onyxc_int.h                  |   2
-rw-r--r--  vp8/common/onyxd.h                      |   1
-rw-r--r--  vp8/common/x86/recon_sse2.asm           |  59
-rw-r--r--  vp8/common/x86/recon_wrapper_sse2.c     |  18
-rw-r--r--  vp8/decoder/decodemv.c                  |  23
-rw-r--r--  vp8/decoder/decodframe.c                | 219
-rw-r--r--  vp8/decoder/ec_types.h                  |  49
-rw-r--r--  vp8/decoder/error_concealment.c         | 613
-rw-r--r--  vp8/decoder/error_concealment.h         |  41
-rw-r--r--  vp8/decoder/onyxd_if.c                  |  49
-rw-r--r--  vp8/decoder/onyxd_int.h                 |  10
-rw-r--r--  vp8/encoder/block.h                     |   1
-rw-r--r--  vp8/encoder/encodeframe.c               | 379
-rw-r--r--  vp8/encoder/encodeintra.c               |  21
-rw-r--r--  vp8/encoder/encodeintra.h               |   5
-rw-r--r--  vp8/encoder/ethreading.c                |   1
-rw-r--r--  vp8/encoder/firstpass.c                 | 485
-rw-r--r--  vp8/encoder/generic/csystemdependent.c  |   2
-rw-r--r--  vp8/encoder/onyx_if.c                   | 145
-rw-r--r--  vp8/encoder/onyx_int.h                  | 139
-rw-r--r--  vp8/encoder/pickinter.c                 |  44
-rw-r--r--  vp8/encoder/picklpf.c                   |  10
-rw-r--r--  vp8/encoder/quantize.c                  | 418
-rw-r--r--  vp8/encoder/quantize.h                  |   7
-rw-r--r--  vp8/encoder/ratectrl.c                  |   4
-rw-r--r--  vp8/encoder/rdopt.c                     |  76
-rw-r--r--  vp8/encoder/variance.h                  |   2
-rw-r--r--  vp8/encoder/x86/x86_csystemdependent.c  |   4
-rw-r--r--  vp8/vp8_dx_iface.c                      |   8
-rw-r--r--  vp8/vp8cx.mk                            |   6
-rw-r--r--  vp8/vp8dx.mk                            |   3
34 files changed, 1915 insertions(+), 1016 deletions(-)
diff --git a/vp8/common/alloccommon.c b/vp8/common/alloccommon.c
index b0c7363a7..4d3744ebf 100644
--- a/vp8/common/alloccommon.c
+++ b/vp8/common/alloccommon.c
@@ -27,6 +27,9 @@ static void update_mode_info_border(MODE_INFO *mi, int rows, int cols)
for (i = 0; i < rows; i++)
{
+ /* TODO(holmer): Bug? This updates the last element of each row
+ * rather than the border element!
+ */
vpx_memset(&mi[i*cols-1], 0, sizeof(MODE_INFO));
}
}
@@ -43,9 +46,11 @@ void vp8_de_alloc_frame_buffers(VP8_COMMON *oci)
vpx_free(oci->above_context);
vpx_free(oci->mip);
+ vpx_free(oci->prev_mip);
oci->above_context = 0;
oci->mip = 0;
+ oci->prev_mip = 0;
}
@@ -110,6 +115,21 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height)
oci->mi = oci->mip + oci->mode_info_stride + 1;
+ /* allocate memory for last frame MODE_INFO array */
+#if CONFIG_ERROR_CONCEALMENT
+ oci->prev_mip = vpx_calloc((oci->mb_cols + 1) * (oci->mb_rows + 1), sizeof(MODE_INFO));
+
+ if (!oci->prev_mip)
+ {
+ vp8_de_alloc_frame_buffers(oci);
+ return 1;
+ }
+
+ oci->prev_mi = oci->prev_mip + oci->mode_info_stride + 1;
+#else
+ oci->prev_mip = NULL;
+ oci->prev_mi = NULL;
+#endif
oci->above_context = vpx_calloc(sizeof(ENTROPY_CONTEXT_PLANES) * oci->mb_cols, 1);
@@ -120,6 +140,9 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height)
}
update_mode_info_border(oci->mi, oci->mb_rows, oci->mb_cols);
+#if CONFIG_ERROR_CONCEALMENT
+ update_mode_info_border(oci->prev_mi, oci->mb_rows, oci->mb_cols);
+#endif
return 0;
}
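
The prev_mip array added here mirrors the layout of the existing mip allocation: (mb_cols + 1) * (mb_rows + 1) MODE_INFO entries with a one-entry border, and prev_mi pointing one row and one column in, at the first visible macroblock. A minimal indexing sketch, assuming mode_info_stride == mb_cols + 1 as set up elsewhere in alloccommon.c (the helper name is illustrative, not part of the library):

#include <stddef.h>

/* Index of the MODE_INFO entry for visible macroblock (mb_row, mb_col),
 * relative to mi/prev_mi, which already point at mip + stride + 1.
 */
static size_t mi_index(int mb_row, int mb_col, int mb_cols)
{
    const int stride = mb_cols + 1;   /* mode_info_stride */
    return (size_t)mb_row * stride + mb_col;
}
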
diff --git a/vp8/common/arm/neon/iwalsh_neon.asm b/vp8/common/arm/neon/iwalsh_neon.asm
index 663bf390e..01c79d937 100644
--- a/vp8/common/arm/neon/iwalsh_neon.asm
+++ b/vp8/common/arm/neon/iwalsh_neon.asm
@@ -20,19 +20,16 @@
|vp8_short_inv_walsh4x4_neon| PROC
; read in all four lines of values: d0->d3
- vldm.64 r0, {q0, q1}
+ vld1.i16 {q0-q1}, [r0@128]
; first for loop
-
vadd.s16 d4, d0, d3 ;a = [0] + [12]
- vadd.s16 d5, d1, d2 ;b = [4] + [8]
- vsub.s16 d6, d1, d2 ;c = [4] - [8]
- vsub.s16 d7, d0, d3 ;d = [0] - [12]
+ vadd.s16 d6, d1, d2 ;b = [4] + [8]
+ vsub.s16 d5, d0, d3 ;d = [0] - [12]
+ vsub.s16 d7, d1, d2 ;c = [4] - [8]
- vadd.s16 d0, d4, d5 ;a + b
- vadd.s16 d1, d6, d7 ;c + d
- vsub.s16 d2, d4, d5 ;a - b
- vsub.s16 d3, d7, d6 ;d - c
+ vadd.s16 q0, q2, q3 ; a+b d+c
+ vsub.s16 q1, q2, q3 ; a-b d-c
vtrn.32 d0, d2 ;d0: 0 1 8 9
;d2: 2 3 10 11
@@ -47,29 +44,22 @@
; second for loop
vadd.s16 d4, d0, d3 ;a = [0] + [3]
- vadd.s16 d5, d1, d2 ;b = [1] + [2]
- vsub.s16 d6, d1, d2 ;c = [1] - [2]
- vsub.s16 d7, d0, d3 ;d = [0] - [3]
+ vadd.s16 d6, d1, d2 ;b = [1] + [2]
+ vsub.s16 d5, d0, d3 ;d = [0] - [3]
+ vsub.s16 d7, d1, d2 ;c = [1] - [2]
+
+ vmov.i16 q8, #3
- vadd.s16 d0, d4, d5 ;e = a + b
- vadd.s16 d1, d6, d7 ;f = c + d
- vsub.s16 d2, d4, d5 ;g = a - b
- vsub.s16 d3, d7, d6 ;h = d - c
+ vadd.s16 q0, q2, q3 ; a+b d+c
+ vsub.s16 q1, q2, q3 ; a-b d-c
- vmov.i16 q2, #3
- vadd.i16 q0, q0, q2 ;e/f += 3
- vadd.i16 q1, q1, q2 ;g/h += 3
+ vadd.i16 q0, q0, q8 ;e/f += 3
+ vadd.i16 q1, q1, q8 ;g/h += 3
vshr.s16 q0, q0, #3 ;e/f >> 3
vshr.s16 q1, q1, #3 ;g/h >> 3
- vtrn.32 d0, d2
- vtrn.32 d1, d3
- vtrn.16 d0, d1
- vtrn.16 d2, d3
-
- vstmia.16 r1!, {q0}
- vstmia.16 r1!, {q1}
+ vst4.i16 {d0,d1,d2,d3}, [r1@128]
bx lr
ENDP ; |vp8_short_inv_walsh4x4_neon|
@@ -77,19 +67,13 @@
;short vp8_short_inv_walsh4x4_1_neon(short *input, short *output)
|vp8_short_inv_walsh4x4_1_neon| PROC
- ; load a full line into a neon register
- vld1.16 {q0}, [r0]
- ; extract first element and replicate
- vdup.16 q1, d0[0]
- ; add 3 to all values
- vmov.i16 q2, #3
- vadd.i16 q3, q1, q2
- ; right shift
- vshr.s16 q3, q3, #3
- ; write it back
- vstmia.16 r1!, {q3}
- vstmia.16 r1!, {q3}
-
+ ldrsh r2, [r0] ; load input[0]
+ add r3, r2, #3 ; add 3
+ add r2, r1, #16 ; base for last 8 output
+ asr r0, r3, #3 ; right shift 3
+ vdup.16 q0, r0 ; load and duplicate
+ vst1.16 {q0}, [r1@128] ; write back 8
+ vst1.16 {q0}, [r2@128] ; write back last 8
bx lr
ENDP ; |vp8_short_inv_walsh4x4_1_neon|
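
For reference, the C model below sketches the 4x4 inverse Walsh-Hadamard transform that the NEON routine above computes, following the a/b/c/d butterflies and the +3, >>3 rounding spelled out in the assembly comments. The function name and the separate temporary buffer are illustrative, not the library's own:

/* Two-pass 4x4 inverse WHT: pass 1 works down columns, pass 2 across rows
 * with rounding (add 3, then arithmetic shift right by 3).
 */
static void inv_walsh4x4_ref(const short *input, short *output)
{
    int i;
    int tmp[16];

    for (i = 0; i < 4; i++)                    /* first pass: columns */
    {
        const int a = input[i + 0] + input[i + 12];   /* [0] + [12] */
        const int b = input[i + 4] + input[i + 8];    /* [4] + [8]  */
        const int c = input[i + 4] - input[i + 8];    /* [4] - [8]  */
        const int d = input[i + 0] - input[i + 12];   /* [0] - [12] */

        tmp[i + 0]  = a + b;
        tmp[i + 4]  = c + d;
        tmp[i + 8]  = a - b;
        tmp[i + 12] = d - c;
    }

    for (i = 0; i < 4; i++)                    /* second pass: rows */
    {
        const int a = tmp[i * 4 + 0] + tmp[i * 4 + 3];
        const int b = tmp[i * 4 + 1] + tmp[i * 4 + 2];
        const int c = tmp[i * 4 + 1] - tmp[i * 4 + 2];
        const int d = tmp[i * 4 + 0] - tmp[i * 4 + 3];

        output[i * 4 + 0] = (short)((a + b + 3) >> 3);
        output[i * 4 + 1] = (short)((c + d + 3) >> 3);
        output[i * 4 + 2] = (short)((a - b + 3) >> 3);
        output[i * 4 + 3] = (short)((d - c + 3) >> 3);
    }
}
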
diff --git a/vp8/common/generic/systemdependent.c b/vp8/common/generic/systemdependent.c
index d981f3496..133938097 100644
--- a/vp8/common/generic/systemdependent.c
+++ b/vp8/common/generic/systemdependent.c
@@ -113,7 +113,7 @@ void vp8_machine_specific_config(VP8_COMMON *ctx)
rtcd->loopfilter.simple_mb_h = vp8_loop_filter_mbhs_c;
rtcd->loopfilter.simple_b_h = vp8_loop_filter_bhs_c;
-#if CONFIG_POSTPROC || (CONFIG_VP8_ENCODER && CONFIG_PSNR)
+#if CONFIG_POSTPROC || (CONFIG_VP8_ENCODER && CONFIG_INTERNAL_STATS)
rtcd->postproc.down = vp8_mbpost_proc_down_c;
rtcd->postproc.across = vp8_mbpost_proc_across_ip_c;
rtcd->postproc.downacross = vp8_post_proc_down_and_across_c;
diff --git a/vp8/common/onyxc_int.h b/vp8/common/onyxc_int.h
index cf29d03df..862307ebb 100644
--- a/vp8/common/onyxc_int.h
+++ b/vp8/common/onyxc_int.h
@@ -139,6 +139,8 @@ typedef struct VP8Common
MODE_INFO *mip; /* Base of allocated array */
MODE_INFO *mi; /* Corresponds to upper left visible macroblock */
+ MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */
+ MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */
INTERPOLATIONFILTERTYPE mcomp_filter_type;
diff --git a/vp8/common/onyxd.h b/vp8/common/onyxd.h
index e53bc3138..140dc5728 100644
--- a/vp8/common/onyxd.h
+++ b/vp8/common/onyxd.h
@@ -31,6 +31,7 @@ extern "C"
int Version;
int postprocess;
int max_threads;
+ int error_concealment;
} VP8D_CONFIG;
typedef enum
{
diff --git a/vp8/common/x86/recon_sse2.asm b/vp8/common/x86/recon_sse2.asm
index 86c421a9a..00b74387c 100644
--- a/vp8/common/x86/recon_sse2.asm
+++ b/vp8/common/x86/recon_sse2.asm
@@ -584,23 +584,35 @@ sym(vp8_intra_pred_uv_ve_mmx):
; unsigned char *src,
; int src_stride,
; )
-global sym(vp8_intra_pred_uv_ho_mmx2)
-sym(vp8_intra_pred_uv_ho_mmx2):
+%macro vp8_intra_pred_uv_ho 1
+global sym(vp8_intra_pred_uv_ho_%1)
+sym(vp8_intra_pred_uv_ho_%1):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 4
push rsi
push rdi
+%ifidn %1, ssse3
+ push rbx
+%endif
; end prolog
; read from left and write out
+%ifidn %1, mmx2
mov edx, 4
+%endif
mov rsi, arg(2) ;src;
movsxd rax, dword ptr arg(3) ;src_stride;
mov rdi, arg(0) ;dst;
movsxd rcx, dword ptr arg(1) ;dst_stride
+%ifidn %1, ssse3
+ lea rbx, [rax*3]
+ lea rdx, [rcx*3]
+ movdqa xmm2, [GLOBAL(dc_00001111)]
+%endif
dec rsi
-vp8_intra_pred_uv_ho_mmx2_loop:
+%ifidn %1, mmx2
+vp8_intra_pred_uv_ho_%1_loop:
movd mm0, [rsi]
movd mm1, [rsi+rax]
punpcklbw mm0, mm0
@@ -612,14 +624,49 @@ vp8_intra_pred_uv_ho_mmx2_loop:
lea rsi, [rsi+rax*2]
lea rdi, [rdi+rcx*2]
dec edx
- jnz vp8_intra_pred_uv_ho_mmx2_loop
+ jnz vp8_intra_pred_uv_ho_%1_loop
+%else
+ movd xmm0, [rsi]
+ movd xmm3, [rsi+rax]
+ movd xmm1, [rsi+rax*2]
+ movd xmm4, [rsi+rbx]
+ punpcklbw xmm0, xmm3
+ punpcklbw xmm1, xmm4
+ pshufb xmm0, xmm2
+ pshufb xmm1, xmm2
+ movq [rdi ], xmm0
+ movhps [rdi+rcx], xmm0
+ movq [rdi+rcx*2], xmm1
+ movhps [rdi+rdx], xmm1
+ lea rsi, [rsi+rax*4]
+ lea rdi, [rdi+rcx*4]
+ movd xmm0, [rsi]
+ movd xmm3, [rsi+rax]
+ movd xmm1, [rsi+rax*2]
+ movd xmm4, [rsi+rbx]
+ punpcklbw xmm0, xmm3
+ punpcklbw xmm1, xmm4
+ pshufb xmm0, xmm2
+ pshufb xmm1, xmm2
+ movq [rdi ], xmm0
+ movhps [rdi+rcx], xmm0
+ movq [rdi+rcx*2], xmm1
+ movhps [rdi+rdx], xmm1
+%endif
; begin epilog
+%ifidn %1, ssse3
+ pop rbx
+%endif
pop rdi
pop rsi
UNSHADOW_ARGS
pop rbp
ret
+%endmacro
+
+vp8_intra_pred_uv_ho mmx2
+vp8_intra_pred_uv_ho ssse3
SECTION_RODATA
dc_128:
@@ -629,3 +676,7 @@ dc_4:
align 16
dc_1024:
times 8 dw 0x400
+align 16
+dc_00001111:
+ times 8 db 0
+ times 8 db 1
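
Both instantiations of the macro above produce the 8x8 horizontal (H_PRED) chroma predictor: every destination row is filled with the reconstructed pixel immediately to its left (hence the dec rsi before the reads, and the byte replication via punpcklbw/pshufb). A minimal C sketch of that behaviour, with an assumed argument order matching how arg(0)..arg(3) are used in the prolog:

/* 8x8 horizontal intra prediction for a U or V block: for each row,
 * replicate the pixel just left of that row across the whole row.
 */
static void intra_pred_uv_ho_ref(unsigned char *dst, int dst_stride,
                                 unsigned char *src, int src_stride)
{
    int r, c;

    src -= 1;                        /* column to the left of the block */
    for (r = 0; r < 8; r++)
    {
        for (c = 0; c < 8; c++)
            dst[c] = src[0];
        src += src_stride;
        dst += dst_stride;
    }
}
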
diff --git a/vp8/common/x86/recon_wrapper_sse2.c b/vp8/common/x86/recon_wrapper_sse2.c
index 86b4da2c2..cb7b69c08 100644
--- a/vp8/common/x86/recon_wrapper_sse2.c
+++ b/vp8/common/x86/recon_wrapper_sse2.c
@@ -23,6 +23,7 @@ extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_dctop_mmx2);
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_dcleft_mmx2);
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_dc128_mmx);
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_ho_mmx2);
+extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_ho_ssse3);
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_ve_mmx);
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_tm_sse2);
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_tm_ssse3);
@@ -31,7 +32,8 @@ static void vp8_build_intra_predictors_mbuv_x86(MACROBLOCKD *x,
unsigned char *dst_u,
unsigned char *dst_v,
int dst_stride,
- build_intra_predictors_mbuv_fn_t tm_func)
+ build_intra_predictors_mbuv_fn_t tm_func,
+ build_intra_predictors_mbuv_fn_t ho_func)
{
int mode = x->mode_info_context->mbmi.uv_mode;
build_intra_predictors_mbuv_fn_t fn;
@@ -39,7 +41,7 @@ static void vp8_build_intra_predictors_mbuv_x86(MACROBLOCKD *x,
switch (mode) {
case V_PRED: fn = vp8_intra_pred_uv_ve_mmx; break;
- case H_PRED: fn = vp8_intra_pred_uv_ho_mmx2; break;
+ case H_PRED: fn = ho_func; break;
case TM_PRED: fn = tm_func; break;
case DC_PRED:
if (x->up_available) {
@@ -65,26 +67,30 @@ void vp8_build_intra_predictors_mbuv_sse2(MACROBLOCKD *x)
{
vp8_build_intra_predictors_mbuv_x86(x, &x->predictor[256],
&x->predictor[320], 8,
- vp8_intra_pred_uv_tm_sse2);
+ vp8_intra_pred_uv_tm_sse2,
+ vp8_intra_pred_uv_ho_mmx2);
}
void vp8_build_intra_predictors_mbuv_ssse3(MACROBLOCKD *x)
{
vp8_build_intra_predictors_mbuv_x86(x, &x->predictor[256],
&x->predictor[320], 8,
- vp8_intra_pred_uv_tm_ssse3);
+ vp8_intra_pred_uv_tm_ssse3,
+ vp8_intra_pred_uv_ho_ssse3);
}
void vp8_build_intra_predictors_mbuv_s_sse2(MACROBLOCKD *x)
{
vp8_build_intra_predictors_mbuv_x86(x, x->dst.u_buffer,
x->dst.v_buffer, x->dst.uv_stride,
- vp8_intra_pred_uv_tm_sse2);
+ vp8_intra_pred_uv_tm_sse2,
+ vp8_intra_pred_uv_ho_mmx2);
}
void vp8_build_intra_predictors_mbuv_s_ssse3(MACROBLOCKD *x)
{
vp8_build_intra_predictors_mbuv_x86(x, x->dst.u_buffer,
x->dst.v_buffer, x->dst.uv_stride,
- vp8_intra_pred_uv_tm_ssse3);
+ vp8_intra_pred_uv_tm_ssse3,
+ vp8_intra_pred_uv_ho_ssse3);
}
diff --git a/vp8/decoder/decodemv.c b/vp8/decoder/decodemv.c
index 4de28688b..cd67536bc 100644
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -244,6 +244,12 @@ static void mb_mode_mv_init(VP8D_COMP *pbi)
vp8_reader *const bc = & pbi->bc;
MV_CONTEXT *const mvc = pbi->common.fc.mvc;
+#if CONFIG_ERROR_CONCEALMENT
+ /* Default is that no macroblock is corrupt, therefore we initialize
+ * mvs_corrupt_from_mb to something very big, which we can be sure is
+ * outside the frame. */
+ pbi->mvs_corrupt_from_mb = UINT_MAX;
+#endif
pbi->prob_skip_false = 0;
if (pbi->common.mb_no_coeff_skip)
pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8);
@@ -280,6 +286,7 @@ static void mb_mode_mv_init(VP8D_COMP *pbi)
}
}
+
static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
int mb_row, int mb_col)
{
@@ -403,7 +410,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
do {
mi->bmi[ *fill_offset] = bmi;
- fill_offset++;
+ fill_offset++;
}while (--fill_count);
}
@@ -524,12 +531,26 @@ void vp8_decode_mode_mvs(VP8D_COMP *pbi)
while (++mb_col < pbi->common.mb_cols)
{
+ int mb_num = mb_row * pbi->common.mb_cols + mb_col;
/*read_mb_modes_mv(pbi, xd->mode_info_context, &xd->mode_info_context->mbmi, mb_row, mb_col);*/
if(pbi->common.frame_type == KEY_FRAME)
vp8_kfread_modes(pbi, mi, mb_row, mb_col);
else
read_mb_modes_mv(pbi, mi, &mi->mbmi, mb_row, mb_col);
+#if CONFIG_ERROR_CONCEALMENT
+ /* look for corruption. set mvs_corrupt_from_mb to the current
+ * mb_num if the frame is corrupt from this macroblock. */
+ if (vp8dx_bool_error(&pbi->bc) && mb_num < pbi->mvs_corrupt_from_mb)
+ {
+ pbi->mvs_corrupt_from_mb = mb_num;
+ /* no need to continue since the partition is corrupt from
+ * here on.
+ */
+ return;
+ }
+#endif
+
mi++; /* next macroblock */
}
diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c
index a585f774c..f8e04a7d0 100644
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -27,6 +27,9 @@
#include "decodemv.h"
#include "vp8/common/extend.h"
+#if CONFIG_ERROR_CONCEALMENT
+#include "error_concealment.h"
+#endif
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/idct.h"
#include "dequantize.h"
@@ -176,7 +179,8 @@ void clamp_mvs(MACROBLOCKD *xd)
}
-static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
+static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
+ unsigned int mb_idx)
{
int eobtotal = 0;
int i, do_clamp = xd->mode_info_context->mbmi.need_to_clamp_mvs;
@@ -233,6 +237,19 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
vp8_build_inter_predictors_mb(xd);
}
+#if CONFIG_ERROR_CONCEALMENT
+ if (pbi->ec_enabled &&
+ (mb_idx > pbi->mvs_corrupt_from_mb ||
+ vp8dx_bool_error(xd->current_bc)))
+ {
+ /* MB with corrupt residuals or corrupt mode/motion vectors.
+ * Better to use the predictor as reconstruction.
+ */
+ vp8_conceal_corrupt_mb(xd);
+ return;
+ }
+#endif
+
/* dequantization and idct */
if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
{
@@ -355,6 +372,32 @@ decode_mb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mb_row, MACROBLOCKD *xd)
for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
{
+ /* Distance of Mb to the various image edges.
+ * These are specified to 8th pel as they are always compared to values
+ * that are in 1/8th pel units
+ */
+ xd->mb_to_left_edge = -((mb_col * 16) << 3);
+ xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
+
+#if CONFIG_ERROR_CONCEALMENT
+ if (pbi->ec_enabled &&
+ xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME &&
+ vp8dx_bool_error(xd->current_bc))
+ {
+ /* We have an intra block with corrupt coefficients, better to
+ * conceal with an inter block. Interpolate MVs from neighboring MBs
+ *
+ * Note that for the first mb with corrupt residual in a frame,
+ * we might not discover that before decoding the residual. That
+ * happens after this check, and therefore no inter concealment will
+ * be done.
+ */
+ vp8_interpolate_motion(xd,
+ mb_row, mb_col,
+ pc->mb_rows, pc->mb_cols,
+ pc->mode_info_stride);
+ }
+#endif
if (xd->mode_info_context->mbmi.mode == SPLITMV || xd->mode_info_context->mbmi.mode == B_PRED)
{
@@ -365,12 +408,6 @@ decode_mb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mb_row, MACROBLOCKD *xd)
}
}
- /* Distance of Mb to the various image edges.
- * These are specified to 8th pel as they are always compared to values that are in 1/8th pel units
- */
- xd->mb_to_left_edge = -((mb_col * 16) << 3);
- xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
-
xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
@@ -403,7 +440,7 @@ decode_mb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mb_row, MACROBLOCKD *xd)
else
pbi->debugoutput =0;
*/
- decode_macroblock(pbi, xd);
+ decode_macroblock(pbi, xd, mb_row * pc->mb_cols + mb_col);
/* check if the boolean decoder has suffered an error */
xd->corrupted |= vp8dx_bool_error(xd->current_bc);
@@ -477,8 +514,8 @@ static void setup_token_decoder(VP8D_COMP *pbi,
partition_size = user_data_end - partition;
}
- if (partition + partition_size > user_data_end
- || partition + partition_size < partition)
+ if (!pbi->ec_enabled && (partition + partition_size > user_data_end
+ || partition + partition_size < partition))
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt partition "
"%d length", i + 1);
@@ -593,63 +630,105 @@ int vp8_decode_frame(VP8D_COMP *pbi)
pc->yv12_fb[pc->new_fb_idx].corrupted = 0;
if (data_end - data < 3)
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Truncated packet");
- pc->frame_type = (FRAME_TYPE)(data[0] & 1);
- pc->version = (data[0] >> 1) & 7;
- pc->show_frame = (data[0] >> 4) & 1;
- first_partition_length_in_bytes =
- (data[0] | (data[1] << 8) | (data[2] << 16)) >> 5;
- data += 3;
-
- if (data + first_partition_length_in_bytes > data_end
- || data + first_partition_length_in_bytes < data)
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Truncated packet or corrupt partition 0 length");
- vp8_setup_version(pc);
-
- if (pc->frame_type == KEY_FRAME)
{
- const int Width = pc->Width;
- const int Height = pc->Height;
-
- /* vet via sync code */
- if (data[0] != 0x9d || data[1] != 0x01 || data[2] != 0x2a)
- vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM,
- "Invalid frame sync code");
-
- pc->Width = (data[3] | (data[4] << 8)) & 0x3fff;
- pc->horiz_scale = data[4] >> 6;
- pc->Height = (data[5] | (data[6] << 8)) & 0x3fff;
- pc->vert_scale = data[6] >> 6;
- data += 7;
-
- if (Width != pc->Width || Height != pc->Height)
+ if (pbi->ec_enabled)
{
- int prev_mb_rows = pc->mb_rows;
+ /* Declare the missing frame as an inter frame since it will
+ be handled as an inter frame when we have estimated its
+ motion vectors. */
+ pc->frame_type = INTER_FRAME;
+ pc->version = 0;
+ pc->show_frame = 1;
+ first_partition_length_in_bytes = 0;
+ }
+ else
+ {
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet");
+ }
+ }
+ else
+ {
+ pc->frame_type = (FRAME_TYPE)(data[0] & 1);
+ pc->version = (data[0] >> 1) & 7;
+ pc->show_frame = (data[0] >> 4) & 1;
+ first_partition_length_in_bytes =
+ (data[0] | (data[1] << 8) | (data[2] << 16)) >> 5;
+ data += 3;
+
+ if (!pbi->ec_enabled && (data + first_partition_length_in_bytes > data_end
+ || data + first_partition_length_in_bytes < data))
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt partition 0 length");
+ vp8_setup_version(pc);
- if (pc->Width <= 0)
+ if (pc->frame_type == KEY_FRAME)
+ {
+ const int Width = pc->Width;
+ const int Height = pc->Height;
+
+ /* vet via sync code */
+ /* When error concealment is enabled we should only check the sync
+ * code if we have enough bits available
+ */
+ if (!pbi->ec_enabled || data + 3 < data_end)
{
- pc->Width = Width;
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Invalid frame width");
+ if (data[0] != 0x9d || data[1] != 0x01 || data[2] != 0x2a)
+ vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid frame sync code");
}
- if (pc->Height <= 0)
+ /* If error concealment is enabled we should only parse the new size
+ * if we have enough data. Otherwise we will end up with the wrong
+ * size.
+ */
+ if (!pbi->ec_enabled || data + 6 < data_end)
{
- pc->Height = Height;
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Invalid frame height");
+ pc->Width = (data[3] | (data[4] << 8)) & 0x3fff;
+ pc->horiz_scale = data[4] >> 6;
+ pc->Height = (data[5] | (data[6] << 8)) & 0x3fff;
+ pc->vert_scale = data[6] >> 6;
}
+ data += 7;
+
+ if (Width != pc->Width || Height != pc->Height)
+ {
+ int prev_mb_rows = pc->mb_rows;
+
+ if (pc->Width <= 0)
+ {
+ pc->Width = Width;
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid frame width");
+ }
+
+ if (pc->Height <= 0)
+ {
+ pc->Height = Height;
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid frame height");
+ }
- if (vp8_alloc_frame_buffers(pc, pc->Width, pc->Height))
- vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
- "Failed to allocate frame buffers");
+ if (vp8_alloc_frame_buffers(pc, pc->Width, pc->Height))
+ vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate frame buffers");
+
+#if CONFIG_ERROR_CONCEALMENT
+ pbi->overlaps = NULL;
+ if (pbi->ec_enabled)
+ {
+ if (vp8_alloc_overlap_lists(pbi))
+ vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate overlap lists "
+ "for error concealment");
+ }
+#endif
#if CONFIG_MULTITHREAD
- if (pbi->b_multithreaded_rd)
- vp8mt_alloc_temp_buffers(pbi, pc->Width, prev_mb_rows);
+ if (pbi->b_multithreaded_rd)
+ vp8mt_alloc_temp_buffers(pbi, pc->Width, prev_mb_rows);
#endif
+ }
}
}
@@ -792,7 +871,20 @@ int vp8_decode_frame(VP8D_COMP *pbi)
{
/* Should the GF or ARF be updated from the current frame */
pc->refresh_golden_frame = vp8_read_bit(bc);
+#if CONFIG_ERROR_CONCEALMENT
+ /* Assume we shouldn't refresh golden if the bit is missing */
+ xd->corrupted |= vp8dx_bool_error(bc);
+ if (pbi->ec_enabled && xd->corrupted)
+ pc->refresh_golden_frame = 0;
+#endif
+
pc->refresh_alt_ref_frame = vp8_read_bit(bc);
+#if CONFIG_ERROR_CONCEALMENT
+ /* Assume we shouldn't refresh altref if the bit is missing */
+ xd->corrupted |= vp8dx_bool_error(bc);
+ if (pbi->ec_enabled && xd->corrupted)
+ pc->refresh_alt_ref_frame = 0;
+#endif
/* Buffer to buffer copy flags. */
pc->copy_buffer_to_gf = 0;
@@ -817,6 +909,13 @@ int vp8_decode_frame(VP8D_COMP *pbi)
pc->refresh_last_frame = pc->frame_type == KEY_FRAME || vp8_read_bit(bc);
+#if CONFIG_ERROR_CONCEALMENT
+ /* Assume we should refresh the last frame if the bit is missing */
+ xd->corrupted |= vp8dx_bool_error(bc);
+ if (pbi->ec_enabled && xd->corrupted)
+ pc->refresh_last_frame = 1;
+#endif
+
if (0)
{
FILE *z = fopen("decodestats.stt", "a");
@@ -872,6 +971,16 @@ int vp8_decode_frame(VP8D_COMP *pbi)
vp8_decode_mode_mvs(pbi);
+#if CONFIG_ERROR_CONCEALMENT
+ if (pbi->ec_enabled &&
+ pbi->mvs_corrupt_from_mb < (unsigned int)pc->mb_cols * pc->mb_rows)
+ {
+ /* Motion vectors are missing in this frame. We will try to estimate
+ * them and then continue decoding the frame as usual */
+ vp8_estimate_missing_mvs(pbi);
+ }
+#endif
+
vpx_memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
vpx_memcpy(&xd->block[0].bmi, &xd->mode_info_context->bmi[0], sizeof(B_MODE_INFO));
diff --git a/vp8/decoder/ec_types.h b/vp8/decoder/ec_types.h
new file mode 100644
index 000000000..a4f8c78b3
--- /dev/null
+++ b/vp8/decoder/ec_types.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP8_DEC_EC_TYPES_H
+#define VP8_DEC_EC_TYPES_H
+
+#define MAX_OVERLAPS 16
+
+/* The area (pixel area in Q6) the block pointed to by bmi overlaps
+ * another block with.
+ */
+typedef struct
+{
+ int overlap;
+ B_MODE_INFO *bmi;
+} OVERLAP_NODE;
+
+/* Structure to keep track of overlapping blocks on a block level. */
+typedef struct
+{
+ /* TODO(holmer): This array should be exchanged for a linked list */
+ OVERLAP_NODE overlaps[MAX_OVERLAPS];
+} B_OVERLAP;
+
+/* Structure used to hold all the overlaps of a macroblock. The overlaps of a
+ * macroblock are further divided into block overlaps.
+ */
+typedef struct
+{
+ B_OVERLAP overlaps[16];
+} MB_OVERLAP;
+
+/* Structure for keeping track of motion vectors and which reference frame they
+ * refer to. Used for motion vector interpolation.
+ */
+typedef struct
+{
+ MV mv;
+ MV_REFERENCE_FRAME ref_frame;
+} EC_BLOCK;
+
+#endif /* VP8_DEC_EC_TYPES_H */
diff --git a/vp8/decoder/error_concealment.c b/vp8/decoder/error_concealment.c
new file mode 100644
index 000000000..dcb5c86a5
--- /dev/null
+++ b/vp8/decoder/error_concealment.c
@@ -0,0 +1,613 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "error_concealment.h"
+#include "onyxd_int.h"
+#include "decodemv.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/common/recon.h"
+#include "vp8/common/findnearmv.h"
+
+#include <assert.h>
+
+#define MIN(x,y) (((x)<(y))?(x):(y))
+#define MAX(x,y) (((x)>(y))?(x):(y))
+
+#define FLOOR(x,q) ((x) & -(1 << (q)))
+
+#define NUM_NEIGHBORS 20
+
+typedef struct ec_position
+{
+ int row;
+ int col;
+} EC_POS;
+
+/*
+ * Regenerate the table in Matlab with:
+ * x = meshgrid((1:4), (1:4));
+ * y = meshgrid((1:4), (1:4))';
+ * W = round((1./(sqrt(x.^2 + y.^2))*2^7));
+ * W(1,1) = 0;
+ */
+static const int weights_q7[5][5] = {
+ { 0, 128, 64, 43, 32 },
+ {128, 91, 57, 40, 31 },
+ { 64, 57, 45, 36, 29 },
+ { 43, 40, 36, 30, 26 },
+ { 32, 31, 29, 26, 23 }
+};
+
+int vp8_alloc_overlap_lists(VP8D_COMP *pbi)
+{
+ if (pbi->overlaps != NULL)
+ {
+ vpx_free(pbi->overlaps);
+ pbi->overlaps = NULL;
+ }
+ pbi->overlaps = vpx_calloc(pbi->common.mb_rows * pbi->common.mb_cols,
+ sizeof(MB_OVERLAP));
+ if (pbi->overlaps == NULL)
+ return -1;
+ vpx_memset(pbi->overlaps, 0,
+ sizeof(MB_OVERLAP) * pbi->common.mb_rows * pbi->common.mb_cols);
+ return 0;
+}
+
+void vp8_de_alloc_overlap_lists(VP8D_COMP *pbi)
+{
+ vpx_free(pbi->overlaps);
+ pbi->overlaps = NULL;
+}
+
+/* Inserts a new overlap area value to the list of overlaps of a block */
+static void assign_overlap(OVERLAP_NODE* overlaps,
+ B_MODE_INFO *bmi,
+ int overlap)
+{
+ int i;
+ if (overlap <= 0)
+ return;
+ /* Find and assign to the next empty overlap node in the list of overlaps.
+ * Empty is defined as bmi == NULL */
+ for (i = 0; i < MAX_OVERLAPS; i++)
+ {
+ if (overlaps[i].bmi == NULL)
+ {
+ overlaps[i].bmi = bmi;
+ overlaps[i].overlap = overlap;
+ break;
+ }
+ }
+}
+
+/* Calculates the overlap area between two 4x4 squares, where the first
+ * square has its upper-left corner at (b1_row, b1_col) and the second
+ * square has its upper-left corner at (b2_row, b2_col). Doesn't
+ * properly handle squares which do not overlap.
+ */
+static int block_overlap(int b1_row, int b1_col, int b2_row, int b2_col)
+{
+ const int int_top = MAX(b1_row, b2_row); // top
+ const int int_left = MAX(b1_col, b2_col); // left
+ /* Since each block is 4x4 pixels, adding 4 (Q3) to the left/top edge
+ * gives us the right/bottom edge.
+ */
+ const int int_right = MIN(b1_col + (4<<3), b2_col + (4<<3)); // right
+ const int int_bottom = MIN(b1_row + (4<<3), b2_row + (4<<3)); // bottom
+ return (int_bottom - int_top) * (int_right - int_left);
+}
+
+/* Calculates the overlap area for all blocks in a macroblock at position
+ * (mb_row, mb_col) in macroblocks, which are being overlapped by a given
+ * overlapping block at position (new_row, new_col) (in pixels, Q3). The
+ * first block being overlapped in the macroblock has position (first_blk_row,
+ * first_blk_col) in blocks relative the upper-left corner of the image.
+ */
+static void calculate_overlaps_mb(B_OVERLAP *b_overlaps, B_MODE_INFO *bmi,
+ int new_row, int new_col,
+ int mb_row, int mb_col,
+ int first_blk_row, int first_blk_col)
+{
+ /* Find the blocks within this MB (defined by mb_row, mb_col) which are
+ * overlapped by bmi and calculate and assign overlap for each of those
+ * blocks. */
+
+ /* Block coordinates relative the upper-left block */
+ const int rel_ol_blk_row = first_blk_row - mb_row * 4;
+ const int rel_ol_blk_col = first_blk_col - mb_col * 4;
+ /* If the block partly overlaps any previous MB, these coordinates
+ * can be < 0. We don't want to access blocks in previous MBs.
+ */
+ const int blk_idx = MAX(rel_ol_blk_row,0) * 4 + MAX(rel_ol_blk_col,0);
+ /* Upper left overlapping block */
+ B_OVERLAP *b_ol_ul = &(b_overlaps[blk_idx]);
+
+ /* Calculate and assign overlaps for all blocks in this MB
+ * which the motion compensated block overlaps
+ */
+ /* Avoid calculating overlaps for blocks in later MBs */
+ int end_row = MIN(4 + mb_row * 4 - first_blk_row, 2);
+ int end_col = MIN(4 + mb_col * 4 - first_blk_col, 2);
+ int row, col;
+
+ /* Check if new_row and new_col are evenly divisible by 4 (Q3),
+ * and if so we shouldn't check neighboring blocks
+ */
+ if (new_row >= 0 && (new_row & 0x1F) == 0)
+ end_row = 1;
+ if (new_col >= 0 && (new_col & 0x1F) == 0)
+ end_col = 1;
+
+ /* Check if the overlapping block partly overlaps a previous MB
+ * and if so, we're overlapping fewer blocks in this MB.
+ */
+ if (new_row < (mb_row*16)<<3)
+ end_row = 1;
+ if (new_col < (mb_col*16)<<3)
+ end_col = 1;
+
+ for (row = 0; row < end_row; ++row)
+ {
+ for (col = 0; col < end_col; ++col)
+ {
+ /* input in Q3, result in Q6 */
+ const int overlap = block_overlap(new_row, new_col,
+ (((first_blk_row + row) *
+ 4) << 3),
+ (((first_blk_col + col) *
+ 4) << 3));
+ assign_overlap(b_ol_ul[row * 4 + col].overlaps, bmi, overlap);
+ }
+ }
+}
+
+void vp8_calculate_overlaps(MB_OVERLAP *overlap_ul,
+ int mb_rows, int mb_cols,
+ B_MODE_INFO *bmi,
+ int b_row, int b_col)
+{
+ MB_OVERLAP *mb_overlap;
+ int row, col, rel_row, rel_col;
+ int new_row, new_col;
+ int end_row, end_col;
+ int overlap_b_row, overlap_b_col;
+ int overlap_mb_row, overlap_mb_col;
+
+ /* mb subpixel position */
+ row = (4 * b_row) << 3; /* Q3 */
+ col = (4 * b_col) << 3; /* Q3 */
+
+ /* reverse compensate for motion */
+ new_row = row - bmi->mv.as_mv.row;
+ new_col = col - bmi->mv.as_mv.col;
+
+ if (new_row >= ((16*mb_rows) << 3) || new_col >= ((16*mb_cols) << 3))
+ {
+ /* the new block ended up outside the frame */
+ return;
+ }
+
+ if (new_row <= (-4 << 3) || new_col <= (-4 << 3))
+ {
+ /* outside the frame */
+ return;
+ }
+ /* overlapping block's position in blocks */
+ overlap_b_row = FLOOR(new_row / 4, 3) >> 3;
+ overlap_b_col = FLOOR(new_col / 4, 3) >> 3;
+
+ /* overlapping block's MB position in MBs
+ * operations are done in Q3
+ */
+ overlap_mb_row = FLOOR((overlap_b_row << 3) / 4, 3) >> 3;
+ overlap_mb_col = FLOOR((overlap_b_col << 3) / 4, 3) >> 3;
+
+ end_row = MIN(mb_rows - overlap_mb_row, 2);
+ end_col = MIN(mb_cols - overlap_mb_col, 2);
+
+ /* Don't calculate overlap for MBs we don't overlap */
+ /* Check if the new block row starts at the last block row of the MB */
+ if (abs(new_row - ((16*overlap_mb_row) << 3)) < ((3*4) << 3))
+ end_row = 1;
+ /* Check if the new block col starts at the last block col of the MB */
+ if (abs(new_col - ((16*overlap_mb_col) << 3)) < ((3*4) << 3))
+ end_col = 1;
+
+ /* find the MB(s) this block is overlapping */
+ for (rel_row = 0; rel_row < end_row; ++rel_row)
+ {
+ for (rel_col = 0; rel_col < end_col; ++rel_col)
+ {
+ if (overlap_mb_row + rel_row < 0 ||
+ overlap_mb_col + rel_col < 0)
+ continue;
+ mb_overlap = overlap_ul + (overlap_mb_row + rel_row) * mb_cols +
+ overlap_mb_col + rel_col;
+
+ calculate_overlaps_mb(mb_overlap->overlaps, bmi,
+ new_row, new_col,
+ overlap_mb_row + rel_row,
+ overlap_mb_col + rel_col,
+ overlap_b_row + rel_row,
+ overlap_b_col + rel_col);
+ }
+ }
+}
+
+/* Estimates a motion vector given the overlapping blocks' motion vectors.
+ * Filters out all overlapping blocks which do not refer to the correct
+ * reference frame type.
+ */
+static void estimate_mv(const OVERLAP_NODE *overlaps, B_MODE_INFO *bmi)
+{
+ int i;
+ int overlap_sum = 0;
+ int row_acc = 0;
+ int col_acc = 0;
+
+ bmi->mv.as_int = 0;
+ for (i=0; i < MAX_OVERLAPS; ++i)
+ {
+ if (overlaps[i].bmi == NULL)
+ break;
+ col_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.col;
+ row_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.row;
+ overlap_sum += overlaps[i].overlap;
+ }
+ if (overlap_sum > 0)
+ {
+ /* Q9 / Q6 = Q3 */
+ bmi->mv.as_mv.col = col_acc / overlap_sum;
+ bmi->mv.as_mv.row = row_acc / overlap_sum;
+ bmi->mode = NEW4X4;
+ }
+ else
+ {
+ bmi->mv.as_mv.col = 0;
+ bmi->mv.as_mv.row = 0;
+ bmi->mode = NEW4X4;
+ }
+}
+
+/* Estimates all motion vectors for a macroblock given the lists of
+ * overlaps for each block. Decides whether or not the MVs must be clamped.
+ */
+static void estimate_mb_mvs(const B_OVERLAP *block_overlaps,
+ MODE_INFO *mi,
+ int mb_to_left_edge,
+ int mb_to_right_edge,
+ int mb_to_top_edge,
+ int mb_to_bottom_edge)
+{
+ int i;
+ int non_zero_count = 0;
+ MV * const filtered_mv = &(mi->mbmi.mv.as_mv);
+ B_MODE_INFO * const bmi = mi->bmi;
+ filtered_mv->col = 0;
+ filtered_mv->row = 0;
+ for (i = 0; i < 16; ++i)
+ {
+ /* Estimate vectors for all blocks which are overlapped by this type */
+ /* Interpolate/extrapolate the rest of the block's MVs */
+ estimate_mv(block_overlaps[i].overlaps, &(bmi[i]));
+ mi->mbmi.need_to_clamp_mvs = vp8_check_mv_bounds(&bmi[i].mv,
+ mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ if (bmi[i].mv.as_int != 0)
+ {
+ ++non_zero_count;
+ filtered_mv->col += bmi[i].mv.as_mv.col;
+ filtered_mv->row += bmi[i].mv.as_mv.row;
+ }
+ }
+ if (non_zero_count > 0)
+ {
+ filtered_mv->col /= non_zero_count;
+ filtered_mv->row /= non_zero_count;
+ }
+}
+
+static void calc_prev_mb_overlaps(MB_OVERLAP *overlaps, MODE_INFO *prev_mi,
+ int mb_row, int mb_col,
+ int mb_rows, int mb_cols)
+{
+ int sub_row;
+ int sub_col;
+ for (sub_row = 0; sub_row < 4; ++sub_row)
+ {
+ for (sub_col = 0; sub_col < 4; ++sub_col)
+ {
+ vp8_calculate_overlaps(
+ overlaps, mb_rows, mb_cols,
+ &(prev_mi->bmi[sub_row * 4 + sub_col]),
+ 4 * mb_row + sub_row,
+ 4 * mb_col + sub_col);
+ }
+ }
+}
+
+/* Estimate all missing motion vectors. This function does the same as the one
+ * above, but has different input arguments. */
+static void estimate_missing_mvs(MB_OVERLAP *overlaps,
+ MODE_INFO *mi, MODE_INFO *prev_mi,
+ int mb_rows, int mb_cols,
+ unsigned int first_corrupt)
+{
+ int mb_row, mb_col;
+ vpx_memset(overlaps, 0, sizeof(MB_OVERLAP) * mb_rows * mb_cols);
+ /* First calculate the overlaps for all blocks */
+ for (mb_row = 0; mb_row < mb_rows; ++mb_row)
+ {
+ for (mb_col = 0; mb_col < mb_cols; ++mb_col)
+ {
+ /* We're only able to use blocks referring to the last frame
+ * when extrapolating new vectors.
+ */
+ if (prev_mi->mbmi.ref_frame == LAST_FRAME)
+ {
+ calc_prev_mb_overlaps(overlaps, prev_mi,
+ mb_row, mb_col,
+ mb_rows, mb_cols);
+ }
+ ++prev_mi;
+ }
+ ++prev_mi;
+ }
+
+ mb_row = first_corrupt / mb_cols;
+ mb_col = first_corrupt - mb_row * mb_cols;
+ mi += mb_row*(mb_cols + 1) + mb_col;
+ /* Go through all macroblocks in the current image with missing MVs
+ * and calculate new MVs using the overlaps.
+ */
+ for (; mb_row < mb_rows; ++mb_row)
+ {
+ int mb_to_top_edge = -((mb_row * 16)) << 3;
+ int mb_to_bottom_edge = ((mb_rows - 1 - mb_row) * 16) << 3;
+ for (; mb_col < mb_cols; ++mb_col)
+ {
+ int mb_to_left_edge = -((mb_col * 16) << 3);
+ int mb_to_right_edge = ((mb_cols - 1 - mb_col) * 16) << 3;
+ const B_OVERLAP *block_overlaps =
+ overlaps[mb_row*mb_cols + mb_col].overlaps;
+ mi->mbmi.ref_frame = LAST_FRAME;
+ mi->mbmi.mode = SPLITMV;
+ mi->mbmi.uv_mode = DC_PRED;
+ mi->mbmi.partitioning = 3;
+ estimate_mb_mvs(block_overlaps,
+ mi,
+ mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ ++mi;
+ }
+ mb_col = 0;
+ ++mi;
+ }
+}
+
+void vp8_estimate_missing_mvs(VP8D_COMP *pbi)
+{
+ VP8_COMMON * const pc = &pbi->common;
+ estimate_missing_mvs(pbi->overlaps,
+ pc->mi, pc->prev_mi,
+ pc->mb_rows, pc->mb_cols,
+ pbi->mvs_corrupt_from_mb);
+}
+
+static void assign_neighbor(EC_BLOCK *neighbor, MODE_INFO *mi, int block_idx)
+{
+ assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);
+ neighbor->ref_frame = mi->mbmi.ref_frame;
+ neighbor->mv = mi->bmi[block_idx].mv.as_mv;
+}
+
+/* Finds the neighboring blocks of a macroblock. In the general case
+ * 20 blocks are found. If fewer blocks are found due to
+ * image boundaries, those positions in the EC_BLOCK array are left "empty".
+ * The neighbors are enumerated with the upper-left neighbor as the first
+ * element, the second element refers to the neighbor to right of the previous
+ * neighbor, and so on. The last element refers to the neighbor below the first
+ * neighbor.
+ */
+static void find_neighboring_blocks(MODE_INFO *mi,
+ EC_BLOCK *neighbors,
+ int mb_row, int mb_col,
+ int mb_rows, int mb_cols,
+ int mi_stride)
+{
+ int i = 0;
+ int j;
+ if (mb_row > 0)
+ {
+ /* upper left */
+ if (mb_col > 0)
+ assign_neighbor(&neighbors[i], mi - mi_stride - 1, 15);
+ ++i;
+ /* above */
+ for (j = 12; j < 16; ++j, ++i)
+ assign_neighbor(&neighbors[i], mi - mi_stride, j);
+ }
+ else
+ i += 5;
+ if (mb_col < mb_cols - 1)
+ {
+ /* upper right */
+ if (mb_row > 0)
+ assign_neighbor(&neighbors[i], mi - mi_stride + 1, 12);
+ ++i;
+ /* right */
+ for (j = 0; j <= 12; j += 4, ++i)
+ assign_neighbor(&neighbors[i], mi + 1, j);
+ }
+ else
+ i += 5;
+ if (mb_row < mb_rows - 1)
+ {
+ /* lower right */
+ if (mb_col < mb_cols - 1)
+ assign_neighbor(&neighbors[i], mi + mi_stride + 1, 0);
+ ++i;
+ /* below */
+ for (j = 0; j < 4; ++j, ++i)
+ assign_neighbor(&neighbors[i], mi + mi_stride, j);
+ }
+ else
+ i += 5;
+ if (mb_col > 0)
+ {
+ /* lower left */
+ if (mb_row < mb_rows - 1)
+ assign_neighbor(&neighbors[i], mi + mi_stride - 1, 4);
+ ++i;
+ /* left */
+ for (j = 3; j < 16; j += 4, ++i)
+ {
+ assign_neighbor(&neighbors[i], mi - 1, j);
+ }
+ }
+ else
+ i += 5;
+ assert(i == 20);
+}
+
+/* Calculates which reference frame type is dominating among the neighbors */
+static MV_REFERENCE_FRAME dominant_ref_frame(EC_BLOCK *neighbors)
+{
+ /* Default to referring to "skip" */
+ MV_REFERENCE_FRAME dom_ref_frame = LAST_FRAME;
+ int max_ref_frame_cnt = 0;
+ int ref_frame_cnt[MAX_REF_FRAMES] = {0};
+ int i;
+ /* Count neighboring reference frames */
+ for (i = 0; i < NUM_NEIGHBORS; ++i)
+ {
+ if (neighbors[i].ref_frame < MAX_REF_FRAMES &&
+ neighbors[i].ref_frame != INTRA_FRAME)
+ ++ref_frame_cnt[neighbors[i].ref_frame];
+ }
+ /* Find maximum */
+ for (i = 0; i < MAX_REF_FRAMES; ++i)
+ {
+ if (ref_frame_cnt[i] > max_ref_frame_cnt)
+ {
+ dom_ref_frame = i;
+ max_ref_frame_cnt = ref_frame_cnt[i];
+ }
+ }
+ return dom_ref_frame;
+}
+
+/* Interpolates all motion vectors for a macroblock from the neighboring blocks'
+ * motion vectors.
+ */
+static void interpolate_mvs(MACROBLOCKD *mb,
+ EC_BLOCK *neighbors,
+ MV_REFERENCE_FRAME dom_ref_frame)
+{
+ int row, col, i;
+ MODE_INFO * const mi = mb->mode_info_context;
+ /* Table with the position of the neighboring blocks relative the position
+ * of the upper left block of the current MB. Starting with the upper left
+ * neighbor and going to the right.
+ */
+ const EC_POS neigh_pos[NUM_NEIGHBORS] = {
+ {-1,-1}, {-1,0}, {-1,1}, {-1,2}, {-1,3},
+ {-1,4}, {0,4}, {1,4}, {2,4}, {3,4},
+ {4,4}, {4,3}, {4,2}, {4,1}, {4,0},
+ {4,-1}, {3,-1}, {2,-1}, {1,-1}, {0,-1}
+ };
+ for (row = 0; row < 4; ++row)
+ {
+ for (col = 0; col < 4; ++col)
+ {
+ int w_sum = 0;
+ int mv_row_sum = 0;
+ int mv_col_sum = 0;
+ int_mv * const mv = &(mi->bmi[row*4 + col].mv);
+ for (i = 0; i < NUM_NEIGHBORS; ++i)
+ {
+ /* Calculate the weighted sum of neighboring MVs referring
+ * to the dominant frame type.
+ */
+ const int w = weights_q7[abs(row - neigh_pos[i].row)]
+ [abs(col - neigh_pos[i].col)];
+ if (neighbors[i].ref_frame != dom_ref_frame)
+ continue;
+ w_sum += w;
+ /* Q7 * Q3 = Q10 */
+ mv_row_sum += w*neighbors[i].mv.row;
+ mv_col_sum += w*neighbors[i].mv.col;
+ }
+ if (w_sum > 0)
+ {
+ /* Avoid division by zero.
+ * Normalize with the sum of the coefficients
+ * Q3 = Q10 / Q7
+ */
+ mv->as_mv.row = mv_row_sum / w_sum;
+ mv->as_mv.col = mv_col_sum / w_sum;
+ mi->bmi[row*4 + col].mode = NEW4X4;
+ mi->mbmi.need_to_clamp_mvs = vp8_check_mv_bounds(mv,
+ mb->mb_to_left_edge,
+ mb->mb_to_right_edge,
+ mb->mb_to_top_edge,
+ mb->mb_to_bottom_edge);
+ }
+ }
+ }
+}
+
+void vp8_interpolate_motion(MACROBLOCKD *mb,
+ int mb_row, int mb_col,
+ int mb_rows, int mb_cols,
+ int mi_stride)
+{
+ /* Find relevant neighboring blocks */
+ EC_BLOCK neighbors[NUM_NEIGHBORS];
+ MV_REFERENCE_FRAME dom_ref_frame;
+ int i;
+ /* Initialize the array. MAX_REF_FRAMES is interpreted as "doesn't exist" */
+ for (i = 0; i < NUM_NEIGHBORS; ++i)
+ {
+ neighbors[i].ref_frame = MAX_REF_FRAMES;
+ neighbors[i].mv.row = neighbors[i].mv.col = 0;
+ }
+ find_neighboring_blocks(mb->mode_info_context,
+ neighbors,
+ mb_row, mb_col,
+ mb_rows, mb_cols,
+ mb->mode_info_stride);
+ /* Determine the dominant block type */
+ dom_ref_frame = dominant_ref_frame(neighbors);
+ /* Interpolate MVs for the missing blocks
+ * from the dominating MVs */
+ interpolate_mvs(mb, neighbors, dom_ref_frame);
+
+ mb->mode_info_context->mbmi.ref_frame = dom_ref_frame;
+ mb->mode_info_context->mbmi.mode = SPLITMV;
+ mb->mode_info_context->mbmi.uv_mode = DC_PRED;
+ mb->mode_info_context->mbmi.partitioning = 3;
+}
+
+void vp8_conceal_corrupt_mb(MACROBLOCKD *xd)
+{
+ /* This macroblock has corrupt residual, use the motion compensated
+ image (predictor) for concealment */
+ vp8_recon_copy16x16(xd->predictor, 16, xd->dst.y_buffer, xd->dst.y_stride);
+ vp8_recon_copy8x8(xd->predictor + 256, 8,
+ xd->dst.u_buffer, xd->dst.uv_stride);
+ vp8_recon_copy8x8(xd->predictor + 320, 8,
+ xd->dst.v_buffer, xd->dst.uv_stride);
+}
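
A tiny worked example of the fixed-point averaging performed by estimate_mv() above, using assumed overlap areas and motion vectors: overlap areas are carried in Q6 (the product of two Q3 pixel coordinates), motion vectors in Q3 (1/8 pel), so each product is Q9 and dividing by the Q6 area sum lands back in Q3.

#include <stdio.h>

int main(void)
{
    /* Two overlapping blocks: areas of 128 and 64 pixels (Q6),
     * column MVs of +0.5 pel and -0.25 pel (Q3).
     */
    const int overlap[2] = { 128 << 6, 64 << 6 };
    const int mv_col[2]  = { 4, -2 };
    int acc = 0, sum = 0, i;

    for (i = 0; i < 2; i++)
    {
        acc += overlap[i] * mv_col[i];   /* Q6 * Q3 = Q9 */
        sum += overlap[i];               /* Q6           */
    }
    /* Q9 / Q6 = Q3; prints 2, i.e. +0.25 pel: the larger overlap dominates */
    printf("estimated col MV: %d (Q3)\n", acc / sum);
    return 0;
}
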
diff --git a/vp8/decoder/error_concealment.h b/vp8/decoder/error_concealment.h
new file mode 100644
index 000000000..65ae9d9be
--- /dev/null
+++ b/vp8/decoder/error_concealment.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef ERROR_CONCEALMENT_H
+#define ERROR_CONCEALMENT_H
+
+#include "onyxd_int.h"
+#include "ec_types.h"
+
+/* Allocate memory for the overlap lists */
+int vp8_alloc_overlap_lists(VP8D_COMP *pbi);
+
+/* Deallocate the overlap lists */
+void vp8_de_alloc_overlap_lists(VP8D_COMP *pbi);
+
+/* Estimate all missing motion vectors. */
+void vp8_estimate_missing_mvs(VP8D_COMP *pbi);
+
+/* Functions for spatial MV interpolation */
+
+/* Interpolates all motion vectors for a macroblock mb at position
+ * (mb_row, mb_col). */
+void vp8_interpolate_motion(MACROBLOCKD *mb,
+ int mb_row, int mb_col,
+ int mb_rows, int mb_cols,
+ int mi_stride);
+
+/* Conceal a macroblock with corrupt residual.
+ * Copies the prediction signal to the reconstructed image.
+ */
+void vp8_conceal_corrupt_mb(MACROBLOCKD *xd);
+
+#endif
diff --git a/vp8/decoder/onyxd_if.c b/vp8/decoder/onyxd_if.c
index 1e83ab542..705fb334f 100644
--- a/vp8/decoder/onyxd_if.c
+++ b/vp8/decoder/onyxd_if.c
@@ -30,6 +30,9 @@
#include "vp8/common/systemdependent.h"
#include "vpx_ports/vpx_timer.h"
#include "detokenize.h"
+#if CONFIG_ERROR_CONCEALMENT
+#include "error_concealment.h"
+#endif
#if ARCH_ARM
#include "vpx_ports/arm.h"
#endif
@@ -96,6 +99,13 @@ VP8D_PTR vp8dx_create_decompressor(VP8D_CONFIG *oxcf)
}
pbi->common.error.setjmp = 0;
+
+#if CONFIG_ERROR_CONCEALMENT
+ pbi->ec_enabled = oxcf->error_concealment;
+#else
+ pbi->ec_enabled = 0;
+#endif
+
return (VP8D_PTR) pbi;
}
@@ -112,6 +122,9 @@ void vp8dx_remove_decompressor(VP8D_PTR ptr)
vp8mt_de_alloc_temp_buffers(pbi, pbi->common.mb_rows);
vp8_decoder_remove_threads(pbi);
#endif
+#if CONFIG_ERROR_CONCEALMENT
+ vp8_de_alloc_overlap_lists(pbi);
+#endif
vp8_remove_common(&pbi->common);
vpx_free(pbi);
}
@@ -271,11 +284,17 @@ int vp8dx_receive_compressed_data(VP8D_PTR ptr, unsigned long size, const unsign
*/
cm->yv12_fb[cm->lst_fb_idx].corrupted = 1;
- /* Signal that we have no frame to show. */
- cm->show_frame = 0;
+ /* If error concealment is disabled we won't signal missing frames to
+ * the decoder.
+ */
+ if (!pbi->ec_enabled)
+ {
+ /* Signal that we have no frame to show. */
+ cm->show_frame = 0;
- /* Nothing more to do. */
- return 0;
+ /* Nothing more to do. */
+ return 0;
+ }
}
@@ -388,6 +407,28 @@ int vp8dx_receive_compressed_data(VP8D_PTR ptr, unsigned long size, const unsign
vp8_clear_system_state();
+#if CONFIG_ERROR_CONCEALMENT
+ /* swap the mode infos to storage for future error concealment */
+ if (pbi->ec_enabled && pbi->common.prev_mi)
+ {
+ const MODE_INFO* tmp = pbi->common.prev_mi;
+ int row, col;
+ pbi->common.prev_mi = pbi->common.mi;
+ pbi->common.mi = tmp;
+
+ /* Propagate the segment_ids to the next frame */
+ for (row = 0; row < pbi->common.mb_rows; ++row)
+ {
+ for (col = 0; col < pbi->common.mb_cols; ++col)
+ {
+ const int i = row*pbi->common.mode_info_stride + col;
+ pbi->common.mi[i].mbmi.segment_id =
+ pbi->common.prev_mi[i].mbmi.segment_id;
+ }
+ }
+ }
+#endif
+
/*vp8_print_modes_and_motion_vectors( cm->mi, cm->mb_rows,cm->mb_cols, cm->current_video_frame);*/
if (cm->show_frame)
diff --git a/vp8/decoder/onyxd_int.h b/vp8/decoder/onyxd_int.h
index 9b9175628..ea6de3dda 100644
--- a/vp8/decoder/onyxd_int.h
+++ b/vp8/decoder/onyxd_int.h
@@ -17,6 +17,9 @@
#include "vp8/common/onyxc_int.h"
#include "vp8/common/threading.h"
#include "dequantize.h"
+#if CONFIG_ERROR_CONCEALMENT
+#include "ec_types.h"
+#endif
typedef struct
{
@@ -128,6 +131,13 @@ typedef struct VP8Decompressor
vp8_prob prob_gf;
vp8_prob prob_skip_false;
+#if CONFIG_ERROR_CONCEALMENT
+ MB_OVERLAP *overlaps;
+ /* the mb num from which modes and mvs (first partition) are corrupt */
+ unsigned int mvs_corrupt_from_mb;
+#endif
+ int ec_enabled;
+
} VP8D_COMP;
int vp8_decode_frame(VP8D_COMP *cpi);
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index 3a4a30228..fbdc89e87 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -79,7 +79,6 @@ typedef struct
int errorperbit;
int sadperbit16;
int sadperbit4;
- int errthresh;
int rddiv;
int rdmult;
unsigned int * mb_activity_ptr;
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 6e5282b92..984776fdb 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -60,370 +60,6 @@ unsigned int uv_modes[4] = {0, 0, 0, 0};
unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#endif
-static const int qrounding_factors[129] =
-{
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48
-};
-
-static const int qzbin_factors[129] =
-{
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80
-};
-
-static const int qrounding_factors_y2[129] =
-{
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48
-};
-
-static const int qzbin_factors_y2[129] =
-{
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 84, 84, 84, 84, 84, 84, 84, 84,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80, 80, 80, 80, 80, 80, 80, 80,
- 80
-};
-
-#define EXACT_QUANT
-#ifdef EXACT_QUANT
-static void vp8cx_invert_quant(int improved_quant, short *quant,
- unsigned char *shift, short d)
-{
- if(improved_quant)
- {
- unsigned t;
- int l;
- t = d;
- for(l = 0; t > 1; l++)
- t>>=1;
- t = 1 + (1<<(16+l))/d;
- *quant = (short)(t - (1<<16));
- *shift = l;
- }
- else
- {
- *quant = (1 << 16) / d;
- *shift = 0;
- }
-}
-
-void vp8cx_init_quantizer(VP8_COMP *cpi)
-{
- int i;
- int quant_val;
- int Q;
-
- int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
-
- for (Q = 0; Q < QINDEX_RANGE; Q++)
- {
- // dc values
- quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
- cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
- vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
- cpi->Y1quant_shift[Q] + 0, quant_val);
- cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.Y1dequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
- cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
- vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
- cpi->Y2quant_shift[Q] + 0, quant_val);
- cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
- cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
- cpi->common.Y2dequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
- cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
- vp8cx_invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
- cpi->UVquant_shift[Q] + 0, quant_val);
- cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
- cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.UVdequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- // all the ac values = ;
- for (i = 1; i < 16; i++)
- {
- int rc = vp8_default_zig_zag1d[i];
-
- quant_val = vp8_ac_yquant(Q);
- cpi->Y1quant_fast[Q][rc] = (1 << 16) / quant_val;
- vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + rc,
- cpi->Y1quant_shift[Q] + rc, quant_val);
- cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.Y1dequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
- quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
- cpi->Y2quant_fast[Q][rc] = (1 << 16) / quant_val;
- vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + rc,
- cpi->Y2quant_shift[Q] + rc, quant_val);
- cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
- cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
- cpi->common.Y2dequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
- quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
- cpi->UVquant_fast[Q][rc] = (1 << 16) / quant_val;
- vp8cx_invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + rc,
- cpi->UVquant_shift[Q] + rc, quant_val);
- cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.UVdequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
- }
- }
-}
-#else
-void vp8cx_init_quantizer(VP8_COMP *cpi)
-{
- int i;
- int quant_val;
- int Q;
-
- int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
-
- for (Q = 0; Q < QINDEX_RANGE; Q++)
- {
- // dc values
- quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
- cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
- cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.Y1dequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
- cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
- cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
- cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
- cpi->common.Y2dequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
- cpi->UVquant[Q][0] = (1 << 16) / quant_val;
- cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
- cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.UVdequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- // all the ac values = ;
- for (i = 1; i < 16; i++)
- {
- int rc = vp8_default_zig_zag1d[i];
-
- quant_val = vp8_ac_yquant(Q);
- cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
- cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.Y1dequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
- quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
- cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
- cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
- cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
- cpi->common.Y2dequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
- quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
- cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
- cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.UVdequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
- }
- }
-}
-#endif
-void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
-{
- int i;
- int QIndex;
- MACROBLOCKD *xd = &x->e_mbd;
- int zbin_extra;
-
- // Select the baseline MB Q index.
- if (xd->segmentation_enabled)
- {
- // Abs Value
- if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
-
- QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
- // Delta Value
- else
- {
- QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
- QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; // Clamp to valid range
- }
- }
- else
- QIndex = cpi->common.base_qindex;
-
- // Y
- zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
- ( cpi->zbin_over_quant +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
-
- for (i = 0; i < 16; i++)
- {
- x->block[i].quant = cpi->Y1quant[QIndex];
- x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
- x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
- x->block[i].zbin = cpi->Y1zbin[QIndex];
- x->block[i].round = cpi->Y1round[QIndex];
- x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
- x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
- x->block[i].zbin_extra = (short)zbin_extra;
- }
-
- // UV
- zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
- ( cpi->zbin_over_quant +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
-
- for (i = 16; i < 24; i++)
- {
- x->block[i].quant = cpi->UVquant[QIndex];
- x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
- x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
- x->block[i].zbin = cpi->UVzbin[QIndex];
- x->block[i].round = cpi->UVround[QIndex];
- x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
- x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
- x->block[i].zbin_extra = (short)zbin_extra;
- }
-
- // Y2
- zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
- ( (cpi->zbin_over_quant / 2) +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
-
- x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
- x->block[24].quant = cpi->Y2quant[QIndex];
- x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
- x->block[24].zbin = cpi->Y2zbin[QIndex];
- x->block[24].round = cpi->Y2round[QIndex];
- x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
- x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
- x->block[24].zbin_extra = (short)zbin_extra;
-
- /* save this macroblock QIndex for vp8_update_zbin_extra() */
- x->q_index = QIndex;
-}
-void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
-{
- int i;
- int QIndex = x->q_index;
- int zbin_extra;
-
- // Y
- zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
- ( cpi->zbin_over_quant +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
- for (i = 0; i < 16; i++)
- {
- x->block[i].zbin_extra = (short)zbin_extra;
- }
-
- // UV
- zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
- ( cpi->zbin_over_quant +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
-
- for (i = 16; i < 24; i++)
- {
- x->block[i].zbin_extra = (short)zbin_extra;
- }
-
- // Y2
- zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
- ( (cpi->zbin_over_quant / 2) +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
-
- x->block[24].zbin_extra = (short)zbin_extra;
-}
-
-void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
-{
- // Clear Zbin mode boost for default case
- cpi->zbin_mode_boost = 0;
-
- // MB level quantizer setup
- vp8cx_mb_init_quantizer(cpi, &cpi->mb);
-}
-
/* activity_avg must be positive, or flat regions could get a zero weight
* (infinite lambda), which confounds analysis.
@@ -444,7 +80,7 @@ static const unsigned char VP8_VAR_OFFS[16]=
// Original activity measure from Tim T's code.
-unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
+static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
{
unsigned int act;
unsigned int sse;
@@ -474,7 +110,7 @@ unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
}
// Stub for alternative experimental activity measures.
-unsigned int alt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
+static unsigned int alt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
{
unsigned int mb_activity = VP8_ACTIVITY_AVG_MIN;
@@ -492,7 +128,7 @@ unsigned int alt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
// Measure the activity of the current macroblock
// What we measure here is TBD so abstracted to this function
-unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
+static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
{
unsigned int mb_activity;
@@ -511,7 +147,7 @@ unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
}
// Calculate an "average" mb activity value for the frame
-void calc_av_activity( VP8_COMP *cpi, INT64 activity_sum )
+static void calc_av_activity( VP8_COMP *cpi, INT64 activity_sum )
{
// Simple mean for now
cpi->activity_avg = (unsigned int)(activity_sum/cpi->common.MBs);
@@ -521,7 +157,7 @@ void calc_av_activity( VP8_COMP *cpi, INT64 activity_sum )
#define OUTPUT_NORM_ACT_STATS 0
// Calculate a normalized activity value for each mb
-void calc_norm_activity( VP8_COMP *cpi, MACROBLOCK *x )
+static void calc_norm_activity( VP8_COMP *cpi, MACROBLOCK *x )
{
VP8_COMMON *const cm = & cpi->common;
int mb_row, mb_col;
@@ -585,7 +221,7 @@ void calc_norm_activity( VP8_COMP *cpi, MACROBLOCK *x )
// Loop through all MBs. Note activity of each, average activity and
// calculate a normalized activity for each
-void build_activity_map( VP8_COMP *cpi )
+static void build_activity_map( VP8_COMP *cpi )
{
MACROBLOCK *const x = & cpi->mb;
VP8_COMMON *const cm = & cpi->common;
@@ -926,9 +562,6 @@ void vp8_encode_frame(VP8_COMP *cpi)
x->vector_range = 32;
- // Count of MBs using the alternate Q if any
- cpi->alt_qcount = 0;
-
// Reset frame count of inter 0,0 motion vector useage.
cpi->inter_zz_count = 0;
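
Note on the large removal at the top of this section: the per-frame and per-MB quantizer setup (vp8cx_frame_init_quantizer, vp8cx_mb_init_quantizer, vp8_update_zbin_extra) leaves encodeframe.c here; the diffstat's +418 lines in quantize.c suggests it is relocated rather than deleted. As a reminder of what that code does, a minimal sketch of the MB Q-index selection it performs is shown below; it restates only logic visible in the removed lines, and the helper name is hypothetical.

    /* Sketch only: restates the Q-index selection from the removed
     * vp8cx_mb_init_quantizer(); the helper name is hypothetical. */
    static int mb_q_index(const VP8_COMP *cpi, const MACROBLOCKD *xd)
    {
        int QIndex;

        if (xd->segmentation_enabled)
        {
            if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
                /* Segment carries an absolute Q value. */
                QIndex = xd->segment_feature_data[MB_LVL_ALT_Q]
                             [xd->mode_info_context->mbmi.segment_id];
            else
            {
                /* Segment carries a delta against the frame base Q. */
                QIndex = cpi->common.base_qindex +
                         xd->segment_feature_data[MB_LVL_ALT_Q]
                             [xd->mode_info_context->mbmi.segment_id];
                QIndex = (QIndex < 0) ? 0 : (QIndex > MAXQ) ? MAXQ : QIndex;
            }
        }
        else
            QIndex = cpi->common.base_qindex;

        return QIndex;
    }

The per-plane zbin_extra bias this index feeds is, as in the removed code, (dequant[QIndex][1] * (zbin_over_quant + zbin_mode_boost + act_zbin_adj)) >> 7.
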
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index 5d52c9fc4..307064153 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -21,19 +21,22 @@
#include "vp8/common/g_common.h"
#include "encodeintra.h"
-#define intra4x4ibias_rate 128
-#define intra4x4pbias_rate 256
-
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
#else
#define IF_RTCD(x) NULL
#endif
-void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode)
+
+
+void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
+ MACROBLOCK *x, int ib)
{
+ BLOCKD *b = &x->e_mbd.block[ib];
+ BLOCK *be = &x->block[ib];
+
RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
- (b, best_mode, b->predictor);
+ (b, b->bmi.mode, b->predictor);
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
@@ -54,13 +57,7 @@ void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb)
vp8_intra_prediction_down_copy(x);
for (i = 0; i < 16; i++)
- {
- BLOCK *be = &mb->block[i];
- BLOCKD *b = &x->block[i];
-
- vp8_encode_intra4x4block(rtcd, mb, be, b, b->bmi.mode);
- }
-
+ vp8_encode_intra4x4block(rtcd, mb, i);
return;
}
diff --git a/vp8/encoder/encodeintra.h b/vp8/encoder/encodeintra.h
index 40930bc42..5861fd1fc 100644
--- a/vp8/encoder/encodeintra.h
+++ b/vp8/encoder/encodeintra.h
@@ -16,7 +16,6 @@
void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *, MACROBLOCK *mb);
-void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode);
-void vp8_encode_intra4x4block_rd(const VP8_ENCODER_RTCD *, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode);
-
+void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
+ MACROBLOCK *x, int ib);
#endif
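
The two hunks above narrow vp8_encode_intra4x4block() to take a block index and to read the prediction mode from b->bmi.mode instead of a best_mode argument. A minimal sketch of the new calling pattern, taken from the firstpass.c hunk further down; the wrapping function is illustrative only.

    /* Illustrative caller for the new signature: the mode is stored in the
     * block's bmi before the call rather than passed as a parameter. */
    static void encode_mb_4x4_dc_pred(VP8_COMP *cpi, MACROBLOCK *x)
    {
        int i;

        for (i = 0; i < 16; i++)
        {
            x->e_mbd.block[i].bmi.mode = B_DC_PRED;
            vp8_encode_intra4x4block(IF_RTCD(&cpi->rtcd), x, i);
        }
    }
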
diff --git a/vp8/encoder/ethreading.c b/vp8/encoder/ethreading.c
index 281efa4ff..a03ff951f 100644
--- a/vp8/encoder/ethreading.c
+++ b/vp8/encoder/ethreading.c
@@ -302,7 +302,6 @@ static void setup_mbby_copy(MACROBLOCK *mbdst, MACROBLOCK *mbsrc)
z->sadperbit16 = x->sadperbit16;
z->sadperbit4 = x->sadperbit4;
- z->errthresh = x->errthresh;
/*
z->mv_col_min = x->mv_col_min;
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 7334a465d..7cf50415d 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -100,10 +100,8 @@ static int encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
{
for (i = 0; i < 16; i++)
{
- BLOCKD *b = &x->e_mbd.block[i];
- BLOCK *be = &x->block[i];
-
- vp8_encode_intra4x4block(IF_RTCD(&cpi->rtcd), x, be, b, B_DC_PRED);
+ x->e_mbd.block[i].bmi.mode = B_DC_PRED;
+ vp8_encode_intra4x4block(IF_RTCD(&cpi->rtcd), x, i);
}
}
@@ -115,22 +113,22 @@ static int encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
// Resets the first pass file to the given position using a relative seek from the current position
static void reset_fpf_position(VP8_COMP *cpi, FIRSTPASS_STATS *Position)
{
- cpi->stats_in = Position;
+ cpi->twopass.stats_in = Position;
}
static int lookup_next_frame_stats(VP8_COMP *cpi, FIRSTPASS_STATS *next_frame)
{
- if (cpi->stats_in >= cpi->stats_in_end)
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end)
return EOF;
- *next_frame = *cpi->stats_in;
+ *next_frame = *cpi->twopass.stats_in;
return 1;
}
// Calculate a modified Error used in distributing bits between easier and harder frames
static double calculate_modified_err(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
{
- double av_err = cpi->total_stats->ssim_weighted_pred_err;
+ double av_err = cpi->twopass.total_stats->ssim_weighted_pred_err;
double this_err = this_frame->ssim_weighted_pred_err;
double modified_err;
@@ -142,7 +140,7 @@ static double calculate_modified_err(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
//FIRSTPASS_STATS next_frame;
//FIRSTPASS_STATS *start_pos;
- /*start_pos = cpi->stats_in;
+ /*start_pos = cpi->twopass.stats_in;
sum_iiratio = 0.0;
i = 0;
while ( (i < 1) && input_stats(cpi,&next_frame) != EOF )
@@ -155,7 +153,7 @@ static double calculate_modified_err(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
}
if ( i > 0 )
{
- relative_next_iiratio = sum_iiratio / DOUBLE_DIVIDE_CHECK(cpi->avg_iiratio * (double)i);
+ relative_next_iiratio = sum_iiratio / DOUBLE_DIVIDE_CHECK(cpi->twopass.avg_iiratio * (double)i);
}
else
{
@@ -269,7 +267,7 @@ static int frame_max_bits(VP8_COMP *cpi)
else
{
// For VBR base this on the bits and frames left plus the two_pass_vbrmax_section rate passed in by the user
- max_bits = (int)(((double)cpi->bits_left / (cpi->total_stats->count - (double)cpi->common.current_video_frame)) * ((double)cpi->oxcf.two_pass_vbrmax_section / 100.0));
+ max_bits = (int)(((double)cpi->twopass.bits_left / (cpi->twopass.total_stats->count - (double)cpi->common.current_video_frame)) * ((double)cpi->oxcf.two_pass_vbrmax_section / 100.0));
}
// Trap case where we are out of bits
@@ -324,11 +322,12 @@ static void output_stats(const VP8_COMP *cpi,
static int input_stats(VP8_COMP *cpi, FIRSTPASS_STATS *fps)
{
- if (cpi->stats_in >= cpi->stats_in_end)
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end)
return EOF;
- *fps = *cpi->stats_in;
- cpi->stats_in = (void*)((char *)cpi->stats_in + sizeof(FIRSTPASS_STATS));
+ *fps = *cpi->twopass.stats_in;
+ cpi->twopass.stats_in =
+ (void*)((char *)cpi->twopass.stats_in + sizeof(FIRSTPASS_STATS));
return 1;
}
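
For reference, the stats_in / stats_in_end pair used above behaves like a cursor over an in-memory array of FIRSTPASS_STATS records: input_stats() copies the current record and advances the cursor, reset_fpf_position() rewinds it. The hunk only moves that cursor into the twopass sub-struct. A small sketch of the pattern follows; the helper is illustrative, not part of the change.

    /* Sketch of the cursor pattern; assumes stats_in..stats_in_end bound a
     * contiguous FIRSTPASS_STATS array, as implied by input_stats() above. */
    static int count_remaining_stats(VP8_COMP *cpi)
    {
        FIRSTPASS_STATS frame;
        FIRSTPASS_STATS *start_pos = cpi->twopass.stats_in;  /* note position */
        int n = 0;

        while (input_stats(cpi, &frame) != EOF)  /* copies and advances cursor */
            n++;

        reset_fpf_position(cpi, start_pos);      /* rewind for the real pass */
        return n;
    }
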
@@ -396,12 +395,12 @@ static void avg_stats(FIRSTPASS_STATS *section)
void vp8_init_first_pass(VP8_COMP *cpi)
{
- zero_stats(cpi->total_stats);
+ zero_stats(cpi->twopass.total_stats);
}
void vp8_end_first_pass(VP8_COMP *cpi)
{
- output_stats(cpi, cpi->output_pkt_list, cpi->total_stats);
+ output_stats(cpi, cpi->output_pkt_list, cpi->twopass.total_stats);
}
static void zz_motion_search( VP8_COMP *cpi, MACROBLOCK * x, YV12_BUFFER_CONFIG * recon_buffer, int * best_motion_err, int recon_yoffset )
@@ -796,17 +795,17 @@ void vp8_first_pass(VP8_COMP *cpi)
- cpi->source->ts_start;
// don't want to do output stats with a stack variable!
- memcpy(cpi->this_frame_stats,
+ memcpy(cpi->twopass.this_frame_stats,
&fps,
sizeof(FIRSTPASS_STATS));
- output_stats(cpi, cpi->output_pkt_list, cpi->this_frame_stats);
- accumulate_stats(cpi->total_stats, &fps);
+ output_stats(cpi, cpi->output_pkt_list, cpi->twopass.this_frame_stats);
+ accumulate_stats(cpi->twopass.total_stats, &fps);
}
// Copy the previous Last Frame into the GF buffer if specific conditions for doing so are met
if ((cm->current_video_frame > 0) &&
- (cpi->this_frame_stats->pcnt_inter > 0.20) &&
- ((cpi->this_frame_stats->intra_error / cpi->this_frame_stats->coded_error) > 2.0))
+ (cpi->twopass.this_frame_stats->pcnt_inter > 0.20) &&
+ ((cpi->twopass.this_frame_stats->intra_error / cpi->twopass.this_frame_stats->coded_error) > 2.0))
{
vp8_yv12_copy_frame_ptr(lst_yv12, gld_yv12);
}
@@ -860,7 +859,7 @@ static int estimate_max_q(VP8_COMP *cpi, double section_err, int section_target_
double pow_lowq = 0.40;
if (section_target_bandwitdh <= 0)
- return cpi->maxq_max_limit; // Highest value allowed
+ return cpi->twopass.maxq_max_limit; // Highest value allowed
target_norm_bits_per_mb = (section_target_bandwitdh < (1 << 20)) ? (512 * section_target_bandwitdh) / num_mbs : 512 * (section_target_bandwitdh / num_mbs);
@@ -869,17 +868,17 @@ static int estimate_max_q(VP8_COMP *cpi, double section_err, int section_target_
{
rolling_ratio = (double)cpi->rolling_actual_bits / (double)cpi->rolling_target_bits;
- //if ( cpi->est_max_qcorrection_factor > rolling_ratio )
+ //if ( cpi->twopass.est_max_qcorrection_factor > rolling_ratio )
if (rolling_ratio < 0.95)
- //cpi->est_max_qcorrection_factor *= adjustment_rate;
- cpi->est_max_qcorrection_factor -= 0.005;
- //else if ( cpi->est_max_qcorrection_factor < rolling_ratio )
+ //cpi->twopass.est_max_qcorrection_factor *= adjustment_rate;
+ cpi->twopass.est_max_qcorrection_factor -= 0.005;
+ //else if ( cpi->twopass.est_max_qcorrection_factor < rolling_ratio )
else if (rolling_ratio > 1.05)
- cpi->est_max_qcorrection_factor += 0.005;
+ cpi->twopass.est_max_qcorrection_factor += 0.005;
- //cpi->est_max_qcorrection_factor /= adjustment_rate;
+ //cpi->twopass.est_max_qcorrection_factor /= adjustment_rate;
- cpi->est_max_qcorrection_factor = (cpi->est_max_qcorrection_factor < 0.1) ? 0.1 : (cpi->est_max_qcorrection_factor > 10.0) ? 10.0 : cpi->est_max_qcorrection_factor;
+ cpi->twopass.est_max_qcorrection_factor = (cpi->twopass.est_max_qcorrection_factor < 0.1) ? 0.1 : (cpi->twopass.est_max_qcorrection_factor > 10.0) ? 10.0 : cpi->twopass.est_max_qcorrection_factor;
}
// Corrections for higher compression speed settings (reduced compression expected)
@@ -898,7 +897,7 @@ static int estimate_max_q(VP8_COMP *cpi, double section_err, int section_target_
// Try and pick a max Q that will be high enough to encode the
// content at the given rate.
- for (Q = cpi->maxq_min_limit; Q < cpi->maxq_max_limit; Q++)
+ for (Q = cpi->twopass.maxq_min_limit; Q < cpi->twopass.maxq_max_limit; Q++)
{
int bits_per_mb_at_this_q;
@@ -910,8 +909,11 @@ static int estimate_max_q(VP8_COMP *cpi, double section_err, int section_target_
else
correction_factor = corr_high;
- bits_per_mb_at_this_q = (int)(.5 + correction_factor * speed_correction * cpi->est_max_qcorrection_factor * cpi->section_max_qfactor * (double)vp8_bits_per_mb[INTER_FRAME][Q] / 1.0);
- //bits_per_mb_at_this_q = (int)(.5 + correction_factor * speed_correction * cpi->est_max_qcorrection_factor * (double)vp8_bits_per_mb[INTER_FRAME][Q] / 1.0);
+ bits_per_mb_at_this_q = (int)(.5 + correction_factor
+ * speed_correction * cpi->twopass.est_max_qcorrection_factor
+ * cpi->twopass.section_max_qfactor
+ * (double)vp8_bits_per_mb[INTER_FRAME][Q] / 1.0);
+ //bits_per_mb_at_this_q = (int)(.5 + correction_factor * speed_correction * cpi->twopass.est_max_qcorrection_factor * (double)vp8_bits_per_mb[INTER_FRAME][Q] / 1.0);
if (bits_per_mb_at_this_q <= target_norm_bits_per_mb)
break;
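
The loop above is the heart of estimate_max_q(): Q is walked up from twopass.maxq_min_limit and the search stops at the first Q whose projected bits per macroblock fit the target. Collapsed into one readable form (correction_factor, speed_correction and target_norm_bits_per_mb as computed earlier in the function):

    for (Q = cpi->twopass.maxq_min_limit; Q < cpi->twopass.maxq_max_limit; Q++)
    {
        int bits_per_mb_at_this_q =
            (int)(.5 + correction_factor
                     * speed_correction
                     * cpi->twopass.est_max_qcorrection_factor
                     * cpi->twopass.section_max_qfactor
                     * (double)vp8_bits_per_mb[INTER_FRAME][Q]);

        if (bits_per_mb_at_this_q <= target_norm_bits_per_mb)
            break;  /* first Q that fits the per-MB bit budget */
    }
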
@@ -930,12 +932,12 @@ static int estimate_max_q(VP8_COMP *cpi, double section_err, int section_target_
// averaga q observed in clip for non kf/gf.arf frames
// Give average a chance to settle though.
if ( (cpi->ni_frames >
- ((unsigned int)cpi->total_stats->count >> 8)) &&
+ ((unsigned int)cpi->twopass.total_stats->count >> 8)) &&
(cpi->ni_frames > 150) )
{
- cpi->maxq_max_limit = ((cpi->ni_av_qi + 32) < cpi->worst_quality)
+ cpi->twopass.maxq_max_limit = ((cpi->ni_av_qi + 32) < cpi->worst_quality)
? (cpi->ni_av_qi + 32) : cpi->worst_quality;
- cpi->maxq_min_limit = ((cpi->ni_av_qi - 32) > cpi->best_quality)
+ cpi->twopass.maxq_min_limit = ((cpi->ni_av_qi - 32) > cpi->best_quality)
? (cpi->ni_av_qi - 32) : cpi->best_quality;
}
@@ -982,7 +984,7 @@ static int estimate_q(VP8_COMP *cpi, double section_err, int section_target_band
else
correction_factor = corr_high;
- bits_per_mb_at_this_q = (int)(.5 + correction_factor * speed_correction * cpi->est_max_qcorrection_factor * (double)vp8_bits_per_mb[INTER_FRAME][Q] / 1.0);
+ bits_per_mb_at_this_q = (int)(.5 + correction_factor * speed_correction * cpi->twopass.est_max_qcorrection_factor * (double)vp8_bits_per_mb[INTER_FRAME][Q] / 1.0);
if (bits_per_mb_at_this_q <= target_norm_bits_per_mb)
break;
@@ -1119,8 +1121,8 @@ static int estimate_cq(VP8_COMP *cpi, double section_err, int section_target_ban
speed_correction = 1.25;
}
// II ratio correction factor for clip as a whole
- clip_iiratio = cpi->total_stats->intra_error /
- DOUBLE_DIVIDE_CHECK(cpi->total_stats->coded_error);
+ clip_iiratio = cpi->twopass.total_stats->intra_error /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.total_stats->coded_error);
clip_iifactor = 1.0 - ((clip_iiratio - 10.0) * 0.025);
if (clip_iifactor < 0.80)
clip_iifactor = 0.80;
@@ -1168,48 +1170,48 @@ void vp8_init_second_pass(VP8_COMP *cpi)
double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth * cpi->oxcf.two_pass_vbrmin_section / 100);
- zero_stats(cpi->total_stats);
+ zero_stats(cpi->twopass.total_stats);
- if (!cpi->stats_in_end)
+ if (!cpi->twopass.stats_in_end)
return;
- *cpi->total_stats = *cpi->stats_in_end;
+ *cpi->twopass.total_stats = *cpi->twopass.stats_in_end;
- cpi->total_error_left = cpi->total_stats->ssim_weighted_pred_err;
- cpi->total_intra_error_left = cpi->total_stats->intra_error;
- cpi->total_coded_error_left = cpi->total_stats->coded_error;
- cpi->start_tot_err_left = cpi->total_error_left;
+ cpi->twopass.total_error_left = cpi->twopass.total_stats->ssim_weighted_pred_err;
+ cpi->twopass.total_intra_error_left = cpi->twopass.total_stats->intra_error;
+ cpi->twopass.total_coded_error_left = cpi->twopass.total_stats->coded_error;
+ cpi->twopass.start_tot_err_left = cpi->twopass.total_error_left;
- //cpi->bits_left = (long long)(cpi->total_stats->count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
- //cpi->bits_left -= (long long)(cpi->total_stats->count * two_pass_min_rate / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
+ //cpi->twopass.bits_left = (long long)(cpi->twopass.total_stats->count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
+ //cpi->twopass.bits_left -= (long long)(cpi->twopass.total_stats->count * two_pass_min_rate / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
// each frame can have a different duration, as the frame rate in the source
// isn't guaranteed to be constant. The frame rate prior to the first frame
// encoded in the second pass is a guess. However the sum duration is not.
// Its calculated based on the actual durations of all frames from the first
// pass.
- vp8_new_frame_rate(cpi, 10000000.0 * cpi->total_stats->count / cpi->total_stats->duration);
+ vp8_new_frame_rate(cpi, 10000000.0 * cpi->twopass.total_stats->count / cpi->twopass.total_stats->duration);
cpi->output_frame_rate = cpi->oxcf.frame_rate;
- cpi->bits_left = (long long)(cpi->total_stats->duration * cpi->oxcf.target_bandwidth / 10000000.0) ;
- cpi->bits_left -= (long long)(cpi->total_stats->duration * two_pass_min_rate / 10000000.0);
- cpi->clip_bits_total = cpi->bits_left;
+ cpi->twopass.bits_left = (long long)(cpi->twopass.total_stats->duration * cpi->oxcf.target_bandwidth / 10000000.0) ;
+ cpi->twopass.bits_left -= (long long)(cpi->twopass.total_stats->duration * two_pass_min_rate / 10000000.0);
+ cpi->twopass.clip_bits_total = cpi->twopass.bits_left;
// Calculate a minimum intra value to be used in determining the IIratio
// scores used in the second pass. We have this minimum to make sure
// that clips that are static but "low complexity" in the intra domain
// are still boosted appropriately for KF/GF/ARF
- cpi->kf_intra_err_min = KF_MB_INTRA_MIN * cpi->common.MBs;
- cpi->gf_intra_err_min = GF_MB_INTRA_MIN * cpi->common.MBs;
+ cpi->twopass.kf_intra_err_min = KF_MB_INTRA_MIN * cpi->common.MBs;
+ cpi->twopass.gf_intra_err_min = GF_MB_INTRA_MIN * cpi->common.MBs;
- avg_stats(cpi->total_stats);
+ avg_stats(cpi->twopass.total_stats);
// Scan the first pass file and calculate an average Intra / Inter error score ratio for the sequence
{
double sum_iiratio = 0.0;
double IIRatio;
- start_pos = cpi->stats_in; // Note starting "file" position
+ start_pos = cpi->twopass.stats_in; // Note starting "file" position
while (input_stats(cpi, &this_frame) != EOF)
{
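
The budget set up in vp8_init_second_pass() above reduces to a simple formula once durations are converted to seconds; the division by 10000000.0 suggests total_stats->duration is kept in 1/10,000,000 s units. A compact restatement, under that assumption:

    /* Restatement of the bit budget above; the duration unit is inferred
     * from the constants in the hunk, not stated in the source. */
    double seconds  = cpi->twopass.total_stats->duration / 10000000.0;
    double min_rate = cpi->oxcf.target_bandwidth *
                      cpi->oxcf.two_pass_vbrmin_section / 100.0;

    cpi->twopass.bits_left       = (long long)(seconds * cpi->oxcf.target_bandwidth)
                                 - (long long)(seconds * min_rate);
    cpi->twopass.clip_bits_total = cpi->twopass.bits_left;

On the same assumption, vp8_new_frame_rate(cpi, 10000000.0 * count / duration) is simply the frame count divided by the clip length in seconds.
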
@@ -1218,7 +1220,7 @@ void vp8_init_second_pass(VP8_COMP *cpi)
sum_iiratio += IIRatio;
}
- cpi->avg_iiratio = sum_iiratio / DOUBLE_DIVIDE_CHECK((double)cpi->total_stats->count);
+ cpi->twopass.avg_iiratio = sum_iiratio / DOUBLE_DIVIDE_CHECK((double)cpi->twopass.total_stats->count);
// Reset file position
reset_fpf_position(cpi, start_pos);
@@ -1227,26 +1229,20 @@ void vp8_init_second_pass(VP8_COMP *cpi)
// Scan the first pass file and calculate a modified total error based upon the bias/power function
// used to allocate bits
{
- start_pos = cpi->stats_in; // Note starting "file" position
+ start_pos = cpi->twopass.stats_in; // Note starting "file" position
- cpi->modified_error_total = 0.0;
- cpi->modified_error_used = 0.0;
+ cpi->twopass.modified_error_total = 0.0;
+ cpi->twopass.modified_error_used = 0.0;
while (input_stats(cpi, &this_frame) != EOF)
{
- cpi->modified_error_total += calculate_modified_err(cpi, &this_frame);
+ cpi->twopass.modified_error_total += calculate_modified_err(cpi, &this_frame);
}
- cpi->modified_error_left = cpi->modified_error_total;
+ cpi->twopass.modified_error_left = cpi->twopass.modified_error_total;
reset_fpf_position(cpi, start_pos); // Reset file position
}
-
- // Calculate the clip target modified bits per error
- // The observed bpe starts as the same number.
- cpi->clip_bpe = cpi->bits_left /
- DOUBLE_DIVIDE_CHECK(cpi->modified_error_total);
- cpi->observed_bpe = cpi->clip_bpe;
}
void vp8_end_second_pass(VP8_COMP *cpi)
@@ -1310,7 +1306,7 @@ static int detect_transition_to_still(
(decay_accumulator < 0.9) )
{
int j;
- FIRSTPASS_STATS * position = cpi->stats_in;
+ FIRSTPASS_STATS * position = cpi->twopass.stats_in;
FIRSTPASS_STATS tmp_next_frame;
double decay_rate;
@@ -1366,12 +1362,12 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
unsigned int allow_alt_ref =
cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames;
- cpi->gf_group_bits = 0;
- cpi->gf_decay_rate = 0;
+ cpi->twopass.gf_group_bits = 0;
+ cpi->twopass.gf_decay_rate = 0;
vp8_clear_system_state(); //__asm emms;
- start_pos = cpi->stats_in;
+ start_pos = cpi->twopass.stats_in;
vpx_memset(&next_frame, 0, sizeof(next_frame)); // assure clean
@@ -1393,9 +1389,9 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
// or ARF that will be coded with the group
i = 0;
- while (((i < cpi->static_scene_max_gf_interval) ||
- ((cpi->frames_to_key - i) < MIN_GF_INTERVAL)) &&
- (i < cpi->frames_to_key))
+ while (((i < cpi->twopass.static_scene_max_gf_interval) ||
+ ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL)) &&
+ (i < cpi->twopass.frames_to_key))
{
double r;
double this_frame_mvr_ratio;
@@ -1460,11 +1456,11 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
( next_frame.intra_error /
DOUBLE_DIVIDE_CHECK(next_frame.coded_error)));
- if (next_frame.intra_error > cpi->gf_intra_err_min)
+ if (next_frame.intra_error > cpi->twopass.gf_intra_err_min)
r = (IIKFACTOR2 * next_frame.intra_error /
DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
else
- r = (IIKFACTOR2 * cpi->gf_intra_err_min /
+ r = (IIKFACTOR2 * cpi->twopass.gf_intra_err_min /
DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
// Increase boost for frames where new data coming into frame
@@ -1506,7 +1502,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
// Dont break out with a very short interval
(i > MIN_GF_INTERVAL) &&
// Dont break out very close to a key frame
- ((cpi->frames_to_key - i) >= MIN_GF_INTERVAL) &&
+ ((cpi->twopass.frames_to_key - i) >= MIN_GF_INTERVAL) &&
((boost_score > 20.0) || (next_frame.pcnt_inter < 0.75)) &&
((mv_ratio_accumulator > 100.0) ||
(abs_mv_in_out_accumulator > 3.0) ||
@@ -1523,7 +1519,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
old_boost_score = boost_score;
}
- cpi->gf_decay_rate =
+ cpi->twopass.gf_decay_rate =
(i > 0) ? (int)(100.0 * (1.0 - decay_accumulator)) / i : 0;
// When using CBR apply additional buffer related upper limits
@@ -1561,7 +1557,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
if (allow_alt_ref &&
(i >= MIN_GF_INTERVAL) &&
// dont use ARF very near next kf
- (i <= (cpi->frames_to_key - MIN_GF_INTERVAL)) &&
+ (i <= (cpi->twopass.frames_to_key - MIN_GF_INTERVAL)) &&
(((next_frame.pcnt_inter > 0.75) &&
((mv_in_out_accumulator / (double)i > -0.2) || (mv_in_out_accumulator > -2.0)) &&
//(cpi->gfu_boost>150) &&
@@ -1569,7 +1565,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
//(cpi->gfu_boost>AF_THRESH2) &&
//((cpi->gfu_boost/i)>AF_THRESH) &&
//(decay_accumulator > 0.5) &&
- (cpi->gf_decay_rate <= (ARF_DECAY_THRESH + (cpi->gfu_boost / 200)))
+ (cpi->twopass.gf_decay_rate <= (ARF_DECAY_THRESH + (cpi->gfu_boost / 200)))
)
)
)
@@ -1582,8 +1578,8 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
int group_bits;
// Estimate the bits to be allocated to the group as a whole
- if ((cpi->kf_group_bits > 0) && (cpi->kf_group_error_left > 0))
- group_bits = (int)((double)cpi->kf_group_bits * (gf_group_err / (double)cpi->kf_group_error_left));
+ if ((cpi->twopass.kf_group_bits > 0) && (cpi->twopass.kf_group_error_left > 0))
+ group_bits = (int)((double)cpi->twopass.kf_group_bits * (gf_group_err / (double)cpi->twopass.kf_group_error_left));
else
group_bits = 0;
@@ -1630,7 +1626,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
// Note: this_frame->frame has been updated in the loop
// so it now points at the ARF frame.
half_gf_int = cpi->baseline_gf_interval >> 1;
- frames_after_arf = cpi->total_stats->count - this_frame->frame - 1;
+ frames_after_arf = cpi->twopass.total_stats->count - this_frame->frame - 1;
switch (cpi->oxcf.arnr_type)
{
@@ -1683,46 +1679,46 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
if (!cpi->source_alt_ref_pending)
{
// Dont allow conventional gf too near the next kf
- if ((cpi->frames_to_key - cpi->baseline_gf_interval) < MIN_GF_INTERVAL)
+ if ((cpi->twopass.frames_to_key - cpi->baseline_gf_interval) < MIN_GF_INTERVAL)
{
- while (cpi->baseline_gf_interval < cpi->frames_to_key)
+ while (cpi->baseline_gf_interval < cpi->twopass.frames_to_key)
{
if (EOF == input_stats(cpi, this_frame))
break;
cpi->baseline_gf_interval++;
- if (cpi->baseline_gf_interval < cpi->frames_to_key)
+ if (cpi->baseline_gf_interval < cpi->twopass.frames_to_key)
gf_group_err += calculate_modified_err(cpi, this_frame);
}
}
}
// Now decide how many bits should be allocated to the GF group as a proportion of those remaining in the kf group.
- // The final key frame group in the clip is treated as a special case where cpi->kf_group_bits is tied to cpi->bits_left.
+ // The final key frame group in the clip is treated as a special case where cpi->twopass.kf_group_bits is tied to cpi->twopass.bits_left.
// This is also important for short clips where there may only be one key frame.
- if (cpi->frames_to_key >= (int)(cpi->total_stats->count - cpi->common.current_video_frame))
+ if (cpi->twopass.frames_to_key >= (int)(cpi->twopass.total_stats->count - cpi->common.current_video_frame))
{
- cpi->kf_group_bits = (cpi->bits_left > 0) ? cpi->bits_left : 0;
+ cpi->twopass.kf_group_bits = (cpi->twopass.bits_left > 0) ? cpi->twopass.bits_left : 0;
}
// Calculate the bits to be allocated to the group as a whole
- if ((cpi->kf_group_bits > 0) && (cpi->kf_group_error_left > 0))
- cpi->gf_group_bits = (int)((double)cpi->kf_group_bits * (gf_group_err / (double)cpi->kf_group_error_left));
+ if ((cpi->twopass.kf_group_bits > 0) && (cpi->twopass.kf_group_error_left > 0))
+ cpi->twopass.gf_group_bits = (int)((double)cpi->twopass.kf_group_bits * (gf_group_err / (double)cpi->twopass.kf_group_error_left));
else
- cpi->gf_group_bits = 0;
+ cpi->twopass.gf_group_bits = 0;
- cpi->gf_group_bits = (cpi->gf_group_bits < 0) ? 0 : (cpi->gf_group_bits > cpi->kf_group_bits) ? cpi->kf_group_bits : cpi->gf_group_bits;
+ cpi->twopass.gf_group_bits = (cpi->twopass.gf_group_bits < 0) ? 0 : (cpi->twopass.gf_group_bits > cpi->twopass.kf_group_bits) ? cpi->twopass.kf_group_bits : cpi->twopass.gf_group_bits;
- // Clip cpi->gf_group_bits based on user supplied data rate variability limit (cpi->oxcf.two_pass_vbrmax_section)
- if (cpi->gf_group_bits > max_bits * cpi->baseline_gf_interval)
- cpi->gf_group_bits = max_bits * cpi->baseline_gf_interval;
+ // Clip cpi->twopass.gf_group_bits based on user supplied data rate variability limit (cpi->oxcf.two_pass_vbrmax_section)
+ if (cpi->twopass.gf_group_bits > max_bits * cpi->baseline_gf_interval)
+ cpi->twopass.gf_group_bits = max_bits * cpi->baseline_gf_interval;
// Reset the file position
reset_fpf_position(cpi, start_pos);
// Update the record of error used so far (only done once per gf group)
- cpi->modified_error_used += gf_group_err;
+ cpi->twopass.modified_error_used += gf_group_err;
// Assign bits to the arf or gf.
{
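
The allocation just above gives the GF group a share of the key-frame group's remaining bits proportional to its share of the remaining modified error, then clamps the result. Stripped of the diff noise, the logic reads:

    /* GF-group bits = KF-group bits scaled by this group's share of the
     * remaining modified error, clamped to [0, kf_group_bits] and to the
     * user's rate-variability ceiling (two_pass_vbrmax_section via max_bits). */
    if (cpi->twopass.kf_group_bits > 0 && cpi->twopass.kf_group_error_left > 0)
        cpi->twopass.gf_group_bits =
            (int)((double)cpi->twopass.kf_group_bits *
                  (gf_group_err / (double)cpi->twopass.kf_group_error_left));
    else
        cpi->twopass.gf_group_bits = 0;

    if (cpi->twopass.gf_group_bits < 0)
        cpi->twopass.gf_group_bits = 0;
    else if (cpi->twopass.gf_group_bits > cpi->twopass.kf_group_bits)
        cpi->twopass.gf_group_bits = cpi->twopass.kf_group_bits;

    if (cpi->twopass.gf_group_bits > max_bits * cpi->baseline_gf_interval)
        cpi->twopass.gf_group_bits = max_bits * cpi->baseline_gf_interval;
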
@@ -1771,7 +1767,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
}
// Calculate the number of bits to be spent on the gf or arf based on the boost number
- cpi->gf_bits = (int)((double)Boost * (cpi->gf_group_bits / (double)allocation_chunks));
+ cpi->twopass.gf_bits = (int)((double)Boost * (cpi->twopass.gf_group_bits / (double)allocation_chunks));
// If the frame that is to be boosted is simpler than the average for
// the gf/arf group then use an alternative calculation
@@ -1782,16 +1778,16 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
int alt_gf_bits;
alt_gf_grp_bits =
- (double)cpi->kf_group_bits *
+ (double)cpi->twopass.kf_group_bits *
(mod_frame_err * (double)cpi->baseline_gf_interval) /
- DOUBLE_DIVIDE_CHECK((double)cpi->kf_group_error_left);
+ DOUBLE_DIVIDE_CHECK((double)cpi->twopass.kf_group_error_left);
alt_gf_bits = (int)((double)Boost * (alt_gf_grp_bits /
(double)allocation_chunks));
- if (cpi->gf_bits > alt_gf_bits)
+ if (cpi->twopass.gf_bits > alt_gf_bits)
{
- cpi->gf_bits = alt_gf_bits;
+ cpi->twopass.gf_bits = alt_gf_bits;
}
}
// Else if it is harder than other frames in the group make sure it at
@@ -1800,45 +1796,45 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
else
{
int alt_gf_bits =
- (int)((double)cpi->kf_group_bits *
+ (int)((double)cpi->twopass.kf_group_bits *
mod_frame_err /
- DOUBLE_DIVIDE_CHECK((double)cpi->kf_group_error_left));
+ DOUBLE_DIVIDE_CHECK((double)cpi->twopass.kf_group_error_left));
- if (alt_gf_bits > cpi->gf_bits)
+ if (alt_gf_bits > cpi->twopass.gf_bits)
{
- cpi->gf_bits = alt_gf_bits;
+ cpi->twopass.gf_bits = alt_gf_bits;
}
}
// Apply an additional limit for CBR
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
{
- if (cpi->gf_bits > (cpi->buffer_level >> 1))
- cpi->gf_bits = cpi->buffer_level >> 1;
+ if (cpi->twopass.gf_bits > (cpi->buffer_level >> 1))
+ cpi->twopass.gf_bits = cpi->buffer_level >> 1;
}
// Dont allow a negative value for gf_bits
- if (cpi->gf_bits < 0)
- cpi->gf_bits = 0;
+ if (cpi->twopass.gf_bits < 0)
+ cpi->twopass.gf_bits = 0;
// Adjust KF group bits and error remainin
- cpi->kf_group_error_left -= gf_group_err;
- cpi->kf_group_bits -= cpi->gf_group_bits;
+ cpi->twopass.kf_group_error_left -= gf_group_err;
+ cpi->twopass.kf_group_bits -= cpi->twopass.gf_group_bits;
- if (cpi->kf_group_bits < 0)
- cpi->kf_group_bits = 0;
+ if (cpi->twopass.kf_group_bits < 0)
+ cpi->twopass.kf_group_bits = 0;
// Note the error score left in the remaining frames of the group.
// For normal GFs we want to remove the error score for the first frame of the group (except in Key frame case where this has already happened)
if (!cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME)
- cpi->gf_group_error_left = gf_group_err - gf_first_frame_err;
+ cpi->twopass.gf_group_error_left = gf_group_err - gf_first_frame_err;
else
- cpi->gf_group_error_left = gf_group_err;
+ cpi->twopass.gf_group_error_left = gf_group_err;
- cpi->gf_group_bits -= cpi->gf_bits;
+ cpi->twopass.gf_group_bits -= cpi->twopass.gf_bits;
- if (cpi->gf_group_bits < 0)
- cpi->gf_group_bits = 0;
+ if (cpi->twopass.gf_group_bits < 0)
+ cpi->twopass.gf_group_bits = 0;
// Set aside some bits for a mid gf sequence boost
if ((cpi->gfu_boost > 150) && (cpi->baseline_gf_interval > 5))
@@ -1846,18 +1842,18 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
int pct_extra = (cpi->gfu_boost - 100) / 50;
pct_extra = (pct_extra > 10) ? 10 : pct_extra;
- cpi->mid_gf_extra_bits = (cpi->gf_group_bits * pct_extra) / 100;
- cpi->gf_group_bits -= cpi->mid_gf_extra_bits;
+ cpi->twopass.mid_gf_extra_bits = (cpi->twopass.gf_group_bits * pct_extra) / 100;
+ cpi->twopass.gf_group_bits -= cpi->twopass.mid_gf_extra_bits;
}
else
- cpi->mid_gf_extra_bits = 0;
+ cpi->twopass.mid_gf_extra_bits = 0;
- cpi->gf_bits += cpi->min_frame_bandwidth; // Add in minimum for a frame
+ cpi->twopass.gf_bits += cpi->min_frame_bandwidth; // Add in minimum for a frame
}
if (!cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME)) // Normal GF and not a KF
{
- cpi->per_frame_bandwidth = cpi->gf_bits; // Per frame bit target for this frame
+ cpi->per_frame_bandwidth = cpi->twopass.gf_bits; // Per frame bit target for this frame
}
// Adjustment to estimate_max_q based on a measure of complexity of the section
@@ -1877,21 +1873,21 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
avg_stats(&sectionstats);
- cpi->section_intra_rating =
+ cpi->twopass.section_intra_rating =
sectionstats.intra_error /
DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
Ratio = sectionstats.intra_error / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
//if( (Ratio > 11) ) //&& (sectionstats.pcnt_second_ref < .20) )
//{
- cpi->section_max_qfactor = 1.0 - ((Ratio - 10.0) * 0.025);
+ cpi->twopass.section_max_qfactor = 1.0 - ((Ratio - 10.0) * 0.025);
- if (cpi->section_max_qfactor < 0.80)
- cpi->section_max_qfactor = 0.80;
+ if (cpi->twopass.section_max_qfactor < 0.80)
+ cpi->twopass.section_max_qfactor = 0.80;
//}
//else
- // cpi->section_max_qfactor = 1.0;
+ // cpi->twopass.section_max_qfactor = 1.0;
reset_fpf_position(cpi, start_pos);
}
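
section_max_qfactor, set in the hunk above (and again for key-frame groups further down), scales the bits-per-MB estimate inside estimate_max_q(); it decreases linearly with the section's intra/coded error ratio, equals 1.0 at a ratio of 10, and is floored at 0.80. A short worked restatement:

    /* Mapping used above: Ratio = 14 gives 0.90; Ratio >= 18 clamps at 0.80. */
    double factor = 1.0 - ((Ratio - 10.0) * 0.025);
    cpi->twopass.section_max_qfactor = (factor < 0.80) ? 0.80 : factor;
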
@@ -1908,22 +1904,22 @@ static void assign_std_frame_bits(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
int max_bits = frame_max_bits(cpi); // Max for a single frame
// The final few frames have special treatment
- if (cpi->frames_till_gf_update_due >= (int)(cpi->total_stats->count - cpi->common.current_video_frame))
+ if (cpi->frames_till_gf_update_due >= (int)(cpi->twopass.total_stats->count - cpi->common.current_video_frame))
{
- cpi->gf_group_bits = (cpi->bits_left > 0) ? cpi->bits_left : 0;;
+ cpi->twopass.gf_group_bits = (cpi->twopass.bits_left > 0) ? cpi->twopass.bits_left : 0;;
}
// Calculate modified prediction error used in bit allocation
modified_err = calculate_modified_err(cpi, this_frame);
- if (cpi->gf_group_error_left > 0)
- err_fraction = modified_err / cpi->gf_group_error_left; // What portion of the remaining GF group error is used by this frame
+ if (cpi->twopass.gf_group_error_left > 0)
+ err_fraction = modified_err / cpi->twopass.gf_group_error_left; // What portion of the remaining GF group error is used by this frame
else
err_fraction = 0.0;
- target_frame_size = (int)((double)cpi->gf_group_bits * err_fraction); // How many of those bits available for allocation should we give it?
+ target_frame_size = (int)((double)cpi->twopass.gf_group_bits * err_fraction); // How many of those bits available for allocation should we give it?
- // Clip to target size to 0 - max_bits (or cpi->gf_group_bits) at the top end.
+ // Clip to target size to 0 - max_bits (or cpi->twopass.gf_group_bits) at the top end.
if (target_frame_size < 0)
target_frame_size = 0;
else
@@ -1931,21 +1927,21 @@ static void assign_std_frame_bits(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
if (target_frame_size > max_bits)
target_frame_size = max_bits;
- if (target_frame_size > cpi->gf_group_bits)
- target_frame_size = cpi->gf_group_bits;
+ if (target_frame_size > cpi->twopass.gf_group_bits)
+ target_frame_size = cpi->twopass.gf_group_bits;
}
- cpi->gf_group_error_left -= modified_err; // Adjust error remaining
- cpi->gf_group_bits -= target_frame_size; // Adjust bits remaining
+ cpi->twopass.gf_group_error_left -= modified_err; // Adjust error remaining
+ cpi->twopass.gf_group_bits -= target_frame_size; // Adjust bits remaining
- if (cpi->gf_group_bits < 0)
- cpi->gf_group_bits = 0;
+ if (cpi->twopass.gf_group_bits < 0)
+ cpi->twopass.gf_group_bits = 0;
target_frame_size += cpi->min_frame_bandwidth; // Add in the minimum number of bits that is set aside for every frame.
// Special case for the frame that lies half way between two gfs
if (cpi->common.frames_since_golden == cpi->baseline_gf_interval / 2)
- target_frame_size += cpi->mid_gf_extra_bits;
+ target_frame_size += cpi->twopass.mid_gf_extra_bits;
cpi->per_frame_bandwidth = target_frame_size; // Per frame bit target for this frame
}
@@ -1953,7 +1949,7 @@ static void assign_std_frame_bits(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
void vp8_second_pass(VP8_COMP *cpi)
{
int tmp_q;
- int frames_left = (int)(cpi->total_stats->count - cpi->common.current_video_frame);
+ int frames_left = (int)(cpi->twopass.total_stats->count - cpi->common.current_video_frame);
FIRSTPASS_STATS this_frame;
FIRSTPASS_STATS this_frame_copy;
@@ -1964,7 +1960,7 @@ void vp8_second_pass(VP8_COMP *cpi)
FIRSTPASS_STATS *start_pos;
- if (!cpi->stats_in)
+ if (!cpi->twopass.stats_in)
{
return ;
}
@@ -1978,17 +1974,10 @@ void vp8_second_pass(VP8_COMP *cpi)
this_frame_intra_error = this_frame.intra_error;
this_frame_coded_error = this_frame.coded_error;
- // Store information regarding level of motion etc for use mode decisions.
- cpi->motion_speed = (int)(fabs(this_frame.MVr) + fabs(this_frame.MVc));
- cpi->motion_var = (int)(fabs(this_frame.MVrv) + fabs(this_frame.MVcv));
- cpi->inter_lvl = (int)(this_frame.pcnt_inter * 100);
- cpi->intra_lvl = (int)((1.0 - this_frame.pcnt_inter) * 100);
- cpi->motion_lvl = (int)(this_frame.pcnt_motion * 100);
-
- start_pos = cpi->stats_in;
+ start_pos = cpi->twopass.stats_in;
// keyframe and section processing !
- if (cpi->frames_to_key == 0)
+ if (cpi->twopass.frames_to_key == 0)
{
// Define next KF group and assign bits to it
vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
@@ -1999,9 +1988,9 @@ void vp8_second_pass(VP8_COMP *cpi)
// This is temporary code till we decide what should really happen in this case.
if (cpi->oxcf.error_resilient_mode)
{
- cpi->gf_group_bits = cpi->kf_group_bits;
- cpi->gf_group_error_left = cpi->kf_group_error_left;
- cpi->baseline_gf_interval = cpi->frames_to_key;
+ cpi->twopass.gf_group_bits = cpi->twopass.kf_group_bits;
+ cpi->twopass.gf_group_error_left = cpi->twopass.kf_group_error_left;
+ cpi->baseline_gf_interval = cpi->twopass.frames_to_key;
cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
cpi->source_alt_ref_pending = FALSE;
}
@@ -2011,16 +2000,6 @@ void vp8_second_pass(VP8_COMP *cpi)
// Is this a GF / ARF (Note that a KF is always also a GF)
if (cpi->frames_till_gf_update_due == 0)
{
- // Update monitor of the bits per error observed so far.
- // Done once per gf group based on what has gone before
- // so do nothing if this is the first frame.
- if (cpi->common.current_video_frame > 0)
- {
- cpi->observed_bpe =
- (double)(cpi->clip_bits_total - cpi->bits_left) /
- cpi->modified_error_used;
- }
-
// Define next gf group and assign bits to it
vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
define_gf_group(cpi, &this_frame_copy);
@@ -2043,8 +2022,8 @@ void vp8_second_pass(VP8_COMP *cpi)
pct_extra = (pct_extra > 20) ? 20 : pct_extra;
- extra_bits = (cpi->gf_group_bits * pct_extra) / 100;
- cpi->gf_group_bits -= extra_bits;
+ extra_bits = (cpi->twopass.gf_group_bits * pct_extra) / 100;
+ cpi->twopass.gf_group_bits -= extra_bits;
cpi->per_frame_bandwidth += extra_bits;
}
}
@@ -2058,7 +2037,7 @@ void vp8_second_pass(VP8_COMP *cpi)
// This is temporary code till we decide what should really happen in this case.
if (cpi->oxcf.error_resilient_mode)
{
- cpi->frames_till_gf_update_due = cpi->frames_to_key;
+ cpi->frames_till_gf_update_due = cpi->twopass.frames_to_key;
if (cpi->common.frame_type != KEY_FRAME)
{
@@ -2076,13 +2055,13 @@ void vp8_second_pass(VP8_COMP *cpi)
}
// Keep a globally available copy of this and the next frame's iiratio.
- cpi->this_iiratio = this_frame_intra_error /
+ cpi->twopass.this_iiratio = this_frame_intra_error /
DOUBLE_DIVIDE_CHECK(this_frame_coded_error);
{
FIRSTPASS_STATS next_frame;
if ( lookup_next_frame_stats(cpi, &next_frame) != EOF )
{
- cpi->next_iiratio = next_frame.intra_error /
+ cpi->twopass.next_iiratio = next_frame.intra_error /
DOUBLE_DIVIDE_CHECK(next_frame.coded_error);
}
}
@@ -2094,7 +2073,7 @@ void vp8_second_pass(VP8_COMP *cpi)
if (cpi->common.current_video_frame == 0)
{
- cpi->est_max_qcorrection_factor = 1.0;
+ cpi->twopass.est_max_qcorrection_factor = 1.0;
// Experimental code to try and set a cq_level in constrained
// quality mode.
@@ -2104,8 +2083,8 @@ void vp8_second_pass(VP8_COMP *cpi)
est_cq =
estimate_cq( cpi,
- (cpi->total_coded_error_left / frames_left),
- (int)(cpi->bits_left / frames_left));
+ (cpi->twopass.total_coded_error_left / frames_left),
+ (int)(cpi->twopass.bits_left / frames_left));
cpi->cq_target_quality = cpi->oxcf.cq_level;
if ( est_cq > cpi->cq_target_quality )
@@ -2113,20 +2092,20 @@ void vp8_second_pass(VP8_COMP *cpi)
}
// guess at maxq needed in 2nd pass
- cpi->maxq_max_limit = cpi->worst_quality;
- cpi->maxq_min_limit = cpi->best_quality;
+ cpi->twopass.maxq_max_limit = cpi->worst_quality;
+ cpi->twopass.maxq_min_limit = cpi->best_quality;
tmp_q = estimate_max_q( cpi,
- (cpi->total_coded_error_left / frames_left),
- (int)(cpi->bits_left / frames_left));
+ (cpi->twopass.total_coded_error_left / frames_left),
+ (int)(cpi->twopass.bits_left / frames_left));
// Limit the maxq value returned subsequently.
// This increases the risk of overspend or underspend if the initial
// estimate for the clip is bad, but helps prevent excessive
// variation in Q, especially near the end of a clip
// where for example a small overspend may cause Q to crash
- cpi->maxq_max_limit = ((tmp_q + 32) < cpi->worst_quality)
+ cpi->twopass.maxq_max_limit = ((tmp_q + 32) < cpi->worst_quality)
? (tmp_q + 32) : cpi->worst_quality;
- cpi->maxq_min_limit = ((tmp_q - 32) > cpi->best_quality)
+ cpi->twopass.maxq_min_limit = ((tmp_q - 32) > cpi->best_quality)
? (tmp_q - 32) : cpi->best_quality;
cpi->active_worst_quality = tmp_q;
@@ -2138,14 +2117,14 @@ void vp8_second_pass(VP8_COMP *cpi)
// radical adjustments to the allowed quantizer range just to use up a
// few surplus bits or get beneath the target rate.
else if ( (cpi->common.current_video_frame <
- (((unsigned int)cpi->total_stats->count * 255)>>8)) &&
+ (((unsigned int)cpi->twopass.total_stats->count * 255)>>8)) &&
((cpi->common.current_video_frame + cpi->baseline_gf_interval) <
- (unsigned int)cpi->total_stats->count) )
+ (unsigned int)cpi->twopass.total_stats->count) )
{
if (frames_left < 1)
frames_left = 1;
- tmp_q = estimate_max_q(cpi, (cpi->total_coded_error_left / frames_left), (int)(cpi->bits_left / frames_left));
+ tmp_q = estimate_max_q(cpi, (cpi->twopass.total_coded_error_left / frames_left), (int)(cpi->twopass.bits_left / frames_left));
// Move active_worst_quality but in a damped way
if (tmp_q > cpi->active_worst_quality)
@@ -2156,10 +2135,10 @@ void vp8_second_pass(VP8_COMP *cpi)
cpi->active_worst_quality = ((cpi->active_worst_quality * 3) + tmp_q + 2) / 4;
}
- cpi->frames_to_key --;
- cpi->total_error_left -= this_frame_error;
- cpi->total_intra_error_left -= this_frame_intra_error;
- cpi->total_coded_error_left -= this_frame_coded_error;
+ cpi->twopass.frames_to_key --;
+ cpi->twopass.total_error_left -= this_frame_error;
+ cpi->twopass.total_intra_error_left -= this_frame_intra_error;
+ cpi->twopass.total_coded_error_left -= this_frame_coded_error;
}
@@ -2196,7 +2175,7 @@ static BOOL test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame, FIRST
vpx_memcpy(&local_next_frame, next_frame, sizeof(*next_frame));
// Note the starting file position so we can reset to it
- start_pos = cpi->stats_in;
+ start_pos = cpi->twopass.stats_in;
// Examine how well the key frame predicts subsequent frames
for (i = 0 ; i < 16; i++)
@@ -2273,7 +2252,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
vpx_memset(&next_frame, 0, sizeof(next_frame)); // assure clean
vp8_clear_system_state(); //__asm emms;
- start_position = cpi->stats_in;
+ start_position = cpi->twopass.stats_in;
cpi->common.frame_type = KEY_FRAME;
@@ -2286,19 +2265,19 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
// Kf is always a gf so clear frames till next gf counter
cpi->frames_till_gf_update_due = 0;
- cpi->frames_to_key = 1;
+ cpi->twopass.frames_to_key = 1;
// Take a copy of the initial frame details
vpx_memcpy(&first_frame, this_frame, sizeof(*this_frame));
- cpi->kf_group_bits = 0; // Total bits avaialable to kf group
- cpi->kf_group_error_left = 0; // Group modified error score.
+ cpi->twopass.kf_group_bits = 0; // Total bits avaialable to kf group
+ cpi->twopass.kf_group_error_left = 0; // Group modified error score.
kf_mod_err = calculate_modified_err(cpi, this_frame);
// find the next keyframe
i = 0;
- while (cpi->stats_in < cpi->stats_in_end)
+ while (cpi->twopass.stats_in < cpi->twopass.stats_in_end)
{
// Accumulate kf group error
kf_group_err += calculate_modified_err(cpi, this_frame);
@@ -2345,14 +2324,14 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
// Step on to the next frame
- cpi->frames_to_key ++;
+ cpi->twopass.frames_to_key ++;
// If we don't have a real key frame within the next two
// forcekeyframeevery intervals then break out of the loop.
- if (cpi->frames_to_key >= 2 *(int)cpi->key_frame_frequency)
+ if (cpi->twopass.frames_to_key >= 2 *(int)cpi->key_frame_frequency)
break;
} else
- cpi->frames_to_key ++;
+ cpi->twopass.frames_to_key ++;
i++;
}
@@ -2362,12 +2341,12 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
// This code centers the extra kf if the actual natural
// interval is between 1x and 2x
if (cpi->oxcf.auto_key
- && cpi->frames_to_key > (int)cpi->key_frame_frequency )
+ && cpi->twopass.frames_to_key > (int)cpi->key_frame_frequency )
{
- FIRSTPASS_STATS *current_pos = cpi->stats_in;
+ FIRSTPASS_STATS *current_pos = cpi->twopass.stats_in;
FIRSTPASS_STATS tmp_frame;
- cpi->frames_to_key /= 2;
+ cpi->twopass.frames_to_key /= 2;
// Copy first frame details
vpx_memcpy(&tmp_frame, &first_frame, sizeof(first_frame));
@@ -2380,7 +2359,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
kf_group_coded_err = 0;
// Rescan to get the correct error data for the forced kf group
- for( i = 0; i < cpi->frames_to_key; i++ )
+ for( i = 0; i < cpi->twopass.frames_to_key; i++ )
{
// Accumulate kf group errors
kf_group_err += calculate_modified_err(cpi, &tmp_frame);
@@ -2400,7 +2379,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->next_key_frame_forced = FALSE;
// Special case for the last frame of the file
- if (cpi->stats_in >= cpi->stats_in_end)
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end)
{
// Accumulate kf group error
kf_group_err += calculate_modified_err(cpi, this_frame);
@@ -2412,7 +2391,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
}
// Calculate the number of bits that should be assigned to the kf group.
- if ((cpi->bits_left > 0) && (cpi->modified_error_left > 0.0))
+ if ((cpi->twopass.bits_left > 0) && (cpi->twopass.modified_error_left > 0.0))
{
// Max for a single normal frame (not key frame)
int max_bits = frame_max_bits(cpi);
@@ -2422,14 +2401,14 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
// Default allocation based on bits left and relative
// complexity of the section
- cpi->kf_group_bits = (long long)( cpi->bits_left *
+ cpi->twopass.kf_group_bits = (long long)( cpi->twopass.bits_left *
( kf_group_err /
- cpi->modified_error_left ));
+ cpi->twopass.modified_error_left ));
// Clip based on maximum per frame rate defined by the user.
- max_grp_bits = (long long)max_bits * (long long)cpi->frames_to_key;
- if (cpi->kf_group_bits > max_grp_bits)
- cpi->kf_group_bits = max_grp_bits;
+ max_grp_bits = (long long)max_bits * (long long)cpi->twopass.frames_to_key;
+ if (cpi->twopass.kf_group_bits > max_grp_bits)
+ cpi->twopass.kf_group_bits = max_grp_bits;
// Additional special case for CBR if buffer is getting full.
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
@@ -2448,7 +2427,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
// Av bits per frame * number of frames
av_group_bits = (long long)cpi->av_per_frame_bandwidth *
- (long long)cpi->frames_to_key;
+ (long long)cpi->twopass.frames_to_key;
// We are at or above the maximum.
if (cpi->buffer_level >= high_water_mark)
@@ -2459,16 +2438,16 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
(long long)(buffer_lvl -
high_water_mark);
- if (cpi->kf_group_bits < min_group_bits)
- cpi->kf_group_bits = min_group_bits;
+ if (cpi->twopass.kf_group_bits < min_group_bits)
+ cpi->twopass.kf_group_bits = min_group_bits;
}
// We are above optimal but below the maximum
- else if (cpi->kf_group_bits < av_group_bits)
+ else if (cpi->twopass.kf_group_bits < av_group_bits)
{
long long bits_below_av = av_group_bits -
- cpi->kf_group_bits;
+ cpi->twopass.kf_group_bits;
- cpi->kf_group_bits +=
+ cpi->twopass.kf_group_bits +=
(long long)((double)bits_below_av *
(double)(buffer_lvl - opt_buffer_lvl) /
(double)(high_water_mark - opt_buffer_lvl));
@@ -2477,7 +2456,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
}
}
else
- cpi->kf_group_bits = 0;
+ cpi->twopass.kf_group_bits = 0;
// Reset the first pass file position
reset_fpf_position(cpi, start_position);
@@ -2487,18 +2466,18 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
boost_score = 0.0;
loop_decay_rate = 1.00; // Starting decay rate
- for (i = 0 ; i < cpi->frames_to_key ; i++)
+ for (i = 0 ; i < cpi->twopass.frames_to_key ; i++)
{
double r;
if (EOF == input_stats(cpi, &next_frame))
break;
- if (next_frame.intra_error > cpi->kf_intra_err_min)
+ if (next_frame.intra_error > cpi->twopass.kf_intra_err_min)
r = (IIKFACTOR2 * next_frame.intra_error /
DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
else
- r = (IIKFACTOR2 * cpi->kf_intra_err_min /
+ r = (IIKFACTOR2 * cpi->twopass.kf_intra_err_min /
DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
if (r > RMAX)
@@ -2529,7 +2508,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
zero_stats(&sectionstats);
reset_fpf_position(cpi, start_position);
- for (i = 0 ; i < cpi->frames_to_key ; i++)
+ for (i = 0 ; i < cpi->twopass.frames_to_key ; i++)
{
input_stats(cpi, &next_frame);
accumulate_stats(&sectionstats, &next_frame);
@@ -2537,19 +2516,21 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
avg_stats(&sectionstats);
- cpi->section_intra_rating = sectionstats.intra_error / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
+ cpi->twopass.section_intra_rating =
+ sectionstats.intra_error
+ / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
Ratio = sectionstats.intra_error / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
// if( (Ratio > 11) ) //&& (sectionstats.pcnt_second_ref < .20) )
//{
- cpi->section_max_qfactor = 1.0 - ((Ratio - 10.0) * 0.025);
+ cpi->twopass.section_max_qfactor = 1.0 - ((Ratio - 10.0) * 0.025);
- if (cpi->section_max_qfactor < 0.80)
- cpi->section_max_qfactor = 0.80;
+ if (cpi->twopass.section_max_qfactor < 0.80)
+ cpi->twopass.section_max_qfactor = 0.80;
//}
//else
- // cpi->section_max_qfactor = 1.0;
+ // cpi->twopass.section_max_qfactor = 1.0;
}
// When using CBR apply additional buffer fullness related upper limits
@@ -2587,7 +2568,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
{
int kf_boost = boost_score;
int allocation_chunks;
- int Counter = cpi->frames_to_key;
+ int Counter = cpi->twopass.frames_to_key;
int alt_kf_bits;
YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
// Min boost based on kf interval
@@ -2627,7 +2608,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
// The second (optionaly) on the key frames own error if this is smaller than the average for the group.
// The final one insures that the frame receives at least the allocation it would have received based on its own error score vs the error score remaining
- allocation_chunks = ((cpi->frames_to_key - 1) * 100) + kf_boost; // cpi->frames_to_key-1 because key frame itself is taken care of by kf_boost
+ allocation_chunks = ((cpi->twopass.frames_to_key - 1) * 100) + kf_boost; // cpi->twopass.frames_to_key-1 because key frame itself is taken care of by kf_boost
// Normalize Altboost and allocations chunck down to prevent overflow
while (kf_boost > 1000)
@@ -2636,35 +2617,35 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
allocation_chunks /= 2;
}
- cpi->kf_group_bits = (cpi->kf_group_bits < 0) ? 0 : cpi->kf_group_bits;
+ cpi->twopass.kf_group_bits = (cpi->twopass.kf_group_bits < 0) ? 0 : cpi->twopass.kf_group_bits;
// Calculate the number of bits to be spent on the key frame
- cpi->kf_bits = (int)((double)kf_boost * ((double)cpi->kf_group_bits / (double)allocation_chunks));
+ cpi->twopass.kf_bits = (int)((double)kf_boost * ((double)cpi->twopass.kf_group_bits / (double)allocation_chunks));
// Apply an additional limit for CBR
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
{
- if (cpi->kf_bits > ((3 * cpi->buffer_level) >> 2))
- cpi->kf_bits = (3 * cpi->buffer_level) >> 2;
+ if (cpi->twopass.kf_bits > ((3 * cpi->buffer_level) >> 2))
+ cpi->twopass.kf_bits = (3 * cpi->buffer_level) >> 2;
}
// If the key frame is actually easier than the average for the
// kf group (which does sometimes happen... eg a blank intro frame)
// Then use an alternate calculation based on the kf error score
// which should give a smaller key frame.
- if (kf_mod_err < kf_group_err / cpi->frames_to_key)
+ if (kf_mod_err < kf_group_err / cpi->twopass.frames_to_key)
{
double alt_kf_grp_bits =
- ((double)cpi->bits_left *
- (kf_mod_err * (double)cpi->frames_to_key) /
- DOUBLE_DIVIDE_CHECK(cpi->modified_error_left));
+ ((double)cpi->twopass.bits_left *
+ (kf_mod_err * (double)cpi->twopass.frames_to_key) /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left));
alt_kf_bits = (int)((double)kf_boost *
(alt_kf_grp_bits / (double)allocation_chunks));
- if (cpi->kf_bits > alt_kf_bits)
+ if (cpi->twopass.kf_bits > alt_kf_bits)
{
- cpi->kf_bits = alt_kf_bits;
+ cpi->twopass.kf_bits = alt_kf_bits;
}
}
// Else if it is much harder than other frames in the group make sure
@@ -2673,29 +2654,29 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
else
{
alt_kf_bits =
- (int)((double)cpi->bits_left *
+ (int)((double)cpi->twopass.bits_left *
(kf_mod_err /
- DOUBLE_DIVIDE_CHECK(cpi->modified_error_left)));
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left)));
- if (alt_kf_bits > cpi->kf_bits)
+ if (alt_kf_bits > cpi->twopass.kf_bits)
{
- cpi->kf_bits = alt_kf_bits;
+ cpi->twopass.kf_bits = alt_kf_bits;
}
}
- cpi->kf_group_bits -= cpi->kf_bits;
- cpi->kf_bits += cpi->min_frame_bandwidth; // Add in the minimum frame allowance
+ cpi->twopass.kf_group_bits -= cpi->twopass.kf_bits;
+ cpi->twopass.kf_bits += cpi->min_frame_bandwidth; // Add in the minimum frame allowance
- cpi->per_frame_bandwidth = cpi->kf_bits; // Peer frame bit target for this frame
- cpi->target_bandwidth = cpi->kf_bits * cpi->output_frame_rate; // Convert to a per second bitrate
+ cpi->per_frame_bandwidth = cpi->twopass.kf_bits; // Peer frame bit target for this frame
+ cpi->target_bandwidth = cpi->twopass.kf_bits * cpi->output_frame_rate; // Convert to a per second bitrate
}
// Note the total error score of the kf group minus the key frame itself
- cpi->kf_group_error_left = (int)(kf_group_err - kf_mod_err);
+ cpi->twopass.kf_group_error_left = (int)(kf_group_err - kf_mod_err);
// Adjust the count of total modified error left.
// The count of bits left is adjusted elsewhere based on real coded frame sizes
- cpi->modified_error_left -= kf_group_err;
+ cpi->twopass.modified_error_left -= kf_group_err;
if (cpi->oxcf.allow_spatial_resampling)
{
@@ -2712,7 +2693,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
double projected_bits_perframe;
double group_iiratio = (kf_group_intra_err - first_frame.intra_error) / (kf_group_coded_err - first_frame.coded_error);
- double err_per_frame = kf_group_err / cpi->frames_to_key;
+ double err_per_frame = kf_group_err / cpi->twopass.frames_to_key;
double bits_per_frame;
double av_bits_per_frame;
double effective_size_ratio;
@@ -2725,7 +2706,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->common.vert_scale = NORMAL;
// Calculate Average bits per frame.
- //av_bits_per_frame = cpi->bits_left/(double)(cpi->total_stats->count - cpi->common.current_video_frame);
+ //av_bits_per_frame = cpi->twopass.bits_left/(double)(cpi->twopass.total_stats->count - cpi->common.current_video_frame);
av_bits_per_frame = cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate);
//if ( av_bits_per_frame < 0.0 )
// av_bits_per_frame = 0.0
@@ -2740,7 +2721,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
// So use the larger of target bitrate for this section or average bitrate for sequence
else
{
- bits_per_frame = cpi->kf_group_bits / cpi->frames_to_key; // This accounts for how hard the section is...
+ bits_per_frame = cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key; // This accounts for how hard the section is...
if (bits_per_frame < av_bits_per_frame) // Don't turn to resampling in easy sections just because they have been assigned a small number of bits
bits_per_frame = av_bits_per_frame;
@@ -2764,12 +2745,12 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
}
// Guess at buffer level at the end of the section
- projected_buffer_level = cpi->buffer_level - (int)((projected_bits_perframe - av_bits_per_frame) * cpi->frames_to_key);
+ projected_buffer_level = cpi->buffer_level - (int)((projected_bits_perframe - av_bits_per_frame) * cpi->twopass.frames_to_key);
if (0)
{
FILE *f = fopen("Subsamle.stt", "a");
- fprintf(f, " %8d %8d %8d %8d %12.0f %8d %8d %8d\n", cpi->common.current_video_frame, kf_q, cpi->common.horiz_scale, cpi->common.vert_scale, kf_group_err / cpi->frames_to_key, (int)(cpi->kf_group_bits / cpi->frames_to_key), new_height, new_width);
+ fprintf(f, " %8d %8d %8d %8d %12.0f %8d %8d %8d\n", cpi->common.current_video_frame, kf_q, cpi->common.horiz_scale, cpi->common.vert_scale, kf_group_err / cpi->twopass.frames_to_key, (int)(cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key), new_height, new_width);
fclose(f);
}
@@ -2788,7 +2769,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
}
else
{
- long long clip_bits = (long long)(cpi->total_stats->count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
+ long long clip_bits = (long long)(cpi->twopass.total_stats->count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
long long over_spend = cpi->oxcf.starting_buffer_level - cpi->buffer_level;
if ((last_kf_resampled && (kf_q > cpi->worst_quality)) || // If triggered last time the threshold for triggering again is reduced
@@ -2826,7 +2807,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
if (0)
{
FILE *f = fopen("Subsamle.stt", "a");
- fprintf(f, "******** %8d %8d %8d %12.0f %8d %8d %8d\n", kf_q, cpi->common.horiz_scale, cpi->common.vert_scale, kf_group_err / cpi->frames_to_key, (int)(cpi->kf_group_bits / cpi->frames_to_key), new_height, new_width);
+ fprintf(f, "******** %8d %8d %8d %12.0f %8d %8d %8d\n", kf_q, cpi->common.horiz_scale, cpi->common.vert_scale, kf_group_err / cpi->twopass.frames_to_key, (int)(cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key), new_height, new_width);
fclose(f);
}
}
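
These hunks reroute the key-frame budgeting through cpi->twopass; the underlying policy is unchanged: cap the key-frame target at three quarters of the buffer level in streaming mode, and when the key frame scores easier than its group average, switch to the smaller error-proportional target so it is not over-allocated. A self-contained sketch of that shape (parameter names are illustrative, not the encoder's API):

    /* Sketch of the key-frame target capping above; names are illustrative. */
    static int cap_kf_target(int kf_bits,          /* boosted target so far          */
                             int buffer_level,     /* current buffer fullness        */
                             int streaming,        /* USAGE_STREAM_FROM_SERVER       */
                             double kf_mod_err,    /* this key frame's error score   */
                             double avg_group_err, /* kf_group_err / frames_to_key   */
                             int alt_kf_bits)      /* error-proportional alternative */
    {
        /* Never let a streamed key frame consume more than 3/4 of the buffer. */
        if (streaming && kf_bits > ((3 * buffer_level) >> 2))
            kf_bits = (3 * buffer_level) >> 2;

        /* An easier-than-average key frame takes the smaller,
         * error-proportional allocation instead. */
        if (kf_mod_err < avg_group_err && kf_bits > alt_kf_bits)
            kf_bits = alt_kf_bits;

        return kf_bits;
    }
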
diff --git a/vp8/encoder/generic/csystemdependent.c b/vp8/encoder/generic/csystemdependent.c
index 58f810d5f..928f559f3 100644
--- a/vp8/encoder/generic/csystemdependent.c
+++ b/vp8/encoder/generic/csystemdependent.c
@@ -100,7 +100,7 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi)
// Pure C:
vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
cpi->rtcd.variance.ssimpf_8x8 = ssim_parms_8x8_c;
cpi->rtcd.variance.ssimpf = ssim_parms_c;
#endif
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 2a7723581..15d92899e 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -75,7 +75,7 @@ static void set_default_lf_deltas(VP8_COMP *cpi);
extern const int vp8_gf_interval_table[101];
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
#include "math.h"
extern double vp8_calc_ssim
@@ -306,11 +306,11 @@ static void dealloc_compressor_data(VP8_COMP *cpi)
cpi->mb.pip = 0;
#if !(CONFIG_REALTIME_ONLY)
- vpx_free(cpi->total_stats);
- cpi->total_stats = 0;
+ vpx_free(cpi->twopass.total_stats);
+ cpi->twopass.total_stats = 0;
- vpx_free(cpi->this_frame_stats);
- cpi->this_frame_stats = 0;
+ vpx_free(cpi->twopass.this_frame_stats);
+ cpi->twopass.this_frame_stats = 0;
#endif
}
@@ -1232,7 +1232,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
}
- if (cpi->sf.optimize_coefficients == 1)
+ if (cpi->sf.optimize_coefficients == 1 && cpi->pass!=1)
cpi->mb.optimize = 1;
else
cpi->mb.optimize = 0;
@@ -1344,15 +1344,15 @@ void vp8_alloc_compressor_data(VP8_COMP *cpi)
cm->mb_rows * cm->mb_cols));
#if !(CONFIG_REALTIME_ONLY)
- vpx_free(cpi->total_stats);
+ vpx_free(cpi->twopass.total_stats);
- cpi->total_stats = vpx_calloc(1, sizeof(FIRSTPASS_STATS));
+ cpi->twopass.total_stats = vpx_calloc(1, sizeof(FIRSTPASS_STATS));
- vpx_free(cpi->this_frame_stats);
+ vpx_free(cpi->twopass.this_frame_stats);
- cpi->this_frame_stats = vpx_calloc(1, sizeof(FIRSTPASS_STATS));
+ cpi->twopass.this_frame_stats = vpx_calloc(1, sizeof(FIRSTPASS_STATS));
- if(!cpi->total_stats || !cpi->this_frame_stats)
+ if(!cpi->twopass.total_stats || !cpi->twopass.this_frame_stats)
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate firstpass stats");
#endif
@@ -1415,7 +1415,7 @@ void vp8_new_frame_rate(VP8_COMP *cpi, double framerate)
cpi->max_gf_interval = 12;
// Extended interval for genuinely static scenes
- cpi->static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
+ cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
// Special conditions when altr ref frame enabled in lagged compress mode
if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames)
@@ -1423,12 +1423,12 @@ void vp8_new_frame_rate(VP8_COMP *cpi, double framerate)
if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1)
cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
- if (cpi->static_scene_max_gf_interval > cpi->oxcf.lag_in_frames - 1)
- cpi->static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
+ if (cpi->twopass.static_scene_max_gf_interval > cpi->oxcf.lag_in_frames - 1)
+ cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
}
- if ( cpi->max_gf_interval > cpi->static_scene_max_gf_interval )
- cpi->max_gf_interval = cpi->static_scene_max_gf_interval;
+ if ( cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval )
+ cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
}
@@ -1843,7 +1843,7 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
// Set reference frame sign bias for ALTREF frame to 1 (for now)
cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
- cpi->gf_decay_rate = 0;
+ cpi->twopass.gf_decay_rate = 0;
cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
cpi->gold_is_last = 0 ;
@@ -1917,8 +1917,8 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
cpi->source_alt_ref_active = FALSE;
cpi->common.refresh_alt_ref_frame = 0;
- cpi->b_calculate_psnr = CONFIG_PSNR;
-#if CONFIG_PSNR
+ cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
+#if CONFIG_INTERNAL_STATS
cpi->b_calculate_ssimg = 0;
cpi->count = 0;
@@ -1972,7 +1972,7 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
cpi->rate_correction_factor = 1.0;
cpi->key_frame_rate_correction_factor = 1.0;
cpi->gf_rate_correction_factor = 1.0;
- cpi->est_max_qcorrection_factor = 1.0;
+ cpi->twopass.est_max_qcorrection_factor = 1.0;
cpi->mb.mvcost[0] = &cpi->mb.mvcosts[0][mv_max+1];
cpi->mb.mvcost[1] = &cpi->mb.mvcosts[1][mv_max+1];
@@ -2008,8 +2008,8 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
size_t packet_sz = sizeof(FIRSTPASS_STATS);
int packets = oxcf->two_pass_stats_in.sz / packet_sz;
- cpi->stats_in = oxcf->two_pass_stats_in.buf;
- cpi->stats_in_end = (void*)((char *)cpi->stats_in
+ cpi->twopass.stats_in = oxcf->two_pass_stats_in.buf;
+ cpi->twopass.stats_in_end = (void*)((char *)cpi->twopass.stats_in
+ (packets - 1) * packet_sz);
vp8_init_second_pass(cpi);
}
@@ -2093,8 +2093,6 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
cpi->diamond_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, diamond_search);
cpi->refining_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, refining_search);
- cpi->ready_for_new_frame = 1;
-
// make sure frame 1 is okay
cpi->error_bins[0] = cpi->common.MBs;
@@ -2137,7 +2135,7 @@ void vp8_remove_compressor(VP8_PTR *ptr)
print_mode_context();
#endif
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
if (cpi->pass != 1)
{
@@ -2653,45 +2651,6 @@ static void resize_key_frame(VP8_COMP *cpi)
}
-static void set_quantizer(VP8_COMP *cpi, int Q)
-{
- VP8_COMMON *cm = &cpi->common;
- MACROBLOCKD *mbd = &cpi->mb.e_mbd;
- int update = 0;
- int new_delta_q;
- cm->base_qindex = Q;
-
- /* if any of the delta_q values are changing update flag has to be set */
- /* currently only y2dc_delta_q may change */
-
- cm->y1dc_delta_q = 0;
- cm->y2ac_delta_q = 0;
- cm->uvdc_delta_q = 0;
- cm->uvac_delta_q = 0;
-
- if (Q < 4)
- {
- new_delta_q = 4-Q;
- }
- else
- new_delta_q = 0;
-
- update |= cm->y2dc_delta_q != new_delta_q;
- cm->y2dc_delta_q = new_delta_q;
-
-
- // Set Segment specific quatizers
- mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0];
- mbd->segment_feature_data[MB_LVL_ALT_Q][1] = cpi->segment_feature_data[MB_LVL_ALT_Q][1];
- mbd->segment_feature_data[MB_LVL_ALT_Q][2] = cpi->segment_feature_data[MB_LVL_ALT_Q][2];
- mbd->segment_feature_data[MB_LVL_ALT_Q][3] = cpi->segment_feature_data[MB_LVL_ALT_Q][3];
-
- /* quantizer has to be reinitialized for any delta_q changes */
- if(update)
- vp8cx_init_quantizer(cpi);
-
-}
-
static void update_alt_ref_frame_and_stats(VP8_COMP *cpi)
{
VP8_COMMON *cm = &cpi->common;
@@ -3034,7 +2993,7 @@ static void Pass1Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest,
(void) size;
(void) dest;
(void) frame_flags;
- set_quantizer(cpi, 26);
+ vp8_set_quantizer(cpi, 26);
scale_and_extend_source(cpi->un_scaled_source, cpi);
vp8_first_pass(cpi);
@@ -3265,8 +3224,8 @@ static void encode_frame_to_data_rate
{
if (cpi->common.refresh_alt_ref_frame)
{
- cpi->per_frame_bandwidth = cpi->gf_bits; // Per frame bit target for the alt ref frame
- cpi->target_bandwidth = cpi->gf_bits * cpi->output_frame_rate; // per second target bitrate
+ cpi->per_frame_bandwidth = cpi->twopass.gf_bits; // Per frame bit target for the alt ref frame
+ cpi->target_bandwidth = cpi->twopass.gf_bits * cpi->output_frame_rate; // per second target bitrate
}
}
else
@@ -3427,7 +3386,7 @@ static void encode_frame_to_data_rate
cm->current_video_frame++;
cpi->frames_since_key++;
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
cpi->count ++;
#endif
@@ -3466,15 +3425,15 @@ static void encode_frame_to_data_rate
buff_lvl_step = (cpi->oxcf.maximum_buffer_size - cpi->oxcf.optimal_buffer_level) / Adjustment;
if (buff_lvl_step)
- {
Adjustment = (cpi->buffer_level - cpi->oxcf.optimal_buffer_level) / buff_lvl_step;
- cpi->active_worst_quality -= Adjustment;
- }
- }
- else
- {
- cpi->active_worst_quality -= Adjustment;
+ else
+ Adjustment = 0;
}
+
+ cpi->active_worst_quality -= Adjustment;
+
+ if(cpi->active_worst_quality < cpi->active_best_quality)
+ cpi->active_worst_quality = cpi->active_best_quality;
}
}
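
The restructured block folds the two earlier branches into one path: the adjustment is rescaled by how far the buffer sits above its optimal level (or zeroed if the step size collapses), applied once, and the result is floored at active_best_quality. Roughly, as a standalone sketch with illustrative names:

    /* Standalone sketch of the simplified adjustment; names are illustrative. */
    static int relax_active_worst(int active_worst, int active_best,
                                  int buffer_level, int optimal_level,
                                  int maximum_size, int adjustment)
    {
        int buff_lvl_step = 0;

        if (adjustment)
            buff_lvl_step = (maximum_size - optimal_level) / adjustment;

        /* Rescale by how far the buffer sits above its optimal level,
         * or drop the adjustment entirely if the step collapses to zero. */
        adjustment = buff_lvl_step ?
                     (buffer_level - optimal_level) / buff_lvl_step : 0;

        active_worst -= adjustment;

        if (active_worst < active_best)
            active_worst = active_best;

        return active_worst;
    }
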
@@ -3694,7 +3653,7 @@ static void encode_frame_to_data_rate
Q = 127;
*/
- set_quantizer(cpi, Q);
+ vp8_set_quantizer(cpi, Q);
this_q = Q;
// setup skip prob for costing in mode/mv decision
@@ -4028,7 +3987,7 @@ static void encode_frame_to_data_rate
{
vp8_restore_coding_context(cpi);
loop_count++;
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
cpi->tot_recode_hits++;
#endif
}
@@ -4060,7 +4019,7 @@ static void encode_frame_to_data_rate
// Special case code to reduce pulsing when key frames are forced at a
// fixed interval. Note the reconstruction error if it is the frame before
// the force key frame
- if ( cpi->next_key_frame_forced && (cpi->frames_to_key == 0) )
+ if ( cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0) )
{
cpi->ambient_err = vp8_calc_ss_err(cpi->Source,
&cm->yv12_fb[cm->new_fb_idx],
@@ -4266,17 +4225,17 @@ static void encode_frame_to_data_rate
// Update bits left to the kf and gf groups to account for overshoot or undershoot on these frames
if (cm->frame_type == KEY_FRAME)
{
- cpi->kf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
+ cpi->twopass.kf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
- if (cpi->kf_group_bits < 0)
- cpi->kf_group_bits = 0 ;
+ if (cpi->twopass.kf_group_bits < 0)
+ cpi->twopass.kf_group_bits = 0 ;
}
else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame)
{
- cpi->gf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
+ cpi->twopass.gf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
- if (cpi->gf_group_bits < 0)
- cpi->gf_group_bits = 0 ;
+ if (cpi->twopass.gf_group_bits < 0)
+ cpi->twopass.gf_group_bits = 0 ;
}
if (cm->frame_type != KEY_FRAME)
@@ -4302,7 +4261,7 @@ static void encode_frame_to_data_rate
}
}
-#if 0 && CONFIG_PSNR
+#if 0 && CONFIG_INTERNAL_STATS
{
FILE *f = fopen("tmp.stt", "a");
@@ -4323,7 +4282,7 @@ static void encode_frame_to_data_rate
//cpi->avg_frame_qindex, cpi->zbin_over_quant,
cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
cm->frame_type, cpi->gfu_boost,
- cpi->est_max_qcorrection_factor, (int)cpi->bits_left,
+ cpi->twopass.est_max_qcorrection_factor, (int)cpi->bits_left,
cpi->total_coded_error_left,
(double)cpi->bits_left / cpi->total_coded_error_left,
cpi->tot_recode_hits);
@@ -4342,7 +4301,7 @@ static void encode_frame_to_data_rate
//cpi->avg_frame_qindex, cpi->zbin_over_quant,
cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
cm->frame_type, cpi->gfu_boost,
- cpi->est_max_qcorrection_factor, (int)cpi->bits_left,
+ cpi->twopass.est_max_qcorrection_factor, (int)cpi->bits_left,
cpi->total_coded_error_left, cpi->tot_recode_hits);
fclose(f);
@@ -4546,13 +4505,13 @@ static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest,
vp8_second_pass(cpi);
encode_frame_to_data_rate(cpi, size, dest, frame_flags);
- cpi->bits_left -= 8 * *size;
+ cpi->twopass.bits_left -= 8 * *size;
if (!cpi->common.refresh_alt_ref_frame)
{
double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
*cpi->oxcf.two_pass_vbrmin_section / 100);
- cpi->bits_left += (long long)(two_pass_min_rate / cpi->oxcf.frame_rate);
+ cpi->twopass.bits_left += (long long)(two_pass_min_rate / cpi->oxcf.frame_rate);
}
}
#endif
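
Pass2Encode keeps the same budget bookkeeping, now against cpi->twopass.bits_left: each coded frame is charged at 8 bits per byte, and non-ARF frames credit back the guaranteed two-pass VBR minimum for one frame interval. As a standalone sketch (names illustrative):

    /* Sketch of the two-pass bit-budget bookkeeping above; not the encoder's API. */
    static long long update_bits_left(long long bits_left,
                                      unsigned long coded_bytes,
                                      int is_alt_ref_frame,
                                      double target_bandwidth,      /* bits/sec */
                                      double vbr_min_section_pct,
                                      double frame_rate)
    {
        bits_left -= 8 * (long long)coded_bytes;

        if (!is_alt_ref_frame)
        {
            double two_pass_min_rate =
                target_bandwidth * vbr_min_section_pct / 100.0;

            bits_left += (long long)(two_pass_min_rate / frame_rate);
        }

        return bits_left;
    }
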
@@ -4686,10 +4645,10 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
*size = 0;
#if !(CONFIG_REALTIME_ONLY)
- if (flush && cpi->pass == 1 && !cpi->first_pass_done)
+ if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done)
{
vp8_end_first_pass(cpi); /* get last stats packet */
- cpi->first_pass_done = 1;
+ cpi->twopass.first_pass_done = 1;
}
#endif
@@ -4857,8 +4816,6 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
}
- cpi->ready_for_new_frame = 1;
-
vpx_usec_timer_mark(&cmptimer);
cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
@@ -4867,7 +4824,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
generate_psnr_packet(cpi);
}
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
if (cpi->pass != 1)
{
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index 8b17178b3..663786004 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -240,7 +240,7 @@ enum
BLOCK_MAX_SEGMENTS
};
-typedef struct
+typedef struct VP8_COMP
{
DECLARE_ALIGNED(16, short, Y1quant[QINDEX_RANGE][16]);
@@ -313,10 +313,6 @@ typedef struct
int rd_thresh_mult[MAX_MODES];
int rd_baseline_thresh[MAX_MODES];
int rd_threshes[MAX_MODES];
- int mvcostbase;
- int mvcostmultiplier;
- int subseqblockweight;
- int errthresh;
int RDMULT;
int RDDIV ;
@@ -329,16 +325,6 @@ typedef struct
long long intra_error;
long long last_intra_error;
-#if 0
- // Experimental RD code
- long long frame_distortion;
- long long last_frame_distortion;
-#endif
-
- int last_mb_distortion;
-
- int frames_since_auto_filter;
-
int this_frame_target;
int projected_frame_size;
int last_q[2]; // Separate values for Intra/Inter
@@ -346,37 +332,18 @@ typedef struct
double rate_correction_factor;
double key_frame_rate_correction_factor;
double gf_rate_correction_factor;
- double est_max_qcorrection_factor;
int frames_till_gf_update_due; // Count down till next GF
int current_gf_interval; // GF interval chosen when we coded the last GF
int gf_overspend_bits; // Total bits overspent because of GF boost (cumulative)
- int gf_group_bits; // Projected Bits available for a group of frames including 1 GF or ARF
- int gf_bits; // Bits for the golden frame or ARF - 2 pass only
- int mid_gf_extra_bits; // A few extra bits for the frame half way between two gfs.
-
- // Projected total bits available for a key frame group of frames
- long long kf_group_bits;
-
- // Error score of frames still to be coded in kf group
- long long kf_group_error_left;
-
- // Bits for the key frame in a key frame group - 2 pass only
- int kf_bits;
-
int non_gf_bitrate_adjustment; // Used in the few frames following a GF to recover the extra bits spent in that GF
- int initial_gf_use; // percentage use of gf 2 frames after gf
-
- int gf_group_error_left; // Remaining error from uncoded frames in a gf group. Two pass use only
int kf_overspend_bits; // Extra bits spent on key frames that need to be recovered on inter frames
int kf_bitrate_adjustment; // Current number of bits to try and recover on each inter frame.
int max_gf_interval;
- int static_scene_max_gf_interval;
int baseline_gf_interval;
- int gf_decay_rate;
int active_arnr_frames; // <= cpi->oxcf.arnr_max_frames
INT64 key_frame_count;
@@ -384,7 +351,6 @@ typedef struct
int per_frame_bandwidth; // Current section per frame bandwidth target
int av_per_frame_bandwidth; // Average frame size target for clip
int min_frame_bandwidth; // Minimum allocation that should be used for any frame
- int last_key_frame_size;
int inter_frame_target;
double output_frame_rate;
long long last_time_stamp_seen;
@@ -422,8 +388,6 @@ typedef struct
int active_best_quality;
int cq_target_quality;
- int maxq_max_limit;
- int maxq_min_limit;
int drop_frames_allowed; // Are we permitted to drop frames?
int drop_frame; // Drop this frame?
@@ -443,39 +407,12 @@ typedef struct
vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [vp8_coef_tokens-1];
unsigned int frame_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [vp8_coef_tokens-1][2];
- /* Second compressed data partition contains coefficient data. */
-
- unsigned char *output_partition2;
- size_t output_partition2size;
-
- int frames_to_key;
int gfu_boost;
int kf_boost;
int last_boost;
- double total_error_left;
- double total_intra_error_left;
- double total_coded_error_left;
- double start_tot_err_left;
- double min_error;
- double kf_intra_err_min;
- double gf_intra_err_min;
-
- double modified_error_total;
- double modified_error_used;
- double modified_error_left;
- double clip_bpe;
- double observed_bpe;
-
- double avg_iiratio;
int target_bandwidth;
- long long bits_left;
- long long clip_bits_total;
- FIRSTPASS_STATS *total_stats;
- FIRSTPASS_STATS *this_frame_stats;
- FIRSTPASS_STATS *stats_in, *stats_in_end;
struct vpx_codec_pkt_list *output_pkt_list;
- int first_pass_done;
#if 0
// Experimental code for lagged and one pass
@@ -499,7 +436,6 @@ typedef struct
int goldfreq;
int auto_worst_q;
int cpu_used;
- int chroma_boost;
int horiz_scale;
int vert_scale;
int pass;
@@ -517,31 +453,11 @@ typedef struct
int this_frame_percent_intra;
int last_frame_percent_intra;
- int last_key_frame_q;
- int last_kffilt_lvl;
-
int ref_frame_flags;
SPEED_FEATURES sf;
int error_bins[1024];
- int inter_lvl;
- int intra_lvl;
- int motion_lvl;
- int motion_speed;
- int motion_var;
- unsigned int next_iiratio;
- unsigned int this_iiratio;
- int this_frame_modified_error;
-
- double norm_intra_err_per_mb;
- double norm_inter_err_per_mb;
- double norm_iidiff_per_mb;
-
- int last_best_mode_index; // Record of mode index chosen for previous macro block.
- int last_auto_filt_val;
- int last_auto_filt_q;
-
// Data used for real time conferencing mode to help determine if it would be good to update the gf
int inter_zz_count;
int gf_bad_count;
@@ -549,10 +465,6 @@ typedef struct
int skip_true_count;
int skip_false_count;
- int alt_qcount;
-
- int ready_for_new_frame;
-
unsigned char *segmentation_map;
signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; // Segment data (can be deltas or absolute values)
int segment_encode_breakout[MAX_MB_SEGMENTS]; // segment threshold for encode breakout
@@ -602,14 +514,49 @@ typedef struct
unsigned int time_pick_lpf;
unsigned int time_encode_mb_row;
- unsigned int tempdata1;
- unsigned int tempdata2;
-
int base_skip_false_prob[128];
- unsigned int section_intra_rating;
-
- double section_max_qfactor;
+ struct twopass_rc
+ {
+ unsigned int section_intra_rating;
+ double section_max_qfactor;
+ unsigned int next_iiratio;
+ unsigned int this_iiratio;
+ FIRSTPASS_STATS *total_stats;
+ FIRSTPASS_STATS *this_frame_stats;
+ FIRSTPASS_STATS *stats_in, *stats_in_end;
+ int first_pass_done;
+ long long bits_left;
+ long long clip_bits_total;
+ double avg_iiratio;
+ double modified_error_total;
+ double modified_error_used;
+ double modified_error_left;
+ double total_error_left;
+ double total_intra_error_left;
+ double total_coded_error_left;
+ double start_tot_err_left;
+ double kf_intra_err_min;
+ double gf_intra_err_min;
+ int frames_to_key;
+ int maxq_max_limit;
+ int maxq_min_limit;
+ int gf_decay_rate;
+ int static_scene_max_gf_interval;
+ int kf_bits;
+ int gf_group_error_left; // Remaining error from uncoded frames in a gf group. Two pass use only
+
+ // Projected total bits available for a key frame group of frames
+ long long kf_group_bits;
+
+ // Error score of frames still to be coded in kf group
+ long long kf_group_error_left;
+
+ int gf_group_bits; // Projected Bits available for a group of frames including 1 GF or ARF
+ int gf_bits; // Bits for the golden frame or ARF - 2 pass only
+ int mid_gf_extra_bits; // A few extra bits for the frame half way between two gfs.
+ double est_max_qcorrection_factor;
+ } twopass;
#if CONFIG_RUNTIME_CPU_DETECT
VP8_ENCODER_RTCD rtcd;
@@ -620,7 +567,7 @@ typedef struct
int fixed_divide[512];
#endif
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
int count;
double total_y;
double total_u;
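
The bulk of this header change collects the two-pass rate-control state that used to sit loose in the compressor struct into one nested twopass_rc member (a few unused leftovers such as ready_for_new_frame and last_best_mode_index are dropped outright). The access pattern in miniature, with a heavily trimmed struct for illustration:

    /* Heavily trimmed illustration of the consolidation above. */
    struct twopass_rc_sketch
    {
        long long bits_left;
        long long kf_group_bits;
        int       frames_to_key;
        int       kf_bits;
    };

    struct compressor_sketch
    {
        struct twopass_rc_sketch twopass;
        /* ...single-pass and common state stays at the top level... */
    };

    /* Call sites change shape from cpi->kf_bits to cpi->twopass.kf_bits: */
    static void reset_kf_budget(struct compressor_sketch *cpi)
    {
        cpi->twopass.kf_bits = 0;
        cpi->twopass.frames_to_key = 0;
    }
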
diff --git a/vp8/encoder/pickinter.c b/vp8/encoder/pickinter.c
index 1785edb1c..b47c7b5f8 100644
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -170,8 +170,7 @@ static int get_prediction_error(BLOCK *be, BLOCKD *b, const vp8_variance_rtcd_vt
static int pick_intra4x4block(
const VP8_ENCODER_RTCD *rtcd,
MACROBLOCK *x,
- BLOCK *be,
- BLOCKD *b,
+ int ib,
B_PREDICTION_MODE *best_mode,
B_PREDICTION_MODE above,
B_PREDICTION_MODE left,
@@ -179,6 +178,9 @@ static int pick_intra4x4block(
int *bestrate,
int *bestdistortion)
{
+
+ BLOCKD *b = &x->e_mbd.block[ib];
+ BLOCK *be = &x->block[ib];
B_PREDICTION_MODE mode;
int best_rd = INT_MAX; // 1<<30
int rate;
@@ -214,8 +216,7 @@ static int pick_intra4x4block(
}
b->bmi.mode = (B_PREDICTION_MODE)(*best_mode);
- vp8_encode_intra4x4block(rtcd, x, be, b, b->bmi.mode);
-
+ vp8_encode_intra4x4block(rtcd, x, ib);
return best_rd;
}
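
pick_intra4x4block now receives the 4x4 block index and derives both block views itself, which is what lets it forward to the index-based vp8_encode_intra4x4block. Schematically (the types are the encoder's own, from block.h/blockd.h):

    /* Inside the helper, given the block index ib (0..15): */
    BLOCKD *b  = &x->e_mbd.block[ib];   /* reconstruction-side view      */
    BLOCK  *be = &x->block[ib];         /* source/coefficient-side view  */

    /* Callers shrink to: pick_intra4x4block(rtcd, mb, i, &best_mode, A, L, &r, &d); */
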
@@ -245,8 +246,7 @@ int vp8_pick_intra4x4mby_modes
B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(d);
- pick_intra4x4block(rtcd, mb, mb->block + i, xd->block + i,
- &best_mode, A, L, &r, &d);
+ pick_intra4x4block(rtcd, mb, i, &best_mode, A, L, &r, &d);
cost += r;
distortion += d;
@@ -426,24 +426,29 @@ void vp8_pick_intra_mbuv_mode(MACROBLOCK *mb)
}
-static void vp8_update_mvcount(VP8_COMP *cpi, MACROBLOCKD *xd, int_mv *best_ref_mv)
+static void update_mvcount(VP8_COMP *cpi, MACROBLOCKD *xd, int_mv *best_ref_mv)
{
- /* Split MV modes currently not supported when RD is nopt enabled, therefore, only need to modify MVcount in NEWMV mode. */
+ /* Split MV modes currently not supported when RD is not enabled,
+ * therefore, only need to modify MVcount in NEWMV mode. */
if (xd->mode_info_context->mbmi.mode == NEWMV)
{
- cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv->as_mv.col) >> 1)]++;
+ cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row -
+ best_ref_mv->as_mv.row) >> 1)]++;
+ cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col -
+ best_ref_mv->as_mv.col) >> 1)]++;
}
}
-void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra)
+void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
+ int recon_uvoffset, int *returnrate,
+ int *returndistortion, int *returnintra)
{
BLOCK *b = &x->block[0];
BLOCKD *d = &x->e_mbd.block[0];
MACROBLOCKD *xd = &x->e_mbd;
B_MODE_INFO best_bmodes[16];
MB_MODE_INFO best_mbmode;
- PARTITION_INFO best_partition;
+
int_mv best_ref_mv;
int_mv mode_mv[MB_MODE_COUNT];
MB_PREDICTION_MODE this_mode;
@@ -878,9 +883,8 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int re
*returndistortion = distortion2;
best_rd = this_rd;
vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
- vpx_memcpy(&best_partition, x->partition_info, sizeof(PARTITION_INFO));
- if (this_mode == B_PRED || this_mode == SPLITMV)
+ if (this_mode == B_PRED)
for (i = 0; i < 16; i++)
{
vpx_memcpy(&best_bmodes[i], &x->e_mbd.block[i].bmi, sizeof(B_MODE_INFO));
@@ -919,9 +923,6 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int re
cpi->rd_threshes[best_mode_index] = (cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index];
}
- // Keep a record of best mode index for use in next loop
- cpi->last_best_mode_index = best_mode_index;
-
if (best_mbmode.mode <= B_PRED)
{
x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
@@ -952,7 +953,6 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int re
best_mbmode.partitioning = 0;
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
- vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
for (i = 0; i < 16; i++)
{
@@ -963,12 +963,10 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int re
return;
}
-
// macroblock modes
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
- vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
- if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED || x->e_mbd.mode_info_context->mbmi.mode == SPLITMV)
+ if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
for (i = 0; i < 16; i++)
{
vpx_memcpy(&x->e_mbd.block[i].bmi, &best_bmodes[i], sizeof(B_MODE_INFO));
@@ -979,7 +977,5 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int re
vp8_set_mbmode_and_mvs(x, x->e_mbd.mode_info_context->mbmi.mode, &best_bmodes[0].mv);
}
- x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
-
- vp8_update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
+ update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
}
diff --git a/vp8/encoder/picklpf.c b/vp8/encoder/picklpf.c
index d294af6c0..4f9d4126e 100644
--- a/vp8/encoder/picklpf.c
+++ b/vp8/encoder/picklpf.c
@@ -142,7 +142,7 @@ static int get_max_filter_level(VP8_COMP *cpi, int base_qindex)
// with lots of intra coming in.
int max_filter_level = MAX_LOOP_FILTER ;//* 3 / 4;
- if (cpi->section_intra_rating > 8)
+ if (cpi->twopass.section_intra_rating > 8)
max_filter_level = MAX_LOOP_FILTER * 3 / 4;
(void) cpi;
@@ -377,8 +377,8 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; //PGW change 12/12/06 for small images
// jbb chg: 20100118 - in sections with lots of new material coming in don't bias as much to a low filter value
- if (cpi->section_intra_rating < 20)
- Bias = Bias * cpi->section_intra_rating / 20;
+ if (cpi->twopass.section_intra_rating < 20)
+ Bias = Bias * cpi->twopass.section_intra_rating / 20;
filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step);
@@ -474,8 +474,4 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
}
cm->filter_level = filt_best;
- cpi->last_auto_filt_val = filt_best;
- cpi->last_auto_filt_q = cm->base_qindex;
-
- cpi->frames_since_auto_filter = 0;
}
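
Both uses of section_intra_rating simply move under cpi->twopass; the policy is proportional damping: sections with a low intra rating (lots of new material coming in) shrink the bias toward a low filter value, and a rating above 8 separately drops the maximum filter level to three quarters of MAX_LOOP_FILTER. A worked illustration (the numbers are made up):

    /* Worked illustration of the bias damping above; values are made up. */
    static int damp_low_filter_bias(int bias, int section_intra_rating)
    {
        if (section_intra_rating < 20)
            bias = bias * section_intra_rating / 20;   /* e.g. 640 * 8 / 20 = 256 */

        return bias;
    }
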
diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c
index 86ed267fb..49e8e1b9b 100644
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -12,8 +12,9 @@
#include <math.h>
#include "vpx_mem/vpx_mem.h"
+#include "onyx_int.h"
#include "quantize.h"
-#include "vp8/common/entropy.h"
+#include "vp8/common/quant_common.h"
#define EXACT_QUANT
@@ -299,3 +300,418 @@ void vp8_quantize_mbuv(MACROBLOCK *x)
for (i = 16; i < 24; i++)
x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}
+
+
+static const int qrounding_factors[129] =
+{
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48
+};
+
+
+static const int qzbin_factors[129] =
+{
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80
+};
+
+
+static const int qrounding_factors_y2[129] =
+{
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48
+};
+
+
+static const int qzbin_factors_y2[129] =
+{
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80
+};
+
+
+#define EXACT_QUANT
+#ifdef EXACT_QUANT
+static void invert_quant(int improved_quant, short *quant,
+ unsigned char *shift, short d)
+{
+ if(improved_quant)
+ {
+ unsigned t;
+ int l;
+ t = d;
+ for(l = 0; t > 1; l++)
+ t>>=1;
+ t = 1 + (1<<(16+l))/d;
+ *quant = (short)(t - (1<<16));
+ *shift = l;
+ }
+ else
+ {
+ *quant = (1 << 16) / d;
+ *shift = 0;
+ }
+}
+
+
+void vp8cx_init_quantizer(VP8_COMP *cpi)
+{
+ int i;
+ int quant_val;
+ int Q;
+
+ int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
+
+ for (Q = 0; Q < QINDEX_RANGE; Q++)
+ {
+ // dc values
+ quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
+ cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
+ cpi->Y1quant_shift[Q] + 0, quant_val);
+ cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
+ cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
+ cpi->Y2quant_shift[Q] + 0, quant_val);
+ cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
+ cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
+ cpi->UVquant_shift[Q] + 0, quant_val);
+ cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ // all the ac values
+ for (i = 1; i < 16; i++)
+ {
+ int rc = vp8_default_zig_zag1d[i];
+
+ quant_val = vp8_ac_yquant(Q);
+ cpi->Y1quant_fast[Q][rc] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + rc,
+ cpi->Y1quant_shift[Q] + rc, quant_val);
+ cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+ quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
+ cpi->Y2quant_fast[Q][rc] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + rc,
+ cpi->Y2quant_shift[Q] + rc, quant_val);
+ cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+ quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
+ cpi->UVquant_fast[Q][rc] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + rc,
+ cpi->UVquant_shift[Q] + rc, quant_val);
+ cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+ }
+ }
+}
+#else
+void vp8cx_init_quantizer(VP8_COMP *cpi)
+{
+ int i;
+ int quant_val;
+ int Q;
+
+ int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
+
+ for (Q = 0; Q < QINDEX_RANGE; Q++)
+ {
+ // dc values
+ quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
+ cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
+ cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
+ cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
+ cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
+ cpi->UVquant[Q][0] = (1 << 16) / quant_val;
+ cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ // all the ac values
+ for (i = 1; i < 16; i++)
+ {
+ int rc = vp8_default_zig_zag1d[i];
+
+ quant_val = vp8_ac_yquant(Q);
+ cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
+ cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+ quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
+ cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
+ cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+ quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
+ cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
+ cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+ }
+ }
+}
+#endif
+
+
+void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
+{
+ int i;
+ int QIndex;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int zbin_extra;
+
+ // Select the baseline MB Q index.
+ if (xd->segmentation_enabled)
+ {
+ // Abs Value
+ if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
+
+ QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
+ // Delta Value
+ else
+ {
+ QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
+ QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; // Clamp to valid range
+ }
+ }
+ else
+ QIndex = cpi->common.base_qindex;
+
+ // Y
+ zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
+ ( cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ for (i = 0; i < 16; i++)
+ {
+ x->block[i].quant = cpi->Y1quant[QIndex];
+ x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
+ x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
+ x->block[i].zbin = cpi->Y1zbin[QIndex];
+ x->block[i].round = cpi->Y1round[QIndex];
+ x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
+ x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // UV
+ zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
+ ( cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ for (i = 16; i < 24; i++)
+ {
+ x->block[i].quant = cpi->UVquant[QIndex];
+ x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
+ x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
+ x->block[i].zbin = cpi->UVzbin[QIndex];
+ x->block[i].round = cpi->UVround[QIndex];
+ x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
+ x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // Y2
+ zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
+ ( (cpi->zbin_over_quant / 2) +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
+ x->block[24].quant = cpi->Y2quant[QIndex];
+ x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
+ x->block[24].zbin = cpi->Y2zbin[QIndex];
+ x->block[24].round = cpi->Y2round[QIndex];
+ x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
+ x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
+ x->block[24].zbin_extra = (short)zbin_extra;
+
+ /* save this macroblock QIndex for vp8_update_zbin_extra() */
+ x->q_index = QIndex;
+}
+
+
+void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
+{
+ int i;
+ int QIndex = x->q_index;
+ int zbin_extra;
+
+ // Y
+ zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
+ ( cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+ for (i = 0; i < 16; i++)
+ {
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // UV
+ zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
+ ( cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ for (i = 16; i < 24; i++)
+ {
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // Y2
+ zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
+ ( (cpi->zbin_over_quant / 2) +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ x->block[24].zbin_extra = (short)zbin_extra;
+}
+
+
+void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
+{
+ // Clear Zbin mode boost for default case
+ cpi->zbin_mode_boost = 0;
+
+ // MB level quantizer setup
+ vp8cx_mb_init_quantizer(cpi, &cpi->mb);
+}
+
+
+void vp8_set_quantizer(struct VP8_COMP *cpi, int Q)
+{
+ VP8_COMMON *cm = &cpi->common;
+ MACROBLOCKD *mbd = &cpi->mb.e_mbd;
+ int update = 0;
+ int new_delta_q;
+ cm->base_qindex = Q;
+
+ /* if any of the delta_q values are changing update flag has to be set */
+ /* currently only y2dc_delta_q may change */
+
+ cm->y1dc_delta_q = 0;
+ cm->y2ac_delta_q = 0;
+ cm->uvdc_delta_q = 0;
+ cm->uvac_delta_q = 0;
+
+ if (Q < 4)
+ {
+ new_delta_q = 4-Q;
+ }
+ else
+ new_delta_q = 0;
+
+ update |= cm->y2dc_delta_q != new_delta_q;
+ cm->y2dc_delta_q = new_delta_q;
+
+
+ // Set Segment specific quantizers
+ mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0];
+ mbd->segment_feature_data[MB_LVL_ALT_Q][1] = cpi->segment_feature_data[MB_LVL_ALT_Q][1];
+ mbd->segment_feature_data[MB_LVL_ALT_Q][2] = cpi->segment_feature_data[MB_LVL_ALT_Q][2];
+ mbd->segment_feature_data[MB_LVL_ALT_Q][3] = cpi->segment_feature_data[MB_LVL_ALT_Q][3];
+
+ /* quantizer has to be reinitialized for any delta_q changes */
+ if(update)
+ vp8cx_init_quantizer(cpi);
+
+}
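
The quantizer setup previously living in onyx_if.c now sits next to the tables it fills, and the static set_quantizer becomes the exported vp8_set_quantizer. The one non-mechanical piece worth reading closely is invert_quant: in improved mode it encodes a fixed-point reciprocal of the dequant value as a (quant, shift) pair, so the per-coefficient divide can become a multiply plus shifts. A small standalone program walking through what the pair means (a reading of the code above, not extra encoder API):

    /* Standalone walk-through of the (quant, shift) pair produced by
     * invert_quant() in improved mode. */
    #include <stdio.h>

    int main(void)
    {
        short d = 4;                          /* a dequant value            */
        unsigned t = d;
        int l = 0, x = 100;

        while (t > 1) { t >>= 1; l++; }       /* l = floor(log2(d)) = 2     */
        t = 1 + (1u << (16 + l)) / d;         /* 65537                      */

        {
            short quant = (short)(t - (1 << 16));              /* 1          */
            int   q     = (((x * quant) >> 16) + x) >> l;      /* 100/4 = 25 */

            printf("%d / %d ~= %d  (quant=%d, shift=%d)\n", x, d, q, quant, l);
        }
        return 0;
    }

For dequant values that are not powers of two the pair gives an approximation of x / d; the round and zbin tables built above supply the rounding offsets and dead-zone around it.
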
diff --git a/vp8/encoder/quantize.h b/vp8/encoder/quantize.h
index e4c32a570..d9a041071 100644
--- a/vp8/encoder/quantize.h
+++ b/vp8/encoder/quantize.h
@@ -55,4 +55,11 @@ extern void vp8_quantize_mb(MACROBLOCK *x);
extern void vp8_quantize_mbuv(MACROBLOCK *x);
extern void vp8_quantize_mby(MACROBLOCK *x);
+struct VP8_COMP;
+extern void vp8_set_quantizer(struct VP8_COMP *cpi, int Q);
+extern void vp8cx_frame_init_quantizer(struct VP8_COMP *cpi);
+extern void vp8_update_zbin_extra(struct VP8_COMP *cpi, MACROBLOCK *x);
+extern void vp8cx_mb_init_quantizer(struct VP8_COMP *cpi, MACROBLOCK *x);
+extern void vp8cx_init_quantizer(struct VP8_COMP *cpi);
+
#endif
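
quantize.h can declare these entry points without dragging in onyx_int.h because it only names the compressor struct; that is also why the typedef in onyx_int.h gains a struct tag (typedef struct VP8_COMP). The pattern in isolation:

    /* Forward-declaration pattern used above: an incomplete struct type is
     * enough for prototypes, so the full definition stays in onyx_int.h. */
    struct VP8_COMP;

    extern void vp8_set_quantizer(struct VP8_COMP *cpi, int Q);

    /* Only translation units that dereference cpi (quantize.c, onyx_if.c, ...)
     * include onyx_int.h and see the complete type. */
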
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index 313e6455c..54c394dfc 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -626,7 +626,7 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
{
if (cpi->pass == 2)
{
- cpi->per_frame_bandwidth = cpi->gf_bits; // Per frame bit target for the alt ref frame
+ cpi->per_frame_bandwidth = cpi->twopass.gf_bits; // Per frame bit target for the alt ref frame
cpi->this_frame_target = cpi->per_frame_bandwidth;
}
@@ -1055,7 +1055,6 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
}
#endif
- cpi->initial_gf_use = 0;
if (cpi->auto_adjust_gold_quantizer)
{
@@ -1431,7 +1430,6 @@ void vp8_adjust_key_frame_context(VP8_COMP *cpi)
}
cpi->frames_since_key = 0;
- cpi->last_key_frame_size = cpi->projected_frame_size;
cpi->key_frame_count++;
}
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 5bd0c0a5e..d33d18bf2 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -232,10 +232,11 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME))
{
- if (cpi->next_iiratio > 31)
+ if (cpi->twopass.next_iiratio > 31)
cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4;
else
- cpi->RDMULT += (cpi->RDMULT * rd_iifactor[cpi->next_iiratio]) >> 4;
+ cpi->RDMULT +=
+ (cpi->RDMULT * rd_iifactor[cpi->twopass.next_iiratio]) >> 4;
}
cpi->mb.errorperbit = (cpi->RDMULT / 100);
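
Besides the twopass rename, the clamp keeps the rd_iifactor index in range; the boost itself scales RDMULT by rd_iifactor[min(iiratio, 31)] / 16 in fixed point. An illustrative helper (the factor value is made up; the real table is in rdopt.c):

    /* Illustrative helper; the real factors live in rd_iifactor[] in rdopt.c. */
    static int boost_rdmult(int rdmult, unsigned int iiratio, const int *rd_iifactor)
    {
        int idx = iiratio > 31 ? 31 : (int)iiratio;

        /* e.g. rdmult = 3200, rd_iifactor[idx] = 4:  3200 + (3200 * 4 >> 4) = 4000 */
        return rdmult + ((rdmult * rd_iifactor[idx]) >> 4);
    }
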
@@ -455,17 +456,21 @@ int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd)
if ((mv_row | mv_col) & 7)
{
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
+ VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride,
+ mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
+ VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride,
+ mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
sse2 += sse1;
}
else
{
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
+ int sum2, sum1;
+ VARIANCE_INVOKE(rtcd, get8x8var)(uptr, pre_stride,
+ upred_ptr, uv_stride, &sse2, &sum2);
+ VARIANCE_INVOKE(rtcd, get8x8var)(vptr, pre_stride,
+ vpred_ptr, uv_stride, &sse1, &sum1);
sse2 += sse1;
}
-
return sse2;
}
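
The whole-pel branch used to repeat the same sub-pixel variance calls as the fractional branch; since a clear low-three-bits test means no interpolation is needed, it now uses get8x8var, which yields the same SSE (its sum outputs are not needed here). The fractional test on its own:

    /* The fractional test above in isolation: any of the low three bits set
     * in either MV component means the predictor needs interpolation. */
    static int needs_subpel_filter(int mv_row, int mv_col)
    {
        return (mv_row | mv_col) & 7;    /* e.g. (12 | 8) & 7 == 4 -> sub-pixel */
    }
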
@@ -933,9 +938,11 @@ static int labels2mode(
cost = x->inter_bmode_costs[ m];
}
- d->bmi.mode = m;
d->bmi.mv.as_int = this_mv->as_int;
+ x->partition_info->bmi[i].mode = m;
+ x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
+
}
while (++i < 16);
@@ -1275,8 +1282,8 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
{
BLOCKD *bd = &x->e_mbd.block[i];
- bsi->mvs[i].as_mv = bd->bmi.mv.as_mv;
- bsi->modes[i] = bd->bmi.mode;
+ bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
+ bsi->modes[i] = x->partition_info->bmi[i].mode;
bsi->eobs[i] = bd->eob;
}
}
@@ -1406,7 +1413,6 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
BLOCKD *bd = &x->e_mbd.block[i];
bd->bmi.mv.as_mv = bsi.mvs[i].as_mv;
- bd->bmi.mode = bsi.modes[i];
bd->eob = bsi.eobs[i];
}
@@ -1424,9 +1430,13 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
j = vp8_mbsplit_offset[bsi.segment_num][i];
- x->partition_info->bmi[i].mode = x->e_mbd.block[j].bmi.mode;
- x->partition_info->bmi[i].mv.as_mv = x->e_mbd.block[j].bmi.mv.as_mv;
+ x->partition_info->bmi[i].mode = bsi.modes[j];
+ x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
}
+ /*
+ * used to set x->e_mbd.mode_info_context->mbmi.mv.as_int
+ */
+ x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int;
return bsi.segment_rd;
}
@@ -1686,25 +1696,29 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffse
}
}
-static void vp8_rd_update_mvcount(VP8_COMP *cpi, MACROBLOCKD *xd, int_mv *best_ref_mv)
+static void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv)
{
- int i;
-
- if (xd->mode_info_context->mbmi.mode == SPLITMV)
+ if (x->e_mbd.mode_info_context->mbmi.mode == SPLITMV)
{
- for (i = 0; i < 16; i++)
+ int i;
+
+ for (i = 0; i < x->partition_info->count; i++)
{
- if (xd->block[i].bmi.mode == NEW4X4)
+ if (x->partition_info->bmi[i].mode == NEW4X4)
{
- cpi->MVcount[0][mv_max+((xd->block[i].bmi.mv.as_mv.row - best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max+((xd->block[i].bmi.mv.as_mv.col - best_ref_mv->as_mv.col) >> 1)]++;
+ cpi->MVcount[0][mv_max+((x->partition_info->bmi[i].mv.as_mv.row
+ - best_ref_mv->as_mv.row) >> 1)]++;
+ cpi->MVcount[1][mv_max+((x->partition_info->bmi[i].mv.as_mv.col
+ - best_ref_mv->as_mv.col) >> 1)]++;
}
}
}
- else if (xd->mode_info_context->mbmi.mode == NEWMV)
+ else if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV)
{
- cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv->as_mv.col) >> 1)]++;
+ cpi->MVcount[0][mv_max+((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row
+ - best_ref_mv->as_mv.row) >> 1)]++;
+ cpi->MVcount[1][mv_max+((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col
+ - best_ref_mv->as_mv.col) >> 1)]++;
}
}
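
With sub-block modes carried in x->partition_info, the SPLITMV statistics walk partition_info->count entries and count only those coded as NEW4X4, while NEWMV counts the single macroblock-level MV; the binning is identical in both cases. A sketch of that binning (mv_max is the codec's MV range constant):

    /* Sketch of the binning used above; the count rows correspond to
     * cpi->MVcount[0] (rows) and cpi->MVcount[1] (columns). */
    static void count_mv(unsigned int *row_counts, unsigned int *col_counts,
                         int mv_row, int mv_col,
                         int ref_row, int ref_col, int mv_max)
    {
        row_counts[mv_max + ((mv_row - ref_row) >> 1)]++;
        col_counts[mv_max + ((mv_col - ref_col) >> 1)]++;
    }
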
@@ -2374,9 +2388,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
}
- // Keep a record of best mode index that we chose
- cpi->last_best_mode_index = best_mode_index;
-
// Note how often each mode chosen as best
cpi->mode_chosen_counts[best_mode_index] ++;
@@ -2414,14 +2425,19 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// macroblock modes
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
- vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
for (i = 0; i < 16; i++)
{
vpx_memcpy(&x->e_mbd.block[i].bmi, &best_bmodes[i], sizeof(B_MODE_INFO));
}
- x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
+ if (best_mbmode.mode == SPLITMV)
+ {
+ vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
+ x->e_mbd.mode_info_context->mbmi.mv.as_int =
+ x->partition_info->bmi[15].mv.as_int;
+ }
+
+ rd_update_mvcount(cpi, x, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
- vp8_rd_update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
}
diff --git a/vp8/encoder/variance.h b/vp8/encoder/variance.h
index bf17ea8b6..da5a5364a 100644
--- a/vp8/encoder/variance.h
+++ b/vp8/encoder/variance.h
@@ -391,7 +391,7 @@ typedef struct
vp8_sad_multi_d_fn_t sad8x8x4d;
vp8_sad_multi_d_fn_t sad4x4x4d;
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
vp8_ssimpf_fn_t ssimpf_8x8;
vp8_ssimpf_fn_t ssimpf;
#endif
diff --git a/vp8/encoder/x86/x86_csystemdependent.c b/vp8/encoder/x86/x86_csystemdependent.c
index f65ef8a5f..378b14066 100644
--- a/vp8/encoder/x86/x86_csystemdependent.c
+++ b/vp8/encoder/x86/x86_csystemdependent.c
@@ -112,7 +112,7 @@ static void subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch)
#endif
#if HAVE_SSSE3
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
#if ARCH_X86_64
typedef void ssimpf
(
@@ -287,7 +287,7 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_ssse3;
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
#if ARCH_X86_64
cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_sse3;
cpi->rtcd.variance.ssimpf = vp8_ssim_parms_16x16_sse3;
diff --git a/vp8/vp8_dx_iface.c b/vp8/vp8_dx_iface.c
index cca17c23d..e0f4c0a96 100644
--- a/vp8/vp8_dx_iface.c
+++ b/vp8/vp8_dx_iface.c
@@ -19,6 +19,8 @@
#include "decoder/onyxd_int.h"
#define VP8_CAP_POSTPROC (CONFIG_POSTPROC ? VPX_CODEC_CAP_POSTPROC : 0)
+#define VP8_CAP_ERROR_CONCEALMENT (CONFIG_ERROR_CONCEALMENT ? \
+ VPX_CODEC_CAP_ERROR_CONCEALMENT : 0)
typedef vpx_codec_stream_info_t vp8_stream_info_t;
@@ -364,6 +366,8 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
oxcf.Version = 9;
oxcf.postprocess = 0;
oxcf.max_threads = ctx->cfg.threads;
+ oxcf.error_concealment =
+ (ctx->base.init_flags & VPX_CODEC_USE_ERROR_CONCEALMENT);
optr = vp8dx_create_decompressor(&oxcf);
@@ -719,7 +723,7 @@ CODEC_INTERFACE(vpx_codec_vp8_dx) =
{
"WebM Project VP8 Decoder" VERSION_STRING,
VPX_CODEC_INTERNAL_ABI_VERSION,
- VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC,
+ VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC | VP8_CAP_ERROR_CONCEALMENT,
/* vpx_codec_caps_t caps; */
vp8_init, /* vpx_codec_init_fn_t init; */
vp8_destroy, /* vpx_codec_destroy_fn_t destroy; */
@@ -749,7 +753,7 @@ vpx_codec_iface_t vpx_codec_vp8_algo =
{
"WebM Project VP8 Decoder (Deprecated API)" VERSION_STRING,
VPX_CODEC_INTERNAL_ABI_VERSION,
- VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC,
+ VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC | VP8_CAP_ERROR_CONCEALMENT,
/* vpx_codec_caps_t caps; */
vp8_init, /* vpx_codec_init_fn_t init; */
vp8_destroy, /* vpx_codec_destroy_fn_t destroy; */
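
With the capability bit advertised, an application can request concealment at init time and the decoder picks the flag up from ctx->base.init_flags as shown in the vp8_decode() hunk. A minimal usage sketch, assuming a build with CONFIG_ERROR_CONCEALMENT enabled:

    /* Minimal application-side sketch; assumes CONFIG_ERROR_CONCEALMENT. */
    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"

    static vpx_codec_err_t open_concealing_decoder(vpx_codec_ctx_t *codec)
    {
        vpx_codec_dec_cfg_t cfg = { 0 };   /* threads/w/h left at defaults */

        /* On builds without the capability bit this is expected to fail
         * (typically VPX_CODEC_INCAPABLE) rather than silently ignore it. */
        return vpx_codec_dec_init(codec, vpx_codec_vp8_dx(), &cfg,
                                  VPX_CODEC_USE_ERROR_CONCEALMENT);
    }
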
diff --git a/vp8/vp8cx.mk b/vp8/vp8cx.mk
index c17837164..d46d99df6 100644
--- a/vp8/vp8cx.mk
+++ b/vp8/vp8cx.mk
@@ -77,12 +77,12 @@ VP8_CX_SRCS-yes += encoder/rdopt.c
VP8_CX_SRCS-yes += encoder/sad_c.c
VP8_CX_SRCS-yes += encoder/segmentation.c
VP8_CX_SRCS-yes += encoder/segmentation.h
-VP8_CX_SRCS-$(CONFIG_PSNR) += encoder/ssim.c
+VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/ssim.c
VP8_CX_SRCS-yes += encoder/tokenize.c
VP8_CX_SRCS-yes += encoder/treewriter.c
VP8_CX_SRCS-yes += encoder/variance_c.c
-VP8_CX_SRCS-$(CONFIG_PSNR) += common/postproc.h
-VP8_CX_SRCS-$(CONFIG_PSNR) += common/postproc.c
+VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.h
+VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.c
VP8_CX_SRCS-yes += encoder/temporal_filter.c
VP8_CX_SRCS-yes += encoder/temporal_filter.h
diff --git a/vp8/vp8dx.mk b/vp8/vp8dx.mk
index 564967191..85d6f513e 100644
--- a/vp8/vp8dx.mk
+++ b/vp8/vp8dx.mk
@@ -53,6 +53,9 @@ VP8_DX_SRCS-yes += decoder/decodemv.c
VP8_DX_SRCS-yes += decoder/decodframe.c
VP8_DX_SRCS-yes += decoder/dequantize.c
VP8_DX_SRCS-yes += decoder/detokenize.c
+VP8_DX_SRCS-$(CONFIG_ERROR_CONCEALMENT) += decoder/ec_types.h
+VP8_DX_SRCS-$(CONFIG_ERROR_CONCEALMENT) += decoder/error_concealment.h
+VP8_DX_SRCS-$(CONFIG_ERROR_CONCEALMENT) += decoder/error_concealment.c
VP8_DX_SRCS-yes += decoder/generic/dsystemdependent.c
VP8_DX_SRCS-yes += decoder/dboolhuff.h
VP8_DX_SRCS-yes += decoder/decodemv.h