Diffstat (limited to 'vp9')
-rw-r--r--  vp9/common/vp9_entropy.c       |  10
-rw-r--r--  vp9/common/vp9_entropy.h       |  10
-rw-r--r--  vp9/common/vp9_entropymode.c   |  30
-rw-r--r--  vp9/common/vp9_entropymode.h   |  27
-rw-r--r--  vp9/common/vp9_entropymv.c     |   8
-rw-r--r--  vp9/common/vp9_entropymv.h     |   8
-rw-r--r--  vp9/common/vp9_onyxc_int.h     |   8
-rw-r--r--  vp9/common/vp9_reconinter.c    | 322
-rw-r--r--  vp9/common/vp9_reconinter.h    |   8
-rw-r--r--  vp9/common/vp9_reconintra.c    |  62
-rw-r--r--  vp9/common/vp9_treecoder.c     |  19
-rw-r--r--  vp9/common/vp9_treecoder.h     |  14
-rw-r--r--  vp9/decoder/vp9_decodframe.c   |  49
-rw-r--r--  vp9/decoder/vp9_dequantize.c   |   5
-rw-r--r--  vp9/decoder/vp9_detokenize.c   |   9
-rw-r--r--  vp9/decoder/vp9_onyxd_if.c     |   2
-rw-r--r--  vp9/decoder/vp9_treereader.h   |   1
-rw-r--r--  vp9/encoder/vp9_bitstream.c    |  26
-rw-r--r--  vp9/encoder/vp9_encodeframe.c  | 196
-rw-r--r--  vp9/encoder/vp9_encodemv.c     |  72
-rw-r--r--  vp9/encoder/vp9_firstpass.c    | 321
-rw-r--r--  vp9/encoder/vp9_mcomp.c        |   7
-rw-r--r--  vp9/encoder/vp9_onyx_if.c      | 312
-rw-r--r--  vp9/encoder/vp9_onyx_int.h     |  61
-rw-r--r--  vp9/encoder/vp9_quantize.c     |   6
-rw-r--r--  vp9/encoder/vp9_ratectrl.c     |  11
-rw-r--r--  vp9/encoder/vp9_rdopt.c        |  10
-rw-r--r--  vp9/encoder/vp9_tokenize.c     |   2
-rw-r--r--  vp9/encoder/vp9_treewriter.h   |  15
-rw-r--r--  vp9/vp9_cx_iface.c             |   7
30 files changed, 890 insertions(+), 748 deletions(-)
diff --git a/vp9/common/vp9_entropy.c b/vp9/common/vp9_entropy.c
index 500a278ff..5e6cba2ed 100644
--- a/vp9/common/vp9_entropy.c
+++ b/vp9/common/vp9_entropy.c
@@ -386,7 +386,7 @@ const vp9_tree_index vp9_coef_tree[ 22] = /* corresponding _CONTEXT_NODEs */
-DCT_VAL_CATEGORY5, -DCT_VAL_CATEGORY6 /* 10 = CAT_FIVE */
};
-struct vp9_token_struct vp9_coef_encodings[MAX_ENTROPY_TOKENS];
+struct vp9_token vp9_coef_encodings[MAX_ENTROPY_TOKENS];
/* Trees for extra bits. Probabilities are constant and
do not depend on previously encoded bits */
@@ -408,7 +408,7 @@ const vp9_tree_index vp9_nzc4x4_tree[2 * NZC4X4_NODES] = {
-NZC_3TO4, 8,
-NZC_5TO8, -NZC_9TO16,
};
-struct vp9_token_struct vp9_nzc4x4_encodings[NZC4X4_TOKENS];
+struct vp9_token vp9_nzc4x4_encodings[NZC4X4_TOKENS];
const vp9_tree_index vp9_nzc8x8_tree[2 * NZC8X8_NODES] = {
-NZC_0, 2,
@@ -419,7 +419,7 @@ const vp9_tree_index vp9_nzc8x8_tree[2 * NZC8X8_NODES] = {
-NZC_9TO16, 12,
-NZC_17TO32, -NZC_33TO64,
};
-struct vp9_token_struct vp9_nzc8x8_encodings[NZC8X8_TOKENS];
+struct vp9_token vp9_nzc8x8_encodings[NZC8X8_TOKENS];
const vp9_tree_index vp9_nzc16x16_tree[2 * NZC16X16_NODES] = {
-NZC_0, 2,
@@ -432,7 +432,7 @@ const vp9_tree_index vp9_nzc16x16_tree[2 * NZC16X16_NODES] = {
-NZC_33TO64, 16,
-NZC_65TO128, -NZC_129TO256,
};
-struct vp9_token_struct vp9_nzc16x16_encodings[NZC16X16_TOKENS];
+struct vp9_token vp9_nzc16x16_encodings[NZC16X16_TOKENS];
const vp9_tree_index vp9_nzc32x32_tree[2 * NZC32X32_NODES] = {
-NZC_0, 2,
@@ -447,7 +447,7 @@ const vp9_tree_index vp9_nzc32x32_tree[2 * NZC32X32_NODES] = {
-NZC_129TO256, 20,
-NZC_257TO512, -NZC_513TO1024,
};
-struct vp9_token_struct vp9_nzc32x32_encodings[NZC32X32_TOKENS];
+struct vp9_token vp9_nzc32x32_encodings[NZC32X32_TOKENS];
const int vp9_extranzcbits[NZC32X32_TOKENS] = {
0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
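
The vp9_extranzcbits table pairs with the NZC token trees above: a decoded token gives a base nonzero count and the table gives how many raw extra bits refine it. A minimal sketch of the reconstruction, assuming base values implied by the token names (NZC_3TO4 starts at 3, NZC_5TO8 at 5, and so on; the actual table in the source may differ):

    /* Hypothetical base-count table implied by the NZC_* token names. */
    static const int nzc_base[12] = {
      0, 1, 2, 3, 5, 9, 17, 33, 65, 129, 257, 513
    };
    static const int nzc_extra_bits[12] = {  /* == vp9_extranzcbits */
      0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
    };

    /* Reconstruct a nonzero count from its token and raw extra bits. */
    static int decode_nzc(int token, int extra) {
      return nzc_base[token] + extra;  /* e.g. token NZC_5TO8 + 3 -> 8 */
    }
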
diff --git a/vp9/common/vp9_entropy.h b/vp9/common/vp9_entropy.h
index 645faa2c6..db167420c 100644
--- a/vp9/common/vp9_entropy.h
+++ b/vp9/common/vp9_entropy.h
@@ -40,7 +40,7 @@ extern const int vp9_i8x8_block[4];
extern const vp9_tree_index vp9_coef_tree[];
-extern struct vp9_token_struct vp9_coef_encodings[MAX_ENTROPY_TOKENS];
+extern struct vp9_token vp9_coef_encodings[MAX_ENTROPY_TOKENS];
typedef struct {
vp9_tree_p tree;
@@ -215,10 +215,10 @@ extern const vp9_tree_index vp9_nzc16x16_tree[];
extern const vp9_tree_index vp9_nzc32x32_tree[];
/* nzc encodings */
-extern struct vp9_token_struct vp9_nzc4x4_encodings[NZC4X4_TOKENS];
-extern struct vp9_token_struct vp9_nzc8x8_encodings[NZC8X8_TOKENS];
-extern struct vp9_token_struct vp9_nzc16x16_encodings[NZC16X16_TOKENS];
-extern struct vp9_token_struct vp9_nzc32x32_encodings[NZC32X32_TOKENS];
+extern struct vp9_token vp9_nzc4x4_encodings[NZC4X4_TOKENS];
+extern struct vp9_token vp9_nzc8x8_encodings[NZC8X8_TOKENS];
+extern struct vp9_token vp9_nzc16x16_encodings[NZC16X16_TOKENS];
+extern struct vp9_token vp9_nzc32x32_encodings[NZC32X32_TOKENS];
#define codenzc(x) (\
(x) <= 3 ? (x) : (x) <= 4 ? 3 : (x) <= 8 ? 4 : \
diff --git a/vp9/common/vp9_entropymode.c b/vp9/common/vp9_entropymode.c
index 055ba7986..ed3c6a554 100644
--- a/vp9/common/vp9_entropymode.c
+++ b/vp9/common/vp9_entropymode.c
@@ -283,19 +283,19 @@ const vp9_tree_index vp9_sub_mv_ref_tree[6] = {
-ZERO4X4, -NEW4X4
};
-struct vp9_token_struct vp9_bmode_encodings[VP9_NKF_BINTRAMODES];
-struct vp9_token_struct vp9_kf_bmode_encodings[VP9_KF_BINTRAMODES];
-struct vp9_token_struct vp9_ymode_encodings[VP9_YMODES];
-struct vp9_token_struct vp9_sb_ymode_encodings[VP9_I32X32_MODES];
-struct vp9_token_struct vp9_sb_kf_ymode_encodings[VP9_I32X32_MODES];
-struct vp9_token_struct vp9_kf_ymode_encodings[VP9_YMODES];
-struct vp9_token_struct vp9_uv_mode_encodings[VP9_UV_MODES];
-struct vp9_token_struct vp9_i8x8_mode_encodings[VP9_I8X8_MODES];
-struct vp9_token_struct vp9_mbsplit_encodings[VP9_NUMMBSPLITS];
-
-struct vp9_token_struct vp9_mv_ref_encoding_array[VP9_MVREFS];
-struct vp9_token_struct vp9_sb_mv_ref_encoding_array[VP9_MVREFS];
-struct vp9_token_struct vp9_sub_mv_ref_encoding_array[VP9_SUBMVREFS];
+struct vp9_token vp9_bmode_encodings[VP9_NKF_BINTRAMODES];
+struct vp9_token vp9_kf_bmode_encodings[VP9_KF_BINTRAMODES];
+struct vp9_token vp9_ymode_encodings[VP9_YMODES];
+struct vp9_token vp9_sb_ymode_encodings[VP9_I32X32_MODES];
+struct vp9_token vp9_sb_kf_ymode_encodings[VP9_I32X32_MODES];
+struct vp9_token vp9_kf_ymode_encodings[VP9_YMODES];
+struct vp9_token vp9_uv_mode_encodings[VP9_UV_MODES];
+struct vp9_token vp9_i8x8_mode_encodings[VP9_I8X8_MODES];
+struct vp9_token vp9_mbsplit_encodings[VP9_NUMMBSPLITS];
+
+struct vp9_token vp9_mv_ref_encoding_array[VP9_MVREFS];
+struct vp9_token vp9_sb_mv_ref_encoding_array[VP9_MVREFS];
+struct vp9_token vp9_sub_mv_ref_encoding_array[VP9_SUBMVREFS];
void vp9_init_mbmode_probs(VP9_COMMON *x) {
unsigned int bct [VP9_YMODES] [2]; /* num Ymodes > num UV modes */
@@ -379,7 +379,7 @@ const vp9_tree_index vp9_switchable_interp_tree[VP9_SWITCHABLE_FILTERS*2-2] = {
-0, 2,
-1, -2
};
-struct vp9_token_struct vp9_switchable_interp_encodings[VP9_SWITCHABLE_FILTERS];
+struct vp9_token vp9_switchable_interp_encodings[VP9_SWITCHABLE_FILTERS];
#if CONFIG_ENABLE_6TAP
const INTERPOLATIONFILTERTYPE vp9_switchable_interp[VP9_SWITCHABLE_FILTERS] = {
SIXTAP, EIGHTTAP, EIGHTTAP_SHARP};
@@ -397,7 +397,7 @@ const vp9_prob vp9_switchable_interp_prob [VP9_SWITCHABLE_FILTERS+1]
const vp9_tree_index vp9_switchable_interp_tree[VP9_SWITCHABLE_FILTERS*2-2] = {
-0, -1,
};
-struct vp9_token_struct vp9_switchable_interp_encodings[VP9_SWITCHABLE_FILTERS];
+struct vp9_token vp9_switchable_interp_encodings[VP9_SWITCHABLE_FILTERS];
const vp9_prob vp9_switchable_interp_prob [VP9_SWITCHABLE_FILTERS+1]
[VP9_SWITCHABLE_FILTERS-1] = {
{248},
diff --git a/vp9/common/vp9_entropymode.h b/vp9/common/vp9_entropymode.h
index 8b0caf6eb..fe97f0e1f 100644
--- a/vp9/common/vp9_entropymode.h
+++ b/vp9/common/vp9_entropymode.h
@@ -54,21 +54,21 @@ extern const vp9_tree_index vp9_mv_ref_tree[];
extern const vp9_tree_index vp9_sb_mv_ref_tree[];
extern const vp9_tree_index vp9_sub_mv_ref_tree[];
-extern struct vp9_token_struct vp9_bmode_encodings[VP9_NKF_BINTRAMODES];
-extern struct vp9_token_struct vp9_kf_bmode_encodings[VP9_KF_BINTRAMODES];
-extern struct vp9_token_struct vp9_ymode_encodings[VP9_YMODES];
-extern struct vp9_token_struct vp9_sb_ymode_encodings[VP9_I32X32_MODES];
-extern struct vp9_token_struct vp9_sb_kf_ymode_encodings[VP9_I32X32_MODES];
-extern struct vp9_token_struct vp9_kf_ymode_encodings[VP9_YMODES];
-extern struct vp9_token_struct vp9_i8x8_mode_encodings[VP9_I8X8_MODES];
-extern struct vp9_token_struct vp9_uv_mode_encodings[VP9_UV_MODES];
-extern struct vp9_token_struct vp9_mbsplit_encodings[VP9_NUMMBSPLITS];
+extern struct vp9_token vp9_bmode_encodings[VP9_NKF_BINTRAMODES];
+extern struct vp9_token vp9_kf_bmode_encodings[VP9_KF_BINTRAMODES];
+extern struct vp9_token vp9_ymode_encodings[VP9_YMODES];
+extern struct vp9_token vp9_sb_ymode_encodings[VP9_I32X32_MODES];
+extern struct vp9_token vp9_sb_kf_ymode_encodings[VP9_I32X32_MODES];
+extern struct vp9_token vp9_kf_ymode_encodings[VP9_YMODES];
+extern struct vp9_token vp9_i8x8_mode_encodings[VP9_I8X8_MODES];
+extern struct vp9_token vp9_uv_mode_encodings[VP9_UV_MODES];
+extern struct vp9_token vp9_mbsplit_encodings[VP9_NUMMBSPLITS];
/* Inter mode values do not start at zero */
-extern struct vp9_token_struct vp9_mv_ref_encoding_array[VP9_MVREFS];
-extern struct vp9_token_struct vp9_sb_mv_ref_encoding_array[VP9_MVREFS];
-extern struct vp9_token_struct vp9_sub_mv_ref_encoding_array[VP9_SUBMVREFS];
+extern struct vp9_token vp9_mv_ref_encoding_array[VP9_MVREFS];
+extern struct vp9_token vp9_sb_mv_ref_encoding_array[VP9_MVREFS];
+extern struct vp9_token vp9_sub_mv_ref_encoding_array[VP9_SUBMVREFS];
void vp9_entropy_mode_init(void);
@@ -107,8 +107,7 @@ extern const int vp9_is_interpolating_filter[SWITCHABLE + 1];
extern const vp9_tree_index vp9_switchable_interp_tree
[2 * (VP9_SWITCHABLE_FILTERS - 1)];
-extern struct vp9_token_struct vp9_switchable_interp_encodings
- [VP9_SWITCHABLE_FILTERS];
+extern struct vp9_token vp9_switchable_interp_encodings[VP9_SWITCHABLE_FILTERS];
extern const vp9_prob vp9_switchable_interp_prob[VP9_SWITCHABLE_FILTERS + 1]
[VP9_SWITCHABLE_FILTERS - 1];
diff --git a/vp9/common/vp9_entropymv.c b/vp9/common/vp9_entropymv.c
index 8330befbe..fa8eacc10 100644
--- a/vp9/common/vp9_entropymv.c
+++ b/vp9/common/vp9_entropymv.c
@@ -33,7 +33,7 @@ const vp9_tree_index vp9_mv_joint_tree[2 * MV_JOINTS - 2] = {
-MV_JOINT_HNZVZ, 4,
-MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ
};
-struct vp9_token_struct vp9_mv_joint_encodings[MV_JOINTS];
+struct vp9_token vp9_mv_joint_encodings[MV_JOINTS];
const vp9_tree_index vp9_mv_class_tree[2 * MV_CLASSES - 2] = {
-MV_CLASS_0, 2,
@@ -47,19 +47,19 @@ const vp9_tree_index vp9_mv_class_tree[2 * MV_CLASSES - 2] = {
-MV_CLASS_7, -MV_CLASS_8,
-MV_CLASS_9, -MV_CLASS_10,
};
-struct vp9_token_struct vp9_mv_class_encodings[MV_CLASSES];
+struct vp9_token vp9_mv_class_encodings[MV_CLASSES];
const vp9_tree_index vp9_mv_class0_tree [2 * CLASS0_SIZE - 2] = {
-0, -1,
};
-struct vp9_token_struct vp9_mv_class0_encodings[CLASS0_SIZE];
+struct vp9_token vp9_mv_class0_encodings[CLASS0_SIZE];
const vp9_tree_index vp9_mv_fp_tree [2 * 4 - 2] = {
-0, 2,
-1, 4,
-2, -3
};
-struct vp9_token_struct vp9_mv_fp_encodings[4];
+struct vp9_token vp9_mv_fp_encodings[4];
const nmv_context vp9_default_nmv_context = {
{32, 64, 96},
diff --git a/vp9/common/vp9_entropymv.h b/vp9/common/vp9_entropymv.h
index 162d2b44f..3521a520c 100644
--- a/vp9/common/vp9_entropymv.h
+++ b/vp9/common/vp9_entropymv.h
@@ -46,7 +46,7 @@ typedef enum {
} MV_JOINT_TYPE;
extern const vp9_tree_index vp9_mv_joint_tree[2 * MV_JOINTS - 2];
-extern struct vp9_token_struct vp9_mv_joint_encodings [MV_JOINTS];
+extern struct vp9_token vp9_mv_joint_encodings[MV_JOINTS];
/* Symbols for coding magnitude class of nonzero components */
#define MV_CLASSES 11
@@ -65,7 +65,7 @@ typedef enum {
} MV_CLASS_TYPE;
extern const vp9_tree_index vp9_mv_class_tree[2 * MV_CLASSES - 2];
-extern struct vp9_token_struct vp9_mv_class_encodings [MV_CLASSES];
+extern struct vp9_token vp9_mv_class_encodings[MV_CLASSES];
#define CLASS0_BITS 1 /* bits at integer precision for class 0 */
#define CLASS0_SIZE (1 << CLASS0_BITS)
@@ -76,10 +76,10 @@ extern struct vp9_token_struct vp9_mv_class_encodings [MV_CLASSES];
#define MV_VALS ((MV_MAX << 1) + 1)
extern const vp9_tree_index vp9_mv_class0_tree[2 * CLASS0_SIZE - 2];
-extern struct vp9_token_struct vp9_mv_class0_encodings[CLASS0_SIZE];
+extern struct vp9_token vp9_mv_class0_encodings[CLASS0_SIZE];
extern const vp9_tree_index vp9_mv_fp_tree[2 * 4 - 2];
-extern struct vp9_token_struct vp9_mv_fp_encodings[4];
+extern struct vp9_token vp9_mv_fp_encodings[4];
typedef struct {
vp9_prob sign;
diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h
index 6f928f5e0..c7ca67efe 100644
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -18,6 +18,7 @@
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
+
#if CONFIG_POSTPROC
#include "vp9/common/vp9_postproc.h"
#endif
@@ -37,8 +38,13 @@ void vp9_initialize_common(void);
#define QINDEX_RANGE (MAXQ + 1)
+#if CONFIG_MULTIPLE_ARF
+#define NUM_REF_FRAMES 8
+#define NUM_REF_FRAMES_LG2 3
+#else
#define NUM_REF_FRAMES 3
#define NUM_REF_FRAMES_LG2 2
+#endif
#define ALLOWED_REFS_PER_FRAME 3
@@ -52,6 +58,8 @@ void vp9_initialize_common(void);
#define COMP_PRED_CONTEXTS 2
+#define MAX_LAG_BUFFERS 25
+
typedef struct frame_contexts {
vp9_prob bmode_prob[VP9_NKF_BINTRAMODES - 1];
vp9_prob ymode_prob[VP9_YMODES - 1]; /* interframe intra mode probs */
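
Since buffer indices are transmitted with NUM_REF_FRAMES_LG2 bits (see the refresh_mask code in vp9_bitstream.c below), the two constants must stay in sync under both configurations. A hypothetical compile-time guard, not part of this patch:

    /* Fails to compile if NUM_REF_FRAMES is not exactly 2^NUM_REF_FRAMES_LG2. */
    typedef char check_num_ref_frames_lg2[
        (1 << NUM_REF_FRAMES_LG2) == NUM_REF_FRAMES ? 1 : -1];
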
diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c
index 22d4b0449..ee378d239 100644
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -1070,12 +1070,15 @@ void vp9_build_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
#endif
#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-static void build_inter32x32_predictors_sby_w(MACROBLOCKD *x,
- uint8_t *dst_y,
- int dst_ystride,
- int weight,
- int mb_row,
- int mb_col) {
+static void build_inter_predictors_sby_w(MACROBLOCKD *x,
+ uint8_t *dst_y,
+ int dst_ystride,
+ int weight,
+ int mb_row,
+ int mb_col,
+ BLOCK_SIZE_TYPE bsize) {
+ const int bwl = mb_width_log2(bsize), bw = 1 << bwl;
+ const int bhl = mb_height_log2(bsize), bh = 1 << bhl;
uint8_t *y1 = x->pre.y_buffer;
uint8_t *y2 = x->second_pre.y_buffer;
int edge[4], n;
@@ -1085,8 +1088,8 @@ static void build_inter32x32_predictors_sby_w(MACROBLOCKD *x,
edge[2] = x->mb_to_left_edge;
edge[3] = x->mb_to_right_edge;
- for (n = 0; n < 4; n++) {
- const int x_idx = n & 1, y_idx = n >> 1;
+ for (n = 0; n < bw * bh; n++) {
+ const int x_idx = n & (bw - 1), y_idx = n >> bwl;
x->mb_to_top_edge = edge[0] - ((y_idx * 16) << 3);
x->mb_to_bottom_edge = edge[1] + (((1 - y_idx) * 16) << 3);
@@ -1119,25 +1122,31 @@ static void build_inter32x32_predictors_sby_w(MACROBLOCKD *x,
}
}
-void vp9_build_inter32x32_predictors_sby(MACROBLOCKD *x,
+void vp9_build_inter_predictors_sby(MACROBLOCKD *x,
uint8_t *dst_y,
int dst_ystride,
int mb_row,
- int mb_col) {
+ int mb_col,
+ BLOCK_SIZE_TYPE bsize) {
int weight = get_implicit_compoundinter_weight(x, mb_row, mb_col);
- build_inter32x32_predictors_sby_w(x, dst_y, dst_ystride, weight,
- mb_row, mb_col);
+ build_inter_predictors_sby_w(x, dst_y, dst_ystride, weight,
+ mb_row, mb_col, bsize);
}
#else
-// TODO(all): Can we use 32x32 specific implementations of this rather than
-// using 16x16 implementations ?
-void vp9_build_inter32x32_predictors_sby(MACROBLOCKD *x,
- uint8_t *dst_y,
- int dst_ystride,
- int mb_row,
- int mb_col) {
+// TODO(jingning): vp9_convolve8_ssse3_ limits the dimension up to 16. Currently
+// handle inter prediction of block sizes above 16x16 separately from those
+// smaller ones. Need to combine them all in to a unified inter prediction
+// function.
+void vp9_build_inter_predictors_sby(MACROBLOCKD *x,
+ uint8_t *dst_y,
+ int dst_ystride,
+ int mb_row,
+ int mb_col,
+ BLOCK_SIZE_TYPE bsize) {
+ const int bwl = mb_width_log2(bsize), bw = 1 << bwl;
+ const int bhl = mb_height_log2(bsize), bh = 1 << bhl;
uint8_t *y1 = x->pre.y_buffer;
uint8_t *y2 = x->second_pre.y_buffer;
int edge[4], n;
@@ -1147,8 +1156,8 @@ void vp9_build_inter32x32_predictors_sby(MACROBLOCKD *x,
edge[2] = x->mb_to_left_edge;
edge[3] = x->mb_to_right_edge;
- for (n = 0; n < 4; n++) {
- const int x_idx = n & 1, y_idx = n >> 1;
+ for (n = 0; n < bw * bh; n++) {
+ const int x_idx = n & (bw - 1), y_idx = n >> bwl;
x->mb_to_top_edge = edge[0] - ((y_idx * 16) << 3);
x->mb_to_bottom_edge = edge[1] + (((1 - y_idx) * 16) << 3);
@@ -1184,13 +1193,16 @@ void vp9_build_inter32x32_predictors_sby(MACROBLOCKD *x,
#endif
#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-static void build_inter32x32_predictors_sbuv_w(MACROBLOCKD *x,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
- int weight,
- int mb_row,
- int mb_col) {
+static void build_inter_predictors_sbuv_w(MACROBLOCKD *x,
+ uint8_t *dst_u,
+ uint8_t *dst_v,
+ int dst_uvstride,
+ int weight,
+ int mb_row,
+ int mb_col,
+ BLOCK_SIZE_TYPE bsize) {
+ const int bwl = mb_width_log2(bsize), bw = 1 << bwl;
+ const int bhl = mb_height_log2(bsize), bh = 1 << bhl;
uint8_t *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
uint8_t *u2 = x->second_pre.u_buffer, *v2 = x->second_pre.v_buffer;
int edge[4], n;
@@ -1200,9 +1212,9 @@ static void build_inter32x32_predictors_sbuv_w(MACROBLOCKD *x,
edge[2] = x->mb_to_left_edge;
edge[3] = x->mb_to_right_edge;
- for (n = 0; n < 4; n++) {
+ for (n = 0; n < bw * bh; n++) {
int scaled_uv_offset;
- const int x_idx = n & 1, y_idx = n >> 1;
+ const int x_idx = n & (bw - 1), y_idx = n >> bwl;
x->mb_to_top_edge = edge[0] - ((y_idx * 16) << 3);
x->mb_to_bottom_edge = edge[1] + (((1 - y_idx) * 16) << 3);
@@ -1244,29 +1256,33 @@ static void build_inter32x32_predictors_sbuv_w(MACROBLOCKD *x,
}
}
-void vp9_build_inter32x32_predictors_sbuv(MACROBLOCKD *xd,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
- int mb_row,
- int mb_col) {
+void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
+ uint8_t *dst_u,
+ uint8_t *dst_v,
+ int dst_uvstride,
+ int mb_row,
+ int mb_col,
+ BLOCK_SIZE_TYPE bsize) {
#ifdef USE_IMPLICIT_WEIGHT_UV
int weight = get_implicit_compoundinter_weight(xd, mb_row, mb_col);
#else
int weight = AVERAGE_WEIGHT;
#endif
- build_inter32x32_predictors_sbuv_w(xd, dst_u, dst_v, dst_uvstride,
- weight, mb_row, mb_col);
+ build_inter_predictors_sbuv_w(xd, dst_u, dst_v, dst_uvstride,
+ weight, mb_row, mb_col, bsize);
}
#else
-void vp9_build_inter32x32_predictors_sbuv(MACROBLOCKD *x,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
- int mb_row,
- int mb_col) {
+void vp9_build_inter_predictors_sbuv(MACROBLOCKD *x,
+ uint8_t *dst_u,
+ uint8_t *dst_v,
+ int dst_uvstride,
+ int mb_row,
+ int mb_col,
+ BLOCK_SIZE_TYPE bsize) {
+ const int bwl = mb_width_log2(bsize), bw = 1 << bwl;
+ const int bhl = mb_height_log2(bsize), bh = 1 << bhl;
uint8_t *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
uint8_t *u2 = x->second_pre.u_buffer, *v2 = x->second_pre.v_buffer;
int edge[4], n;
@@ -1276,9 +1292,9 @@ void vp9_build_inter32x32_predictors_sbuv(MACROBLOCKD *x,
edge[2] = x->mb_to_left_edge;
edge[3] = x->mb_to_right_edge;
- for (n = 0; n < 4; n++) {
+ for (n = 0; n < bw * bh; n++) {
int scaled_uv_offset;
- const int x_idx = n & 1, y_idx = n >> 1;
+ const int x_idx = n & (bw - 1), y_idx = n >> bwl;
x->mb_to_top_edge = edge[0] - ((y_idx * 16) << 3);
x->mb_to_bottom_edge = edge[1] + (((1 - y_idx) * 16) << 3);
@@ -1321,215 +1337,25 @@ void vp9_build_inter32x32_predictors_sbuv(MACROBLOCKD *x,
}
#endif
-void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *mb,
- int mb_row, int mb_col) {
- uint8_t *const y = mb->dst.y_buffer;
- uint8_t *const u = mb->dst.u_buffer;
- uint8_t *const v = mb->dst.v_buffer;
- const int y_stride = mb->dst.y_stride;
- const int uv_stride = mb->dst.uv_stride;
-
- vp9_build_inter32x32_predictors_sby(mb, y, y_stride, mb_row, mb_col);
- vp9_build_inter32x32_predictors_sbuv(mb, u, v, uv_stride, mb_row, mb_col);
-#if CONFIG_COMP_INTERINTRA_PRED
- if (mb->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
- vp9_build_interintra_32x32_predictors_sb(mb, y, u, v, y_stride, uv_stride);
- }
-#endif
-}
-
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-static void build_inter64x64_predictors_sby_w(MACROBLOCKD *x,
- uint8_t *dst_y,
- int dst_ystride,
- int weight,
- int mb_row,
- int mb_col) {
- uint8_t *y1 = x->pre.y_buffer;
- uint8_t *y2 = x->second_pre.y_buffer;
- int edge[4], n;
-
- edge[0] = x->mb_to_top_edge;
- edge[1] = x->mb_to_bottom_edge;
- edge[2] = x->mb_to_left_edge;
- edge[3] = x->mb_to_right_edge;
-
- for (n = 0; n < 4; n++) {
- const int x_idx = n & 1, y_idx = n >> 1;
-
- x->mb_to_top_edge = edge[0] - ((y_idx * 32) << 3);
- x->mb_to_bottom_edge = edge[1] + (((1 - y_idx) * 32) << 3);
- x->mb_to_left_edge = edge[2] - ((x_idx * 32) << 3);
- x->mb_to_right_edge = edge[3] + (((1 - x_idx) * 32) << 3);
-
- x->pre.y_buffer = y1 + scaled_buffer_offset(x_idx * 32,
- y_idx * 32,
- x->pre.y_stride,
- &x->scale_factor[0]);
-
- if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- x->second_pre.y_buffer = y2 +
- scaled_buffer_offset(x_idx * 32,
- y_idx * 32,
- x->second_pre.y_stride,
- &x->scale_factor[1]);
- }
-
- build_inter32x32_predictors_sby_w(x,
- dst_y + y_idx * 32 * dst_ystride + x_idx * 32,
- dst_ystride, weight, mb_row + y_idx * 2, mb_col + x_idx * 2);
- }
-
- x->mb_to_top_edge = edge[0];
- x->mb_to_bottom_edge = edge[1];
- x->mb_to_left_edge = edge[2];
- x->mb_to_right_edge = edge[3];
-
- x->pre.y_buffer = y1;
-
- if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- x->second_pre.y_buffer = y2;
- }
-}
-
-void vp9_build_inter64x64_predictors_sby(MACROBLOCKD *x,
- uint8_t *dst_y,
- int dst_ystride,
- int mb_row,
- int mb_col) {
- int weight = get_implicit_compoundinter_weight(x, mb_row, mb_col);
- build_inter64x64_predictors_sby_w(x, dst_y, dst_ystride, weight,
- mb_row, mb_col);
-}
-
-#else
-
-void vp9_build_inter64x64_predictors_sby(MACROBLOCKD *x,
- uint8_t *dst_y,
- int dst_ystride,
- int mb_row,
- int mb_col) {
- uint8_t *y1 = x->pre.y_buffer;
- uint8_t *y2 = x->second_pre.y_buffer;
- int edge[4], n;
-
- edge[0] = x->mb_to_top_edge;
- edge[1] = x->mb_to_bottom_edge;
- edge[2] = x->mb_to_left_edge;
- edge[3] = x->mb_to_right_edge;
-
- for (n = 0; n < 4; n++) {
- const int x_idx = n & 1, y_idx = n >> 1;
-
- x->mb_to_top_edge = edge[0] - ((y_idx * 32) << 3);
- x->mb_to_bottom_edge = edge[1] + (((1 - y_idx) * 32) << 3);
- x->mb_to_left_edge = edge[2] - ((x_idx * 32) << 3);
- x->mb_to_right_edge = edge[3] + (((1 - x_idx) * 32) << 3);
-
- x->pre.y_buffer = y1 + scaled_buffer_offset(x_idx * 32,
- y_idx * 32,
- x->pre.y_stride,
- &x->scale_factor[0]);
-
- if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- x->second_pre.y_buffer = y2 +
- scaled_buffer_offset(x_idx * 32,
- y_idx * 32,
- x->second_pre.y_stride,
- &x->scale_factor[1]);
- }
-
- vp9_build_inter32x32_predictors_sby(x,
- dst_y + y_idx * 32 * dst_ystride + x_idx * 32,
- dst_ystride, mb_row + y_idx * 2, mb_col + x_idx * 2);
- }
-
- x->mb_to_top_edge = edge[0];
- x->mb_to_bottom_edge = edge[1];
- x->mb_to_left_edge = edge[2];
- x->mb_to_right_edge = edge[3];
-
- x->pre.y_buffer = y1;
-
- if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- x->second_pre.y_buffer = y2;
- }
-}
-#endif
-
-void vp9_build_inter64x64_predictors_sbuv(MACROBLOCKD *x,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
- int mb_row,
- int mb_col) {
- uint8_t *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
- uint8_t *u2 = x->second_pre.u_buffer, *v2 = x->second_pre.v_buffer;
- int edge[4], n;
-
- edge[0] = x->mb_to_top_edge;
- edge[1] = x->mb_to_bottom_edge;
- edge[2] = x->mb_to_left_edge;
- edge[3] = x->mb_to_right_edge;
-
- for (n = 0; n < 4; n++) {
- const int x_idx = n & 1, y_idx = n >> 1;
- int scaled_uv_offset;
-
- x->mb_to_top_edge = edge[0] - ((y_idx * 32) << 3);
- x->mb_to_bottom_edge = edge[1] + (((1 - y_idx) * 32) << 3);
- x->mb_to_left_edge = edge[2] - ((x_idx * 32) << 3);
- x->mb_to_right_edge = edge[3] + (((1 - x_idx) * 32) << 3);
-
- scaled_uv_offset = scaled_buffer_offset(x_idx * 16,
- y_idx * 16,
- x->pre.uv_stride,
- &x->scale_factor_uv[0]);
- x->pre.u_buffer = u1 + scaled_uv_offset;
- x->pre.v_buffer = v1 + scaled_uv_offset;
-
- if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- scaled_uv_offset = scaled_buffer_offset(x_idx * 16,
- y_idx * 16,
- x->second_pre.uv_stride,
- &x->scale_factor_uv[1]);
- x->second_pre.u_buffer = u2 + scaled_uv_offset;
- x->second_pre.v_buffer = v2 + scaled_uv_offset;
- }
-
- vp9_build_inter32x32_predictors_sbuv(x,
- dst_u + y_idx * 16 * dst_uvstride + x_idx * 16,
- dst_v + y_idx * 16 * dst_uvstride + x_idx * 16,
- dst_uvstride, mb_row + y_idx * 2, mb_col + x_idx * 2);
- }
-
- x->mb_to_top_edge = edge[0];
- x->mb_to_bottom_edge = edge[1];
- x->mb_to_left_edge = edge[2];
- x->mb_to_right_edge = edge[3];
-
- x->pre.u_buffer = u1;
- x->pre.v_buffer = v1;
-
- if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- x->second_pre.u_buffer = u2;
- x->second_pre.v_buffer = v2;
- }
-}
-
-void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *mb,
- int mb_row, int mb_col) {
+void vp9_build_inter_predictors_sb(MACROBLOCKD *mb,
+ int mb_row, int mb_col,
+ BLOCK_SIZE_TYPE bsize) {
uint8_t *const y = mb->dst.y_buffer;
uint8_t *const u = mb->dst.u_buffer;
uint8_t *const v = mb->dst.v_buffer;
const int y_stride = mb->dst.y_stride;
const int uv_stride = mb->dst.uv_stride;
- vp9_build_inter64x64_predictors_sby(mb, y, y_stride, mb_row, mb_col);
- vp9_build_inter64x64_predictors_sbuv(mb, u, v, uv_stride, mb_row, mb_col);
+ vp9_build_inter_predictors_sby(mb, y, y_stride, mb_row, mb_col, bsize);
+ vp9_build_inter_predictors_sbuv(mb, u, v, uv_stride, mb_row, mb_col, bsize);
#if CONFIG_COMP_INTERINTRA_PRED
if (mb->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
- vp9_build_interintra_64x64_predictors_sb(mb, y, u, v, y_stride, uv_stride);
+ if (bsize == BLOCK_SIZE_SB32X32)
+ vp9_build_interintra_32x32_predictors_sb(mb, y, u, v,
+ y_stride, uv_stride);
+ else
+ vp9_build_interintra_64x64_predictors_sb(mb, y, u, v,
+ y_stride, uv_stride);
}
#endif
}
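
The unified functions replace the fixed four-iteration loops with a walk over all bw * bh 16x16 macroblocks of the block, using the same bit tricks throughout: x_idx = n & (bw - 1) and y_idx = n >> bwl. A self-contained sketch of the traversal, assuming mb_width_log2()/mb_height_log2() return 1 for 32x32 and 2 for 64x64 blocks:

    #include <stdio.h>

    int main(void) {
      /* 64x64 superblock: 4 MBs wide, 4 MBs high (bwl = bhl = 2). */
      const int bwl = 2, bw = 1 << bwl;
      const int bhl = 2, bh = 1 << bhl;
      int n;
      for (n = 0; n < bw * bh; n++) {
        const int x_idx = n & (bw - 1);  /* MB column within the block */
        const int y_idx = n >> bwl;      /* MB row within the block */
        printf("MB %2d -> (row %d, col %d)\n", n, y_idx, x_idx);
      }
      return 0;
    }
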
@@ -1713,6 +1539,8 @@ void vp9_build_inter_predictors_mb(MACROBLOCKD *xd,
int mb_row,
int mb_col) {
if (xd->mode_info_context->mbmi.mode != SPLITMV) {
+ // TODO(jingning): to be replaced with vp9_build_inter_predictors_sb() when
+ // converting buffers from predictors to dst.
vp9_build_inter16x16_predictors_mb(xd, xd->predictor,
&xd->predictor[256],
&xd->predictor[320], 16, 8,
diff --git a/vp9/common/vp9_reconinter.h b/vp9/common/vp9_reconinter.h
index 5268607fd..585fcfd6d 100644
--- a/vp9/common/vp9_reconinter.h
+++ b/vp9/common/vp9_reconinter.h
@@ -38,11 +38,9 @@ void vp9_build_inter16x16_predictors_mb(MACROBLOCKD *xd,
int mb_row,
int mb_col);
-void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *mb,
- int mb_row, int mb_col);
-
-void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *mb,
- int mb_row, int mb_col);
+void vp9_build_inter_predictors_sb(MACROBLOCKD *mb,
+ int mb_row, int mb_col,
+ BLOCK_SIZE_TYPE bsize);
void vp9_build_inter_predictors_mb(MACROBLOCKD *xd,
int mb_row,
diff --git a/vp9/common/vp9_reconintra.c b/vp9/common/vp9_reconintra.c
index 34e95a252..7ef03fc35 100644
--- a/vp9/common/vp9_reconintra.c
+++ b/vp9/common/vp9_reconintra.c
@@ -31,7 +31,7 @@ static const unsigned int iscale[64] = {
};
static INLINE int iscale_round(int value, int i) {
- return ROUND_POWER_OF_TWO(value * iscale[i], 16);
+ return ROUND_POWER_OF_TWO(value * iscale[i], 16);
}
static void d27_predictor(uint8_t *ypred_ptr, int y_stride,
@@ -70,7 +70,7 @@ static void d27_predictor(uint8_t *ypred_ptr, int y_stride,
ypred_ptr[r * y_stride] = ROUND_POWER_OF_TWO(ypred_ptr[(r - 1) * y_stride] +
yleft_col[r], 1);
for (r = bh - 2; r >= bh / 2; --r) {
- int w = c + (bh - 1 - r) * 2;
+ const int w = c + (bh - 1 - r) * 2;
ypred_ptr[r * y_stride + w] =
ROUND_POWER_OF_TWO(ypred_ptr[(r - 1) * y_stride + w] +
ypred_ptr[r * y_stride + w - 1], 1);
@@ -78,7 +78,7 @@ static void d27_predictor(uint8_t *ypred_ptr, int y_stride,
for (c = 1; c < bw; c++) {
for (r = bh - 1; r >= bh / 2 + c / 2; --r) {
- int w = c + (bh - 1 - r) * 2;
+ const int w = c + (bh - 1 - r) * 2;
ypred_ptr[r * y_stride + w] =
ROUND_POWER_OF_TWO(ypred_ptr[(r - 1) * y_stride + w] +
ypred_ptr[r * y_stride + w - 1], 1);
@@ -121,7 +121,7 @@ static void d63_predictor(uint8_t *ypred_ptr, int y_stride,
c = bw - 1;
ypred_ptr[c] = ROUND_POWER_OF_TWO(ypred_ptr[(c - 1)] + yabove_row[c], 1);
for (c = bw - 2; c >= bw / 2; --c) {
- int h = r + (bw - 1 - c) * 2;
+ const int h = r + (bw - 1 - c) * 2;
ypred_ptr[h * y_stride + c] =
ROUND_POWER_OF_TWO(ypred_ptr[h * y_stride + c - 1] +
ypred_ptr[(h - 1) * y_stride + c], 1);
@@ -129,7 +129,7 @@ static void d63_predictor(uint8_t *ypred_ptr, int y_stride,
for (r = 1; r < bh; r++) {
for (c = bw - 1; c >= bw / 2 + r / 2; --c) {
- int h = r + (bw - 1 - c) * 2;
+ const int h = r + (bw - 1 - c) * 2;
ypred_ptr[h * y_stride + c] =
ROUND_POWER_OF_TWO(ypred_ptr[h * y_stride + c - 1] +
ypred_ptr[(h - 1) * y_stride + c], 1);
@@ -197,9 +197,8 @@ static void d135_predictor(uint8_t *ypred_ptr, int y_stride,
ypred_ptr += y_stride;
for (r = 1; r < bh; ++r) {
- for (c = 1; c < bw; c++) {
+ for (c = 1; c < bw; c++)
ypred_ptr[c] = ypred_ptr[-y_stride + c - 1];
- }
ypred_ptr += y_stride;
}
}
@@ -300,14 +299,13 @@ void vp9_build_intra_predictors(uint8_t *src, int src_stride,
int r, c, i;
uint8_t yleft_col[64], yabove_data[65], ytop_left;
uint8_t *yabove_row = yabove_data + 1;
- /*
- * 127 127 127 .. 127 127 127 127 127 127
- * 129 A B .. Y Z
- * 129 C D .. W X
- * 129 E F .. U V
- * 129 G H .. S T T T T T
- * ..
- */
+
+ // 127 127 127 .. 127 127 127 127 127 127
+ // 129 A B .. Y Z
+ // 129 C D .. W X
+ // 129 E F .. U V
+ // 129 G H .. S T T T T T
+ // ..
if (left_available) {
for (i = 0; i < bh; i++)
@@ -319,42 +317,34 @@ void vp9_build_intra_predictors(uint8_t *src, int src_stride,
if (up_available) {
uint8_t *yabove_ptr = src - src_stride;
vpx_memcpy(yabove_row, yabove_ptr, bw);
- if (left_available) {
- ytop_left = yabove_ptr[-1];
- } else {
- ytop_left = 127;
- }
+ ytop_left = left_available ? yabove_ptr[-1] : 127;
} else {
vpx_memset(yabove_row, 127, bw);
ytop_left = 127;
}
yabove_row[-1] = ytop_left;
- /* for Y */
+
switch (mode) {
case DC_PRED: {
- int expected_dc;
int i;
+ int expected_dc = 128;
int average = 0;
int count = 0;
if (up_available || left_available) {
if (up_available) {
- for (i = 0; i < bw; i++) {
+ for (i = 0; i < bw; i++)
average += yabove_row[i];
- }
count += bw;
}
if (left_available) {
- for (i = 0; i < bh; i++) {
+ for (i = 0; i < bh; i++)
average += yleft_col[i];
- }
count += bh;
}
expected_dc = (average + (count >> 1)) / count;
- } else {
- expected_dc = 128;
}
for (r = 0; r < bh; r++) {
@@ -377,9 +367,8 @@ void vp9_build_intra_predictors(uint8_t *src, int src_stride,
break;
case TM_PRED:
for (r = 0; r < bh; r++) {
- for (c = 0; c < bw; c++) {
+ for (c = 0; c < bw; c++)
ypred_ptr[c] = clip_pixel(yleft_col[r] + yabove_row[c] - ytop_left);
- }
ypred_ptr += y_stride;
}
@@ -402,14 +391,7 @@ void vp9_build_intra_predictors(uint8_t *src, int src_stride,
case D63_PRED:
d63_predictor(ypred_ptr, y_stride, bw, bh, yabove_row, yleft_col);
break;
- case I8X8_PRED:
- case I4X4_PRED:
- case NEARESTMV:
- case NEARMV:
- case ZEROMV:
- case NEWMV:
- case SPLITMV:
- case MB_MODE_COUNT:
+ default:
break;
}
}
@@ -746,7 +728,7 @@ void vp9_intra8x8_predict(MACROBLOCKD *xd,
const int block4x4_idx = (b - xd->block);
const int block_idx = (block4x4_idx >> 2) | !!(block4x4_idx & 2);
const int have_top = (block_idx >> 1) || xd->up_available;
- const int have_left = (block_idx & 1) || xd->left_available;
+ const int have_left = (block_idx & 1) || xd->left_available;
const int have_right = !(block_idx & 1) || xd->right_available;
vp9_build_intra_predictors(*(b->base_dst) + b->dst,
@@ -761,7 +743,7 @@ void vp9_intra_uv4x4_predict(MACROBLOCKD *xd,
uint8_t *predictor, int pre_stride) {
const int block_idx = (b - xd->block) & 3;
const int have_top = (block_idx >> 1) || xd->up_available;
- const int have_left = (block_idx & 1) || xd->left_available;
+ const int have_left = (block_idx & 1) || xd->left_available;
const int have_right = !(block_idx & 1) || xd->right_available;
vp9_build_intra_predictors(*(b->base_dst) + b->dst,
diff --git a/vp9/common/vp9_treecoder.c b/vp9/common/vp9_treecoder.c
index 6e2597954..3f049b5b3 100644
--- a/vp9/common/vp9_treecoder.c
+++ b/vp9/common/vp9_treecoder.c
@@ -18,32 +18,27 @@
#include "vp9/common/vp9_treecoder.h"
-static void tree2tok(
- struct vp9_token_struct *const p,
- vp9_tree t,
- int i,
- int v,
- int L
-) {
+static void tree2tok(struct vp9_token *const p, vp9_tree t,
+ int i, int v, int l) {
v += v;
- ++L;
+ ++l;
do {
const vp9_tree_index j = t[i++];
if (j <= 0) {
p[-j].value = v;
- p[-j].Len = L;
+ p[-j].len = l;
} else
- tree2tok(p, t, j, v, L);
+ tree2tok(p, t, j, v, l);
} while (++v & 1);
}
-void vp9_tokens_from_tree(struct vp9_token_struct *p, vp9_tree t) {
+void vp9_tokens_from_tree(struct vp9_token *p, vp9_tree t) {
tree2tok(p, t, 0, 0, 0);
}
-void vp9_tokens_from_tree_offset(struct vp9_token_struct *p, vp9_tree t,
+void vp9_tokens_from_tree_offset(struct vp9_token *p, vp9_tree t,
int offset) {
tree2tok(p - offset, t, 0, 0, 0);
}
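
tree2tok assigns each leaf its prefix code: value is the bit pattern accumulated along the path from the root and len is its depth. Running the (renamed) routine on vp9_mv_fp_tree from vp9_entropymv.c above yields the codes 0, 10, 110, 111. A standalone replica:

    #include <stdint.h>
    #include <stdio.h>

    typedef int8_t vp9_tree_index;
    struct vp9_token { int value; int len; };

    static void tree2tok(struct vp9_token *const p, const vp9_tree_index *t,
                         int i, int v, int l) {
      v += v;
      ++l;
      do {
        const vp9_tree_index j = t[i++];
        if (j <= 0) {             /* leaf: -j is the token index */
          p[-j].value = v;
          p[-j].len = l;
        } else {
          tree2tok(p, t, j, v, l);
        }
      } while (++v & 1);
    }

    int main(void) {
      /* vp9_mv_fp_tree: { -0, 2, -1, 4, -2, -3 } */
      const vp9_tree_index fp_tree[6] = { 0, 2, -1, 4, -2, -3 };
      struct vp9_token tok[4];
      int i;
      tree2tok(tok, fp_tree, 0, 0, 0);
      for (i = 0; i < 4; i++)  /* prints 0/1, 2/2, 6/3, 7/3 */
        printf("token %d: value=%d len=%d\n", i, tok[i].value, tok[i].len);
      return 0;
    }
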
diff --git a/vp9/common/vp9_treecoder.h b/vp9/common/vp9_treecoder.h
index 9297d5280..ebcd4116f 100644
--- a/vp9/common/vp9_treecoder.h
+++ b/vp9/common/vp9_treecoder.h
@@ -13,6 +13,7 @@
#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_common.h"
typedef uint8_t vp9_prob;
@@ -31,16 +32,15 @@ typedef int8_t vp9_tree_index;
typedef const vp9_tree_index vp9_tree[], *vp9_tree_p;
-typedef const struct vp9_token_struct {
+struct vp9_token {
int value;
- int Len;
-} vp9_token;
+ int len;
+};
/* Construct encoding array from tree. */
-void vp9_tokens_from_tree(struct vp9_token_struct *, vp9_tree);
-void vp9_tokens_from_tree_offset(struct vp9_token_struct *, vp9_tree,
- int offset);
+void vp9_tokens_from_tree(struct vp9_token*, vp9_tree);
+void vp9_tokens_from_tree_offset(struct vp9_token*, vp9_tree, int offset);
/* Convert array of token occurrence counts into a table of probabilities
for the associated binary encoding tree. Also writes count of branches
@@ -76,7 +76,7 @@ static INLINE vp9_prob get_binary_prob(int n0, int n1) {
/* this function assumes prob1 and prob2 are already within [1,255] range */
static INLINE vp9_prob weighted_prob(int prob1, int prob2, int factor) {
- return (prob1 * (256 - factor) + prob2 * factor + 128) >> 8;
+ return ROUND_POWER_OF_TWO(prob1 * (256 - factor) + prob2 * factor, 8);
}
#endif // VP9_COMMON_VP9_TREECODER_H_
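
The weighted_prob rewrite is behavior-preserving: ROUND_POWER_OF_TWO(v, 8), as defined in vp9_common.h (hence the new include at the top of this header), expands to (v + 128) >> 8, exactly the old expression. A quick check:

    #include <stdio.h>

    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

    static int weighted_prob(int prob1, int prob2, int factor) {
      return ROUND_POWER_OF_TWO(prob1 * (256 - factor) + prob2 * factor, 8);
    }

    int main(void) {
      /* (100*192 + 200*64 + 128) >> 8 = 32128 >> 8 = 125 */
      printf("%d\n", weighted_prob(100, 200, 64));
      return 0;
    }
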
diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c
index 2f702433b..43e5d02e7 100644
--- a/vp9/decoder/vp9_decodframe.c
+++ b/vp9/decoder/vp9_decodframe.c
@@ -187,25 +187,12 @@ static void skip_recon_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
int mb_row, int mb_col,
BLOCK_SIZE_TYPE bsize) {
MODE_INFO *m = xd->mode_info_context;
- BLOCK_SIZE_TYPE sb_type = m->mbmi.sb_type;
- if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
+ if (m->mbmi.ref_frame == INTRA_FRAME) {
vp9_build_intra_predictors_sbuv_s(xd, bsize);
vp9_build_intra_predictors_sby_s(xd, bsize);
} else {
- if (sb_type == BLOCK_SIZE_SB64X64) {
- vp9_build_inter64x64_predictors_sb(xd, mb_row, mb_col);
- } else if (sb_type == BLOCK_SIZE_SB32X32) {
- vp9_build_inter32x32_predictors_sb(xd, mb_row, mb_col);
- } else {
- vp9_build_inter16x16_predictors_mb(xd,
- xd->dst.y_buffer,
- xd->dst.u_buffer,
- xd->dst.v_buffer,
- xd->dst.y_stride,
- xd->dst.uv_stride,
- mb_row, mb_col);
- }
+ vp9_build_inter_predictors_sb(xd, mb_row, mb_col, bsize);
}
#if CONFIG_CODE_NONZEROCOUNT
vpx_memset(m->mbmi.nzcs, 0, 384 * sizeof(m->mbmi.nzcs[0]));
@@ -703,19 +690,12 @@ static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col,
return;
}
- // TODO(jingning): need to combine inter predictor functions and
- // make them block size independent.
// generate prediction
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
vp9_build_intra_predictors_sby_s(xd, bsize);
vp9_build_intra_predictors_sbuv_s(xd, bsize);
} else {
- if (bsize == BLOCK_SIZE_SB64X64) {
- vp9_build_inter64x64_predictors_sb(xd, mb_row, mb_col);
- } else {
- assert(bsize == BLOCK_SIZE_SB32X32);
- vp9_build_inter32x32_predictors_sb(xd, mb_row, mb_col);
- }
+ vp9_build_inter_predictors_sb(xd, mb_row, mb_col, bsize);
}
// dequantization and idct
@@ -864,10 +844,8 @@ static int get_delta_q(vp9_reader *r, int *dq) {
const int old_value = *dq;
if (vp9_read_bit(r)) { // Update bit
- int value = vp9_read_literal(r, 4);
- if (vp9_read_bit(r)) // Sign bit
- value = -value;
- *dq = value;
+ const int value = vp9_read_literal(r, 4);
+ *dq = vp9_read_and_apply_sign(r, value);
}
// Trigger a quantizer update if the delta-q value has changed
@@ -1252,9 +1230,8 @@ static void setup_segmentation(VP9_COMMON *pc, MACROBLOCKD *xd, vp9_reader *r) {
if (feature_enabled) {
vp9_enable_segfeature(xd, i, j);
data = vp9_decode_unsigned_max(r, vp9_seg_feature_data_max(j));
- if (vp9_is_segfeature_signed(j) && vp9_read_bit(r)) {
- data = -data;
- }
+ if (vp9_is_segfeature_signed(j))
+ data = vp9_read_and_apply_sign(r, data);
}
vp9_set_segdata(xd, i, j, data);
}
@@ -1303,19 +1280,15 @@ static void setup_loopfilter(VP9_COMMON *pc, MACROBLOCKD *xd, vp9_reader *r) {
for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
if (vp9_read_bit(r)) {
- int value = vp9_read_literal(r, 6);
- if (vp9_read_bit(r))
- value = -value;
- xd->ref_lf_deltas[i] = value;
+ const int value = vp9_read_literal(r, 6);
+ xd->ref_lf_deltas[i] = vp9_read_and_apply_sign(r, value);
}
}
for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
if (vp9_read_bit(r)) {
- int value = vp9_read_literal(r, 6);
- if (vp9_read_bit(r))
- value = -value;
- xd->mode_lf_deltas[i] = value;
+ const int value = vp9_read_literal(r, 6);
+ xd->mode_lf_deltas[i] = vp9_read_and_apply_sign(r, value);
}
}
}
diff --git a/vp9/decoder/vp9_dequantize.c b/vp9/decoder/vp9_dequantize.c
index 8b3bb732d..ade216a0c 100644
--- a/vp9/decoder/vp9_dequantize.c
+++ b/vp9/decoder/vp9_dequantize.c
@@ -100,10 +100,7 @@ void vp9_dequant_iht_add_8x8_c(TX_TYPE tx_type, int16_t *input,
int pitch, int stride, int eob) {
DECLARE_ALIGNED_ARRAY(16, int16_t, output, 64);
- if (eob == 0) {
- // All 0 DCT coefficients
- vp9_copy_mem8x8(pred, pitch, dest, stride);
- } else if (eob > 0) {
+ if (eob > 0) {
int i;
input[0] *= dq[0];
diff --git a/vp9/decoder/vp9_detokenize.c b/vp9/decoder/vp9_detokenize.c
index 3df841b88..d98d5eba8 100644
--- a/vp9/decoder/vp9_detokenize.c
+++ b/vp9/decoder/vp9_detokenize.c
@@ -60,11 +60,6 @@ static const vp9_prob cat6_prob[15] = {
DECLARE_ALIGNED(16, extern const uint8_t, vp9_norm[256]);
-static int16_t get_signed(BOOL_DECODER *br, int16_t value_to_sign) {
- return decode_bool(br, 128) ? -value_to_sign : value_to_sign;
-}
-
-
#define INCREMENT_COUNT(token) \
do { \
coef_counts[type][ref][get_coef_band(scan, txfm_size, c)] \
@@ -77,7 +72,7 @@ static int16_t get_signed(BOOL_DECODER *br, int16_t value_to_sign) {
#if CONFIG_CODE_NONZEROCOUNT
#define WRITE_COEF_CONTINUE(val, token) \
{ \
- qcoeff_ptr[scan[c]] = get_signed(br, val); \
+ qcoeff_ptr[scan[c]] = vp9_read_and_apply_sign(br, val); \
INCREMENT_COUNT(token); \
c++; \
nzc++; \
@@ -86,7 +81,7 @@ static int16_t get_signed(BOOL_DECODER *br, int16_t value_to_sign) {
#else
#define WRITE_COEF_CONTINUE(val, token) \
{ \
- qcoeff_ptr[scan[c]] = get_signed(br, val); \
+ qcoeff_ptr[scan[c]] = vp9_read_and_apply_sign(br, val); \
INCREMENT_COUNT(token); \
c++; \
continue; \
diff --git a/vp9/decoder/vp9_onyxd_if.c b/vp9/decoder/vp9_onyxd_if.c
index 3c7ac0fcd..b64b7e4dc 100644
--- a/vp9/decoder/vp9_onyxd_if.c
+++ b/vp9/decoder/vp9_onyxd_if.c
@@ -249,7 +249,7 @@ int vp9_get_reference_dec(VP9D_PTR ptr, int index, YV12_BUFFER_CONFIG **fb) {
return 0;
}
-/* If any buffer updating is signalled it should be done here. */
+/* If any buffer updating is signaled it should be done here. */
static void swap_frame_buffers(VP9D_COMP *pbi) {
int ref_index = 0, mask;
diff --git a/vp9/decoder/vp9_treereader.h b/vp9/decoder/vp9_treereader.h
index 4ec6de99d..a43f5c867 100644
--- a/vp9/decoder/vp9_treereader.h
+++ b/vp9/decoder/vp9_treereader.h
@@ -21,6 +21,7 @@ typedef BOOL_DECODER vp9_reader;
#define vp9_read_literal decode_value
#define vp9_read_bit(r) vp9_read(r, vp9_prob_half)
#define vp9_read_prob(r) ((vp9_prob)vp9_read_literal(r, 8))
+#define vp9_read_and_apply_sign(r, value) (vp9_read_bit(r) ? -(value) : (value))
// Intent of tree data structure is to make decoding trivial.
static int treed_read(vp9_reader *const r, /* !!! must return a 0 or 1 !!! */
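
The new macro centralizes the magnitude-then-sign pattern that the vp9_decodframe.c and vp9_detokenize.c changes above switch over to. A hypothetical helper showing the shape of each call site (the patch itself uses the macro inline):

    static int read_signed_literal(vp9_reader *r, int bits) {
      const int value = vp9_read_literal(r, bits);   /* n-bit magnitude */
      return vp9_read_and_apply_sign(r, value);      /* then one sign bit */
    }
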
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index ba4c80bd4..c2ae957c8 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -118,7 +118,7 @@ static int prob_diff_update_cost(vp9_prob newp, vp9_prob oldp) {
static void update_mode(
vp9_writer *const bc,
int n,
- vp9_token tok [/* n */],
+ const struct vp9_token tok[/* n */],
vp9_tree tree,
vp9_prob Pnew [/* n-1 */],
vp9_prob Pcur [/* n-1 */],
@@ -458,12 +458,12 @@ static void pack_mb_tokens(vp9_writer* const bc,
while (p < stop) {
const int t = p->Token;
- vp9_token *const a = vp9_coef_encodings + t;
+ const struct vp9_token *const a = vp9_coef_encodings + t;
const vp9_extra_bit_struct *const b = vp9_extra_bits + t;
int i = 0;
const unsigned char *pp = p->context_tree;
int v = a->value;
- int n = a->Len;
+ int n = a->len;
if (t == EOSB_TOKEN)
{
@@ -2617,7 +2617,12 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
int refresh_mask;
// Should the GF or ARF be updated using the transmitted frame or buffer
- if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame) {
+#if CONFIG_MULTIPLE_ARF
+ if (!cpi->multi_arf_enabled && cpi->refresh_golden_frame &&
+ !cpi->refresh_alt_ref_frame) {
+#else
+ if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame) {
+#endif
/* Preserve the previously existing golden frame and update the frame in
* the alt ref slot instead. This is highly specific to the use of
* alt-ref as a forward reference, and this needs to be generalized as
@@ -2630,10 +2635,21 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
refresh_mask = (cpi->refresh_last_frame << cpi->lst_fb_idx) |
(cpi->refresh_golden_frame << cpi->alt_fb_idx);
} else {
+ int arf_idx = cpi->alt_fb_idx;
+#if CONFIG_MULTIPLE_ARF
+ // Determine which ARF buffer to use to encode this ARF frame.
+ if (cpi->multi_arf_enabled) {
+ int sn = cpi->sequence_number;
+ arf_idx = (cpi->frame_coding_order[sn] < 0) ?
+ cpi->arf_buffer_idx[sn + 1] :
+ cpi->arf_buffer_idx[sn];
+ }
+#endif
refresh_mask = (cpi->refresh_last_frame << cpi->lst_fb_idx) |
(cpi->refresh_golden_frame << cpi->gld_fb_idx) |
- (cpi->refresh_alt_ref_frame << cpi->alt_fb_idx);
+ (cpi->refresh_alt_ref_frame << arf_idx);
}
+
vp9_write_literal(&header_bc, refresh_mask, NUM_REF_FRAMES);
vp9_write_literal(&header_bc, cpi->lst_fb_idx, NUM_REF_FRAMES_LG2);
vp9_write_literal(&header_bc, cpi->gld_fb_idx, NUM_REF_FRAMES_LG2);
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index aae829849..b4ba8dc1f 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -431,6 +431,7 @@ static void update_state(VP9_COMP *cpi,
BLOCK_SIZE_TYPE bsize,
int output_enabled) {
int i, x_idx, y;
+ VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *mi = &ctx->mic;
@@ -485,12 +486,29 @@ static void update_state(VP9_COMP *cpi,
return;
{
- int segment_id = mbmi->segment_id;
+ int segment_id = mbmi->segment_id, ref_pred_flag;
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
for (i = 0; i < NB_TXFM_MODES; i++) {
cpi->rd_tx_select_diff[i] += ctx->txfm_rd_diff[i];
}
}
+
+ // Did the chosen reference frame match its predicted value.
+ ref_pred_flag = ((xd->mode_info_context->mbmi.ref_frame ==
+ vp9_get_pred_ref(cm, xd)));
+ vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);
+ if (!xd->segmentation_enabled ||
+ !vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) ||
+ vp9_check_segref(xd, segment_id, INTRA_FRAME) +
+ vp9_check_segref(xd, segment_id, LAST_FRAME) +
+ vp9_check_segref(xd, segment_id, GOLDEN_FRAME) +
+ vp9_check_segref(xd, segment_id, ALTREF_FRAME) > 1) {
+ // Get the prediction context and status
+ int pred_context = vp9_get_pred_context(cm, xd, PRED_REF);
+
+ // Count prediction success
+ cpi->ref_pred_count[pred_context][ref_pred_flag]++;
+ }
}
if (cpi->common.frame_type == KEY_FRAME) {
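
The block moved into update_state() records, per prediction context, whether the reference frame chosen by RD matched the predicted one; those tallies later feed the coded probabilities. The bookkeeping in essence (names hypothetical):

    static void count_ref_pred(int ref_pred_count[][2], int context,
                               int chosen_ref, int predicted_ref) {
      const int hit = (chosen_ref == predicted_ref);
      ref_pred_count[context][hit]++;  /* later: get_binary_prob(n0, n1) */
    }
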
@@ -709,116 +727,44 @@ static void set_offsets(VP9_COMP *cpi,
}
}
-static int pick_mb_modes(VP9_COMP *cpi,
- int mb_row0,
- int mb_col0,
- TOKENEXTRA **tp,
- int *totalrate,
- int *totaldist) {
+static int pick_mb_mode(VP9_COMP *cpi,
+ int mb_row,
+ int mb_col,
+ TOKENEXTRA **tp,
+ int *totalrate,
+ int *totaldist) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- int i;
int splitmodes_used = 0;
- ENTROPY_CONTEXT_PLANES left_context[2];
- ENTROPY_CONTEXT_PLANES above_context[2];
- ENTROPY_CONTEXT_PLANES *initial_above_context_ptr = cm->above_context
- + mb_col0;
-
- /* Function should not modify L & A contexts; save and restore on exit */
- vpx_memcpy(left_context,
- cm->left_context + (mb_row0 & 2),
- sizeof(left_context));
- vpx_memcpy(above_context,
- initial_above_context_ptr,
- sizeof(above_context));
-
- /* Encode MBs in raster order within the SB */
- for (i = 0; i < 4; i++) {
- const int x_idx = i & 1, y_idx = i >> 1;
- const int mb_row = mb_row0 + y_idx;
- const int mb_col = mb_col0 + x_idx;
- MB_MODE_INFO *mbmi;
-
- if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols)) {
- // MB lies outside frame, move on
- continue;
- }
-
- // Index of the MB in the SB 0..3
- xd->mb_index = i;
- set_offsets(cpi, mb_row, mb_col, BLOCK_SIZE_MB16X16);
-
- if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
- vp9_activity_masking(cpi, x);
-
- mbmi = &xd->mode_info_context->mbmi;
- mbmi->sb_type = BLOCK_SIZE_MB16X16;
-
- // Find best coding mode & reconstruct the MB so it is available
- // as a predictor for MBs that follow in the SB
- if (cm->frame_type == KEY_FRAME) {
- int r, d;
-#if 0 // ENC_DEBUG
- if (enc_debug)
- printf("intra pick_mb_modes %d %d\n", mb_row, mb_col);
-#endif
- vp9_rd_pick_intra_mode(cpi, x, &r, &d);
- *totalrate += r;
- *totaldist += d;
-
- // Dummy encode, do not do the tokenization
- encode_macroblock(cpi, tp, 0, mb_row, mb_col);
+ MB_MODE_INFO *mbmi;
- // Note the encoder may have changed the segment_id
+ set_offsets(cpi, mb_row, mb_col, BLOCK_SIZE_MB16X16);
- // Save the coding context
- vpx_memcpy(&x->mb_context[xd->sb_index][i].mic, xd->mode_info_context,
- sizeof(MODE_INFO));
- } else {
- int seg_id, r, d;
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
+ vp9_activity_masking(cpi, x);
-#if 0 // ENC_DEBUG
- if (enc_debug)
- printf("inter pick_mb_modes %d %d\n", mb_row, mb_col);
-#endif
- vp9_pick_mode_inter_macroblock(cpi, x, mb_row, mb_col, &r, &d);
- *totalrate += r;
- *totaldist += d;
+ mbmi = &xd->mode_info_context->mbmi;
+ mbmi->sb_type = BLOCK_SIZE_MB16X16;
- splitmodes_used += (mbmi->mode == SPLITMV);
+ // Find best coding mode & reconstruct the MB so it is available
+ // as a predictor for MBs that follow in the SB
+ if (cm->frame_type == KEY_FRAME) {
+ vp9_rd_pick_intra_mode(cpi, x, totalrate, totaldist);
- // Dummy encode, do not do the tokenization
- encode_macroblock(cpi, tp, 0, mb_row, mb_col);
+ // Save the coding context
+ vpx_memcpy(&x->mb_context[xd->sb_index][xd->mb_index].mic,
+ xd->mode_info_context, sizeof(MODE_INFO));
+ } else {
+ vp9_pick_mode_inter_macroblock(cpi, x, mb_row, mb_col,
+ totalrate, totaldist);
+ splitmodes_used += (mbmi->mode == SPLITMV);
- seg_id = mbmi->segment_id;
- if (cpi->mb.e_mbd.segmentation_enabled && seg_id == 0) {
- cpi->seg0_idx++;
- }
- if (!xd->segmentation_enabled ||
- !vp9_segfeature_active(xd, seg_id, SEG_LVL_REF_FRAME) ||
- vp9_check_segref(xd, seg_id, INTRA_FRAME) +
- vp9_check_segref(xd, seg_id, LAST_FRAME) +
- vp9_check_segref(xd, seg_id, GOLDEN_FRAME) +
- vp9_check_segref(xd, seg_id, ALTREF_FRAME) > 1) {
- // Get the prediction context and status
- int pred_flag = vp9_get_pred_flag(xd, PRED_REF);
- int pred_context = vp9_get_pred_context(cm, xd, PRED_REF);
-
- // Count prediction success
- cpi->ref_pred_count[pred_context][pred_flag]++;
- }
+ if (cpi->mb.e_mbd.segmentation_enabled && mbmi->segment_id == 0) {
+ cpi->seg0_idx++;
}
}
- /* Restore L & A coding context to those in place on entry */
- vpx_memcpy(cm->left_context + (mb_row0 & 2),
- left_context,
- sizeof(left_context));
- vpx_memcpy(initial_above_context_ptr,
- above_context,
- sizeof(above_context));
-
return splitmodes_used;
}
@@ -1038,14 +984,45 @@ static void encode_sb_row(VP9_COMP *cpi,
int sb_rate = INT_MAX, sb_dist;
int splitmodes_used = 0;
int sb32_skip = 0;
+ int j;
+ ENTROPY_CONTEXT_PLANES l2[2], a2[2];
if (mb_row + y_idx >= cm->mb_rows || mb_col + x_idx >= cm->mb_cols)
continue;
xd->sb_index = i;
- splitmodes_used = pick_mb_modes(cpi, mb_row + y_idx, mb_col + x_idx,
- tp, &mb_rate, &mb_dist);
+ /* Function should not modify L & A contexts; save and restore on exit */
+ vpx_memcpy(l2, cm->left_context + y_idx, sizeof(l2));
+ vpx_memcpy(a2, cm->above_context + mb_col + x_idx, sizeof(a2));
+
+ /* Encode MBs in raster order within the SB */
+ for (j = 0; j < 4; j++) {
+ const int x_idx_m = x_idx + (j & 1), y_idx_m = y_idx + (j >> 1);
+ int r, d;
+
+ if (mb_row + y_idx_m >= cm->mb_rows ||
+ mb_col + x_idx_m >= cm->mb_cols) {
+ // MB lies outside frame, move on
+ continue;
+ }
+
+ // Index of the MB in the SB 0..3
+ xd->mb_index = j;
+
+ splitmodes_used += pick_mb_mode(cpi, mb_row + y_idx_m,
+ mb_col + x_idx_m, tp, &r, &d);
+ mb_rate += r;
+ mb_dist += d;
+
+ // Dummy encode, do not do the tokenization
+ encode_macroblock(cpi, tp, 0, mb_row + y_idx_m,
+ mb_col + x_idx_m);
+ }
+
+ /* Restore L & A coding context to those in place on entry */
+ vpx_memcpy(cm->left_context + y_idx, l2, sizeof(l2));
+ vpx_memcpy(cm->above_context + mb_col + x_idx, a2, sizeof(a2));
mb_rate += vp9_cost_bit(cm->prob_sb32_coded, 0);
@@ -1875,7 +1852,6 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
MODE_INFO *mi = xd->mode_info_context;
MB_MODE_INFO *const mbmi = &mi->mbmi;
const int mis = cm->mode_info_stride;
- unsigned char ref_pred_flag;
assert(!xd->mode_info_context->mbmi.sb_type);
@@ -1919,12 +1895,6 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
}
vp9_update_zbin_extra(cpi, x);
-
- // SET VARIOUS PREDICTION FLAGS
-
- // Did the chosen reference frame match its predicted value.
- ref_pred_flag = ((mbmi->ref_frame == vp9_get_pred_ref(cm, xd)));
- vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);
}
if (mbmi->ref_frame == INTRA_FRAME) {
@@ -2134,7 +2104,6 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
uint8_t *vdst = xd->dst.v_buffer;
int src_y_stride = x->src.y_stride, dst_y_stride = xd->dst.y_stride;
int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;
- unsigned char ref_pred_flag;
int n;
MODE_INFO *mi = x->e_mbd.mode_info_context;
unsigned int segment_id = mi->mbmi.segment_id;
@@ -2176,11 +2145,6 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
}
vp9_update_zbin_extra(cpi, x);
-
- // Did the chosen reference frame match its predicted value.
- ref_pred_flag = ((xd->mode_info_context->mbmi.ref_frame ==
- vp9_get_pred_ref(cm, xd)));
- vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);
}
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
@@ -2221,11 +2185,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
&xd->scale_factor[1], &xd->scale_factor_uv[1]);
}
- if (bsize == BLOCK_SIZE_SB32X32) {
- vp9_build_inter32x32_predictors_sb(xd, mb_row, mb_col);
- } else {
- vp9_build_inter64x64_predictors_sb(xd, mb_row, mb_col);
- }
+ vp9_build_inter_predictors_sb(xd, mb_row, mb_col, bsize);
}
if (!x->skip) {
diff --git a/vp9/encoder/vp9_encodemv.c b/vp9/encoder/vp9_encodemv.c
index 9431f0781..918a0bd7d 100644
--- a/vp9/encoder/vp9_encodemv.c
+++ b/vp9/encoder/vp9_encodemv.c
@@ -603,59 +603,33 @@ void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x,
if (mbmi->mode == SPLITMV) {
int i;
-
- for (i = 0; i < x->partition_info->count; i++) {
- if (x->partition_info->bmi[i].mode == NEW4X4) {
- if (x->e_mbd.allow_high_precision_mv) {
- mv.row = (x->partition_info->bmi[i].mv.as_mv.row
- - best_ref_mv->as_mv.row);
- mv.col = (x->partition_info->bmi[i].mv.as_mv.col
- - best_ref_mv->as_mv.col);
- vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame > 0) {
- mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
- - second_best_ref_mv->as_mv.row);
- mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
- - second_best_ref_mv->as_mv.col);
- vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv,
- &cpi->NMVcount, 1);
- }
- } else {
- mv.row = (x->partition_info->bmi[i].mv.as_mv.row
- - best_ref_mv->as_mv.row);
- mv.col = (x->partition_info->bmi[i].mv.as_mv.col
- - best_ref_mv->as_mv.col);
- vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame > 0) {
- mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
- - second_best_ref_mv->as_mv.row);
- mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
- - second_best_ref_mv->as_mv.col);
- vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv,
- &cpi->NMVcount, 0);
- }
+ PARTITION_INFO *pi = x->partition_info;
+ for (i = 0; i < pi->count; i++) {
+ if (pi->bmi[i].mode == NEW4X4) {
+ mv.row = (pi->bmi[i].mv.as_mv.row - best_ref_mv->as_mv.row);
+ mv.col = (pi->bmi[i].mv.as_mv.col - best_ref_mv->as_mv.col);
+ vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount,
+ x->e_mbd.allow_high_precision_mv);
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame > 0) {
+ mv.row = pi->bmi[i].second_mv.as_mv.row -
+ second_best_ref_mv->as_mv.row;
+ mv.col = pi->bmi[i].second_mv.as_mv.col -
+ second_best_ref_mv->as_mv.col;
+ vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount,
+ x->e_mbd.allow_high_precision_mv);
}
}
}
} else if (mbmi->mode == NEWMV) {
- if (x->e_mbd.allow_high_precision_mv) {
- mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
- mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
- vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
- if (mbmi->second_ref_frame > 0) {
- mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
- mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
- vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 1);
- }
- } else {
- mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
- mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
- vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
- if (mbmi->second_ref_frame > 0) {
- mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
- mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
- vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 0);
- }
+ mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
+ vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount,
+ x->e_mbd.allow_high_precision_mv);
+ if (mbmi->second_ref_frame > 0) {
+ mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
+ vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount,
+ x->e_mbd.allow_high_precision_mv);
}
}
}
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index 70f9e3153..1dd2a7dd8 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -317,15 +317,20 @@ static double simple_weight(YV12_BUFFER_CONFIG *source) {
}
-// This function returns the current per frame maximum bitrate target
+// This function returns the current per frame maximum bitrate target.
static int frame_max_bits(VP9_COMP *cpi) {
- // Max allocation for a single frame based on the max section guidelines passed in and how many bits are left
+ // Max allocation for a single frame based on the max section guidelines
+ // passed in and how many bits are left.
int max_bits;
- // For VBR base this on the bits and frames left plus the two_pass_vbrmax_section rate passed in by the user
- max_bits = (int)(((double)cpi->twopass.bits_left / (cpi->twopass.total_stats->count - (double)cpi->common.current_video_frame)) * ((double)cpi->oxcf.two_pass_vbrmax_section / 100.0));
+ // For VBR base this on the bits and frames left plus the
+ // two_pass_vbrmax_section rate passed in by the user.
+ max_bits = (int) (((double) cpi->twopass.bits_left
+ / (cpi->twopass.total_stats->count - (double) cpi->common
+ .current_video_frame))
+ * ((double) cpi->oxcf.two_pass_vbrmax_section / 100.0));
- // Trap case where we are out of bits
+ // Trap case where we are out of bits.
if (max_bits < 0)
max_bits = 0;
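
A worked instance of the reformatted formula, with made-up numbers: 10,000,000 bits left across 100 remaining frames averages 100,000 bits per frame; a two_pass_vbrmax_section of 400 (percent) then caps any single frame at 400,000 bits:

    /* bits_left = 10e6, frames remaining = 100, vbrmax_section = 400 */
    int max_bits = (int)((10000000.0 / 100.0) * (400.0 / 100.0));  /* 400000 */
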
@@ -746,7 +751,7 @@ void vp9_first_pass(VP9_COMP *cpi) {
}
// TODO: handle the case when duration is set to 0, or something less
- // than the full time between subsequent cpi->source_time_stamp s .
+ // than the full time between subsequent values of cpi->source_time_stamp.
fps.duration = (double)(cpi->source->ts_end
- cpi->source->ts_start);
@@ -873,7 +878,7 @@ static double calc_correction_factor(double err_per_mb,
// Given a current maxQ value sets a range for future values.
// PGW TODO..
-// This code removes direct dependency on QIndex to determin the range
+// This code removes direct dependency on QIndex to determine the range
// (now uses the actual quantizer) but has not been tuned.
static void adjust_maxq_qrange(VP9_COMP *cpi) {
int i;
@@ -991,7 +996,7 @@ static int estimate_max_q(VP9_COMP *cpi,
}
// Adjust maxq_min_limit and maxq_max_limit limits based on
- // averaga q observed in clip for non kf/gf/arf frames
+ // average q observed in clip for non kf/gf/arf frames
// Give average a chance to settle though.
// PGW TODO.. This code is broken for the extended Q range
if ((cpi->ni_frames >
@@ -1379,7 +1384,7 @@ static int calc_arf_boost(
&this_frame_mv_in_out, &mv_in_out_accumulator,
&abs_mv_in_out_accumulator, &mv_ratio_accumulator);
- // We want to discount the the flash frame itself and the recovery
+ // We want to discount the flash frame itself and the recovery
// frame that follows as both will have poor scores.
flash_detected = detect_flash(cpi, (i + offset)) ||
detect_flash(cpi, (i + offset + 1));
@@ -1442,9 +1447,8 @@ static int calc_arf_boost(
return arf_boost;
}
-static void configure_arnr_filter(VP9_COMP *cpi,
- FIRSTPASS_STATS *this_frame,
- int group_boost) {
+void configure_arnr_filter(VP9_COMP *cpi, const unsigned int this_frame,
+ int group_boost) {
int half_gf_int;
int frames_after_arf;
int frames_bwd = cpi->oxcf.arnr_max_frames - 1;
@@ -1458,8 +1462,7 @@ static void configure_arnr_filter(VP9_COMP *cpi,
// Note: this_frame->frame has been updated in the loop
// so it now points at the ARF frame.
half_gf_int = cpi->baseline_gf_interval >> 1;
- frames_after_arf = (int)(cpi->twopass.total_stats->count -
- this_frame->frame - 1);
+ frames_after_arf = (int)(cpi->twopass.total_stats->count - this_frame - 1);
switch (cpi->oxcf.arnr_type) {
case 1: // Backward filter
@@ -1515,7 +1518,144 @@ static void configure_arnr_filter(VP9_COMP *cpi,
}
}
-// Analyse and define a gf/arf group .
+#if CONFIG_MULTIPLE_ARF
+// Work out the frame coding order for a GF or an ARF group.
+// The current implementation codes frames in their natural order for a
+// GF group, and inserts additional ARFs into an ARF group using a
+// binary split approach.
+// NOTE: this function is currently implemented recursively.
+static void schedule_frames(VP9_COMP *cpi, const int start, const int end,
+ const int arf_idx, const int gf_or_arf_group,
+ const int level) {
+ int i, abs_end, half_range;
+ int *cfo = cpi->frame_coding_order;
+ int idx = cpi->new_frame_coding_order_period;
+
+ // If (end < 0) an ARF should be coded at position (-end).
+ assert(start >= 0);
+
+ // printf("start:%d end:%d\n", start, end);
+
+ // GF Group: code frames in their natural order.
+ if (gf_or_arf_group == 0) {
+ assert(end >= start);
+ for (i = start; i <= end; ++i) {
+ cfo[idx] = i;
+ cpi->arf_buffer_idx[idx] = arf_idx;
+ cpi->arf_weight[idx] = -1;
+ ++idx;
+ }
+ cpi->new_frame_coding_order_period = idx;
+ return;
+ }
+
+ // ARF Group: work out the ARF schedule.
+ // Mark ARF frames as negative.
+ if (end < 0) {
+ // printf("start:%d end:%d\n", -end, -end);
+ // ARF frame is at the end of the range.
+ cfo[idx] = end;
+ // Which ARF buffer does this ARF use as a predictor?
+ cpi->arf_buffer_idx[idx] = (arf_idx > 2) ? (arf_idx - 1) : 2;
+ cpi->arf_weight[idx] = level;
+ ++idx;
+ abs_end = -end;
+ } else {
+ abs_end = end;
+ }
+
+ half_range = (abs_end - start) >> 1;
+
+ // ARFs may not be adjacent; they must be separated by at least
+ // MIN_GF_INTERVAL non-ARF frames.
+ if ((start + MIN_GF_INTERVAL) >= (abs_end - MIN_GF_INTERVAL)) {
+ // printf("start:%d end:%d\n", start, abs_end);
+ // Update the coding order and active ARF.
+ for (i = start; i <= abs_end; ++i) {
+ cfo[idx] = i;
+ cpi->arf_buffer_idx[idx] = arf_idx;
+ cpi->arf_weight[idx] = -1;
+ ++idx;
+ }
+ cpi->new_frame_coding_order_period = idx;
+ } else {
+ // Place a new ARF at the mid-point of the range.
+ cpi->new_frame_coding_order_period = idx;
+ schedule_frames(cpi, start, -(start + half_range), arf_idx + 1,
+ gf_or_arf_group, level + 1);
+ schedule_frames(cpi, start + half_range + 1, abs_end, arf_idx,
+ gf_or_arf_group, level + 1);
+ }
+}
+
+#define FIXED_ARF_GROUP_SIZE 16
+
+void define_fixed_arf_period(VP9_COMP *cpi) {
+ int i;
+ int max_level = INT_MIN;
+
+ assert(cpi->multi_arf_enabled);
+ assert(cpi->oxcf.lag_in_frames >= FIXED_ARF_GROUP_SIZE);
+
+ // Save the weight of the last frame in the sequence before the next
+ // sequence pattern overwrites it.
+ cpi->this_frame_weight = cpi->arf_weight[cpi->sequence_number];
+ assert(cpi->this_frame_weight >= 0);
+
+ // Initialize frame coding order variables.
+ cpi->new_frame_coding_order_period = 0;
+ cpi->next_frame_in_order = 0;
+ cpi->arf_buffered = 0;
+ vp9_zero(cpi->frame_coding_order);
+ vp9_zero(cpi->arf_buffer_idx);
+ vpx_memset(cpi->arf_weight, -1, sizeof(cpi->arf_weight));
+
+ if (cpi->twopass.frames_to_key <= (FIXED_ARF_GROUP_SIZE + 8)) {
+ // Set up a GF group close to the keyframe.
+ cpi->source_alt_ref_pending = FALSE;
+ cpi->baseline_gf_interval = cpi->twopass.frames_to_key;
+ schedule_frames(cpi, 0, (cpi->baseline_gf_interval - 1), 2, 0, 0);
+ } else {
+ // Set up a fixed-period ARF group.
+ cpi->source_alt_ref_pending = TRUE;
+ cpi->baseline_gf_interval = FIXED_ARF_GROUP_SIZE;
+ schedule_frames(cpi, 0, -(cpi->baseline_gf_interval - 1), 2, 1, 0);
+ }
+
+ // Replace the level indicator of -1 with the correct level.
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ if (cpi->arf_weight[i] > max_level) {
+ max_level = cpi->arf_weight[i];
+ }
+ }
+ ++max_level;
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ if (cpi->arf_weight[i] == -1) {
+ cpi->arf_weight[i] = max_level;
+ }
+ }
+ cpi->max_arf_level = max_level;
+#if 0
+ printf("\nSchedule: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->frame_coding_order[i]);
+ }
+ printf("\n");
+ printf("ARFref: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->arf_buffer_idx[i]);
+ }
+ printf("\n");
+ printf("Weight: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->arf_weight[i]);
+ }
+ printf("\n");
+#endif
+}
+#endif
+
+// Analyse and define a gf/arf group.
static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
FIRSTPASS_STATS next_frame;
FIRSTPASS_STATS *start_pos;
@@ -1619,7 +1759,7 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
}
// Break clause to detect very still sections after motion
- // (for example a staic image after a fade or other transition).
+ // (for example a static image after a fade or other transition).
if (detect_transition_to_still(cpi, i, 5, loop_decay_rate,
last_loop_decay_rate)) {
allow_alt_ref = FALSE;
@@ -1637,9 +1777,9 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
// Break at cpi->max_gf_interval unless almost totally static
(i >= active_max_gf_interval && (zero_motion_accumulator < 0.995)) ||
(
- // Dont break out with a very short interval
+ // Don't break out with a very short interval
(i > MIN_GF_INTERVAL) &&
- // Dont break out very close to a key frame
+ // Don't break out very close to a key frame
((cpi->twopass.frames_to_key - i) >= MIN_GF_INTERVAL) &&
((boost_score > 125.0) || (next_frame.pcnt_inter < 0.75)) &&
(!flash_detected) &&
@@ -1657,7 +1797,7 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
old_boost_score = boost_score;
}
- // Dont allow a gf too near the next kf
+ // Don't allow a gf too near the next kf
if ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL) {
while (i < cpi->twopass.frames_to_key) {
i++;
@@ -1672,10 +1812,22 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
}
}
- // Set the interval till the next gf or arf.
+ // Set the interval until the next gf or arf.
cpi->baseline_gf_interval = i;
- // Should we use the alternate refernce frame
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ // Initialize frame coding order variables.
+ cpi->new_frame_coding_order_period = 0;
+ cpi->next_frame_in_order = 0;
+ cpi->arf_buffered = 0;
+ vp9_zero(cpi->frame_coding_order);
+ vp9_zero(cpi->arf_buffer_idx);
+ vpx_memset(cpi->arf_weight, -1, sizeof(cpi->arf_weight));
+ }
+#endif
+
+ // Should we use the alternate reference frame
if (allow_alt_ref &&
(i < cpi->oxcf.lag_in_frames) &&
(i >= MIN_GF_INTERVAL) &&
@@ -1686,16 +1838,66 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
((mv_in_out_accumulator / (double)i > -0.2) ||
(mv_in_out_accumulator > -2.0)) &&
(boost_score > 100)) {
- // Alterrnative boost calculation for alt ref
+ // Alternative boost calculation for alt ref
cpi->gfu_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, &b_boost);
cpi->source_alt_ref_pending = TRUE;
- configure_arnr_filter(cpi, this_frame, cpi->gfu_boost);
+#if CONFIG_MULTIPLE_ARF
+ // Set the ARF schedule.
+ if (cpi->multi_arf_enabled) {
+ schedule_frames(cpi, 0, -(cpi->baseline_gf_interval - 1), 2, 1, 0);
+ }
+#endif
} else {
cpi->gfu_boost = (int)boost_score;
cpi->source_alt_ref_pending = FALSE;
+#if CONFIG_MULTIPLE_ARF
+ // Set the GF schedule.
+ if (cpi->multi_arf_enabled) {
+ schedule_frames(cpi, 0, cpi->baseline_gf_interval - 1, 2, 0, 0);
+ assert(cpi->new_frame_coding_order_period == cpi->baseline_gf_interval);
+ }
+#endif
}
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled && (cpi->common.frame_type != KEY_FRAME)) {
+ int max_level = INT_MIN;
+ // Replace the level indicator of -1 with the correct level.
+ for (i = 0; i < cpi->frame_coding_order_period; ++i) {
+ if (cpi->arf_weight[i] > max_level) {
+ max_level = cpi->arf_weight[i];
+ }
+ }
+ ++max_level;
+ for (i = 0; i < cpi->frame_coding_order_period; ++i) {
+ if (cpi->arf_weight[i] == -1) {
+ cpi->arf_weight[i] = max_level;
+ }
+ }
+ cpi->max_arf_level = max_level;
+ }
+#if 0
+ if (cpi->multi_arf_enabled) {
+ printf("\nSchedule: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->frame_coding_order[i]);
+ }
+ printf("\n");
+ printf("ARFref: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->arf_buffer_idx[i]);
+ }
+ printf("\n");
+ printf("Weight: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->arf_weight[i]);
+ }
+ printf("\n");
+ }
+#endif
+#endif
+
// Now decide how many bits should be allocated to the GF group as a
// proportion of those remaining in the kf group.
// The final key frame group in the clip is treated as a special case
@@ -1736,10 +1938,13 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
cpi->twopass.modified_error_used += gf_group_err;
// Assign bits to the arf or gf.
- for (i = 0; i <= (cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME); i++) {
+ for (i = 0;
+ i <= (cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME);
+ ++i) {
int boost;
int allocation_chunks;
- int Q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
+ int Q =
+ (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
int gf_bits;
boost = (cpi->gfu_boost * vp9_gfboost_qadjust(Q)) / 100;
@@ -1758,7 +1963,7 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
(cpi->baseline_gf_interval * 100) + (boost - 100);
// Prevent overflow
- if (boost > 1028) {
+ if (boost > 1028) { // TODO(agrange) Should this be 1024?
int divisor = boost >> 10;
boost /= divisor;
allocation_chunks /= divisor;
@@ -1807,18 +2012,21 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
if (gf_bits < 0)
gf_bits = 0;
- gf_bits += cpi->min_frame_bandwidth; // Add in minimum for a frame
+ // Add in minimum for a frame
+ gf_bits += cpi->min_frame_bandwidth;
if (i == 0) {
cpi->twopass.gf_bits = gf_bits;
}
- if (i == 1 || (!cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME))) {
- cpi->per_frame_bandwidth = gf_bits; // Per frame bit target for this frame
+ if (i == 1 || (!cpi->source_alt_ref_pending
+ && (cpi->common.frame_type != KEY_FRAME))) {
+ // Per frame bit target for this frame
+ cpi->per_frame_bandwidth = gf_bits;
}
}
{
- // Adjust KF group bits and error remainin
+ // Adjust KF group bits and error remaining
cpi->twopass.kf_group_error_left -= (int64_t)gf_group_err;
cpi->twopass.kf_group_bits -= cpi->twopass.gf_group_bits;
@@ -1835,13 +2043,14 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
else
cpi->twopass.gf_group_error_left = (int64_t)gf_group_err;
- cpi->twopass.gf_group_bits -= cpi->twopass.gf_bits - cpi->min_frame_bandwidth;
+ cpi->twopass.gf_group_bits -= cpi->twopass.gf_bits -
+ cpi->min_frame_bandwidth;
if (cpi->twopass.gf_group_bits < 0)
cpi->twopass.gf_group_bits = 0;
// This condition could fail if there are two kfs very close together
- // despite (MIN_GF_INTERVAL) and would cause a devide by 0 in the
+ // despite (MIN_GF_INTERVAL) and would cause a divide by 0 in the
// calculation of cpi->twopass.alt_extra_bits.
if (cpi->baseline_gf_interval >= 3) {
int boost = (cpi->source_alt_ref_pending)
@@ -1853,6 +2062,7 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
pct_extra = (boost - 100) / 50;
pct_extra = (pct_extra > 20) ? 20 : pct_extra;
+ // TODO(agrange) Remove cpi->twopass.alt_extra_bits.
cpi->twopass.alt_extra_bits = (int)
((cpi->twopass.gf_group_bits * pct_extra) / 100);
cpi->twopass.gf_group_bits -= cpi->twopass.alt_extra_bits;
@@ -1887,24 +2097,28 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
// Allocate bits to a normal frame that is neither a gf an arf or a key frame.
static void assign_std_frame_bits(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
- int target_frame_size; // gf_group_error_left
+ int target_frame_size;
double modified_err;
- double err_fraction; // What portion of the remaining GF group error is used by this frame
+ double err_fraction;
- int max_bits = frame_max_bits(cpi); // Max for a single frame
+ // Max for a single frame.
+ int max_bits = frame_max_bits(cpi);
- // Calculate modified prediction error used in bit allocation
+ // Calculate modified prediction error used in bit allocation.
modified_err = calculate_modified_err(cpi, this_frame);
if (cpi->twopass.gf_group_error_left > 0)
- err_fraction = modified_err / cpi->twopass.gf_group_error_left; // What portion of the remaining GF group error is used by this frame
+ // What portion of the remaining GF group error is used by this frame.
+ err_fraction = modified_err / cpi->twopass.gf_group_error_left;
else
err_fraction = 0.0;
- target_frame_size = (int)((double)cpi->twopass.gf_group_bits * err_fraction); // How many of those bits available for allocation should we give it?
+ // How many of those bits available for allocation should we give it?
+ target_frame_size = (int)((double)cpi->twopass.gf_group_bits * err_fraction);
- // Clip to target size to 0 - max_bits (or cpi->twopass.gf_group_bits) at the top end.
+ // Clip target size to 0 - max_bits (or cpi->twopass.gf_group_bits) at
+ // the top end.
if (target_frame_size < 0)
target_frame_size = 0;
else {
@@ -1915,17 +2129,18 @@ static void assign_std_frame_bits(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
target_frame_size = (int)cpi->twopass.gf_group_bits;
}
- // Adjust error remaining
+ // Adjust error and bits remaining.
cpi->twopass.gf_group_error_left -= (int64_t)modified_err;
- cpi->twopass.gf_group_bits -= target_frame_size; // Adjust bits remaining
+ cpi->twopass.gf_group_bits -= target_frame_size;
if (cpi->twopass.gf_group_bits < 0)
cpi->twopass.gf_group_bits = 0;
- target_frame_size += cpi->min_frame_bandwidth; // Add in the minimum number of bits that is set aside for every frame.
-
+ // Add in the minimum number of bits that is set aside for every frame.
+ target_frame_size += cpi->min_frame_bandwidth;
- cpi->per_frame_bandwidth = target_frame_size; // Per frame bit target for this frame
+ // Per frame bit target for this frame.
+ cpi->per_frame_bandwidth = target_frame_size;
}
// Make a damped adjustment to the active max q.
@@ -2059,7 +2274,16 @@ void vp9_second_pass(VP9_COMP *cpi) {
if (cpi->frames_till_gf_update_due == 0) {
// Define next gf group and assign bits to it
vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
- define_gf_group(cpi, &this_frame_copy);
+
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ define_fixed_arf_period(cpi);
+ } else {
+#endif
+ define_gf_group(cpi, &this_frame_copy);
+#if CONFIG_MULTIPLE_ARF
+ }
+#endif
// If we are going to code an altref frame at the end of the group
// and the current frame is not a key frame....
@@ -2101,7 +2325,7 @@ void vp9_second_pass(VP9_COMP *cpi) {
cpi->twopass.frames_to_key--;
- // Update the total stats remaining sturcture
+ // Update the total stats remaining structure
subtract_stats(cpi->twopass.total_left_stats, &this_frame);
}
@@ -2178,7 +2402,8 @@ static int test_candidate_kf(VP9_COMP *cpi,
break;
}
- // If there is tolerable prediction for at least the next 3 frames then break out else discard this pottential key frame and move on
+ // If there is tolerable prediction for at least the next 3 frames then
+ // break out, else discard this potential key frame and move on.
if (boost_score > 30.0 && (i > 3))
is_viable_kf = TRUE;
else {
@@ -2231,7 +2456,7 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
// Take a copy of the initial frame details
vpx_memcpy(&first_frame, this_frame, sizeof(*this_frame));
- cpi->twopass.kf_group_bits = 0; // Total bits avaialable to kf group
+ cpi->twopass.kf_group_bits = 0; // Total bits available to kf group
cpi->twopass.kf_group_error_left = 0; // Group modified error score.
kf_mod_err = calculate_modified_err(cpi, this_frame);
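
The recursion in schedule_frames() is easiest to verify by running it. Below is a standalone sketch, not part of the patch, that mirrors the binary split under the new MIN_GF_INTERVAL of 2, with printf standing in for the cfo[] writes:

#include <stdio.h>

#define MIN_GF_INTERVAL 2  /* The CONFIG_MULTIPLE_ARF value from onyx_int.h. */

static void split(int start, int end) {
  int abs_end, half;
  if (end < 0) {            /* An ARF is coded first, at position -end. */
    printf("%d ", end);
    abs_end = -end;
  } else {
    abs_end = end;
  }
  half = (abs_end - start) >> 1;
  if (start + MIN_GF_INTERVAL >= abs_end - MIN_GF_INTERVAL) {
    int i;                  /* Leaf: code the remaining frames in order. */
    for (i = start; i <= abs_end; ++i)
      printf("%d ", i);
  } else {                  /* Recurse, inserting an ARF at the mid-point. */
    split(start, -(start + half));
    split(start + half + 1, abs_end);
  }
}

int main(void) {
  split(0, -15);            /* A 16-frame group that ends in an ARF. */
  printf("\n");
  return 0;
}

For split(0, -15) this prints -15 -7 -3 0 1 2 3 4 5 6 7 -11 8 9 10 11 12 13 14 15: each ARF (negative entry) is coded before the frames it predicts and reappears later at its positive overlay position. That is why 16 source frames expand to 20 coding-order slots, and why frame_coding_order is sized MAX_LAG_BUFFERS * 2 in vp9_onyx_int.h below.
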
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index e642b7487..1649ccade 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -79,9 +79,10 @@ static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvjsadcost,
MV v;
v.row = mv->as_mv.row - ref->as_mv.row;
v.col = mv->as_mv.col - ref->as_mv.col;
- return ((mvjsadcost[vp9_get_mv_joint(v)] +
- mvsadcost[0][v.row] + mvsadcost[1][v.col]) *
- error_per_bit + 128) >> 8;
+
+ return ROUND_POWER_OF_TWO((mvjsadcost[vp9_get_mv_joint(v)] +
+ mvsadcost[0][v.row] + mvsadcost[1][v.col]) *
+ error_per_bit, 8);
}
return 0;
}
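
The mcomp change is purely for readability: assuming the usual libvpx definition of the macro, ROUND_POWER_OF_TWO(v, 8) expands to exactly the (v + 128) >> 8 round-to-nearest shift the old code spelled out inline:

/* Assumed definition (vp9/common/vp9_common.h in this tree): */
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

/* e.g. for a weighted cost of 400:
 *   400 >> 8                   == 1   (plain truncation)
 *   ROUND_POWER_OF_TWO(400, 8) == 2   ((400 + 128) >> 8, round to nearest) */
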
diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index 05977b3f5..890195204 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -82,6 +82,10 @@ extern double vp9_calc_ssimg(YV12_BUFFER_CONFIG *source,
#endif
+// TODO(agrange) Move this function.
+extern void configure_arnr_filter(VP9_COMP *cpi, const unsigned int this_frame,
+ int group_boost);
+
// #define OUTPUT_YUV_REC
#ifdef OUTPUT_YUV_SRC
@@ -738,7 +742,12 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
sf->optimize_coefficients = !cpi->oxcf.lossless;
sf->first_step = 0;
sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
+#if CONFIG_MULTIPLE_ARF
+ // Switch segmentation off.
+ sf->static_segmentation = 0;
+#else
sf->static_segmentation = 1;
+#endif
sf->splitmode_breakout = 0;
sf->mb16_breakout = 0;
@@ -748,7 +757,12 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
break;
case 1:
+#if CONFIG_MULTIPLE_ARF
+ // Switch segmentation off.
+ sf->static_segmentation = 0;
+#else
sf->static_segmentation = 1;
+#endif
sf->splitmode_breakout = 1;
sf->mb16_breakout = 0;
@@ -1265,7 +1279,11 @@ void vp9_change_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
}
// YX Temp
+#if CONFIG_MULTIPLE_ARF
+ vp9_zero(cpi->alt_ref_source);
+#else
cpi->alt_ref_source = NULL;
+#endif
cpi->is_src_frame_alt_ref = 0;
#if 0
@@ -1356,9 +1374,9 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
cpi->common.current_video_frame = 0;
cpi->kf_overspend_bits = 0;
cpi->kf_bitrate_adjustment = 0;
- cpi->frames_till_gf_update_due = 0;
+ cpi->frames_till_gf_update_due = 0;
cpi->gf_overspend_bits = 0;
- cpi->non_gf_bitrate_adjustment = 0;
+ cpi->non_gf_bitrate_adjustment = 0;
cm->prob_last_coded = 128;
cm->prob_gf_coded = 128;
cm->prob_intra_coded = 63;
@@ -1369,7 +1387,7 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
for (i = 0; i < TX_SIZE_MAX_SB - 1; i++)
cm->prob_tx[i] = 128;
- // Prime the recent reference frame useage counters.
+ // Prime the recent reference frame usage counters.
// Hereafter they will be maintained as a sort of moving average
cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
@@ -1449,6 +1467,19 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
cpi->source_alt_ref_active = FALSE;
cpi->refresh_alt_ref_frame = 0;
+#if CONFIG_MULTIPLE_ARF
+ // Turn multiple ARF usage on/off. This is a quick hack for the initial test
+ // version. It should eventually be set via the codec API.
+ cpi->multi_arf_enabled = 1;
+
+ if (cpi->multi_arf_enabled) {
+ cpi->sequence_number = 0;
+ cpi->frame_coding_order_period = 0;
+ vp9_zero(cpi->frame_coding_order);
+ vp9_zero(cpi->arf_buffer_idx);
+ }
+#endif
+
cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
#if CONFIG_INTERNAL_STATS
cpi->b_calculate_ssimg = 0;
@@ -2193,10 +2224,13 @@ static void update_alt_ref_frame_stats(VP9_COMP *cpi) {
// this frame refreshes means next frames don't unless specified by user
cpi->common.frames_since_golden = 0;
- // Clear the alternate reference update pending flag.
- cpi->source_alt_ref_pending = FALSE;
+#if CONFIG_MULTIPLE_ARF
+ if (!cpi->multi_arf_enabled)
+#endif
+ // Clear the alternate reference update pending flag.
+ cpi->source_alt_ref_pending = FALSE;
- // Set the alternate refernce frame active flag
+ // Set the alternate reference frame active flag
cpi->source_alt_ref_active = TRUE;
@@ -2223,7 +2257,7 @@ static void update_golden_frame_stats(VP9_COMP *cpi) {
// }
// else
// {
- // // Carry a potrtion of count over to begining of next gf sequence
+ // // Carry a portion of count over to beginning of next gf sequence
// cpi->recent_ref_frame_usage[INTRA_FRAME] >>= 5;
// cpi->recent_ref_frame_usage[LAST_FRAME] >>= 5;
// cpi->recent_ref_frame_usage[GOLDEN_FRAME] >>= 5;
@@ -2362,7 +2396,7 @@ static int recode_loop_test(VP9_COMP *cpi,
VP9_COMMON *cm = &cpi->common;
// Is frame recode allowed at all
- // Yes if either recode mode 1 is selected or mode two is selcted
+ // Yes if either recode mode 1 is selected or mode two is selected
// and the frame is a key frame. golden frame or alt_ref_frame
if ((cpi->sf.recode_loop == 1) ||
((cpi->sf.recode_loop == 2) &&
@@ -2403,13 +2437,19 @@ static void update_reference_frames(VP9_COMP * const cpi) {
&cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
ref_cnt_fb(cm->fb_idx_ref_cnt,
&cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
- } else if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame) {
+ }
+#if CONFIG_MULTIPLE_ARF
+ else if (!cpi->multi_arf_enabled && cpi->refresh_golden_frame &&
+ !cpi->refresh_alt_ref_frame) {
+#else
+ else if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame) {
+#endif
/* Preserve the previously existing golden frame and update the frame in
* the alt ref slot instead. This is highly specific to the current use of
* alt-ref as a forward reference, and this needs to be generalized as
* other uses are implemented (like RTC/temporal scaling)
*
- * The update to the buffer in the alt ref slot was signalled in
+ * The update to the buffer in the alt ref slot was signaled in
* vp9_pack_bitstream(), now swap the buffer pointers so that it's treated
* as the golden frame next time.
*/
@@ -2421,10 +2461,16 @@ static void update_reference_frames(VP9_COMP * const cpi) {
tmp = cpi->alt_fb_idx;
cpi->alt_fb_idx = cpi->gld_fb_idx;
cpi->gld_fb_idx = tmp;
- } else { /* For non key/golden frames */
+ } else { /* For non key/golden frames */
if (cpi->refresh_alt_ref_frame) {
+ int arf_idx = cpi->alt_fb_idx;
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ arf_idx = cpi->arf_buffer_idx[cpi->sequence_number + 1];
+ }
+#endif
ref_cnt_fb(cm->fb_idx_ref_cnt,
- &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
+ &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
}
if (cpi->refresh_golden_frame) {
@@ -2615,7 +2661,8 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
// For an alt ref frame in 2 pass we skip the call to the second
// pass function that sets the target bandwidth so must set it here
if (cpi->refresh_alt_ref_frame) {
- cpi->per_frame_bandwidth = cpi->twopass.gf_bits; // Per frame bit target for the alt ref frame
+ // Per frame bit target for the alt ref frame
+ cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
// per second target bitrate
cpi->target_bandwidth = (int)(cpi->twopass.gf_bits *
cpi->output_frame_rate);
@@ -2637,7 +2684,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
// Current default encoder behaviour for the altref sign bias
cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = cpi->source_alt_ref_active;
- // Check to see if a key frame is signalled
+ // Check to see if a key frame is signaled
// For two pass with auto key frame enabled cm->frame_type may already be set, but not for one pass.
if ((cm->current_video_frame == 0) ||
(cm->frame_flags & FRAMEFLAGS_KEY) ||
@@ -2681,7 +2728,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
// Configure use of segmentation for enhanced coding of static regions.
// Only allowed for now in second pass of two pass (as requires lagged coding)
- // and if the relevent speed feature flag is set.
+ // and if the relevant speed feature flag is set.
if ((cpi->pass == 2) && (cpi->sf.static_segmentation)) {
configure_static_seg_features(cpi);
}
@@ -2695,6 +2742,9 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
q = cpi->active_worst_quality;
if (cm->frame_type == KEY_FRAME) {
+#if CONFIG_MULTIPLE_ARF
+ double current_q;
+#endif
int high = 2000;
int low = 400;
@@ -2711,7 +2761,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
cpi->active_best_quality = kf_low_motion_minq[q] + adjustment;
}
- // Make an adjustment based on the %s static
+ // Make an adjustment based on the % static
// The main impact of this is at lower Q to prevent overly large key
// frames unless a lot of the image is static.
if (cpi->kf_zeromotion_pct < 64)
@@ -2728,9 +2778,14 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
delta_qindex = compute_qdelta(cpi, last_boosted_q,
(last_boosted_q * 0.75));
-
cpi->active_best_quality = MAX(qindex + delta_qindex, cpi->best_quality);
}
+#if CONFIG_MULTIPLE_ARF
+ // Force the KF quantizer to be 30% of the active_worst_quality.
+ current_q = vp9_convert_qindex_to_q(cpi->active_worst_quality);
+ cpi->active_best_quality = cpi->active_worst_quality
+ + compute_qdelta(cpi, current_q, current_q * 0.3);
+#endif
} else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) {
int high = 2000;
int low = 400;
@@ -2776,7 +2831,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
cpi->active_best_quality = inter_minq[q];
#endif
- // For the constant/constrained quality mode we dont want
+ // For the constant/constrained quality mode we don't want
// q to fall below the cq level.
if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
(cpi->active_best_quality < cpi->cq_target_quality)) {
@@ -2814,12 +2869,34 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
vp9_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
&frame_over_shoot_limit);
- // Limit Q range for the adaptive loop.
- bottom_index = cpi->active_best_quality;
- top_index = cpi->active_worst_quality;
- q_low = cpi->active_best_quality;
- q_high = cpi->active_worst_quality;
+#if CONFIG_MULTIPLE_ARF
+ // Force the quantizer determined by the coding order pattern.
+ if (cpi->multi_arf_enabled && (cm->frame_type != KEY_FRAME)) {
+ double new_q;
+ double current_q = vp9_convert_qindex_to_q(cpi->active_worst_quality);
+ int level = cpi->this_frame_weight;
+ assert(level >= 0);
+ // Set quantizer steps at 20% increments (a factor of 0.2 per level).
+ new_q = current_q * (1.0 - (0.2 * (cpi->max_arf_level - level)));
+ q = cpi->active_worst_quality + compute_qdelta(cpi, current_q, new_q);
+
+ bottom_index = q;
+ top_index = q;
+ q_low = q;
+ q_high = q;
+
+ printf("frame:%d q:%d\n", cm->current_video_frame, q);
+ } else {
+#endif
+ // Limit Q range for the adaptive loop.
+ bottom_index = cpi->active_best_quality;
+ top_index = cpi->active_worst_quality;
+ q_low = cpi->active_best_quality;
+ q_high = cpi->active_worst_quality;
+#if CONFIG_MULTIPLE_ARF
+ }
+#endif
loop_count = 0;
if (cm->frame_type != KEY_FRAME) {
@@ -2936,8 +3013,8 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
if (cm->frame_type == KEY_FRAME) {
/* Choose which entropy context to use. When using a forward reference
* frame, it immediately follows the keyframe, and thus benefits from
- * using the same entropy context established by the keyframe. Otherwise,
- * use the default context 0.
+ * using the same entropy context established by the keyframe.
+ * Otherwise, use the default context 0.
*/
cm->frame_context_idx = cpi->oxcf.play_alternate;
vp9_setup_key_frame(cpi);
@@ -3191,7 +3268,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
}
}
- // Update the GF useage maps.
+ // Update the GF usage maps.
// This is done after completing the compression of a frame when all modes
// etc. are finalized but before loop filter
vp9_update_gf_useage_maps(cpi, cm, &cpi->mb);
@@ -3366,7 +3443,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
// in this frame.
update_base_skip_probs(cpi);
-#if 0 // 1 && CONFIG_INTERNAL_STATS
+#if 0 && CONFIG_INTERNAL_STATS
{
FILE *f = fopen("tmp.stt", "a");
int recon_err;
@@ -3513,10 +3590,33 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
// Tell the caller that the frame was coded as a key frame
*frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
- // As this frame is a key frame the next defaults to an inter frame.
+#if CONFIG_MULTIPLE_ARF
+ // Reset the sequence number.
+ if (cpi->multi_arf_enabled) {
+ cpi->sequence_number = 0;
+ cpi->frame_coding_order_period = cpi->new_frame_coding_order_period;
+ cpi->new_frame_coding_order_period = -1;
+ }
+#endif
+
+ // As this frame is a key frame the next defaults to an inter frame.
cm->frame_type = INTER_FRAME;
} else {
*frame_flags = cm->frame_flags&~FRAMEFLAGS_KEY;
+
+#if CONFIG_MULTIPLE_ARF
+ /* Increment position in the coded frame sequence. */
+ if (cpi->multi_arf_enabled) {
+ ++cpi->sequence_number;
+ if (cpi->sequence_number >= cpi->frame_coding_order_period) {
+ cpi->sequence_number = 0;
+ cpi->frame_coding_order_period = cpi->new_frame_coding_order_period;
+ cpi->new_frame_coding_order_period = -1;
+ }
+ cpi->this_frame_weight = cpi->arf_weight[cpi->sequence_number];
+ assert(cpi->this_frame_weight >= 0);
+ }
+#endif
}
// Clear the one shot update flags for segmentation map and mode/ref loop filter deltas.
@@ -3528,16 +3628,15 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
cm->last_width = cm->width;
cm->last_height = cm->height;
- // Dont increment frame counters if this was an altref buffer update not a real frame
+ // Don't increment frame counters if this was an altref buffer
+ // update not a real frame
if (cm->show_frame) {
- cm->current_video_frame++;
- cpi->frames_since_key++;
+ ++cm->current_video_frame;
+ ++cpi->frames_since_key;
}
// reset to normal state now that we are done.
-
-
#if 0
{
char filename[512];
@@ -3624,6 +3723,12 @@ static int frame_is_reference(const VP9_COMP *cpi) {
mb->update_mb_segmentation_data;
}
+#if CONFIG_MULTIPLE_ARF
+int is_next_frame_arf(VP9_COMP *cpi) {
+ // Negative entry in frame_coding_order indicates an ARF at this position.
+ return cpi->frame_coding_order[cpi->sequence_number + 1] < 0 ? 1 : 0;
+}
+#endif
int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags,
unsigned long *size, unsigned char *dest,
@@ -3633,6 +3738,7 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags,
struct vpx_usec_timer cmptimer;
YV12_BUFFER_CONFIG *force_src_buffer = NULL;
int i;
+ // FILE *fp_out = fopen("enc_frame_type.txt", "a");
if (!cpi)
return -1;
@@ -3644,37 +3750,90 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags,
cpi->mb.e_mbd.allow_high_precision_mv = ALTREF_HIGH_PRECISION_MV;
set_mvcost(&cpi->mb);
- // Should we code an alternate reference frame
- if (cpi->oxcf.play_alternate &&
- cpi->source_alt_ref_pending) {
- if ((cpi->source = vp9_lookahead_peek(cpi->lookahead,
- cpi->frames_till_gf_update_due))) {
+ // Should we code an alternate reference frame?
+ if (cpi->oxcf.play_alternate && cpi->source_alt_ref_pending) {
+ int frames_to_arf;
+
+#if CONFIG_MULTIPLE_ARF
+ assert(!cpi->multi_arf_enabled ||
+ cpi->frame_coding_order[cpi->sequence_number] < 0);
+
+ if (cpi->multi_arf_enabled && (cpi->pass == 2))
+ frames_to_arf = (-cpi->frame_coding_order[cpi->sequence_number]) -
+ cpi->next_frame_in_order;
+ else
+#endif
+ frames_to_arf = cpi->frames_till_gf_update_due;
+
+ assert(frames_to_arf < cpi->twopass.frames_to_key);
+
+ if ((cpi->source = vp9_lookahead_peek(cpi->lookahead, frames_to_arf))) {
+#if CONFIG_MULTIPLE_ARF
+ cpi->alt_ref_source[cpi->arf_buffered] = cpi->source;
+#else
cpi->alt_ref_source = cpi->source;
+#endif
+
if (cpi->oxcf.arnr_max_frames > 0) {
- vp9_temporal_filter_prepare(cpi, cpi->frames_till_gf_update_due);
+ // Produce the filtered ARF frame.
+ // TODO(agrange) merge these two functions.
+ configure_arnr_filter(cpi, cm->current_video_frame + frames_to_arf,
+ cpi->gfu_boost);
+ vp9_temporal_filter_prepare(cpi, frames_to_arf);
force_src_buffer = &cpi->alt_ref_buffer;
}
- cm->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
+
+ cm->show_frame = 0;
cpi->refresh_alt_ref_frame = 1;
cpi->refresh_golden_frame = 0;
cpi->refresh_last_frame = 0;
- cm->show_frame = 0;
- cpi->source_alt_ref_pending = FALSE; // Clear Pending altf Ref flag.
cpi->is_src_frame_alt_ref = 0;
+
+ // TODO(agrange) This needs to vary depending on where the next ARF is.
+ cm->frames_till_alt_ref_frame = frames_to_arf;
+
+#if CONFIG_MULTIPLE_ARF
+ if (!cpi->multi_arf_enabled)
+#endif
+ cpi->source_alt_ref_pending = FALSE; // Clear pending alt ref flag.
}
}
if (!cpi->source) {
+#if CONFIG_MULTIPLE_ARF
+ int i;
+#endif
if ((cpi->source = vp9_lookahead_pop(cpi->lookahead, flush))) {
cm->show_frame = 1;
+#if CONFIG_MULTIPLE_ARF
+ // Is this frame the ARF overlay?
+ cpi->is_src_frame_alt_ref = FALSE;
+ for (i = 0; i < cpi->arf_buffered; ++i) {
+ if (cpi->source == cpi->alt_ref_source[i]) {
+ cpi->is_src_frame_alt_ref = TRUE;
+ cpi->refresh_golden_frame = TRUE;
+ break;
+ }
+ }
+#else
cpi->is_src_frame_alt_ref = cpi->alt_ref_source
&& (cpi->source == cpi->alt_ref_source);
-
+#endif
if (cpi->is_src_frame_alt_ref) {
- cpi->refresh_last_frame = 0;
+ // Current frame is an ARF overlay frame.
+#if CONFIG_MULTIPLE_ARF
+ cpi->alt_ref_source[i] = NULL;
+#else
cpi->alt_ref_source = NULL;
+#endif
+ // Don't refresh the last buffer for an ARF overlay frame. It will
+ // become the GF, so preserve last as an alternative prediction option.
+ cpi->refresh_last_frame = 0;
}
+#if CONFIG_MULTIPLE_ARF
+ ++cpi->next_frame_in_order;
+#endif
}
}
@@ -3684,6 +3843,23 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags,
*time_stamp = cpi->source->ts_start;
*time_end = cpi->source->ts_end;
*frame_flags = cpi->source->flags;
+
+ // fprintf(fp_out, " Frame:%d", cm->current_video_frame);
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ // fprintf(fp_out, " seq_no:%d this_frame_weight:%d",
+ // cpi->sequence_number, cpi->this_frame_weight);
+ } else {
+ // fprintf(fp_out, "\n");
+ }
+#else
+ // fprintf(fp_out, "\n");
+#endif
+
+#if CONFIG_MULTIPLE_ARF
+ if ((cm->frame_type != KEY_FRAME) && (cpi->pass == 2))
+ cpi->source_alt_ref_pending = is_next_frame_arf(cpi);
+#endif
} else {
*size = 0;
if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
@@ -3691,6 +3867,7 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags,
cpi->twopass.first_pass_done = 1;
}
+ // fclose(fp_out);
return -1;
}
@@ -3745,33 +3922,44 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags,
// Clear down mmx registers
vp9_clear_system_state(); // __asm emms;
- cm->frame_type = INTER_FRAME;
- cm->frame_flags = *frame_flags;
-
-#if 0
-
- if (cpi->refresh_alt_ref_frame) {
- // cpi->refresh_golden_frame = 1;
- cpi->refresh_golden_frame = 0;
- cpi->refresh_last_frame = 0;
- } else {
- cpi->refresh_golden_frame = 0;
- cpi->refresh_last_frame = 1;
- }
-
-#endif
-
/* find a free buffer for the new frame, releasing the reference previously
* held.
*/
cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
cm->new_fb_idx = get_free_fb(cm);
+#if CONFIG_MULTIPLE_ARF
+ /* Set up the correct ARF frame. */
+ if (cpi->refresh_alt_ref_frame) {
+ ++cpi->arf_buffered;
+ }
+ if (cpi->multi_arf_enabled && (cm->frame_type != KEY_FRAME) &&
+ (cpi->pass == 2)) {
+ cpi->alt_fb_idx = cpi->arf_buffer_idx[cpi->sequence_number];
+ }
+#endif
+
/* Get the mapping of L/G/A to the reference buffer pool */
cm->active_ref_idx[0] = cm->ref_frame_map[cpi->lst_fb_idx];
cm->active_ref_idx[1] = cm->ref_frame_map[cpi->gld_fb_idx];
cm->active_ref_idx[2] = cm->ref_frame_map[cpi->alt_fb_idx];
+#if 0 // CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ fprintf(fp_out, " idx(%d, %d, %d, %d) active(%d, %d, %d)",
+ cpi->lst_fb_idx, cpi->gld_fb_idx, cpi->alt_fb_idx, cm->new_fb_idx,
+ cm->active_ref_idx[0], cm->active_ref_idx[1], cm->active_ref_idx[2]);
+ if (cpi->refresh_alt_ref_frame)
+ fprintf(fp_out, " type:ARF");
+ if (cpi->is_src_frame_alt_ref)
+ fprintf(fp_out, " type:OVERLAY[%d]", cpi->alt_fb_idx);
+ fprintf(fp_out, "\n");
+ }
+#endif
+
+ cm->frame_type = INTER_FRAME;
+ cm->frame_flags = *frame_flags;
+
/* Reset the frame pointers to the current frame size */
vp8_yv12_realloc_frame_buffer(&cm->yv12_fb[cm->new_fb_idx],
cm->width, cm->height,
@@ -3814,7 +4002,6 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags,
cpi->refresh_golden_frame = 0;
cpi->refresh_last_frame = 1;
cm->frame_type = INTER_FRAME;
-
}
vpx_usec_timer_mark(&cmptimer);
@@ -3927,12 +4114,11 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags,
cpi->total_ssimg_v += v;
cpi->total_ssimg_all += frame_all;
}
-
}
}
#endif
-
+ // fclose(fp_out);
return 0;
}
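
For reference, the fixed-quantizer logic that CONFIG_MULTIPLE_ARF introduces above reduces to a simple per-level target in the real-q domain; compute_qdelta() then maps the ratio back onto the index scale. A minimal sketch (illustrative only, not part of the patch):

/* Target real q per coding level, per the 0.2-per-level factor above. */
static double multi_arf_target_q(double worst_q, int max_level, int level) {
  return worst_q * (1.0 - 0.2 * (max_level - level));
}

/* With max_arf_level == 3 and a worst-quality real q of 40.0:
 *   level 0 (top-level ARF)   -> 16.0
 *   level 1                   -> 24.0
 *   level 2                   -> 32.0
 *   level 3 (non-ARF frames)  -> 40.0
 * ARFs nearer the root of the binary split are coded at progressively
 * lower q, i.e. higher quality. */
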
diff --git a/vp9/encoder/vp9_onyx_int.h b/vp9/encoder/vp9_onyx_int.h
index 15daa7e83..c9d4be41a 100644
--- a/vp9/encoder/vp9_onyx_int.h
+++ b/vp9/encoder/vp9_onyx_int.h
@@ -31,18 +31,22 @@
// Experimental rate control switches
#if CONFIG_ONESHOTQ
-#define ONE_SHOT_Q_ESTIMATE 1
-#define STRICT_ONE_SHOT_Q 1
-#define DISABLE_RC_LONG_TERM_MEM 1
+#define ONE_SHOT_Q_ESTIMATE 0
+#define STRICT_ONE_SHOT_Q 0
+#define DISABLE_RC_LONG_TERM_MEM 0
#endif
+
// #define SPEEDSTATS 1
+#if CONFIG_MULTIPLE_ARF
+// Set MIN_GF_INTERVAL to 1 for the full decomposition.
+#define MIN_GF_INTERVAL 2
+#else
#define MIN_GF_INTERVAL 4
+#endif
#define DEFAULT_GF_INTERVAL 7
#define KEY_FRAME_CONTEXT 5
-#define MAX_LAG_BUFFERS 25
-
#if CONFIG_COMP_INTERINTRA_PRED
#define MAX_MODES 54
#else
@@ -273,26 +277,6 @@ typedef struct {
int static_segmentation;
} SPEED_FEATURES;
-typedef struct {
- MACROBLOCK mb;
- int totalrate;
-} MB_ROW_COMP;
-
-typedef struct {
- TOKENEXTRA *start;
- TOKENEXTRA *stop;
-} TOKENLIST;
-
-typedef struct {
- int ithread;
- void *ptr1;
- void *ptr2;
-} ENCODETHREAD_DATA;
-typedef struct {
- int ithread;
- void *ptr1;
-} LPFTHREAD_DATA;
-
enum BlockSize {
BLOCK_16X8 = PARTITIONING_16X8,
BLOCK_8X16 = PARTITIONING_8X16,
@@ -326,7 +310,11 @@ typedef struct VP9_COMP {
struct lookahead_ctx *lookahead;
struct lookahead_entry *source;
+#if CONFIG_MULTIPLE_ARF
+ struct lookahead_entry *alt_ref_source[NUM_REF_FRAMES];
+#else
struct lookahead_entry *alt_ref_source;
+#endif
YV12_BUFFER_CONFIG *Source;
YV12_BUFFER_CONFIG *un_scaled_source;
@@ -345,6 +333,9 @@ typedef struct VP9_COMP {
int lst_fb_idx;
int gld_fb_idx;
int alt_fb_idx;
+#if CONFIG_MULTIPLE_ARF
+ int alt_ref_fb_idx[NUM_REF_FRAMES - 3];
+#endif
int refresh_last_frame;
int refresh_golden_frame;
int refresh_alt_ref_frame;
@@ -358,6 +349,12 @@ typedef struct VP9_COMP {
unsigned int key_frame_frequency;
unsigned int this_key_frame_forced;
unsigned int next_key_frame_forced;
+#if CONFIG_MULTIPLE_ARF
+ // Position within the frame coding order (including any additional ARFs).
+ unsigned int sequence_number;
+ // Next frame in the natural order that has not yet been coded.
+ int next_frame_in_order;
+#endif
// Ambient reconstruction err target for force key frames
int ambient_err;
@@ -396,7 +393,6 @@ typedef struct VP9_COMP {
double gf_rate_correction_factor;
int frames_till_gf_update_due; // Count down till next GF
- int current_gf_interval; // GF interval chosen when we coded the last GF
int gf_overspend_bits; // Total bits overspent becasue of GF boost (cumulative)
@@ -686,6 +682,19 @@ typedef struct VP9_COMP {
int initial_width;
int initial_height;
+
+#if CONFIG_MULTIPLE_ARF
+ // ARF tracking variables.
+ int multi_arf_enabled;
+ unsigned int frame_coding_order_period;
+ unsigned int new_frame_coding_order_period;
+ int frame_coding_order[MAX_LAG_BUFFERS * 2];
+ int arf_buffer_idx[MAX_LAG_BUFFERS * 3 / 2];
+ int arf_weight[MAX_LAG_BUFFERS];
+ int arf_buffered;
+ int this_frame_weight;
+ int max_arf_level;
+#endif
} VP9_COMP;
void vp9_encode_frame(VP9_COMP *cpi);
diff --git a/vp9/encoder/vp9_quantize.c b/vp9/encoder/vp9_quantize.c
index 83fa1dafd..1401bd64e 100644
--- a/vp9/encoder/vp9_quantize.c
+++ b/vp9/encoder/vp9_quantize.c
@@ -477,7 +477,7 @@ void vp9_quantize_sbuv_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
void vp9_quantize_sbuv_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize);
- const int bhl = mb_width_log2(bsize);
+ const int bhl = mb_height_log2(bsize);
const int uoff = 16 << (bhl + bwl);
int i;
@@ -487,7 +487,7 @@ void vp9_quantize_sbuv_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
void vp9_quantize_sbuv_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize);
- const int bhl = mb_width_log2(bsize);
+ const int bhl = mb_height_log2(bsize);
const int uoff = 16 << (bhl + bwl);
int i;
@@ -497,7 +497,7 @@ void vp9_quantize_sbuv_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
void vp9_quantize_sbuv_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize);
- const int bhl = mb_width_log2(bsize);
+ const int bhl = mb_height_log2(bsize);
const int uoff = 16 << (bhl + bwl);
int i;
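
The three quantize.c hunks fix a copy-paste bug: bhl was computed with mb_width_log2(). The chroma offset depends on both dimensions; a sketch with a hypothetical rectangular size shows the difference (for the square sizes the two helpers agree, so the bug was latent):

/* uoff counts the 4x4 luma blocks that precede the chroma planes. */
static int chroma_offset(int bwl, int bhl) {
  return 16 << (bhl + bwl);
}

/* Hypothetical 32x16 block: mb_width_log2 == 1, mb_height_log2 == 0.
 *   buggy:  chroma_offset(1, 1) == 64   (treats the block as 32x32)
 *   fixed:  chroma_offset(1, 0) == 32   (8 * 4 == 32 luma 4x4 blocks) */
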
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index d26f5ec46..fc22146c3 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -112,7 +112,7 @@ int vp9_bits_per_mb(FRAME_TYPE frame_type, int qindex,
const double q = vp9_convert_qindex_to_q(qindex);
int enumerator = frame_type == KEY_FRAME ? 4000000 : 2500000;
- // q based adjustment to baseline enumberator
+ // q based adjustment to baseline enumerator
enumerator += (int)(enumerator * q) >> 12;
return (int)(0.5 + (enumerator * correction_factor / q));
}
@@ -300,7 +300,7 @@ static void calc_iframe_target_size(VP9_COMP *cpi) {
}
-// Do the best we can to define the parameteres for the next GF based
+// Do the best we can to define the parameters for the next GF based
// on what information we have available.
//
// In this experimental code only two pass is supported
@@ -358,16 +358,13 @@ static void calc_pframe_target_size(VP9_COMP *cpi) {
(estimate_bits_at_q(1, q, cpi->common.MBs, 1.0)
* cpi->last_boost) / 100;
}
-
} else {
// If there is an active ARF at this location use the minimum
- // bits on this frame even if it is a contructed arf.
+ // bits on this frame even if it is a constructed arf.
// The active maximum quantizer insures that an appropriate
- // number of bits will be spent if needed for contstructed ARFs.
+ // number of bits will be spent if needed for constructed ARFs.
cpi->this_frame_target = 0;
}
-
- cpi->current_gf_interval = cpi->frames_till_gf_update_due;
}
}
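
Tracing vp9_bits_per_mb() above for a concrete inter frame makes the enumerator adjustment easier to check (values illustrative only):

/* Inter frame, real q == 20.0, correction_factor == 1.0:
 *   enumerator = 2500000 + ((int)(2500000 * 20.0) >> 12)
 *              = 2500000 + 12207
 *              = 2512207
 *   bits/mb    = (int)(0.5 + 2512207 * 1.0 / 20.0) = 125610 */
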
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 467773bbb..1d0715499 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -3069,7 +3069,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
unsigned int sse, var;
int tmp_rate_y, tmp_rate_u, tmp_rate_v;
int tmp_dist_y, tmp_dist_u, tmp_dist_v;
- vp9_build_inter64x64_predictors_sb(xd, mb_row, mb_col);
+ vp9_build_inter_predictors_sb(xd, mb_row, mb_col, bsize);
var = vp9_variance64x64(*(b->base_src), b->src_stride,
xd->dst.y_buffer, xd->dst.y_stride, &sse);
// Note our transform coeffs are 8 times an orthogonal transform.
@@ -3148,7 +3148,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
unsigned int sse, var;
int tmp_rate_y, tmp_rate_u, tmp_rate_v;
int tmp_dist_y, tmp_dist_u, tmp_dist_v;
- vp9_build_inter32x32_predictors_sb(xd, mb_row, mb_col);
+ vp9_build_inter_predictors_sb(xd, mb_row, mb_col, bsize);
var = vp9_variance32x32(*(b->base_src), b->src_stride,
xd->dst.y_buffer, xd->dst.y_stride, &sse);
// Note our transform coeffs are 8 times an orthogonal transform.
@@ -3314,10 +3314,8 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
} else {
// Handles the special case when a filter that is not in the
// switchable list (ex. bilinear, 6-tap) is indicated at the frame level
- if (bsize == BLOCK_SIZE_SB64X64) {
- vp9_build_inter64x64_predictors_sb(xd, mb_row, mb_col);
- } else if (bsize == BLOCK_SIZE_SB32X32) {
- vp9_build_inter32x32_predictors_sb(xd, mb_row, mb_col);
+ if (bsize > BLOCK_SIZE_MB16X16) {
+ vp9_build_inter_predictors_sb(xd, mb_row, mb_col, bsize);
} else {
vp9_build_inter16x16_predictors_mb(xd, xd->predictor,
xd->predictor + 256,
diff --git a/vp9/encoder/vp9_tokenize.c b/vp9/encoder/vp9_tokenize.c
index 891eb25fd..3095d6421 100644
--- a/vp9/encoder/vp9_tokenize.c
+++ b/vp9/encoder/vp9_tokenize.c
@@ -290,7 +290,7 @@ static void tokenize_b(VP9_COMP *cpi,
else
#endif
t->skip_eob_node = (c > 0) && (token_cache[c - 1] == 0);
- assert(vp9_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
+ assert(vp9_coef_encodings[t->Token].len - t->skip_eob_node > 0);
if (!dry_run) {
++counts[type][ref][band][pt][token];
if (!t->skip_eob_node)
diff --git a/vp9/encoder/vp9_treewriter.h b/vp9/encoder/vp9_treewriter.h
index 832471aa8..d11a5dd61 100644
--- a/vp9/encoder/vp9_treewriter.h
+++ b/vp9/encoder/vp9_treewriter.h
@@ -67,11 +67,9 @@ static INLINE void treed_write(vp9_writer *const w,
} while (n);
}
-static INLINE void write_token(vp9_writer *const w,
- vp9_tree t,
- const vp9_prob *const p,
- vp9_token *const x) {
- treed_write(w, t, p, x->value, x->Len);
+static INLINE void write_token(vp9_writer *w, vp9_tree t, const vp9_prob *p,
+ const struct vp9_token *x) {
+ treed_write(w, t, p, x->value, x->len);
}
static INLINE int treed_cost(vp9_tree t,
@@ -91,10 +89,9 @@ static INLINE int treed_cost(vp9_tree t,
return c;
}
-static INLINE int cost_token(vp9_tree t,
- const vp9_prob *const p,
- vp9_token *const x) {
- return treed_cost(t, p, x->value, x->Len);
+static INLINE int cost_token(vp9_tree t, const vp9_prob *p,
+ const struct vp9_token *x) {
+ return treed_cost(t, p, x->value, x->len);
}
/* Fill array of costs for all possible token values. */
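
The vp9_token_struct to vp9_token rename (and Len to len) touches every call site above. Below is a standalone sketch of what treed_write() consumes; bit-printing stands in for vp9_write(), and the real coder additionally uses the tree index i to select the probability p[i >> 1] for each bit:

#include <stdio.h>

typedef signed char vp9_tree_index;  /* Leaves are negative token ids. */

struct token { int value; int len; };  /* Mirrors struct vp9_token. */

static void print_token_bits(const vp9_tree_index *tree,
                             const struct token *x) {
  vp9_tree_index i = 0;
  int n = x->len;
  do {
    const int bit = (x->value >> --n) & 1;  /* MSB of the path first. */
    printf("%d", bit);
    i = tree[i + bit];                      /* Follow the chosen branch. */
  } while (n);
  printf("\n");
}

int main(void) {
  /* Tiny 3-symbol tree: token 1 -> "0", token 2 -> "10", token 3 -> "11". */
  static const vp9_tree_index tree[4] = { -1, 2, -2, -3 };
  static const struct token t[3] = { { 0, 1 }, { 2, 2 }, { 3, 2 } };
  int j;
  for (j = 0; j < 3; ++j)
    print_token_bits(tree, &t[j]);
  return 0;
}
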
diff --git a/vp9/vp9_cx_iface.c b/vp9/vp9_cx_iface.c
index 56453e249..1d95eed79 100644
--- a/vp9/vp9_cx_iface.c
+++ b/vp9/vp9_cx_iface.c
@@ -486,7 +486,10 @@ static vpx_codec_err_t vp8e_common_init(vpx_codec_ctx_t *ctx,
priv->vp8_cfg.pkt_list = &priv->pkt_list.head;
priv->vp8_cfg.experimental = experimental;
- priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 2;
+ // TODO(agrange) Check the limits set on this buffer, or the check that is
+ // applied in vp8e_encode.
+ priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 8;
+// priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 2;
if (priv->cx_data_sz < 4096) priv->cx_data_sz = 4096;
@@ -754,7 +757,7 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
vpx_codec_cx_pkt_t pkt;
VP9_COMP *cpi = (VP9_COMP *)ctx->cpi;
- /* Pack invisible frames with the next visisble frame */
+ /* Pack invisible frames with the next visible frame */
if (!cpi->common.show_frame) {
if (!ctx->pending_cx_data)
ctx->pending_cx_data = cx_data;
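
On the cx_data_sz change above: g_w * g_h * 3 / 2 is one uncompressed 4:2:0 frame, so growing the factor from 2 to 8 presumably leaves headroom for several invisible (ARF) frames packed ahead of the next visible one, per the comment in vp8e_encode. For example:

/* 1920x1080, I420: one frame is 1920 * 1080 * 3 / 2 = 3110400 bytes.
 *   old:  3110400 * 2 =  6220800 bytes (~6 MB)
 *   new:  3110400 * 8 = 24883200 bytes (~24 MB) */
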