Diffstat (limited to 'vp9')
-rw-r--r--  vp9/common/vp9_blockd.h                 |   89
-rw-r--r--  vp9/common/vp9_coefupdateprobs.h        |    8
-rw-r--r--  vp9/common/vp9_default_coef_probs.h     |  343
-rw-r--r--  vp9/common/vp9_entropy.c                | 1394
-rw-r--r--  vp9/common/vp9_entropy.h                |  143
-rw-r--r--  vp9/common/vp9_entropymode.c            |    5
-rw-r--r--  vp9/common/vp9_entropymv.c              |    5
-rw-r--r--  vp9/common/vp9_entropymv.h              |    6
-rw-r--r--  vp9/common/vp9_findnearmv.c             |    2
-rw-r--r--  vp9/common/vp9_findnearmv.h             |    2
-rw-r--r--  vp9/common/vp9_invtrans.c               |   46
-rw-r--r--  vp9/common/vp9_mbpitch.c                |   32
-rw-r--r--  vp9/common/vp9_mvref_common.c           |  228
-rw-r--r--  vp9/common/vp9_onyxc_int.h              |   52
-rw-r--r--  vp9/common/vp9_pred_common.c            |   38
-rw-r--r--  vp9/common/vp9_pred_common.h            |    5
-rw-r--r--  vp9/common/vp9_recon.c                  |   86
-rw-r--r--  vp9/common/vp9_reconinter.c             |  706
-rw-r--r--  vp9/common/vp9_reconinter.h             |   88
-rw-r--r--  vp9/common/vp9_reconintra.c             |   39
-rw-r--r--  vp9/common/vp9_reconintra.h             |    2
-rw-r--r--  vp9/common/vp9_reconintra4x4.c          |   23
-rw-r--r--  vp9/common/vp9_rtcd_defs.sh             |   15
-rw-r--r--  vp9/common/x86/vp9_asm_stubs.c          |   91
-rw-r--r--  vp9/common/x86/vp9_recon_wrapper_sse2.c |   22
-rw-r--r--  vp9/decoder/vp9_dboolhuff.c             |   39
-rw-r--r--  vp9/decoder/vp9_dboolhuff.h             |   28
-rw-r--r--  vp9/decoder/vp9_decodemv.c              |  490
-rw-r--r--  vp9/decoder/vp9_decodframe.c            |  454
-rw-r--r--  vp9/decoder/vp9_detokenize.c            |  163
-rw-r--r--  vp9/decoder/vp9_treereader.h            |    2
-rw-r--r--  vp9/encoder/vp9_bitstream.c             | 1312
-rw-r--r--  vp9/encoder/vp9_block.h                 |    6
-rw-r--r--  vp9/encoder/vp9_encodeframe.c           |  366
-rw-r--r--  vp9/encoder/vp9_encodeintra.c           |    9
-rw-r--r--  vp9/encoder/vp9_encodemb.c              |   93
-rw-r--r--  vp9/encoder/vp9_firstpass.c             |   26
-rw-r--r--  vp9/encoder/vp9_mbgraph.c               |   40
-rw-r--r--  vp9/encoder/vp9_onyx_if.c               |   81
-rw-r--r--  vp9/encoder/vp9_onyx_int.h              |   45
-rw-r--r--  vp9/encoder/vp9_quantize.c              |   51
-rw-r--r--  vp9/encoder/vp9_ratectrl.c              |   22
-rw-r--r--  vp9/encoder/vp9_rdopt.c                 |  370
-rw-r--r--  vp9/encoder/vp9_segmentation.c          |   48
-rw-r--r--  vp9/encoder/vp9_segmentation.h          |    2
-rw-r--r--  vp9/encoder/vp9_temporal_filter.c       |   12
-rw-r--r--  vp9/encoder/vp9_tokenize.c              |  254
-rw-r--r--  vp9/encoder/vp9_tokenize.h              |   11
48 files changed, 2018 insertions(+), 5376 deletions(-)
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index dd957618c..715df98bd 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -41,7 +41,7 @@
#define SEGMENT_DELTADATA 0
#define SEGMENT_ABSDATA 1
#define MAX_MV_REFS 9
-#define MAX_MV_REF_CANDIDATES 4
+#define MAX_MV_REF_CANDIDATES 2
typedef enum {
PLANE_TYPE_Y_WITH_DC,
@@ -261,9 +261,6 @@ typedef struct {
int_mv mv[2]; // for each reference frame used
int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
int_mv best_mv, best_second_mv;
-#if CONFIG_NEW_MVREF
- int best_index, best_second_index;
-#endif
int mb_mode_context[MAX_REF_FRAMES];
@@ -285,9 +282,6 @@ typedef struct {
INTERPOLATIONFILTERTYPE interp_filter;
BLOCK_SIZE_TYPE sb_type;
-#if CONFIG_CODE_NONZEROCOUNT
- uint16_t nzcs[256+64*2];
-#endif
} MB_MODE_INFO;
typedef struct {
@@ -341,13 +335,21 @@ struct scale_factors {
enum { MAX_MB_PLANE = 3 };
+struct buf_2d {
+ uint8_t *buf;
+ int stride;
+};
+
struct mb_plane {
DECLARE_ALIGNED(16, int16_t, qcoeff[64 * 64]);
DECLARE_ALIGNED(16, int16_t, dqcoeff[64 * 64]);
DECLARE_ALIGNED(16, uint16_t, eobs[256]);
+ DECLARE_ALIGNED(16, int16_t, diff[64 * 64]);
PLANE_TYPE plane_type;
int subsampling_x;
int subsampling_y;
+ struct buf_2d dst;
+ struct buf_2d pre[2];
};
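
The per-plane dst and pre[2] buffers introduced here replace the three whole-frame YV12_BUFFER_CONFIG copies that the macroblockd hunk below removes. A minimal sketch of how the new pointers might be wired up from a frame buffer, using a hypothetical helper that is not part of this commit (the YV12_BUFFER_CONFIG field names are the standard libvpx ones):

    /* Hypothetical helper (not part of this change): point each plane's
     * dst at the corresponding plane of a frame buffer. */
    static void attach_dst_planes(struct mb_plane *planes,
                                  const YV12_BUFFER_CONFIG *fb) {
      planes[0].dst.buf    = fb->y_buffer;   /* Y */
      planes[0].dst.stride = fb->y_stride;
      planes[1].dst.buf    = fb->u_buffer;   /* U */
      planes[1].dst.stride = fb->uv_stride;
      planes[2].dst.buf    = fb->v_buffer;   /* V */
      planes[2].dst.stride = fb->uv_stride;
    }

The pre[2] buffers would be filled the same way from the (up to two) reference frames.
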
#define BLOCK_OFFSET(x, i, n) ((x) + (i) * (n))
@@ -358,18 +360,11 @@ struct mb_plane {
BLOCK_OFFSET((x)->plane[2].field, ((i) - 20), 16))
typedef struct macroblockd {
- DECLARE_ALIGNED(16, int16_t, diff[64*64+32*32*2]); /* from idct diff */
-#if CONFIG_CODE_NONZEROCOUNT
- DECLARE_ALIGNED(16, uint16_t, nzcs[256+64*2]);
-#endif
struct mb_plane plane[MAX_MB_PLANE];
/* 16 Y blocks, 4 U, 4 V, each with 16 entries. */
BLOCKD block[24];
- YV12_BUFFER_CONFIG pre; /* Filtered copy of previous frame reconstruction */
- YV12_BUFFER_CONFIG second_pre;
- YV12_BUFFER_CONFIG dst;
struct scale_factors scale_factor[2];
struct scale_factors scale_factor_uv[2];
@@ -406,10 +401,6 @@ typedef struct macroblockd {
vp9_prob mb_segment_tree_probs[MB_FEATURE_TREE_PROBS];
vp9_prob mb_segment_mispred_tree_probs[MAX_MB_SEGMENTS];
-#if CONFIG_NEW_MVREF
- vp9_prob mb_mv_ref_probs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES-1];
-#endif
-
// Segment features
signed char segment_feature_data[MAX_MB_SEGMENTS][SEG_LVL_MAX];
unsigned int segment_feature_mask[MAX_MB_SEGMENTS];
@@ -754,12 +745,6 @@ static TX_SIZE get_uv_tx_size(const MACROBLOCKD *xd) {
return size;
}
-#if CONFIG_CODE_NONZEROCOUNT
-static int get_nzc_used(TX_SIZE tx_size) {
- return (tx_size >= TX_16X16);
-}
-#endif
-
struct plane_block_idx {
int plane;
int block;
@@ -768,7 +753,7 @@ struct plane_block_idx {
// TODO(jkoleszar): returning a struct so it can be used in a const context,
// expect to refactor this further later.
static INLINE struct plane_block_idx plane_block_idx(int y_blocks,
- int b_idx) {
+ int b_idx) {
const int v_offset = y_blocks * 5 / 4;
struct plane_block_idx res;
@@ -885,31 +870,40 @@ typedef void (*foreach_predicted_block_visitor)(int plane, int block,
static INLINE void foreach_predicted_block_in_plane(
const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, int plane,
foreach_predicted_block_visitor visit, void *arg) {
- const int bw = b_width_log2(bsize), bh = b_height_log2(bsize);
+ int i, x, y;
+ const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
// block sizes in number of 4x4 blocks log 2 ("*_b")
// 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
- const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
- const int block_size_b = bw + bh;
-
// subsampled size of the block
- const int ss_sum = xd->plane[plane].subsampling_x +
- xd->plane[plane].subsampling_y;
- const int ss_block_size = block_size_b - ss_sum;
+ const int bw = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
+ const int bh = b_height_log2(bsize) - xd->plane[plane].subsampling_y;
// size of the predictor to use.
- // TODO(jkoleszar): support I8X8, I4X4
- const int pred_w = bw - xd->plane[plane].subsampling_x;
- const int pred_h = bh - xd->plane[plane].subsampling_y;
- const int pred_b = mode == SPLITMV ? 0 : pred_w + pred_h;
- const int step = 1 << pred_b;
-
- int i;
-
- assert(pred_b <= block_size_b);
- assert(pred_b == ss_block_size);
- for (i = 0; i < (1 << ss_block_size); i += step) {
- visit(plane, i, bsize, pred_w, pred_h, arg);
+ int pred_w, pred_h;
+
+ if (mode == SPLITMV) {
+ // 4x4 or 8x8
+ const int is_4x4 =
+ (xd->mode_info_context->mbmi.partitioning == PARTITIONING_4X4);
+ pred_w = is_4x4 ? 0 : 1 >> xd->plane[plane].subsampling_x;
+ pred_h = is_4x4 ? 0 : 1 >> xd->plane[plane].subsampling_y;
+ } else {
+ pred_w = bw;
+ pred_h = bh;
+ }
+ assert(pred_w <= bw);
+ assert(pred_h <= bh);
+
+ // visit each subblock in raster order
+ i = 0;
+ for (y = 0; y < 1 << bh; y += 1 << pred_h) {
+ for (x = 0; x < 1 << bw; x += 1 << pred_w) {
+ visit(plane, i, bsize, pred_w, pred_h, arg);
+ i += 1 << pred_w;
+ }
+ i -= 1 << bw;
+ i += 1 << (bw + pred_h);
}
}
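
The rewritten loop visits each predictor-sized subblock in raster order: i advances by 1 << pred_w across a row of 4x4 blocks, then steps back by the row width (1 << bw) and down by one predictor row (1 << (bw + pred_h)). A standalone sketch to check the arithmetic, assuming a 16x16 block (bw = bh = 2 in log2 4x4 units) with 8x8 predictors (pred_w = pred_h = 1), which should visit raster indices 0, 2, 8, 10:

    #include <stdio.h>

    int main(void) {
      const int bw = 2, bh = 2, pred_w = 1, pred_h = 1;
      int i = 0, x, y;
      for (y = 0; y < 1 << bh; y += 1 << pred_h) {
        for (x = 0; x < 1 << bw; x += 1 << pred_w) {
          printf("visit block %d\n", i);  /* prints 0, 2, 8, 10 */
          i += 1 << pred_w;
        }
        i -= 1 << bw;               /* back to the start of the row */
        i += 1 << (bw + pred_h);    /* down one predictor row */
      }
      return 0;
    }
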
static INLINE void foreach_predicted_block(
@@ -931,6 +925,9 @@ static INLINE void foreach_predicted_block_uv(
}
}
-
-
+#if CONFIG_CODE_ZEROGROUP
+static int get_zpc_used(TX_SIZE tx_size) {
+ return (tx_size >= TX_16X16);
+}
+#endif
#endif // VP9_COMMON_VP9_BLOCKD_H_
diff --git a/vp9/common/vp9_coefupdateprobs.h b/vp9/common/vp9_coefupdateprobs.h
index b4d892df9..e49935c0c 100644
--- a/vp9/common/vp9_coefupdateprobs.h
+++ b/vp9/common/vp9_coefupdateprobs.h
@@ -18,12 +18,8 @@ static const vp9_prob vp9_coef_update_prob[ENTROPY_NODES] = {
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252
};
-#if CONFIG_CODE_NONZEROCOUNT
-#define NZC_UPDATE_PROB_4X4 252
-#define NZC_UPDATE_PROB_8X8 252
-#define NZC_UPDATE_PROB_16X16 252
-#define NZC_UPDATE_PROB_32X32 252
-#define NZC_UPDATE_PROB_PCAT 252
+#if CONFIG_CODE_ZEROGROUP
+#define ZPC_UPDATE_PROB 248
#endif
#if CONFIG_MODELCOEFPROB
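
ZPC_UPDATE_PROB fills the role the four per-size NZC_UPDATE_PROB_* constants played: it is the probability that a given ZPC probability is coded as "unchanged" in the frame header. A hedged sketch of the usual libvpx conditional-update pattern (the zpc-specific wiring is an assumption; vp9_write and vp9_write_literal are the existing boolean-coder calls):

    /* Sketch, assuming zpc probs are updated like coef probs: a flag bit
     * coded with ZPC_UPDATE_PROB, followed by the new 8-bit prob if set. */
    static void update_one_zpc_prob(vp9_writer *bc, vp9_prob *p,
                                    vp9_prob new_p) {
      if (new_p == *p) {
        vp9_write(bc, 0, ZPC_UPDATE_PROB);      /* keep existing prob */
      } else {
        vp9_write(bc, 1, ZPC_UPDATE_PROB);      /* update follows */
        vp9_write_literal(bc, new_p, 8);
        *p = new_p;
      }
    }
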
diff --git a/vp9/common/vp9_default_coef_probs.h b/vp9/common/vp9_default_coef_probs.h
index 5a781fb0a..453b4a243 100644
--- a/vp9/common/vp9_default_coef_probs.h
+++ b/vp9/common/vp9_default_coef_probs.h
@@ -700,298 +700,85 @@ static const vp9_coeff_probs default_coef_probs_32x32[BLOCK_TYPES] = {
}
};
-#if CONFIG_CODE_NONZEROCOUNT
+#if CONFIG_CODE_ZEROGROUP
-// TODO(debargha): Remove the macro and count tables after experimentation
-#define NZC_DEFAULT_COUNTS /* Uncomment to use counts as defaults */
-
-#ifdef NZC_DEFAULT_COUNTS
-static const unsigned int default_nzc_counts_4x4[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC4X4_TOKENS] = {
- {
- {
- { 967652, 29023, 15039, 6952, 1568, 116 },
- { 289116, 22938, 4522, 1935, 520, 47 }
- }, {
- { 967652, 29023, 15039, 6952, 1568, 116 },
- { 689116, 22938, 4522, 1935, 520, 47 }
- },
- }, {
- {
- { 124684, 37167, 15270, 8483, 1777, 102 },
- { 10405, 12395, 3401, 3574, 2461, 771 }
- }, {
- { 124684, 37167, 15270, 8483, 1777, 102 },
- { 20405, 12395, 3401, 3574, 2461, 771 }
- }
- }, {
- {
- { 4100, 22976, 15627, 16137, 7982, 1793 },
- { 4249, 3084, 2131, 4081, 6439, 1653 }
- }, {
- { 21100, 22976, 15627, 16137, 7982, 1793 },
- { 4249, 3084, 2131, 4081, 2439, 1653 }
- }
- }
-};
-
-static const unsigned int default_nzc_counts_8x8[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC8X8_TOKENS] = {
- {
- {
- { 372988, 62777, 19440, 11812, 5145, 1917, 439, 10 },
- { 72052, 30468, 6973, 3250, 1500, 750, 375, 5 },
- }, {
- { 372988, 62777, 19440, 11812, 5145, 1917, 439, 10 },
- { 192052, 30468, 6973, 3250, 1500, 750, 375, 5 },
- }
- }, {
- {
- { 121533, 33527, 15655, 11920, 5723, 2009, 315, 7 },
- { 23772, 23120, 13127, 8115, 4000, 2000, 200, 6 },
- }, {
- { 121533, 33527, 15655, 11920, 5723, 2009, 315, 7 },
- { 23772, 23120, 13127, 8115, 4000, 2000, 200, 6 },
+// There are two probs per context: the first is the prob(0) of the
+// isolated-zero bit, the second is the prob(0) of the end-of-orientation
+// symbol (a 0 there indicates a zerotree root).
+static const vp9_zpc_probs default_zpc_probs_4x4 = {
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 1, }, { 1, }, { 1, },
+ }, { /* Coeff Band 1 */
+ { 1, }, { 1, }, { 1, },
+ }, { /* Coeff Band 2 */
+ { 1, }, { 1, }, { 1, },
}
- }, {
- {
- { 29408, 11758, 8023, 10123, 6705, 2468, 369, 17 },
- { 11612, 13874, 13329, 13022, 6500, 3250, 300, 12 },
- }, {
- { 29408, 11758, 8023, 10123, 6705, 2468, 369, 17 },
- { 11612, 13874, 13329, 13022, 6500, 3250, 300, 12 },
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 1, }, { 1, }, { 1, },
+ }, { /* Coeff Band 1 */
+ { 1, }, { 1, }, { 1, },
+ }, { /* Coeff Band 2 */
+ { 1, }, { 1, }, { 1, },
}
}
};
-
-static const unsigned int default_nzc_counts_16x16[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC16X16_TOKENS] = {
- {
- {
- { 372988, 62777, 19440, 11812, 5145, 1917, 439, 10, 5, 2 },
- { 72052, 30468, 6973, 3250, 1500, 750, 375, 50, 8, 1 },
- }, {
- { 32988, 62777, 19440, 11812, 5145, 1917, 439, 10, 5, 2 },
- { 92052, 30468, 6973, 3250, 1500, 750, 375, 50, 8, 1 },
- }
- }, {
- {
- { 21533, 33527, 15655, 11920, 5723, 2009, 315, 7, 4, 2 },
- { 47772, 23120, 13127, 8115, 4000, 2000, 200, 6, 4, 2 },
- }, {
- { 21533, 33527, 15655, 11920, 5723, 2009, 315, 7, 4, 2 },
- { 27772, 23120, 13127, 8115, 4000, 2000, 200, 6, 4, 2 },
+static const vp9_zpc_probs default_zpc_probs_8x8 = {
+ { /* Intra */
+ { /* ZPC Band 0 */
+ { 4, }, { 2, }, { 1, },
+ }, { /* ZPC Band 1 */
+ { 4, }, { 2, }, { 1, },
+ }, { /* ZPC Band 2 */
+ { 4, }, { 2, }, { 1, },
}
- }, {
- {
- { 19408, 31758, 16023, 10123, 6705, 2468, 369, 17, 10, 5 },
- { 9612, 13874, 13329, 13022, 6500, 3250, 300, 12, 6, 3 },
- }, {
- { 22408, 11758, 8023, 10123, 6705, 2468, 369, 17, 10, 5 },
- { 9612, 13874, 13329, 13022, 6500, 3250, 300, 12, 6, 3 },
+ }, { /* Inter */
+ { /* ZPC Band 0 */
+ { 4, }, { 2, }, { 1, },
+ }, { /* ZPC Band 1 */
+ { 4, }, { 2, }, { 1, },
+ }, { /* ZPC Band 2 */
+ { 4, }, { 2, }, { 1, },
}
}
};
-
-static const unsigned int default_nzc_counts_32x32[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC32X32_TOKENS] = {
- {
- {
- { 72988, 62777, 19440, 11812, 5145, 1917, 439, 10, 5, 2, 1, 0 },
- { 52052, 30468, 6973, 3250, 1500, 750, 375, 50, 8, 1, 0, 0 },
- }, {
- { 72988, 62777, 19440, 11812, 5145, 1917, 439, 10, 5, 2, 1, 0 },
- { 72052, 30468, 6973, 3250, 1500, 750, 375, 50, 8, 1, 0, 0 },
- }
- }, {
- {
- { 21533, 33527, 15655, 11920, 5723, 2009, 315, 7, 4, 2, 1, 0 },
- { 27772, 23120, 13127, 8115, 4000, 2000, 200, 6, 4, 2, 1, 0 },
- }, {
- { 21533, 33527, 15655, 11920, 5723, 2009, 315, 7, 4, 2, 1, 0 },
- { 27772, 23120, 13127, 8115, 4000, 2000, 200, 6, 4, 2, 1, 0 },
- }
- }, {
- {
- { 19408, 11758, 8023, 10123, 6705, 2468, 369, 17, 10, 5, 2, 1 },
- { 9612, 13874, 13329, 13022, 6500, 3250, 300, 12, 6, 3, 2, 1 },
- }, {
- { 29408, 11758, 8023, 10123, 6705, 2468, 369, 17, 10, 5, 2, 1 },
- { 9612, 13874, 13329, 13022, 6500, 3250, 300, 12, 6, 3, 2, 1 },
- }
- }
-};
-
-#else
-
-static const vp9_prob default_nzc_probs_4x4[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC4X4_TOKENS] = {
- {
- {
- { 219, 162, 179, 142, 242, },
- { 214, 253, 228, 246, 255, },
- }, {
- { 225, 236, 190, 229, 253, },
- { 251, 253, 240, 248, 255, },
- },
- }, {
- {
- { 106, 126, 158, 126, 244, },
- { 118, 241, 201, 240, 255, },
- }, {
- { 165, 179, 143, 189, 242, },
- { 173, 239, 192, 255, 128, },
+static const vp9_zpc_probs default_zpc_probs_16x16 = {
+ { /* Intra */
+ { /* ZPC Band 0 */
+ { 57, }, { 30, }, { 13, },
+ }, { /* ZPC Band 1 */
+ { 46, }, { 23, }, { 4, },
  }, { /* ZPC Band 2 */
+ { 36, }, { 11, }, { 2, },
},
- }, {
- {
- { 42 , 78 , 153, 92 , 223, },
- { 128, 128, 128, 128, 128, },
- }, {
- { 76 , 68 , 126, 110, 216, },
- { 128, 128, 128, 128, 128, },
+ }, { /* Inter */
+ { /* ZPC Band 0 */
+ { 45, }, { 21 }, { 10, },
+ }, { /* ZPC Band 1 */
+ { 24, }, { 14, }, { 3, },
+ }, { /* ZPC Band 2 */
+ { 16, }, { 6, }, { 1, },
},
},
};
-
-static const vp9_prob default_nzc_probs_8x8[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC8X8_TOKENS] = {
- {
- {
- { 134, 139, 170, 178, 142, 197, 255, },
- { 167, 224, 199, 252, 205, 255, 128, },
- }, {
- { 181, 210, 180, 241, 190, 235, 255, },
- { 234, 251, 235, 252, 219, 255, 128, },
- },
- }, {
- {
- { 33 , 64 , 155, 143, 86 , 216, 255, },
- { 73 , 160, 167, 251, 153, 255, 128, },
- }, {
- { 79 , 104, 153, 195, 119, 246, 255, },
- { 149, 183, 186, 249, 203, 255, 128, },
- },
- }, {
- {
- { 10 , 25 , 156, 61 , 69 , 156, 254, },
- { 32 , 1 , 128, 146, 64 , 255, 128, },
- }, {
- { 37 , 48 , 143, 113, 81 , 202, 255, },
- { 1 , 255, 128, 128, 128, 128, 128, },
- },
- },
-};
-
-static const vp9_prob default_nzc_probs_16x16[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC16X16_TOKENS] = {
- {
- {
- { 11 , 188, 210, 167, 141, 143, 152, 255, 128, },
- { 171, 201, 203, 244, 207, 255, 255, 128, 128, },
- }, {
- { 23 , 217, 207, 251, 198, 255, 219, 128, 128, },
- { 235, 249, 229, 255, 199, 128, 128, 128, 128, },
- },
- }, {
- {
- { 9 , 45 , 168, 85 , 66 , 221, 139, 246, 255, },
- { 51 , 110, 163, 238, 94 , 255, 255, 128, 128, },
- }, {
- { 4 , 149, 175, 240, 149, 255, 205, 128, 128, },
- { 141, 217, 186, 255, 128, 128, 128, 128, 128, },
- },
- }, {
- {
- { 1 , 12 , 173, 6 , 68 , 145, 41 , 204, 255, },
- { 39 , 47 , 128, 199, 110, 255, 128, 128, 128, },
- }, {
- { 1 , 121, 171, 149, 115, 242, 159, 255, 128, },
- { 1 , 255, 255, 128, 128, 128, 128, 128, 128, },
+static const vp9_zpc_probs default_zpc_probs_32x32 = {
+ { /* Intra */
+ { /* ZPC Band 0 */
+ { 132, }, { 60, }, { 19, },
+ }, { /* ZPC Band 1 */
+ { 64, }, { 32, }, { 8, },
+ }, { /* ZPC Band 2 */
+ { 25, }, { 11, }, { 1, },
},
- },
-};
-
-static const vp9_prob default_nzc_probs_32x32[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC32X32_TOKENS] = {
- {
- {
- { 11 , 216, 195, 201, 160, 247, 217, 255, 255, 128, 128, },
- { 177, 240, 239, 255, 192, 128, 128, 128, 128, 128, 128, },
- }, {
- { 48 , 235, 213, 235, 199, 255, 255, 128, 128, 128, 128, },
- { 205, 255, 248, 128, 128, 128, 128, 128, 128, 128, 128, },
- },
- }, {
- {
- { 6 , 96 , 138, 99 , 125, 248, 188, 255, 128, 128, 128, },
- { 17 , 53 , 43 , 189, 1 , 255, 171, 128, 128, 128, 128, },
- }, {
- { 5 , 187, 235, 232, 117, 255, 219, 128, 128, 128, 128, },
- { 146, 255, 255, 128, 128, 128, 128, 128, 128, 128, 128, },
- },
- }, {
- {
- { 1 , 7 , 93 , 14 , 100, 30 , 85 , 65 , 81 , 210, 255, },
- { 1 , 1 , 128, 26 , 1 , 218, 78 , 255, 255, 128, 128, },
- }, {
- { 4 , 148, 206, 137, 160, 255, 255, 128, 128, 128, 128, },
- { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, },
+ }, { /* Inter */
+ { /* ZPC Band 0 */
+ { 134, }, { 39, }, { 25, },
+ }, { /* ZPC Band 1 */
+ { 64, }, { 24, }, { 12, },
+ }, { /* ZPC Band 2 */
+ { 21, }, { 10, }, { 1, },
},
},
};
-#endif
-
-static const vp9_prob default_nzc_pcat_probs[MAX_NZC_CONTEXTS]
- [NZC_TOKENS_EXTRA]
- [NZC_BITS_EXTRA] = {
- // Bit probabilities are in least to most significance order
- {
- {176, 128, 128, 128, 128, 128, 128, 128, 128}, // 3 - 4
- {164, 192, 128, 128, 128, 128, 128, 128, 128}, // 5 - 8
- {154, 184, 208, 128, 128, 128, 128, 128, 128}, // 9 - 16
- {144, 176, 200, 216, 128, 128, 128, 128, 128}, // 17 - 32
- {140, 172, 192, 208, 224, 128, 128, 128, 128}, // 33 - 64
- {136, 168, 188, 200, 220, 232, 128, 128, 128}, // 65 - 128
- {132, 164, 184, 196, 216, 228, 240, 128, 128}, // 129 - 256
- {130, 162, 178, 194, 212, 226, 240, 248, 128}, // 257 - 512
- {128, 160, 176, 192, 208, 224, 240, 248, 254}, // 513 - 1024
- }, {
- {168, 128, 128, 128, 128, 128, 128, 128, 128}, // 3 - 4
- {152, 184, 128, 128, 128, 128, 128, 128, 128}, // 5 - 8
- {152, 184, 208, 128, 128, 128, 128, 128, 128}, // 9 - 16
- {144, 176, 200, 216, 128, 128, 128, 128, 128}, // 17 - 32
- {140, 172, 192, 208, 224, 128, 128, 128, 128}, // 33 - 64
- {136, 168, 188, 200, 220, 232, 128, 128, 128}, // 65 - 128
- {132, 164, 184, 196, 216, 228, 240, 128, 128}, // 129 - 256
- {130, 162, 178, 194, 212, 226, 240, 248, 128}, // 257 - 512
- {128, 160, 176, 192, 208, 224, 240, 248, 254}, // 513 - 1024
- }, {
- {160, 128, 128, 128, 128, 128, 128, 128, 128}, // 3 - 4
- {152, 176, 128, 128, 128, 128, 128, 128, 128}, // 5 - 8
- {150, 184, 208, 128, 128, 128, 128, 128, 128}, // 9 - 16
- {144, 176, 200, 216, 128, 128, 128, 128, 128}, // 17 - 32
- {140, 172, 192, 208, 224, 128, 128, 128, 128}, // 33 - 64
- {136, 168, 188, 200, 220, 232, 128, 128, 128}, // 65 - 128
- {132, 164, 184, 196, 216, 228, 240, 128, 128}, // 129 - 256
- {130, 162, 178, 194, 212, 226, 240, 248, 128}, // 257 - 512
- {128, 160, 176, 192, 208, 224, 240, 248, 254}, // 513 - 1024
- },
-};
-
-#endif // CONFIG_CODE_NONZEROCOUNT
+#endif // CONFIG_CODE_ZEROGROUP
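
The nesting of the initializers above implies a four-dimensional table: { intra, inter } x three bands x three contexts x the per-node probs. The actual typedef lives in vp9/common/vp9_entropy.h (also touched by this commit); a plausible reconstruction, in which all macro names are guesses:

    /* Inferred shape of vp9_zpc_probs; names are assumptions. The default
     * tables above spell out only the first node prob per context. */
    #define ZPC_BANDS 3   /* coefficient-band groups */
    #define ZPC_PTOKS 3   /* previous-token contexts */
    #define ZPC_NODES 2   /* isolated-zero bit, end-of-orientation bit */
    typedef vp9_prob vp9_zpc_probs[REF_TYPES][ZPC_BANDS][ZPC_PTOKS][ZPC_NODES];
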
diff --git a/vp9/common/vp9_entropy.c b/vp9/common/vp9_entropy.c
index 5e6cba2ed..a5437d889 100644
--- a/vp9/common/vp9_entropy.c
+++ b/vp9/common/vp9_entropy.c
@@ -400,65 +400,6 @@ static const vp9_prob Pcat6[] = {
254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129
};
-#if CONFIG_CODE_NONZEROCOUNT
-const vp9_tree_index vp9_nzc4x4_tree[2 * NZC4X4_NODES] = {
- -NZC_0, 2,
- 4, 6,
- -NZC_1, -NZC_2,
- -NZC_3TO4, 8,
- -NZC_5TO8, -NZC_9TO16,
-};
-struct vp9_token vp9_nzc4x4_encodings[NZC4X4_TOKENS];
-
-const vp9_tree_index vp9_nzc8x8_tree[2 * NZC8X8_NODES] = {
- -NZC_0, 2,
- 4, 6,
- -NZC_1, -NZC_2,
- 8, 10,
- -NZC_3TO4, -NZC_5TO8,
- -NZC_9TO16, 12,
- -NZC_17TO32, -NZC_33TO64,
-};
-struct vp9_token vp9_nzc8x8_encodings[NZC8X8_TOKENS];
-
-const vp9_tree_index vp9_nzc16x16_tree[2 * NZC16X16_NODES] = {
- -NZC_0, 2,
- 4, 6,
- -NZC_1, -NZC_2,
- 8, 10,
- -NZC_3TO4, -NZC_5TO8,
- 12, 14,
- -NZC_9TO16, -NZC_17TO32,
- -NZC_33TO64, 16,
- -NZC_65TO128, -NZC_129TO256,
-};
-struct vp9_token vp9_nzc16x16_encodings[NZC16X16_TOKENS];
-
-const vp9_tree_index vp9_nzc32x32_tree[2 * NZC32X32_NODES] = {
- -NZC_0, 2,
- 4, 6,
- -NZC_1, -NZC_2,
- 8, 10,
- -NZC_3TO4, -NZC_5TO8,
- 12, 14,
- -NZC_9TO16, -NZC_17TO32,
- 16, 18,
- -NZC_33TO64, -NZC_65TO128,
- -NZC_129TO256, 20,
- -NZC_257TO512, -NZC_513TO1024,
-};
-struct vp9_token vp9_nzc32x32_encodings[NZC32X32_TOKENS];
-
-const int vp9_extranzcbits[NZC32X32_TOKENS] = {
- 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
-};
-
-const int vp9_basenzcvalue[NZC32X32_TOKENS] = {
- 0, 1, 2, 3, 5, 9, 17, 33, 65, 129, 257, 513
-};
-
-#endif // CONFIG_CODE_NONZEROCOUNT
-
#if CONFIG_MODELCOEFPROB
#if UNCONSTRAINED_NODES == 2
@@ -1310,7 +1251,7 @@ static void init_bit_trees() {
init_bit_tree(cat6, 14);
}
-vp9_extra_bit_struct vp9_extra_bits[12] = {
+vp9_extra_bit vp9_extra_bits[12] = {
{ 0, 0, 0, 0},
{ 0, 0, 0, 1},
{ 0, 0, 0, 2},
@@ -1344,10 +1285,10 @@ int vp9_get_coef_context(const int *scan, const int *neighbors,
int ctx;
assert(neighbors[MAX_NEIGHBORS * c + 0] >= 0);
if (neighbors[MAX_NEIGHBORS * c + 1] >= 0) {
- ctx = (1 + token_cache[neighbors[MAX_NEIGHBORS * c + 0]] +
- token_cache[neighbors[MAX_NEIGHBORS * c + 1]]) >> 1;
+ ctx = (1 + token_cache[scan[neighbors[MAX_NEIGHBORS * c + 0]]] +
+ token_cache[scan[neighbors[MAX_NEIGHBORS * c + 1]]]) >> 1;
} else {
- ctx = token_cache[neighbors[MAX_NEIGHBORS * c + 0]];
+ ctx = token_cache[scan[neighbors[MAX_NEIGHBORS * c + 0]]];
}
return vp9_pt_energy_class[ctx];
}
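
The fix here is an index-space mismatch: the neighbor tables store scan positions, while token_cache is written at raster positions, so neighbor lookups must go through scan[]. A hedged illustration of the two sides (the cache-write line mirrors the detokenizer; the rest uses the names already in this function):

    /* token_cache is filled at the raster position of each coded
     * coefficient, roughly: */
    token_cache[scan[c]] = vp9_pt_energy_class[token];
    /* ...so a neighbor, stored as a scan index, must be mapped back to
     * its raster index before the cache lookup: */
    ctx = token_cache[scan[neighbors[MAX_NEIGHBORS * c + 0]]];
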
@@ -1357,55 +1298,6 @@ void vp9_default_coef_probs(VP9_COMMON *pc) {
#if CONFIG_MODELCOEFPROB
int b, r, c, p;
#endif
-#if CONFIG_CODE_NONZEROCOUNT
-#ifdef NZC_DEFAULT_COUNTS
- int h, g;
- for (h = 0; h < MAX_NZC_CONTEXTS; ++h) {
- for (g = 0; g < REF_TYPES; ++g) {
- int i;
- unsigned int branch_ct4x4[NZC4X4_NODES][2];
- unsigned int branch_ct8x8[NZC8X8_NODES][2];
- unsigned int branch_ct16x16[NZC16X16_NODES][2];
- unsigned int branch_ct32x32[NZC32X32_NODES][2];
- for (i = 0; i < BLOCK_TYPES; ++i) {
- vp9_tree_probs_from_distribution(
- vp9_nzc4x4_tree,
- pc->fc.nzc_probs_4x4[h][g][i], branch_ct4x4,
- default_nzc_counts_4x4[h][g][i], 0);
- }
- for (i = 0; i < BLOCK_TYPES; ++i) {
- vp9_tree_probs_from_distribution(
- vp9_nzc8x8_tree,
- pc->fc.nzc_probs_8x8[h][g][i], branch_ct8x8,
- default_nzc_counts_8x8[h][g][i], 0);
- }
- for (i = 0; i < BLOCK_TYPES; ++i) {
- vp9_tree_probs_from_distribution(
- vp9_nzc16x16_tree,
- pc->fc.nzc_probs_16x16[h][g][i], branch_ct16x16,
- default_nzc_counts_16x16[h][g][i], 0);
- }
- for (i = 0; i < BLOCK_TYPES; ++i) {
- vp9_tree_probs_from_distribution(
- vp9_nzc32x32_tree,
- pc->fc.nzc_probs_32x32[h][g][i], branch_ct32x32,
- default_nzc_counts_32x32[h][g][i], 0);
- }
- }
- }
-#else
- vpx_memcpy(pc->fc.nzc_probs_4x4, default_nzc_probs_4x4,
- sizeof(pc->fc.nzc_probs_4x4));
- vpx_memcpy(pc->fc.nzc_probs_8x8, default_nzc_probs_8x8,
- sizeof(pc->fc.nzc_probs_8x8));
- vpx_memcpy(pc->fc.nzc_probs_16x16, default_nzc_probs_16x16,
- sizeof(pc->fc.nzc_probs_16x16));
- vpx_memcpy(pc->fc.nzc_probs_32x32, default_nzc_probs_32x32,
- sizeof(pc->fc.nzc_probs_32x32));
-#endif
- vpx_memcpy(pc->fc.nzc_pcat_probs, default_nzc_pcat_probs,
- sizeof(pc->fc.nzc_pcat_probs));
-#endif // CONFIG_CODE_NONZEROCOUNT
#if CONFIG_MODELCOEFPROB
for (b = 0; b < BLOCK_TYPES; ++b)
for (r = 0; r < REF_TYPES; ++r)
@@ -1447,6 +1339,16 @@ void vp9_default_coef_probs(VP9_COMMON *pc) {
vpx_memcpy(pc->fc.coef_probs_32x32, default_coef_probs_32x32,
sizeof(pc->fc.coef_probs_32x32));
#endif
+#if CONFIG_CODE_ZEROGROUP
+ vpx_memcpy(pc->fc.zpc_probs_4x4, default_zpc_probs_4x4,
+ sizeof(pc->fc.zpc_probs_4x4));
+ vpx_memcpy(pc->fc.zpc_probs_8x8, default_zpc_probs_8x8,
+ sizeof(pc->fc.zpc_probs_8x8));
+ vpx_memcpy(pc->fc.zpc_probs_16x16, default_zpc_probs_16x16,
+ sizeof(pc->fc.zpc_probs_16x16));
+ vpx_memcpy(pc->fc.zpc_probs_32x32, default_zpc_probs_32x32,
+ sizeof(pc->fc.zpc_probs_32x32));
+#endif
}
// Neighborhood 5-tuples for various scans and blocksizes,
@@ -1597,1099 +1499,8 @@ void vp9_coef_tree_initialize() {
vp9_init_neighbors();
init_bit_trees();
vp9_tokens_from_tree(vp9_coef_encodings, vp9_coef_tree);
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_tokens_from_tree(vp9_nzc4x4_encodings, vp9_nzc4x4_tree);
- vp9_tokens_from_tree(vp9_nzc8x8_encodings, vp9_nzc8x8_tree);
- vp9_tokens_from_tree(vp9_nzc16x16_encodings, vp9_nzc16x16_tree);
- vp9_tokens_from_tree(vp9_nzc32x32_encodings, vp9_nzc32x32_tree);
-#endif
-}
-
-#if CONFIG_CODE_NONZEROCOUNT
-
-#define mb_in_cur_tile(cm, mb_row, mb_col) \
- ((mb_col) >= (cm)->cur_tile_mb_col_start && \
- (mb_col) <= (cm)->cur_tile_mb_col_end && \
- (mb_row) >= 0)
-
-#define choose_nzc_context(nzc_exp, t2, t1) \
- ((nzc_exp) >= (t2) ? 2 : (nzc_exp) >= (t1) ? 1 : 0)
-
-#define NZC_T2_32X32 (16 << 6)
-#define NZC_T1_32X32 (4 << 6)
-
-#define NZC_T2_16X16 (12 << 6)
-#define NZC_T1_16X16 (3 << 6)
-
-#define NZC_T2_8X8 (8 << 6)
-#define NZC_T1_8X8 (2 << 6)
-
-#define NZC_T2_4X4 (4 << 6)
-#define NZC_T1_4X4 (1 << 6)
-
-// Transforms a mb16 block index to a sb64 block index
-static inline int mb16_to_sb64_index(int mb_row, int mb_col, int block) {
- int r = (mb_row & 3);
- int c = (mb_col & 3);
- int b;
- if (block < 16) { // Y
- int ib = block >> 2;
- int jb = block & 3;
- ib += r * 4;
- jb += c * 4;
- b = ib * 16 + jb;
- assert(b < 256);
- return b;
- } else { // UV
- int base = block - (block & 3);
- int ib = (block - base) >> 1;
- int jb = (block - base) & 1;
- ib += r * 2;
- jb += c * 2;
- b = base * 16 + ib * 8 + jb;
- assert(b >= 256 && b < 384);
- return b;
- }
-}
-
-// Transforms a mb16 block index to a sb32 block index
-static inline int mb16_to_sb32_index(int mb_row, int mb_col, int block) {
- int r = (mb_row & 1);
- int c = (mb_col & 1);
- int b;
- if (block < 16) { // Y
- int ib = block >> 2;
- int jb = block & 3;
- ib += r * 4;
- jb += c * 4;
- b = ib * 8 + jb;
- assert(b < 64);
- return b;
- } else { // UV
- int base = block - (block & 3);
- int ib = (block - base) >> 1;
- int jb = (block - base) & 1;
- ib += r * 2;
- jb += c * 2;
- b = base * 4 + ib * 4 + jb;
- assert(b >= 64 && b < 96);
- return b;
- }
-}
-
-static inline int block_to_txfm_index(int block, TX_SIZE tx_size, int s) {
- // s is the log of the number of 4x4 blocks in each row/col of larger block
- int b, ib, jb, nb;
- ib = block >> s;
- jb = block - (ib << s);
- ib >>= tx_size;
- jb >>= tx_size;
- nb = 1 << (s - tx_size);
- b = (ib * nb + jb) << (2 * tx_size);
- return b;
-}
-
-/* BEGIN - Helper functions to get the y nzcs */
-static unsigned int get_nzc_4x4_y_sb64(MB_MODE_INFO *mi, int block) {
- int b;
- assert(block < 256);
- b = block_to_txfm_index(block, mi->txfm_size, 4);
- assert(b < 256);
- return mi->nzcs[b] << (6 - 2 * mi->txfm_size);
-}
-
-static unsigned int get_nzc_4x4_y_sb32(MB_MODE_INFO *mi, int block) {
- int b;
- assert(block < 64);
- b = block_to_txfm_index(block, mi->txfm_size, 3);
- assert(b < 64);
- return mi->nzcs[b] << (6 - 2 * mi->txfm_size);
-}
-
-static unsigned int get_nzc_4x4_y_mb16(MB_MODE_INFO *mi, int block) {
- int b;
- assert(block < 16);
- b = block_to_txfm_index(block, mi->txfm_size, 2);
- assert(b < 16);
- return mi->nzcs[b] << (6 - 2 * mi->txfm_size);
-}
-/* END - Helper functions to get the y nzcs */
-
-/* Function to get y nzc where block index is in mb16 terms */
-static unsigned int get_nzc_4x4_y(VP9_COMMON *cm, MODE_INFO *m,
- int mb_row, int mb_col, int block) {
- // NOTE: All values returned are at 64 times the true value at 4x4 scale
- MB_MODE_INFO *const mi = &m->mbmi;
- const int mis = cm->mode_info_stride;
- if (mi->mb_skip_coeff || !mb_in_cur_tile(cm, mb_row, mb_col))
- return 0;
- if (mi->sb_type == BLOCK_SIZE_SB64X64) {
- int r = mb_row & 3;
- int c = mb_col & 3;
- m -= c + r * mis;
- if (m->mbmi.mb_skip_coeff || !mb_in_cur_tile(cm, mb_row - r, mb_col - c))
- return 0;
- else
- return get_nzc_4x4_y_sb64(
- &m->mbmi, mb16_to_sb64_index(mb_row, mb_col, block));
- } else if (mi->sb_type == BLOCK_SIZE_SB32X32) {
- int r = mb_row & 1;
- int c = mb_col & 1;
- m -= c + r * mis;
- if (m->mbmi.mb_skip_coeff || !mb_in_cur_tile(cm, mb_row - r, mb_col - c))
- return 0;
- else
- return get_nzc_4x4_y_sb32(
- &m->mbmi, mb16_to_sb32_index(mb_row, mb_col, block));
- } else {
- if (m->mbmi.mb_skip_coeff || !mb_in_cur_tile(cm, mb_row, mb_col))
- return 0;
- return get_nzc_4x4_y_mb16(mi, block);
- }
-}
-
-/* BEGIN - Helper functions to get the uv nzcs */
-static unsigned int get_nzc_4x4_uv_sb64(MB_MODE_INFO *mi, int block) {
- int b;
- int base, uvtxfm_size;
- assert(block >= 256 && block < 384);
- uvtxfm_size = mi->txfm_size;
- base = 256 + (block & 64);
- block -= base;
- b = base + block_to_txfm_index(block, uvtxfm_size, 3);
- assert(b >= 256 && b < 384);
- return mi->nzcs[b] << (6 - 2 * uvtxfm_size);
-}
-
-static unsigned int get_nzc_4x4_uv_sb32(MB_MODE_INFO *mi, int block) {
- int b;
- int base, uvtxfm_size;
- assert(block >= 64 && block < 96);
- if (mi->txfm_size == TX_32X32)
- uvtxfm_size = TX_16X16;
- else
- uvtxfm_size = mi->txfm_size;
- base = 64 + (block & 16);
- block -= base;
- b = base + block_to_txfm_index(block, uvtxfm_size, 2);
- assert(b >= 64 && b < 96);
- return mi->nzcs[b] << (6 - 2 * uvtxfm_size);
-}
-
-static unsigned int get_nzc_4x4_uv_mb16(MB_MODE_INFO *mi, int block) {
- int b;
- int base, uvtxfm_size;
- assert(block >= 16 && block < 24);
- if (mi->txfm_size == TX_8X8 &&
- (mi->mode == SPLITMV || mi->mode == I8X8_PRED))
- uvtxfm_size = TX_4X4;
- else if (mi->txfm_size == TX_16X16)
- uvtxfm_size = TX_8X8;
- else
- uvtxfm_size = mi->txfm_size;
- base = 16 + (block & 4);
- block -= base;
- b = base + block_to_txfm_index(block, uvtxfm_size, 1);
- assert(b >= 16 && b < 24);
- return mi->nzcs[b] << (6 - 2 * uvtxfm_size);
-}
-/* END - Helper functions to get the uv nzcs */
-
-/* Function to get uv nzc where block index is in mb16 terms */
-static unsigned int get_nzc_4x4_uv(VP9_COMMON *cm, MODE_INFO *m,
- int mb_row, int mb_col, int block) {
- // NOTE: All values returned are at 64 times the true value at 4x4 scale
- MB_MODE_INFO *const mi = &m->mbmi;
- const int mis = cm->mode_info_stride;
- if (mi->mb_skip_coeff || !mb_in_cur_tile(cm, mb_row, mb_col))
- return 0;
- if (mi->sb_type == BLOCK_SIZE_SB64X64) {
- int r = mb_row & 3;
- int c = mb_col & 3;
- m -= c + r * mis;
- if (m->mbmi.mb_skip_coeff || !mb_in_cur_tile(cm, mb_row - r, mb_col - c))
- return 0;
- else
- return get_nzc_4x4_uv_sb64(
- &m->mbmi, mb16_to_sb64_index(mb_row, mb_col, block));
- } else if (mi->sb_type == BLOCK_SIZE_SB32X32) {
- int r = mb_row & 1;
- int c = mb_col & 1;
- m -= c + r * mis;
- if (m->mbmi.mb_skip_coeff || !mb_in_cur_tile(cm, mb_row - r, mb_col - c))
- return 0;
- else
- return get_nzc_4x4_uv_sb32(
- &m->mbmi, mb16_to_sb32_index(mb_row, mb_col, block));
- } else {
- return get_nzc_4x4_uv_mb16(mi, block);
- }
-}
-
-int vp9_get_nzc_context_y_sb64(VP9_COMMON *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block) {
- // returns an index in [0, MAX_NZC_CONTEXTS - 1] to reflect how busy
- // neighboring blocks are
- int mis = cm->mode_info_stride;
- int nzc_exp = 0;
- TX_SIZE txfm_size = cur->mbmi.txfm_size;
- assert(block < 256);
- switch (txfm_size) {
- case TX_32X32:
- assert((block & 63) == 0);
- if (block < 128) {
- int o = (block >> 6) * 2;
- nzc_exp =
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 12) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 13) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 14) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 15) +
- get_nzc_4x4_y(cm, cur - mis + o + 1,
- mb_row - 1, mb_col + o + 1, 12) +
- get_nzc_4x4_y(cm, cur - mis + o + 1,
- mb_row - 1, mb_col + o + 1, 13) +
- get_nzc_4x4_y(cm, cur - mis + o + 1,
- mb_row - 1, mb_col + o + 1, 14) +
- get_nzc_4x4_y(cm, cur - mis + o + 1,
- mb_row - 1, mb_col + o + 1, 15);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 128] << 3;
- }
- if ((block & 127) == 0) {
- int o = (block >> 7) * 2;
- nzc_exp +=
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 3) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 7) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 11) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 15) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis + mis,
- mb_row + o + 1, mb_col - 1, 3) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis + mis,
- mb_row + o + 1, mb_col - 1, 7) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis + mis,
- mb_row + o + 1, mb_col - 1, 11) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis + mis,
- mb_row + o + 1, mb_col - 1, 15);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 64] << 3;
- }
- nzc_exp <<= 2;
- // Note nzc_exp is 64 times the average value expected at 32x32 scale
- return choose_nzc_context(nzc_exp, NZC_T2_32X32, NZC_T1_32X32);
- break;
-
- case TX_16X16:
- assert((block & 15) == 0);
- if (block < 64) {
- int o = block >> 4;
- nzc_exp =
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 12) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 13) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 14) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 15);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 64] << 4;
- }
- if ((block & 63) == 0) {
- int o = block >> 6;
- nzc_exp +=
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 3) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 7) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 11) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 15);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 16] << 4;
- }
- nzc_exp <<= 1;
- // Note nzc_exp is 64 times the average value expected at 16x16 scale
- return choose_nzc_context(nzc_exp, NZC_T2_16X16, NZC_T1_16X16);
- break;
-
- case TX_8X8:
- assert((block & 3) == 0);
- if (block < 32) {
- int o = block >> 3;
- int p = ((block >> 2) & 1) ? 14 : 12;
- nzc_exp =
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, p) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, p + 1);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 32] << 5;
- }
- if ((block & 31) == 0) {
- int o = block >> 6;
- int p = ((block >> 5) & 1) ? 11 : 3;
- nzc_exp +=
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, p) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, p + 4);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 4] << 5;
- }
- // Note nzc_exp is 64 times the average value expected at 8x8 scale
- return choose_nzc_context(nzc_exp, NZC_T2_8X8, NZC_T1_8X8);
- break;
-
- case TX_4X4:
- if (block < 16) {
- int o = block >> 2;
- int p = block & 3;
- nzc_exp = get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o,
- 12 + p);
- } else {
- nzc_exp = (cur->mbmi.nzcs[block - 16] << 6);
- }
- if ((block & 15) == 0) {
- int o = block >> 6;
- int p = (block >> 4) & 3;
- nzc_exp += get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- 3 + 4 * p);
- } else {
- nzc_exp += (cur->mbmi.nzcs[block - 1] << 6);
- }
- nzc_exp >>= 1;
- // Note nzc_exp is 64 times the average value expected at 4x4 scale
- return choose_nzc_context(nzc_exp, NZC_T2_4X4, NZC_T1_4X4);
- break;
-
- default:
- return 0;
- }
-}
-
-int vp9_get_nzc_context_y_sb32(VP9_COMMON *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block) {
- // returns an index in [0, MAX_NZC_CONTEXTS - 1] to reflect how busy
- // neighboring blocks are
- int mis = cm->mode_info_stride;
- int nzc_exp = 0;
- TX_SIZE txfm_size = cur->mbmi.txfm_size;
- assert(block < 64);
- switch (txfm_size) {
- case TX_32X32:
- assert(block == 0);
- nzc_exp =
- (get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 12) +
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 13) +
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 14) +
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 15) +
- get_nzc_4x4_y(cm, cur - mis + 1, mb_row - 1, mb_col + 1, 12) +
- get_nzc_4x4_y(cm, cur - mis + 1, mb_row - 1, mb_col + 1, 13) +
- get_nzc_4x4_y(cm, cur - mis + 1, mb_row - 1, mb_col + 1, 14) +
- get_nzc_4x4_y(cm, cur - mis + 1, mb_row - 1, mb_col + 1, 15) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 3) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 7) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 11) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 15) +
- get_nzc_4x4_y(cm, cur - 1 + mis, mb_row + 1, mb_col - 1, 3) +
- get_nzc_4x4_y(cm, cur - 1 + mis, mb_row + 1, mb_col - 1, 7) +
- get_nzc_4x4_y(cm, cur - 1 + mis, mb_row + 1, mb_col - 1, 11) +
- get_nzc_4x4_y(cm, cur - 1 + mis, mb_row + 1, mb_col - 1, 15)) << 2;
- // Note nzc_exp is 64 times the average value expected at 32x32 scale
- return choose_nzc_context(nzc_exp, NZC_T2_32X32, NZC_T1_32X32);
- break;
-
- case TX_16X16:
- assert((block & 15) == 0);
- if (block < 32) {
- int o = (block >> 4) & 1;
- nzc_exp =
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 12) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 13) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 14) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 15);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 32] << 4;
- }
- if ((block & 31) == 0) {
- int o = block >> 5;
- nzc_exp +=
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 3) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 7) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 11) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 15);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 16] << 4;
- }
- nzc_exp <<= 1;
- // Note nzc_exp is 64 times the average value expected at 16x16 scale
- return choose_nzc_context(nzc_exp, NZC_T2_16X16, NZC_T1_16X16);
- break;
-
- case TX_8X8:
- assert((block & 3) == 0);
- if (block < 16) {
- int o = block >> 3;
- int p = ((block >> 2) & 1) ? 14 : 12;
- nzc_exp =
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, p) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, p + 1);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 16] << 5;
- }
- if ((block & 15) == 0) {
- int o = block >> 5;
- int p = ((block >> 4) & 1) ? 11 : 3;
- nzc_exp +=
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, p) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, p + 4);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 4] << 5;
- }
- // Note nzc_exp is 64 times the average value expected at 8x8 scale
- return choose_nzc_context(nzc_exp, NZC_T2_8X8, NZC_T1_8X8);
- break;
-
- case TX_4X4:
- if (block < 8) {
- int o = block >> 2;
- int p = block & 3;
- nzc_exp = get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o,
- 12 + p);
- } else {
- nzc_exp = (cur->mbmi.nzcs[block - 8] << 6);
- }
- if ((block & 7) == 0) {
- int o = block >> 5;
- int p = (block >> 3) & 3;
- nzc_exp += get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- 3 + 4 * p);
- } else {
- nzc_exp += (cur->mbmi.nzcs[block - 1] << 6);
- }
- nzc_exp >>= 1;
- // Note nzc_exp is 64 times the average value expected at 4x4 scale
- return choose_nzc_context(nzc_exp, NZC_T2_4X4, NZC_T1_4X4);
- break;
-
- default:
- return 0;
- break;
- }
-}
-
-int vp9_get_nzc_context_y_mb16(VP9_COMMON *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block) {
- // returns an index in [0, MAX_NZC_CONTEXTS - 1] to reflect how busy
- // neighboring blocks are
- int mis = cm->mode_info_stride;
- int nzc_exp = 0;
- TX_SIZE txfm_size = cur->mbmi.txfm_size;
- assert(block < 16);
- switch (txfm_size) {
- case TX_16X16:
- assert(block == 0);
- nzc_exp =
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 12) +
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 13) +
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 14) +
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 15) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 3) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 7) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 11) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 15);
- nzc_exp <<= 1;
- // Note nzc_exp is 64 times the average value expected at 16x16 scale
- return choose_nzc_context(nzc_exp, NZC_T2_16X16, NZC_T1_16X16);
-
- case TX_8X8:
- assert((block & 3) == 0);
- if (block < 8) {
- int p = ((block >> 2) & 1) ? 14 : 12;
- nzc_exp =
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, p) +
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, p + 1);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 8] << 5;
- }
- if ((block & 7) == 0) {
- int p = ((block >> 3) & 1) ? 11 : 3;
- nzc_exp +=
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, p) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, p + 4);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 4] << 5;
- }
- // Note nzc_exp is 64 times the average value expected at 8x8 scale
- return choose_nzc_context(nzc_exp, NZC_T2_8X8, NZC_T1_8X8);
-
- case TX_4X4:
- if (block < 4) {
- int p = block & 3;
- nzc_exp = get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col,
- 12 + p);
- } else {
- nzc_exp = (cur->mbmi.nzcs[block - 4] << 6);
- }
- if ((block & 3) == 0) {
- int p = (block >> 2) & 3;
- nzc_exp += get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1,
- 3 + 4 * p);
- } else {
- nzc_exp += (cur->mbmi.nzcs[block - 1] << 6);
- }
- nzc_exp >>= 1;
- // Note nzc_exp is 64 times the average value expected at 4x4 scale
- return choose_nzc_context(nzc_exp, NZC_T2_4X4, NZC_T1_4X4);
-
- default:
- return 0;
- break;
- }
-}
-
-int vp9_get_nzc_context_uv_sb64(VP9_COMMON *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block) {
- // returns an index in [0, MAX_NZC_CONTEXTS - 1] to reflect how busy
- // neighboring blocks are
- int mis = cm->mode_info_stride;
- int nzc_exp = 0;
- const int base = block - (block & 63);
- const int boff = (block & 63);
- const int base_mb16 = base >> 4;
- TX_SIZE txfm_size = cur->mbmi.txfm_size;
- TX_SIZE txfm_size_uv;
-
- assert(block >= 256 && block < 384);
- txfm_size_uv = txfm_size;
-
- switch (txfm_size_uv) {
- case TX_32X32:
- assert(block == 256 || block == 320);
- nzc_exp =
- get_nzc_4x4_uv(cm, cur - mis, mb_row - 1, mb_col,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis, mb_row - 1, mb_col,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - mis + 1, mb_row - 1, mb_col + 1,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + 1, mb_row - 1, mb_col + 1,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - mis + 2, mb_row - 1, mb_col + 2,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + 2, mb_row - 1, mb_col + 2,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - mis + 3, mb_row - 1, mb_col + 3,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + 3, mb_row - 1, mb_col + 3,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1, mb_row, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1, mb_row, mb_col - 1,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1 + mis, mb_row + 1, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + mis, mb_row + 1, mb_col - 1,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1 + 2 * mis, mb_row + 2, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + 2 * mis, mb_row + 2, mb_col - 1,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1 + 3 * mis, mb_row + 3, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + 3 * mis, mb_row + 3, mb_col - 1,
- base_mb16 + 3);
- nzc_exp <<= 2;
- // Note nzc_exp is 64 times the average value expected at 32x32 scale
- return choose_nzc_context(nzc_exp, NZC_T2_32X32, NZC_T1_32X32);
-
- case TX_16X16:
- // uv txfm_size 16x16
- assert((block & 15) == 0);
- if (boff < 32) {
- int o = (boff >> 4) & 1;
- nzc_exp =
- get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - mis + o + 1, mb_row - 1, mb_col + o + 1,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + o + 1, mb_row - 1, mb_col + o + 1,
- base_mb16 + 3);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 32] << 4;
- }
- if ((boff & 31) == 0) {
- int o = boff >> 5;
- nzc_exp +=
- get_nzc_4x4_uv(cm, cur - 1 + o * mis,
- mb_row + o, mb_col - 1, base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + o * mis,
- mb_row + o, mb_col - 1, base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1 + o * mis + mis,
- mb_row + o + 1, mb_col - 1, base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + o * mis + mis,
- mb_row + o + 1, mb_col - 1, base_mb16 + 3);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 16] << 4;
- }
- nzc_exp <<= 1;
- // Note nzc_exp is 64 times the average value expected at 16x16 scale
- return choose_nzc_context(nzc_exp, NZC_T2_16X16, NZC_T1_16X16);
-
- case TX_8X8:
- assert((block & 3) == 0);
- if (boff < 16) {
- int o = boff >> 2;
- nzc_exp =
- get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 3);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 16] << 5;
- }
- if ((boff & 15) == 0) {
- int o = boff >> 4;
- nzc_exp +=
- get_nzc_4x4_uv(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- base_mb16 + 3);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 4] << 5;
- }
- // Note nzc_exp is 64 times the average value expected at 8x8 scale
- return choose_nzc_context(nzc_exp, NZC_T2_8X8, NZC_T1_8X8);
-
- case TX_4X4:
- if (boff < 8) {
- int o = boff >> 1;
- int p = boff & 1;
- nzc_exp = get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 2 + p);
- } else {
- nzc_exp = (cur->mbmi.nzcs[block - 8] << 6);
- }
- if ((boff & 7) == 0) {
- int o = boff >> 4;
- int p = (boff >> 3) & 1;
- nzc_exp += get_nzc_4x4_uv(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- base_mb16 + 1 + 2 * p);
- } else {
- nzc_exp += (cur->mbmi.nzcs[block - 1] << 6);
- }
- nzc_exp >>= 1;
- // Note nzc_exp is 64 times the average value expected at 4x4 scale
- return choose_nzc_context(nzc_exp, NZC_T2_4X4, NZC_T1_4X4);
-
- default:
- return 0;
- }
-}
-
-int vp9_get_nzc_context_uv_sb32(VP9_COMMON *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block) {
- // returns an index in [0, MAX_NZC_CONTEXTS - 1] to reflect how busy
- // neighboring blocks are
- int mis = cm->mode_info_stride;
- int nzc_exp = 0;
- const int base = block - (block & 15);
- const int boff = (block & 15);
- const int base_mb16 = base >> 2;
- TX_SIZE txfm_size = cur->mbmi.txfm_size;
- TX_SIZE txfm_size_uv;
-
- assert(block >= 64 && block < 96);
- if (txfm_size == TX_32X32)
- txfm_size_uv = TX_16X16;
- else
- txfm_size_uv = txfm_size;
-
- switch (txfm_size_uv) {
- case TX_16X16:
- // uv txfm_size 16x16
- assert(block == 64 || block == 80);
- nzc_exp =
- get_nzc_4x4_uv(cm, cur - mis, mb_row - 1, mb_col,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis, mb_row - 1, mb_col,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - mis + 1, mb_row - 1, mb_col + 1,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + 1, mb_row - 1, mb_col + 1,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1 + mis, mb_row, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + mis, mb_row, mb_col - 1,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1 + mis, mb_row + 1, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + mis, mb_row + 1, mb_col - 1,
- base_mb16 + 3);
- nzc_exp <<= 1;
- // Note nzc_exp is 64 times the average value expected at 16x16 scale
- return choose_nzc_context(nzc_exp, NZC_T2_16X16, NZC_T1_16X16);
- break;
-
- case TX_8X8:
- assert((block & 3) == 0);
- if (boff < 8) {
- int o = boff >> 2;
- nzc_exp =
- get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 3);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 8] << 5;
- }
- if ((boff & 7) == 0) {
- int o = boff >> 3;
- nzc_exp +=
- get_nzc_4x4_uv(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- base_mb16 + 3);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 4] << 5;
- }
- // Note nzc_exp is 64 times the average value expected at 8x8 scale
- return choose_nzc_context(nzc_exp, NZC_T2_8X8, NZC_T1_8X8);
-
- case TX_4X4:
- if (boff < 4) {
- int o = boff >> 1;
- int p = boff & 1;
- nzc_exp = get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 2 + p);
- } else {
- nzc_exp = (cur->mbmi.nzcs[block - 4] << 6);
- }
- if ((boff & 3) == 0) {
- int o = boff >> 3;
- int p = (boff >> 2) & 1;
- nzc_exp += get_nzc_4x4_uv(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- base_mb16 + 1 + 2 * p);
- } else {
- nzc_exp += (cur->mbmi.nzcs[block - 1] << 6);
- }
- nzc_exp >>= 1;
- // Note nzc_exp is 64 times the average value expected at 4x4 scale
- return choose_nzc_context(nzc_exp, NZC_T2_4X4, NZC_T1_4X4);
-
- default:
- return 0;
- }
-}
-
-int vp9_get_nzc_context_uv_mb16(VP9_COMMON *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block) {
- // returns an index in [0, MAX_NZC_CONTEXTS - 1] to reflect how busy
- // neighboring blocks are
- int mis = cm->mode_info_stride;
- int nzc_exp = 0;
- const int base = block - (block & 3);
- const int boff = (block & 3);
- const int base_mb16 = base;
- TX_SIZE txfm_size = cur->mbmi.txfm_size;
- TX_SIZE txfm_size_uv;
-
- assert(block >= 16 && block < 24);
- if (txfm_size == TX_16X16)
- txfm_size_uv = TX_8X8;
- else if (txfm_size == TX_8X8 &&
- (cur->mbmi.mode == I8X8_PRED || cur->mbmi.mode == SPLITMV))
- txfm_size_uv = TX_4X4;
- else
- txfm_size_uv = txfm_size;
-
- switch (txfm_size_uv) {
- case TX_8X8:
- assert((block & 3) == 0);
- nzc_exp =
- get_nzc_4x4_uv(cm, cur - mis, mb_row - 1, mb_col, base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis, mb_row - 1, mb_col, base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1, mb_row, mb_col - 1, base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1, mb_row, mb_col - 1, base_mb16 + 3);
- // Note nzc_exp is 64 times the average value expected at 8x8 scale
- return choose_nzc_context(nzc_exp, NZC_T2_8X8, NZC_T1_8X8);
-
- case TX_4X4:
- if (boff < 2) {
- int p = boff & 1;
- nzc_exp = get_nzc_4x4_uv(cm, cur - mis, mb_row - 1, mb_col,
- base_mb16 + 2 + p);
- } else {
- nzc_exp = (cur->mbmi.nzcs[block - 2] << 6);
- }
- if ((boff & 1) == 0) {
- int p = (boff >> 1) & 1;
- nzc_exp += get_nzc_4x4_uv(cm, cur - 1, mb_row, mb_col - 1,
- base_mb16 + 1 + 2 * p);
- } else {
- nzc_exp += (cur->mbmi.nzcs[block - 1] << 6);
- }
- nzc_exp >>= 1;
- // Note nzc_exp is 64 times the average value expected at 4x4 scale
- return choose_nzc_context(nzc_exp, NZC_T2_4X4, NZC_T1_4X4);
-
- default:
- return 0;
- }
-}
-
-int vp9_get_nzc_context(VP9_COMMON *cm, MACROBLOCKD *xd, int block) {
- if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
- assert(block < 384);
- if (block < 256)
- return vp9_get_nzc_context_y_sb64(cm, xd->mode_info_context,
- get_mb_row(xd), get_mb_col(xd), block);
- else
- return vp9_get_nzc_context_uv_sb64(cm, xd->mode_info_context,
- get_mb_row(xd), get_mb_col(xd), block);
- } else if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32) {
- assert(block < 96);
- if (block < 64)
- return vp9_get_nzc_context_y_sb32(cm, xd->mode_info_context,
- get_mb_row(xd), get_mb_col(xd), block);
- else
- return vp9_get_nzc_context_uv_sb32(cm, xd->mode_info_context,
- get_mb_row(xd), get_mb_col(xd), block);
- } else {
- assert(block < 64);
- if (block < 16)
- return vp9_get_nzc_context_y_mb16(cm, xd->mode_info_context,
- get_mb_row(xd), get_mb_col(xd), block);
- else
- return vp9_get_nzc_context_uv_mb16(cm, xd->mode_info_context,
- get_mb_row(xd), get_mb_col(xd), block);
- }
-}
-
-static void update_nzc(VP9_COMMON *cm,
- uint16_t nzc,
- int nzc_context,
- TX_SIZE tx_size,
- int ref,
- int type) {
- int e, c;
- if (!get_nzc_used(tx_size)) return;
- c = codenzc(nzc);
- if (tx_size == TX_32X32)
- cm->fc.nzc_counts_32x32[nzc_context][ref][type][c]++;
- else if (tx_size == TX_16X16)
- cm->fc.nzc_counts_16x16[nzc_context][ref][type][c]++;
- else if (tx_size == TX_8X8)
- cm->fc.nzc_counts_8x8[nzc_context][ref][type][c]++;
- else if (tx_size == TX_4X4)
- cm->fc.nzc_counts_4x4[nzc_context][ref][type][c]++;
- else
- assert(0);
-
- if ((e = vp9_extranzcbits[c])) {
- int x = nzc - vp9_basenzcvalue[c];
- while (e--) {
- int b = (x >> e) & 1;
- cm->fc.nzc_pcat_counts[nzc_context][c - NZC_TOKENS_NOEXTRA][e][b]++;
- }
- }
-}
-
-static void update_nzcs_sb64(VP9_COMMON *cm,
- MACROBLOCKD *xd,
- int mb_row,
- int mb_col) {
- MODE_INFO *m = xd->mode_info_context;
- MB_MODE_INFO *const mi = &m->mbmi;
- int j, nzc_context;
- const int ref = m->mbmi.ref_frame != INTRA_FRAME;
-
- assert(mb_col == get_mb_col(xd));
- assert(mb_row == get_mb_row(xd));
-
- if (mi->mb_skip_coeff)
- return;
-
- switch (mi->txfm_size) {
- case TX_32X32:
- for (j = 0; j < 256; j += 64) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_32X32, ref, 0);
- }
- for (j = 256; j < 384; j += 64) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_32X32, ref, 1);
- }
- break;
-
- case TX_16X16:
- for (j = 0; j < 256; j += 16) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 0);
- }
- for (j = 256; j < 384; j += 16) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 1);
- }
- break;
-
- case TX_8X8:
- for (j = 0; j < 256; j += 4) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 0);
- }
- for (j = 256; j < 384; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 1);
- }
- break;
-
- case TX_4X4:
- for (j = 0; j < 256; ++j) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 0);
- }
- for (j = 256; j < 384; ++j) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 1);
- }
- break;
-
- default:
- break;
- }
}
-static void update_nzcs_sb32(VP9_COMMON *cm,
- MACROBLOCKD *xd,
- int mb_row,
- int mb_col) {
- MODE_INFO *m = xd->mode_info_context;
- MB_MODE_INFO *const mi = &m->mbmi;
- int j, nzc_context;
- const int ref = m->mbmi.ref_frame != INTRA_FRAME;
-
- assert(mb_col == get_mb_col(xd));
- assert(mb_row == get_mb_row(xd));
-
- if (mi->mb_skip_coeff)
- return;
-
- switch (mi->txfm_size) {
- case TX_32X32:
- for (j = 0; j < 64; j += 64) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_32X32, ref, 0);
- }
- for (j = 64; j < 96; j += 16) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 1);
- }
- break;
-
- case TX_16X16:
- for (j = 0; j < 64; j += 16) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 0);
- }
- for (j = 64; j < 96; j += 16) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 1);
- }
- break;
-
- case TX_8X8:
- for (j = 0; j < 64; j += 4) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 0);
- }
- for (j = 64; j < 96; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 1);
- }
- break;
-
- case TX_4X4:
- for (j = 0; j < 64; ++j) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 0);
- }
- for (j = 64; j < 96; ++j) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 1);
- }
- break;
-
- default:
- break;
- }
-}
-
-static void update_nzcs_mb16(VP9_COMMON *cm,
- MACROBLOCKD *xd,
- int mb_row,
- int mb_col) {
- MODE_INFO *m = xd->mode_info_context;
- MB_MODE_INFO *const mi = &m->mbmi;
- int j, nzc_context;
- const int ref = m->mbmi.ref_frame != INTRA_FRAME;
-
- assert(mb_col == get_mb_col(xd));
- assert(mb_row == get_mb_row(xd));
-
- if (mi->mb_skip_coeff)
- return;
-
- switch (mi->txfm_size) {
- case TX_16X16:
- for (j = 0; j < 16; j += 16) {
- nzc_context = vp9_get_nzc_context_y_mb16(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 0);
- }
- for (j = 16; j < 24; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 1);
- }
- break;
-
- case TX_8X8:
- for (j = 0; j < 16; j += 4) {
- nzc_context = vp9_get_nzc_context_y_mb16(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 0);
- }
- if (mi->mode == I8X8_PRED || mi->mode == SPLITMV) {
- for (j = 16; j < 24; ++j) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 1);
- }
- } else {
- for (j = 16; j < 24; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 1);
- }
- }
- break;
-
- case TX_4X4:
- for (j = 0; j < 16; ++j) {
- nzc_context = vp9_get_nzc_context_y_mb16(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 0);
- }
- for (j = 16; j < 24; ++j) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 1);
- }
- break;
-
- default:
- break;
- }
-}
-
-void vp9_update_nzc_counts(VP9_COMMON *cm,
- MACROBLOCKD *xd,
- int mb_row,
- int mb_col) {
- if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64)
- update_nzcs_sb64(cm, xd, mb_row, mb_col);
- else if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32)
- update_nzcs_sb32(cm, xd, mb_row, mb_col);
- else
- update_nzcs_mb16(cm, xd, mb_row, mb_col);
-}
-#endif // CONFIG_CODE_NONZEROCOUNT
-
// #define COEF_COUNT_TESTING
#define COEF_COUNT_SAT 24
@@ -2777,111 +1588,105 @@ void vp9_adapt_coef_probs(VP9_COMMON *cm) {
count_sat, update_factor);
}
-#if CONFIG_CODE_NONZEROCOUNT
-static void adapt_nzc_probs_common(VP9_COMMON *cm,
+#if CONFIG_CODE_ZEROGROUP
+OrientationType vp9_get_orientation(int rc, TX_SIZE tx_size) {
+ int i = rc >> (tx_size + 2);
+ int j = rc & ((4 << tx_size) - 1);
+ if (i > 2 * j)
+ return VERTICAL;
+ else if (j > 2 * i)
+ return HORIZONTAL;
+ else
+ return DIAGONAL;
+ /*
+ if (i == 0 && j == 0) return DIAGONAL;
+ while (i > 1 || j > 1) {
+ i >>= 1;
+ j >>= 1;
+ }
+ if (i == 0 && j == 1)
+ return HORIZONTAL; // horizontal
+ else if (i == 1 && j == 1)
+ return DIAGONAL; // diagonal
+ else if (i == 1 && j == 0)
+ return VERTICAL; // vertical
+ assert(0);
+ */
+}
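
To make the split above concrete, here is a minimal standalone sketch (an
illustration, not part of the patch) that reimplements the same i/j
arithmetic and prints the orientation map of a 4x4 block; the only
assumption is that TX_4X4 equals 0, per the TX_SIZE enum ordering.

#include <stdio.h>

typedef enum { HORIZONTAL = 0, DIAGONAL, VERTICAL } Orientation;

static Orientation get_orientation(int rc, int tx_size) {
  const int i = rc >> (tx_size + 2);        /* coefficient row */
  const int j = rc & ((4 << tx_size) - 1);  /* coefficient column */
  if (i > 2 * j) return VERTICAL;
  if (j > 2 * i) return HORIZONTAL;
  return DIAGONAL;
}

int main(void) {
  /* Prints the map below for TX_4X4: the first row (except DC) is
   * HORIZONTAL, the first column is VERTICAL, the rest near the
   * diagonal are DIAGONAL:
   *   D H H H
   *   V D D H
   *   V D D D
   *   V V D D */
  int r, c;
  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c)
      printf("%c ", "HDV"[get_orientation(r * 4 + c, 0)]);
    printf("\n");
  }
  return 0;
}
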
+
+int vp9_use_eoo(int c, int seg_eob, const int *scan,
+ TX_SIZE tx_size, int *is_last_zero, int *is_eoo) {
+ // NOTE: returning 0 from this function turns off the eoo symbols.
+ // For instance, we could experiment with turning eoo off for smaller
+ // blocks and/or lower bands.
+ int o = vp9_get_orientation(scan[c], tx_size);
+ int band = get_coef_band(scan, tx_size, c);
+ int use_eoo = (!is_last_zero[o] &&
+ !is_eoo[o] &&
+ band <= ZPC_EOO_BAND_UPPER &&
+ band >= ZPC_EOO_BAND_LOWER &&
+ get_zpc_used(tx_size) &&
+ seg_eob - c > (ZPC_USEEOO_THRESH << tx_size) &&
+ is_eoo[0] + is_eoo[1] + is_eoo[2] < 2);
+ return use_eoo;
+}
+
+int vp9_is_eoo(int c, int eob, const int *scan, TX_SIZE tx_size,
+ const int16_t *qcoeff_ptr, int *last_nz_pos) {
+ int rc = scan[c];
+ int o = vp9_get_orientation(rc, tx_size);
+ int eoo = c > last_nz_pos[o];
+ return eoo;
+}
+
+static void adapt_zpc_probs_common(VP9_COMMON *cm,
TX_SIZE tx_size,
int count_sat,
int update_factor) {
- int c, r, b, n;
+ int r, b, p, n;
int count, factor;
- unsigned int nzc_branch_ct[NZC32X32_NODES][2];
- vp9_prob nzc_probs[NZC32X32_NODES];
- int tokens, nodes;
- const vp9_tree_index *nzc_tree;
- vp9_prob *dst_nzc_probs;
- vp9_prob *pre_nzc_probs;
- unsigned int *nzc_counts;
-
- if (!get_nzc_used(tx_size)) return;
+ vp9_zpc_probs *zpc_probs;
+ vp9_zpc_probs *pre_zpc_probs;
+ vp9_zpc_count *zpc_counts;
+ if (!get_zpc_used(tx_size)) return;
if (tx_size == TX_32X32) {
- tokens = NZC32X32_TOKENS;
- nzc_tree = vp9_nzc32x32_tree;
- dst_nzc_probs = cm->fc.nzc_probs_32x32[0][0][0];
- pre_nzc_probs = cm->fc.pre_nzc_probs_32x32[0][0][0];
- nzc_counts = cm->fc.nzc_counts_32x32[0][0][0];
+ zpc_probs = &cm->fc.zpc_probs_32x32;
+ pre_zpc_probs = &cm->fc.pre_zpc_probs_32x32;
+ zpc_counts = &cm->fc.zpc_counts_32x32;
} else if (tx_size == TX_16X16) {
- tokens = NZC16X16_TOKENS;
- nzc_tree = vp9_nzc16x16_tree;
- dst_nzc_probs = cm->fc.nzc_probs_16x16[0][0][0];
- pre_nzc_probs = cm->fc.pre_nzc_probs_16x16[0][0][0];
- nzc_counts = cm->fc.nzc_counts_16x16[0][0][0];
+ zpc_probs = &cm->fc.zpc_probs_16x16;
+ pre_zpc_probs = &cm->fc.pre_zpc_probs_16x16;
+ zpc_counts = &cm->fc.zpc_counts_16x16;
} else if (tx_size == TX_8X8) {
- tokens = NZC8X8_TOKENS;
- nzc_tree = vp9_nzc8x8_tree;
- dst_nzc_probs = cm->fc.nzc_probs_8x8[0][0][0];
- pre_nzc_probs = cm->fc.pre_nzc_probs_8x8[0][0][0];
- nzc_counts = cm->fc.nzc_counts_8x8[0][0][0];
+ zpc_probs = &cm->fc.zpc_probs_8x8;
+ pre_zpc_probs = &cm->fc.pre_zpc_probs_8x8;
+ zpc_counts = &cm->fc.zpc_counts_8x8;
} else {
- nzc_tree = vp9_nzc4x4_tree;
- tokens = NZC4X4_TOKENS;
- dst_nzc_probs = cm->fc.nzc_probs_4x4[0][0][0];
- pre_nzc_probs = cm->fc.pre_nzc_probs_4x4[0][0][0];
- nzc_counts = cm->fc.nzc_counts_4x4[0][0][0];
+ zpc_probs = &cm->fc.zpc_probs_4x4;
+ pre_zpc_probs = &cm->fc.pre_zpc_probs_4x4;
+ zpc_counts = &cm->fc.zpc_counts_4x4;
}
- nodes = tokens - 1;
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c)
- for (r = 0; r < REF_TYPES; ++r)
- for (b = 0; b < BLOCK_TYPES; ++b) {
- int offset = c * REF_TYPES * BLOCK_TYPES + r * BLOCK_TYPES + b;
- int offset_nodes = offset * nodes;
- int offset_tokens = offset * tokens;
- vp9_tree_probs_from_distribution(nzc_tree,
- nzc_probs, nzc_branch_ct,
- nzc_counts + offset_tokens, 0);
- for (n = 0; n < nodes; ++n) {
- count = nzc_branch_ct[n][0] + nzc_branch_ct[n][1];
+ for (r = 0; r < REF_TYPES; ++r) {
+ for (b = 0; b < ZPC_BANDS; ++b) {
+ for (p = 0; p < ZPC_PTOKS; ++p) {
+ for (n = 0; n < ZPC_NODES; ++n) {
+ vp9_prob prob = get_binary_prob((*zpc_counts)[r][b][p][n][0],
+ (*zpc_counts)[r][b][p][n][1]);
+ count = (*zpc_counts)[r][b][p][n][0] + (*zpc_counts)[r][b][p][n][1];
count = count > count_sat ? count_sat : count;
factor = (update_factor * count / count_sat);
- dst_nzc_probs[offset_nodes + n] =
- weighted_prob(pre_nzc_probs[offset_nodes + n],
- nzc_probs[n], factor);
+ (*zpc_probs)[r][b][p][n] = weighted_prob(
+ (*pre_zpc_probs)[r][b][p][n], prob, factor);
}
}
-}
-
-static void adapt_nzc_pcat(VP9_COMMON *cm, int count_sat, int update_factor) {
- int c, t;
- int count, factor;
- if (!(get_nzc_used(TX_4X4) || get_nzc_used(TX_8X8) ||
- get_nzc_used(TX_16X16) || get_nzc_used(TX_32X32)))
- return;
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (t = 0; t < NZC_TOKENS_EXTRA; ++t) {
- int bits = vp9_extranzcbits[t + NZC_TOKENS_NOEXTRA];
- int b;
- for (b = 0; b < bits; ++b) {
- vp9_prob prob = get_binary_prob(cm->fc.nzc_pcat_counts[c][t][b][0],
- cm->fc.nzc_pcat_counts[c][t][b][1]);
- count = cm->fc.nzc_pcat_counts[c][t][b][0] +
- cm->fc.nzc_pcat_counts[c][t][b][1];
- count = count > count_sat ? count_sat : count;
- factor = (update_factor * count / count_sat);
- cm->fc.nzc_pcat_probs[c][t][b] = weighted_prob(
- cm->fc.pre_nzc_pcat_probs[c][t][b], prob, factor);
- }
}
}
}
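
For orientation, here is a sketch of the update rule applied in the loop
above: the branch counts become a maximum-likelihood probability, which is
blended with the previous frame's probability, and the blend weight
saturates once count reaches count_sat. The helpers are reimplemented for
illustration; the real get_binary_prob()/weighted_prob() live in the common
headers and their exact rounding may differ slightly.

typedef unsigned char prob_t;

static prob_t clip_prob(int p) {
  return (prob_t)(p > 255 ? 255 : (p < 1 ? 1 : p));
}

/* Maximum-likelihood probability of taking branch 0. */
static prob_t binary_prob(unsigned int c0, unsigned int c1) {
  const unsigned int den = c0 + c1;
  return den == 0 ? 128 : clip_prob((int)((c0 * 256 + (den >> 1)) / den));
}

/* Blend the previous probability toward the new estimate; factor is 0..256. */
static prob_t weighted(prob_t pre, prob_t prob, int factor) {
  return (prob_t)((pre * (256 - factor) + prob * factor + 128) >> 8);
}

/* Example: counts (60, 20) give binary_prob = 192 with this rounding; the
 * 80 observations exceed count_sat, so factor saturates at update_factor
 * and the adapted value is weighted(pre, 192, update_factor). */
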
-// #define NZC_COUNT_TESTING
-void vp9_adapt_nzc_probs(VP9_COMMON *cm) {
+// #define ZPC_COUNT_TESTING
+void vp9_adapt_zpc_probs(VP9_COMMON *cm) {
int count_sat;
int update_factor; /* denominator 256 */
-#ifdef NZC_COUNT_TESTING
- int c, r, b, t;
- printf("\n");
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c)
- for (r = 0; r < REF_TYPES; ++r) {
- for (b = 0; b < BLOCK_TYPES; ++b) {
- printf(" {");
- for (t = 0; t < NZC4X4_TOKENS; ++t) {
- printf(" %d,", cm->fc.nzc_counts_4x4[c][r][b][t]);
- }
- printf("}\n");
- }
- printf("\n");
- }
-#endif
if (cm->frame_type == KEY_FRAME) {
update_factor = COEF_MAX_UPDATE_FACTOR_KEY;
@@ -2894,10 +1699,9 @@ void vp9_adapt_nzc_probs(VP9_COMMON *cm) {
count_sat = COEF_COUNT_SAT;
}
- adapt_nzc_probs_common(cm, TX_4X4, count_sat, update_factor);
- adapt_nzc_probs_common(cm, TX_8X8, count_sat, update_factor);
- adapt_nzc_probs_common(cm, TX_16X16, count_sat, update_factor);
- adapt_nzc_probs_common(cm, TX_32X32, count_sat, update_factor);
- adapt_nzc_pcat(cm, count_sat, update_factor);
+ adapt_zpc_probs_common(cm, TX_4X4, count_sat, update_factor);
+ adapt_zpc_probs_common(cm, TX_8X8, count_sat, update_factor);
+ adapt_zpc_probs_common(cm, TX_16X16, count_sat, update_factor);
+ adapt_zpc_probs_common(cm, TX_32X32, count_sat, update_factor);
}
-#endif // CONFIG_CODE_NONZEROCOUNT
+#endif // CONFIG_CODE_ZEROGROUP
diff --git a/vp9/common/vp9_entropy.h b/vp9/common/vp9_entropy.h
index db167420c..123b5e289 100644
--- a/vp9/common/vp9_entropy.h
+++ b/vp9/common/vp9_entropy.h
@@ -45,11 +45,11 @@ extern struct vp9_token vp9_coef_encodings[MAX_ENTROPY_TOKENS];
typedef struct {
vp9_tree_p tree;
const vp9_prob *prob;
- int Len;
+ int len;
int base_val;
-} vp9_extra_bit_struct;
+} vp9_extra_bit;
-extern vp9_extra_bit_struct vp9_extra_bits[12]; /* indexed by token value */
+extern vp9_extra_bit vp9_extra_bits[12]; /* indexed by token value */
#define PROB_UPDATE_BASELINE_COST 7
@@ -167,88 +167,61 @@ void vp9_get_model_distribution(vp9_prob model, vp9_prob *tree_probs,
int b, int r);
#endif // CONFIG_MODELCOEFPROB
-#if CONFIG_CODE_NONZEROCOUNT
-/* Alphabet for number of non-zero symbols in block */
-#define NZC_0 0 /* Used for all blocks */
-#define NZC_1 1 /* Used for all blocks */
-#define NZC_2 2 /* Used for all blocks */
-#define NZC_3TO4 3 /* Used for all blocks */
-#define NZC_5TO8 4 /* Used for all blocks */
-#define NZC_9TO16 5 /* Used for all blocks */
-#define NZC_17TO32 6 /* Used for 8x8 and larger blocks */
-#define NZC_33TO64 7 /* Used for 8x8 and larger blocks */
-#define NZC_65TO128 8 /* Used for 16x16 and larger blocks */
-#define NZC_129TO256 9 /* Used for 16x16 and larger blocks */
-#define NZC_257TO512 10 /* Used for 32x32 and larger blocks */
-#define NZC_513TO1024 11 /* Used for 32x32 and larger blocks */
-
-/* Number of tokens for each block size */
-#define NZC4X4_TOKENS 6
-#define NZC8X8_TOKENS 8
-#define NZC16X16_TOKENS 10
-#define NZC32X32_TOKENS 12
-
-/* Number of nodes for each block size */
-#define NZC4X4_NODES 5
-#define NZC8X8_NODES 7
-#define NZC16X16_NODES 9
-#define NZC32X32_NODES 11
-
-/* Max number of tokens with extra bits */
-#define NZC_TOKENS_EXTRA 9
-
-/* Max number of extra bits */
-#define NZC_BITS_EXTRA 9
-
-/* Tokens without extra bits */
-#define NZC_TOKENS_NOEXTRA (NZC32X32_TOKENS - NZC_TOKENS_EXTRA)
-
-#define MAX_NZC_CONTEXTS 3
-
-/* whether to update extra bit probabilities */
-#define NZC_PCAT_UPDATE
-
-/* nzc trees */
-extern const vp9_tree_index vp9_nzc4x4_tree[];
-extern const vp9_tree_index vp9_nzc8x8_tree[];
-extern const vp9_tree_index vp9_nzc16x16_tree[];
-extern const vp9_tree_index vp9_nzc32x32_tree[];
-
-/* nzc encodings */
-extern struct vp9_token vp9_nzc4x4_encodings[NZC4X4_TOKENS];
-extern struct vp9_token vp9_nzc8x8_encodings[NZC8X8_TOKENS];
-extern struct vp9_token vp9_nzc16x16_encodings[NZC16X16_TOKENS];
-extern struct vp9_token vp9_nzc32x32_encodings[NZC32X32_TOKENS];
-
-#define codenzc(x) (\
- (x) <= 3 ? (x) : (x) <= 4 ? 3 : (x) <= 8 ? 4 : \
- (x) <= 16 ? 5 : (x) <= 32 ? 6 : (x) <= 64 ? 7 :\
- (x) <= 128 ? 8 : (x) <= 256 ? 9 : (x) <= 512 ? 10 : 11)
-
-int vp9_get_nzc_context_y_sb64(struct VP9Common *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block);
-int vp9_get_nzc_context_y_sb32(struct VP9Common *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block);
-int vp9_get_nzc_context_y_mb16(struct VP9Common *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block);
-int vp9_get_nzc_context_uv_sb64(struct VP9Common *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block);
-int vp9_get_nzc_context_uv_sb32(struct VP9Common *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block);
-int vp9_get_nzc_context_uv_mb16(struct VP9Common *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block);
-int vp9_get_nzc_context(struct VP9Common *cm, MACROBLOCKD *xd, int block);
-void vp9_update_nzc_counts(struct VP9Common *cm, MACROBLOCKD *xd,
- int mb_row, int mb_col);
-void vp9_adapt_nzc_probs(struct VP9Common *cm);
-
-/* Extra bits array */
-extern const int vp9_extranzcbits[NZC32X32_TOKENS];
-
-/* Base nzc values */
-extern const int vp9_basenzcvalue[NZC32X32_TOKENS];
-
-#endif // CONFIG_CODE_NONZEROCOUNT
+#if CONFIG_CODE_ZEROGROUP
+
+#define ZPC_STATS
+
+typedef enum {
+ HORIZONTAL = 0,
+ DIAGONAL,
+ VERTICAL,
+} OrientationType;
+
+/* Note: EOB should eventually become part of this symbol, but we are
+ * holding off for now because that is a major change to the rest of
+ * the codebase. */
+
+#define ZPC_ISOLATED (MAX_ENTROPY_TOKENS + 0) /* Isolated zero */
+
+/* ZPC_EOORIENT: All remaining coefficients in the same orientation are 0.
+ * In other words, all remaining coeffs in the current subband and all
+ * children of the current subband are zero. Subbands are defined by
+ * dyadic partitioning in the coefficient domain. */
+#define ZPC_EOORIENT (MAX_ENTROPY_TOKENS + 1) /* End of Orientation */
+
+/* Band limits over which the eoo bit is sent */
+#define ZPC_EOO_BAND_LOWER 0
+#define ZPC_EOO_BAND_UPPER 5
+
+#define USE_ZPC_EOORIENT 1 /* 0: not used */
+ /* 1: used */
+#define ZPC_NODES 1
+
+#define UNKNOWN_TOKEN 255 /* Not signalled, encoder only */
+
+#define ZPC_BANDS 3 /* context bands for izr */
+#define ZPC_PTOKS 3 /* context pt for zpcs */
+
+#define coef_to_zpc_band(b) ((b) >> 1)
+#define coef_to_zpc_ptok(p) ((p) > 2 ? 2 : (p))
+
+typedef vp9_prob vp9_zpc_probs[REF_TYPES][ZPC_BANDS]
+ [ZPC_PTOKS][ZPC_NODES];
+typedef unsigned int vp9_zpc_count[REF_TYPES][ZPC_BANDS]
+ [ZPC_PTOKS][ZPC_NODES][2];
+
+OrientationType vp9_get_orientation(int rc, TX_SIZE tx_size);
+int vp9_use_eoo(int c, int eob, const int *scan, TX_SIZE tx_size,
+ int *is_last_zero, int *is_eoo);
+int vp9_is_eoo(int c, int eob, const int *scan, TX_SIZE tx_size,
+ const int16_t *qcoeff_ptr, int *last_nz_pos);
+
+#define ZPC_USEEOO_THRESH 4
+#define ZPC_ZEROSSAVED_EOO 7 /* encoder only */
+
+void vp9_adapt_zpc_probs(struct VP9Common *cm);
+
+#endif // CONFIG_CODE_ZEROGROUP
#include "vp9/common/vp9_coefupdateprobs.h"
diff --git a/vp9/common/vp9_entropymode.c b/vp9/common/vp9_entropymode.c
index 0db2de6ee..f19dc12d3 100644
--- a/vp9/common/vp9_entropymode.c
+++ b/vp9/common/vp9_entropymode.c
@@ -718,11 +718,6 @@ void vp9_setup_past_independence(VP9_COMMON *cm, MACROBLOCKD *xd) {
vp9_update_mode_info_border(cm, cm->mip);
vp9_update_mode_info_in_image(cm, cm->mi);
-#if CONFIG_NEW_MVREF
- // Defaults probabilities for encoding the MV ref id signal
- vpx_memset(xd->mb_mv_ref_probs, VP9_DEFAULT_MV_REF_PROB,
- sizeof(xd->mb_mv_ref_probs));
-#endif
cm->ref_frame_sign_bias[GOLDEN_FRAME] = 0;
cm->ref_frame_sign_bias[ALTREF_FRAME] = 0;
diff --git a/vp9/common/vp9_entropymv.c b/vp9/common/vp9_entropymv.c
index 0a81015cb..320c29c6a 100644
--- a/vp9/common/vp9_entropymv.c
+++ b/vp9/common/vp9_entropymv.c
@@ -17,13 +17,8 @@
#define MV_COUNT_SAT 16
#define MV_MAX_UPDATE_FACTOR 160
-#if CONFIG_NEW_MVREF
-/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
-#define COMPANDED_MVREF_THRESH 1000000
-#else
/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
#define COMPANDED_MVREF_THRESH 8
-#endif
/* Smooth or bias the mv-counts before prob computation */
/* #define SMOOTH_MV_COUNTS */
diff --git a/vp9/common/vp9_entropymv.h b/vp9/common/vp9_entropymv.h
index de1bd4383..118574b62 100644
--- a/vp9/common/vp9_entropymv.h
+++ b/vp9/common/vp9_entropymv.h
@@ -26,12 +26,6 @@ int vp9_use_nmv_hp(const MV *ref);
#define VP9_NMV_UPDATE_PROB 255
-#if CONFIG_NEW_MVREF
-#define VP9_MVREF_UPDATE_PROB 252
-#define VP9_DEFAULT_MV_REF_PROB 192
-#define VP9_MV_REF_UPDATE_COST (14 << 8)
-#endif
-
//#define MV_GROUP_UPDATE
#define LOW_PRECISION_MV_UPDATE /* Use 7 bit forward update */
diff --git a/vp9/common/vp9_findnearmv.c b/vp9/common/vp9_findnearmv.c
index 832e8ddf1..b5a32d9b3 100644
--- a/vp9/common/vp9_findnearmv.c
+++ b/vp9/common/vp9_findnearmv.c
@@ -39,8 +39,6 @@ vp9_prob *vp9_mv_ref_probs(VP9_COMMON *pc, vp9_prob p[4], int context) {
}
void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
- uint8_t *ref_y_buffer,
- int ref_y_stride,
int_mv *mvlist,
int_mv *nearest,
int_mv *near) {
diff --git a/vp9/common/vp9_findnearmv.h b/vp9/common/vp9_findnearmv.h
index c360c20eb..085454512 100644
--- a/vp9/common/vp9_findnearmv.h
+++ b/vp9/common/vp9_findnearmv.h
@@ -24,8 +24,6 @@
// above and a number of columns of pixels to the left to select the one
// with the best score to use as the ref motion vector
void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
- uint8_t *ref_y_buffer,
- int ref_y_stride,
int_mv *mvlist,
int_mv *nearest,
int_mv *near);
diff --git a/vp9/common/vp9_invtrans.c b/vp9/common/vp9_invtrans.c
index 3b11fa9cb..0673fd81a 100644
--- a/vp9/common/vp9_invtrans.c
+++ b/vp9/common/vp9_invtrans.c
@@ -38,10 +38,10 @@ void vp9_inverse_transform_sby_32x32(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
+ const int offset = x_idx * 32 + y_idx * 32 * stride;
vp9_short_idct32x32(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 1024),
- xd->diff + x_idx * 32 + y_idx * 32 * stride,
- stride * 2);
+ xd->plane[0].diff + offset, stride * 2);
}
}
@@ -55,15 +55,14 @@ void vp9_inverse_transform_sby_16x16(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
const TX_TYPE tx_type = get_tx_type_16x16(xd,
(y_idx * bstride + x_idx) * 4);
+ const int offset = x_idx * 16 + y_idx * 16 * stride;
if (tx_type == DCT_DCT) {
vp9_inverse_transform_b_16x16(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 256),
- xd->diff + x_idx * 16 + y_idx * stride * 16,
- stride * 2);
+ xd->plane[0].diff + offset, stride * 2);
} else {
vp9_short_iht16x16(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 256),
- xd->diff + x_idx * 16 + y_idx * stride * 16,
- stride, tx_type);
+ xd->plane[0].diff + offset, stride, tx_type);
}
}
}
@@ -77,15 +76,14 @@ void vp9_inverse_transform_sby_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
const TX_TYPE tx_type = get_tx_type_8x8(xd, (y_idx * bstride + x_idx) * 2);
+ const int offset = x_idx * 8 + y_idx * 8 * stride;
if (tx_type == DCT_DCT) {
vp9_inverse_transform_b_8x8(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 64),
- xd->diff + x_idx * 8 + y_idx * stride * 8,
- stride * 2);
+ xd->plane[0].diff + offset, stride * 2);
} else {
vp9_short_iht8x8(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 64),
- xd->diff + x_idx * 8 + y_idx * stride * 8,
- stride, tx_type);
+ xd->plane[0].diff + offset, stride, tx_type);
}
}
}
@@ -99,16 +97,15 @@ void vp9_inverse_transform_sby_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
const TX_TYPE tx_type = get_tx_type_4x4(xd, y_idx * bstride + x_idx);
+ const int offset = x_idx * 4 + y_idx * 4 * stride;
if (tx_type == DCT_DCT) {
vp9_inverse_transform_b_4x4(xd, xd->plane[0].eobs[n],
BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 16),
- xd->diff + x_idx * 4 + y_idx * 4 * stride,
- stride * 2);
+ xd->plane[0].diff + offset, stride * 2);
} else {
vp9_short_iht4x4(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 16),
- xd->diff + x_idx * 4 + y_idx * 4 * stride,
- stride, tx_type);
+ xd->plane[0].diff + offset, stride, tx_type);
}
}
}
@@ -116,15 +113,12 @@ void vp9_inverse_transform_sby_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
void vp9_inverse_transform_sbuv_32x32(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
assert(bsize == BLOCK_SIZE_SB64X64);
- vp9_short_idct32x32(xd->plane[1].dqcoeff,
- xd->diff + 4096, 64);
- vp9_short_idct32x32(xd->plane[2].dqcoeff,
- xd->diff + 4096 + 1024, 64);
+ vp9_short_idct32x32(xd->plane[1].dqcoeff, xd->plane[1].diff, 64);
+ vp9_short_idct32x32(xd->plane[2].dqcoeff, xd->plane[2].diff, 64);
}
void vp9_inverse_transform_sbuv_16x16(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize), bhl = mb_height_log2(bsize);
- const int uoff = (16 * 16) << (bwl + bhl), voff = (uoff * 5) >> 2;
const int bw = 1 << (bwl - 1), bh = 1 << (bhl - 1);
const int stride = 16 << (bwl - 1);
int n;
@@ -134,15 +128,14 @@ void vp9_inverse_transform_sbuv_16x16(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
const int off = x_idx * 16 + y_idx * stride * 16;
vp9_inverse_transform_b_16x16(BLOCK_OFFSET(xd->plane[1].dqcoeff, n, 256),
- xd->diff + uoff + off, stride * 2);
+ xd->plane[1].diff + off, stride * 2);
vp9_inverse_transform_b_16x16(BLOCK_OFFSET(xd->plane[2].dqcoeff, n, 256),
- xd->diff + voff + off, stride * 2);
+ xd->plane[2].diff + off, stride * 2);
}
}
void vp9_inverse_transform_sbuv_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize) + 1, bhl = mb_height_log2(bsize) + 1;
- const int uoff = (8 * 8) << (bwl + bhl), voff = (uoff * 5) >> 2;
const int bw = 1 << (bwl - 1), bh = 1 << (bhl - 1);
const int stride = 8 << (bwl - 1);
int n;
@@ -152,15 +145,14 @@ void vp9_inverse_transform_sbuv_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
const int off = x_idx * 8 + y_idx * stride * 8;
vp9_inverse_transform_b_8x8(BLOCK_OFFSET(xd->plane[1].dqcoeff, n, 64),
- xd->diff + uoff + off, stride * 2);
+ xd->plane[1].diff + off, stride * 2);
vp9_inverse_transform_b_8x8(BLOCK_OFFSET(xd->plane[2].dqcoeff, n, 64),
- xd->diff + voff + off, stride * 2);
+ xd->plane[2].diff + off, stride * 2);
}
}
void vp9_inverse_transform_sbuv_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize) + 2, bhl = mb_height_log2(bsize) + 2;
- const int uoff = (4 * 4) << (bwl + bhl), voff = (uoff * 5) >> 2;
const int bw = 1 << (bwl - 1), bh = 1 << (bhl - 1);
const int stride = 4 << (bwl - 1);
int n;
@@ -171,9 +163,9 @@ void vp9_inverse_transform_sbuv_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
vp9_inverse_transform_b_4x4(xd, xd->plane[1].eobs[n],
BLOCK_OFFSET(xd->plane[1].dqcoeff, n, 16),
- xd->diff + uoff + off, stride * 2);
+ xd->plane[1].diff + off, stride * 2);
vp9_inverse_transform_b_4x4(xd, xd->plane[2].eobs[n],
BLOCK_OFFSET(xd->plane[2].dqcoeff, n, 16),
- xd->diff + voff + off, stride * 2);
+ xd->plane[2].diff + off, stride * 2);
}
}
diff --git a/vp9/common/vp9_mbpitch.c b/vp9/common/vp9_mbpitch.c
index 6ed5f27d9..d5f104d4d 100644
--- a/vp9/common/vp9_mbpitch.c
+++ b/vp9/common/vp9_mbpitch.c
@@ -36,32 +36,32 @@ static void setup_macroblock(MACROBLOCKD *mb, BLOCKSET bs) {
int i, stride;
if (bs == DEST) {
- y = &mb->dst.y_buffer;
- u = &mb->dst.u_buffer;
- v = &mb->dst.v_buffer;
+ y = &mb->plane[0].dst.buf;
+ u = &mb->plane[1].dst.buf;
+ v = &mb->plane[2].dst.buf;
y2 = NULL;
u2 = NULL;
v2 = NULL;
} else {
- y = &mb->pre.y_buffer;
- u = &mb->pre.u_buffer;
- v = &mb->pre.v_buffer;
+ y = &mb->plane[0].pre[0].buf;
+ u = &mb->plane[1].pre[0].buf;
+ v = &mb->plane[2].pre[0].buf;
- y2 = &mb->second_pre.y_buffer;
- u2 = &mb->second_pre.u_buffer;
- v2 = &mb->second_pre.v_buffer;
+ y2 = &mb->plane[0].pre[1].buf;
+ u2 = &mb->plane[1].pre[1].buf;
+ v2 = &mb->plane[2].pre[1].buf;
}
// luma
- stride = mb->dst.y_stride;
+ stride = mb->plane[0].dst.stride;
for (i = 0; i < 16; ++i) {
const int offset = (i >> 2) * 4 * stride + (i & 3) * 4;
setup_block(&blockd[i], y, y2, stride, offset, bs);
}
// chroma
- stride = mb->dst.uv_stride;
+ stride = mb->plane[1].dst.stride;
for (i = 16; i < 20; i++) {
const int offset = ((i - 16) >> 1) * 4 * stride + (i & 1) * 4;
setup_block(&blockd[i], u, u2, stride, offset, bs);
@@ -77,23 +77,23 @@ void vp9_setup_block_dptrs(MACROBLOCKD *mb) {
for (c = 0; c < 4; c++) {
const int to = r * 4 + c;
const int from = r * 4 * 16 + c * 4;
- blockd[to].diff = &mb->diff[from];
+ blockd[to].diff = &mb->plane[0].diff[from];
}
}
for (r = 0; r < 2; r++) {
for (c = 0; c < 2; c++) {
const int to = 16 + r * 2 + c;
- const int from = 256 + r * 4 * 8 + c * 4;
- blockd[to].diff = &mb->diff[from];
+ const int from = r * 4 * 8 + c * 4;
+ blockd[to].diff = &mb->plane[1].diff[from];
}
}
for (r = 0; r < 2; r++) {
for (c = 0; c < 2; c++) {
const int to = 20 + r * 2 + c;
- const int from = 320 + r * 4 * 8 + c * 4;
- blockd[to].diff = &mb->diff[from];
+ const int from = r * 4 * 8 + c * 4;
+ blockd[to].diff = &mb->plane[2].diff[from];
}
}
diff --git a/vp9/common/vp9_mvref_common.c b/vp9/common/vp9_mvref_common.c
index 666197366..fa4158f84 100644
--- a/vp9/common/vp9_mvref_common.c
+++ b/vp9/common/vp9_mvref_common.c
@@ -17,28 +17,16 @@ static int mb_mv_ref_search[MVREF_NEIGHBOURS][2] = {
{-2, 0}, {-1, -2}, {-2, -1}, {-2, -2}
};
-static int mb_ref_distance_weight[MVREF_NEIGHBOURS] =
- { 3, 3, 2, 1, 1, 1, 1, 1 };
-
static int sb_mv_ref_search[MVREF_NEIGHBOURS][2] = {
{0, -1}, {-1, 0}, {1, -1}, {-1, 1},
{-1, -1}, {0, -2}, {-2, 0}, {-1, -2}
};
-static int sb_ref_distance_weight[MVREF_NEIGHBOURS] =
- { 3, 3, 2, 2, 2, 1, 1, 1 };
-
-
-
static int sb64_mv_ref_search[MVREF_NEIGHBOURS][2] = {
{0, -1}, {-1, 0}, {1, -1}, {-1, 1},
{2, -1}, {-1, 2}, {3, -1}, {-1,-1}
};
-static int sb64_ref_distance_weight[MVREF_NEIGHBOURS] =
- { 1, 1, 1, 1, 1, 1, 1, 1 };
-
-
// clamp_mv_ref
#define MV_BORDER (16 << 3) // Allow 16 pels in 1/8th pel units
@@ -138,102 +126,25 @@ static void scale_mv(MACROBLOCKD *xd, MV_REFERENCE_FRAME this_ref_frame,
*/
}
-/*
-// Adds a new candidate reference vector to the sorted list.
-// If it is a repeat the weight of the existing entry is increased
-// and the order of the list is resorted.
-// This method of add plus sort has been deprecated for now as there is a
-// further sort of the best candidates in vp9_find_best_ref_mvs() and the
-// incremental benefit of both is small. If the decision is made to remove
-// the sort in vp9_find_best_ref_mvs() for performance reasons then it may be
-// worth re-instating some sort of list reordering by weight here.
-//
-static void addmv_and_shuffle(
- int_mv *mv_list,
- int *mv_scores,
- int *refmv_count,
- int_mv candidate_mv,
- int weight
-) {
-
- int i;
- int insert_point;
- int duplicate_found = 0;
-
- // Check for duplicates. If there is one increase its score.
- // We only compare vs the current top candidates.
- insert_point = (*refmv_count < (MAX_MV_REF_CANDIDATES - 1))
- ? *refmv_count : (MAX_MV_REF_CANDIDATES - 1);
-
- i = insert_point;
- if (*refmv_count > i)
- i++;
- while (i > 0) {
- i--;
- if (candidate_mv.as_int == mv_list[i].as_int) {
- duplicate_found = 1;
- mv_scores[i] += weight;
- break;
- }
- }
-
- // If no duplicate and the new candidate is good enough then add it.
- if (!duplicate_found ) {
- if (weight > mv_scores[insert_point]) {
- mv_list[insert_point].as_int = candidate_mv.as_int;
- mv_scores[insert_point] = weight;
- i = insert_point;
- }
- (*refmv_count)++;
- }
-
- // Reshuffle the list so that highest scoring mvs at the top.
- while (i > 0) {
- if (mv_scores[i] > mv_scores[i-1]) {
- int tmp_score = mv_scores[i-1];
- int_mv tmp_mv = mv_list[i-1];
-
- mv_scores[i-1] = mv_scores[i];
- mv_list[i-1] = mv_list[i];
- mv_scores[i] = tmp_score;
- mv_list[i] = tmp_mv;
- i--;
- } else
- break;
- }
-}
-*/
-
-// Adds a new candidate reference vector to the list.
-// The mv is thrown out if it is already in the list.
-// Unlike the addmv_and_shuffle() this does not reorder the list
-// but assumes that candidates are added in the order most likely to
-// match distance and reference frame bias.
+// Add a candidate mv.
+// Discard if it has already been seen.
static void add_candidate_mv(int_mv *mv_list, int *mv_scores,
int *candidate_count, int_mv candidate_mv,
int weight) {
- int i;
-
- // Make sure we dont insert off the end of the list
- const int insert_point = MIN(*candidate_count, MAX_MV_REF_CANDIDATES - 1);
-
- // Look for duplicates
- for (i = 0; i <= insert_point; ++i) {
- if (candidate_mv.as_int == mv_list[i].as_int)
- break;
- }
-
- // Add the candidate. If the list is already full it is only desirable that
- // it should overwrite if it has a higher weight than the last entry.
- if (i >= insert_point && weight > mv_scores[insert_point]) {
- mv_list[insert_point].as_int = candidate_mv.as_int;
- mv_scores[insert_point] = weight;
- *candidate_count += (*candidate_count < MAX_MV_REF_CANDIDATES);
+ if (*candidate_count == 0) {
+ mv_list[0].as_int = candidate_mv.as_int;
+ mv_scores[0] = weight;
+ *candidate_count += 1;
+ } else if ((*candidate_count == 1) &&
+ (candidate_mv.as_int != mv_list[0].as_int)) {
+ mv_list[1].as_int = candidate_mv.as_int;
+ mv_scores[1] = weight;
+ *candidate_count += 1;
}
}
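
A minimal sketch of this insertion rule's behaviour (assuming
MAX_MV_REF_CANDIDATES == 2, its value in this tree, and modelling int_mv
as a bare int): slot 0 fills first, slot 1 only accepts a vector that
differs from slot 0, and everything else is discarded.

#include <assert.h>

static void add_candidate(int *list, int *count, int candidate) {
  if (*count == 0) {
    list[0] = candidate;
    *count = 1;
  } else if (*count == 1 && candidate != list[0]) {
    list[1] = candidate;
    *count = 2;
  }
}

static void test_add_candidate(void) {
  int list[2] = {0, 0}, count = 0;
  add_candidate(list, &count, 5);
  add_candidate(list, &count, 5);  /* duplicate of slot 0: dropped */
  add_candidate(list, &count, 7);
  add_candidate(list, &count, 9);  /* list already full: dropped */
  assert(count == 2 && list[0] == 5 && list[1] == 7);
}
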
-// This function searches the neighbourhood of a given MB/SB and populates a
-// list of candidate reference vectors.
+// This function searches the neighbourhood of a given MB/SB
+// to try to find candidate reference vectors.
//
void vp9_find_mv_refs(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
MODE_INFO *lf_here, MV_REFERENCE_FRAME ref_frame,
@@ -241,7 +152,6 @@ void vp9_find_mv_refs(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
int i;
MODE_INFO *candidate_mi;
MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
- int_mv candidate_mvs[MAX_MV_REF_CANDIDATES];
int_mv c_refmv;
int_mv c2_refmv;
MV_REFERENCE_FRAME c_ref_frame;
@@ -250,24 +160,17 @@ void vp9_find_mv_refs(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
int refmv_count = 0;
int split_count = 0;
int (*mv_ref_search)[2];
- int *ref_distance_weight;
- int zero_seen = 0;
const int mb_col = (-xd->mb_to_left_edge) >> 7;
-
// Blank the reference vector lists and other local structures.
vpx_memset(mv_ref_list, 0, sizeof(int_mv) * MAX_MV_REF_CANDIDATES);
- vpx_memset(candidate_mvs, 0, sizeof(int_mv) * MAX_MV_REF_CANDIDATES);
vpx_memset(candidate_scores, 0, sizeof(candidate_scores));
if (mbmi->sb_type == BLOCK_SIZE_SB64X64) {
mv_ref_search = sb64_mv_ref_search;
- ref_distance_weight = sb64_ref_distance_weight;
} else if (mbmi->sb_type >= BLOCK_SIZE_SB32X32) {
mv_ref_search = sb_mv_ref_search;
- ref_distance_weight = sb_ref_distance_weight;
} else {
mv_ref_search = mb_mv_ref_search;
- ref_distance_weight = mb_ref_distance_weight;
}
// We first scan for candidate vectors that match the current reference frame
@@ -283,23 +186,16 @@ void vp9_find_mv_refs(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
(mv_ref_search[i][1] * xd->mode_info_stride);
if (get_matching_candidate(candidate_mi, ref_frame, &c_refmv)) {
- add_candidate_mv(candidate_mvs, candidate_scores,
- &refmv_count, c_refmv, ref_distance_weight[i] + 16);
+ add_candidate_mv(mv_ref_list, candidate_scores,
+ &refmv_count, c_refmv, 16);
}
split_count += (candidate_mi->mbmi.mode == SPLITMV);
}
}
- // Look in the last frame if it exists
- if (lf_here) {
- candidate_mi = lf_here;
- if (get_matching_candidate(candidate_mi, ref_frame, &c_refmv)) {
- add_candidate_mv(candidate_mvs, candidate_scores,
- &refmv_count, c_refmv, 18);
- }
- }
+
// More distant neighbours
for (i = 2; (i < MVREF_NEIGHBOURS) &&
- (refmv_count < (MAX_MV_REF_CANDIDATES - 1)); ++i) {
+ (refmv_count < MAX_MV_REF_CANDIDATES); ++i) {
const int mb_search_col = mb_col + mv_ref_search[i][0];
if ((mb_search_col >= cm->cur_tile_mb_col_start) &&
@@ -309,51 +205,55 @@ void vp9_find_mv_refs(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
(mv_ref_search[i][1] * xd->mode_info_stride);
if (get_matching_candidate(candidate_mi, ref_frame, &c_refmv)) {
- add_candidate_mv(candidate_mvs, candidate_scores,
- &refmv_count, c_refmv, ref_distance_weight[i] + 16);
+ add_candidate_mv(mv_ref_list, candidate_scores,
+ &refmv_count, c_refmv, 16);
}
}
}
+ // Look in the last frame if it exists
+ if (lf_here && (refmv_count < MAX_MV_REF_CANDIDATES)) {
+ candidate_mi = lf_here;
+ if (get_matching_candidate(candidate_mi, ref_frame, &c_refmv)) {
+ add_candidate_mv(mv_ref_list, candidate_scores,
+ &refmv_count, c_refmv, 16);
+ }
+ }
+
// If we have not found enough candidates, consider ones where the
// reference frame does not match. Break out when we have
// MAX_MV_REF_CANDIDATES candidates.
// Look first at spatial neighbours
- if (refmv_count < (MAX_MV_REF_CANDIDATES - 1)) {
- for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
- const int mb_search_col = mb_col + mv_ref_search[i][0];
-
- if ((mb_search_col >= cm->cur_tile_mb_col_start) &&
- (mb_search_col < cm->cur_tile_mb_col_end) &&
- ((mv_ref_search[i][1] << 7) >= xd->mb_to_top_edge)) {
-
- candidate_mi = here + mv_ref_search[i][0] +
- (mv_ref_search[i][1] * xd->mode_info_stride);
-
- get_non_matching_candidates(candidate_mi, ref_frame,
- &c_ref_frame, &c_refmv,
- &c2_ref_frame, &c2_refmv);
-
- if (c_ref_frame != INTRA_FRAME) {
- scale_mv(xd, ref_frame, c_ref_frame, &c_refmv, ref_sign_bias);
- add_candidate_mv(candidate_mvs, candidate_scores,
- &refmv_count, c_refmv, ref_distance_weight[i]);
- }
-
- if (c2_ref_frame != INTRA_FRAME) {
- scale_mv(xd, ref_frame, c2_ref_frame, &c2_refmv, ref_sign_bias);
- add_candidate_mv(candidate_mvs, candidate_scores,
- &refmv_count, c2_refmv, ref_distance_weight[i]);
- }
+ for (i = 0; (i < MVREF_NEIGHBOURS) &&
+ (refmv_count < MAX_MV_REF_CANDIDATES); ++i) {
+ const int mb_search_col = mb_col + mv_ref_search[i][0];
+
+ if ((mb_search_col >= cm->cur_tile_mb_col_start) &&
+ (mb_search_col < cm->cur_tile_mb_col_end) &&
+ ((mv_ref_search[i][1] << 7) >= xd->mb_to_top_edge)) {
+ candidate_mi = here + mv_ref_search[i][0] +
+ (mv_ref_search[i][1] * xd->mode_info_stride);
+
+ get_non_matching_candidates(candidate_mi, ref_frame,
+ &c_ref_frame, &c_refmv,
+ &c2_ref_frame, &c2_refmv);
+
+ if (c_ref_frame != INTRA_FRAME) {
+ scale_mv(xd, ref_frame, c_ref_frame, &c_refmv, ref_sign_bias);
+ add_candidate_mv(mv_ref_list, candidate_scores,
+ &refmv_count, c_refmv, 1);
}
- if (refmv_count >= (MAX_MV_REF_CANDIDATES - 1)) {
- break;
+ if (c2_ref_frame != INTRA_FRAME) {
+ scale_mv(xd, ref_frame, c2_ref_frame, &c2_refmv, ref_sign_bias);
+ add_candidate_mv(mv_ref_list, candidate_scores,
+ &refmv_count, c2_refmv, 1);
}
}
}
+
// Look at the last frame if it exists
- if (refmv_count < (MAX_MV_REF_CANDIDATES - 1) && lf_here) {
+ if (lf_here && (refmv_count < MAX_MV_REF_CANDIDATES)) {
candidate_mi = lf_here;
get_non_matching_candidates(candidate_mi, ref_frame,
&c_ref_frame, &c_refmv,
@@ -361,20 +261,20 @@ void vp9_find_mv_refs(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
if (c_ref_frame != INTRA_FRAME) {
scale_mv(xd, ref_frame, c_ref_frame, &c_refmv, ref_sign_bias);
- add_candidate_mv(candidate_mvs, candidate_scores,
- &refmv_count, c_refmv, 2);
+ add_candidate_mv(mv_ref_list, candidate_scores,
+ &refmv_count, c_refmv, 1);
}
if (c2_ref_frame != INTRA_FRAME) {
scale_mv(xd, ref_frame, c2_ref_frame, &c2_refmv, ref_sign_bias);
- add_candidate_mv(candidate_mvs, candidate_scores,
- &refmv_count, c2_refmv, 2);
+ add_candidate_mv(mv_ref_list, candidate_scores,
+ &refmv_count, c2_refmv, 1);
}
}
// Define inter mode coding context.
// 0,0 was best
- if (candidate_mvs[0].as_int == 0) {
+ if (mv_ref_list[0].as_int == 0) {
// 0,0 is only candidate
if (refmv_count <= 1) {
mbmi->mb_mode_context[ref_frame] = 0;
@@ -392,18 +292,8 @@ void vp9_find_mv_refs(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
mbmi->mb_mode_context[ref_frame] = candidate_scores[0] >= 16 ? 5 : 6;
}
- // Scan for 0,0 case and clamp non zero choices
+ // Clamp vectors
for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) {
- if (candidate_mvs[i].as_int == 0) {
- zero_seen = 1;
- } else {
- clamp_mv_ref(xd, &candidate_mvs[i]);
- }
+ clamp_mv_ref(xd, &mv_ref_list[i]);
}
- // 0,0 is always a valid reference. Add it if not already seen.
- if (!zero_seen)
- candidate_mvs[MAX_MV_REF_CANDIDATES-1].as_int = 0;
-
- // Copy over the candidate list.
- vpx_memcpy(mv_ref_list, candidate_mvs, sizeof(candidate_mvs));
}
diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h
index 13ec8657f..8f957345d 100644
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -74,17 +74,11 @@ typedef struct frame_contexts {
vp9_coeff_probs coef_probs_8x8[BLOCK_TYPES];
vp9_coeff_probs coef_probs_16x16[BLOCK_TYPES];
vp9_coeff_probs coef_probs_32x32[BLOCK_TYPES];
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_prob nzc_probs_4x4[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC4X4_NODES];
- vp9_prob nzc_probs_8x8[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC8X8_NODES];
- vp9_prob nzc_probs_16x16[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC16X16_NODES];
- vp9_prob nzc_probs_32x32[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC32X32_NODES];
- vp9_prob nzc_pcat_probs[MAX_NZC_CONTEXTS]
- [NZC_TOKENS_EXTRA][NZC_BITS_EXTRA];
+#if CONFIG_CODE_ZEROGROUP
+ vp9_zpc_probs zpc_probs_4x4;
+ vp9_zpc_probs zpc_probs_8x8;
+ vp9_zpc_probs zpc_probs_16x16;
+ vp9_zpc_probs zpc_probs_32x32;
#endif
nmv_context nmvc;
@@ -110,17 +104,11 @@ typedef struct frame_contexts {
vp9_coeff_probs pre_coef_probs_8x8[BLOCK_TYPES];
vp9_coeff_probs pre_coef_probs_16x16[BLOCK_TYPES];
vp9_coeff_probs pre_coef_probs_32x32[BLOCK_TYPES];
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_prob pre_nzc_probs_4x4[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC4X4_NODES];
- vp9_prob pre_nzc_probs_8x8[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC8X8_NODES];
- vp9_prob pre_nzc_probs_16x16[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC16X16_NODES];
- vp9_prob pre_nzc_probs_32x32[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC32X32_NODES];
- vp9_prob pre_nzc_pcat_probs[MAX_NZC_CONTEXTS]
- [NZC_TOKENS_EXTRA][NZC_BITS_EXTRA];
+#if CONFIG_CODE_ZEROGROUP
+ vp9_zpc_probs pre_zpc_probs_4x4;
+ vp9_zpc_probs pre_zpc_probs_8x8;
+ vp9_zpc_probs pre_zpc_probs_16x16;
+ vp9_zpc_probs pre_zpc_probs_32x32;
#endif
vp9_coeff_count coef_counts_4x4[BLOCK_TYPES];
@@ -130,17 +118,11 @@ typedef struct frame_contexts {
unsigned int eob_branch_counts[TX_SIZE_MAX_SB][BLOCK_TYPES][REF_TYPES]
[COEF_BANDS][PREV_COEF_CONTEXTS];
-#if CONFIG_CODE_NONZEROCOUNT
- unsigned int nzc_counts_4x4[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC4X4_TOKENS];
- unsigned int nzc_counts_8x8[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC8X8_TOKENS];
- unsigned int nzc_counts_16x16[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC16X16_TOKENS];
- unsigned int nzc_counts_32x32[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC32X32_TOKENS];
- unsigned int nzc_pcat_counts[MAX_NZC_CONTEXTS]
- [NZC_TOKENS_EXTRA][NZC_BITS_EXTRA][2];
+#if CONFIG_CODE_ZEROGROUP
+ vp9_zpc_count zpc_counts_4x4;
+ vp9_zpc_count zpc_counts_8x8;
+ vp9_zpc_count zpc_counts_16x16;
+ vp9_zpc_count zpc_counts_32x32;
#endif
nmv_context_counts NMVcount;
@@ -377,4 +359,8 @@ static int get_mb_row(const MACROBLOCKD *xd) {
static int get_mb_col(const MACROBLOCKD *xd) {
return ((-xd->mb_to_left_edge) >> 7);
}
+
+static int get_token_alloc(int mb_rows, int mb_cols) {
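+ // 24 4x4 blocks per MB (16 luma + 8 chroma) at up to 16 tokens each,
+ // plus a small per-MB margin (presumably for EOSB/terminator tokens).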
+ return mb_rows * mb_cols * (24 * 16 + 4);
+}
#endif // VP9_COMMON_VP9_ONYXC_INT_H_
diff --git a/vp9/common/vp9_pred_common.c b/vp9/common/vp9_pred_common.c
index ffdfa6782..e110cff44 100644
--- a/vp9/common/vp9_pred_common.c
+++ b/vp9/common/vp9_pred_common.c
@@ -9,6 +9,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <limits.h>
+
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_seg_common.h"
@@ -225,30 +227,26 @@ void vp9_set_pred_flag(MACROBLOCKD *const xd,
// predict various bitstream signals.
// Macroblock segment id prediction function
-unsigned char vp9_get_pred_mb_segid(const VP9_COMMON *const cm,
- const MACROBLOCKD *const xd, int MbIndex) {
- // Currently the prediction for the macroblock segment ID is
- // the value stored for this macroblock in the previous frame.
- if (!xd->mode_info_context->mbmi.sb_type) {
- return cm->last_frame_seg_map[MbIndex];
- } else {
- BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
- const int bh = 1 << mb_height_log2(bsize);
- const int bw = 1 << mb_width_log2(bsize);
- const int mb_col = MbIndex % cm->mb_cols;
- const int mb_row = MbIndex / cm->mb_cols;
- const int x_mbs = MIN(bw, cm->mb_cols - mb_col);
- const int y_mbs = MIN(bh, cm->mb_rows - mb_row);
+int vp9_get_pred_mb_segid(VP9_COMMON *cm, BLOCK_SIZE_TYPE sb_type,
+ int mb_row, int mb_col) {
+ const int mb_index = mb_row * cm->mb_cols + mb_col;
+ if (sb_type) {
+ const int bw = 1 << mb_width_log2(sb_type);
+ const int bh = 1 << mb_height_log2(sb_type);
+ const int ymbs = MIN(cm->mb_rows - mb_row, bh);
+ const int xmbs = MIN(cm->mb_cols - mb_col, bw);
+ int segment_id = INT_MAX;
int x, y;
- unsigned seg_id = -1;
- for (y = mb_row; y < mb_row + y_mbs; y++) {
- for (x = mb_col; x < mb_col + x_mbs; x++) {
- seg_id = MIN(seg_id, cm->last_frame_seg_map[cm->mb_cols * y + x]);
+ for (y = 0; y < ymbs; y++) {
+ for (x = 0; x < xmbs; x++) {
+ const int index = mb_index + (y * cm->mb_cols + x);
+ segment_id = MIN(segment_id, cm->last_frame_seg_map[index]);
}
}
-
- return seg_id;
+ return segment_id;
+ } else {
+ return cm->last_frame_seg_map[mb_index];
}
}
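
A minimal sketch of the prediction rule above, with plain ints standing in
for the VP9 types: for a superblock, the predicted segment id is the
minimum id stored in the previous frame's map over every macroblock the
block covers, clipped to the frame edge.

static int predict_seg_id(const unsigned char *last_map, int map_cols,
                          int map_rows, int mb_row, int mb_col,
                          int bw, int bh) {
  const int xmbs = (map_cols - mb_col < bw) ? map_cols - mb_col : bw;
  const int ymbs = (map_rows - mb_row < bh) ? map_rows - mb_row : bh;
  int seg_id = 255;  /* larger than any valid segment id */
  int x, y;
  for (y = 0; y < ymbs; y++) {
    for (x = 0; x < xmbs; x++) {
      const int id = last_map[(mb_row + y) * map_cols + (mb_col + x)];
      seg_id = (id < seg_id) ? id : seg_id;
    }
  }
  return seg_id;
}
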
diff --git a/vp9/common/vp9_pred_common.h b/vp9/common/vp9_pred_common.h
index 49dcf0a4c..222d5f3d0 100644
--- a/vp9/common/vp9_pred_common.h
+++ b/vp9/common/vp9_pred_common.h
@@ -43,9 +43,8 @@ void vp9_set_pred_flag(MACROBLOCKD *const xd,
unsigned char pred_flag);
-unsigned char vp9_get_pred_mb_segid(const VP9_COMMON *const cm,
- const MACROBLOCKD *const xd,
- int MbIndex);
+int vp9_get_pred_mb_segid(VP9_COMMON *cm, BLOCK_SIZE_TYPE sb_type,
+ int mb_row, int mb_col);
MV_REFERENCE_FRAME vp9_get_pred_ref(const VP9_COMMON *const cm,
const MACROBLOCKD *const xd);
diff --git a/vp9/common/vp9_recon.c b/vp9/common/vp9_recon.c
index 121776c69..41194504b 100644
--- a/vp9/common/vp9_recon.c
+++ b/vp9/common/vp9_recon.c
@@ -14,104 +14,64 @@
#include "vp9/common/vp9_blockd.h"
static INLINE void recon(int rows, int cols,
- const uint8_t *pred_ptr, int pred_stride,
const int16_t *diff_ptr, int diff_stride,
uint8_t *dst_ptr, int dst_stride) {
int r, c;
for (r = 0; r < rows; r++) {
for (c = 0; c < cols; c++)
- dst_ptr[c] = clip_pixel(diff_ptr[c] + pred_ptr[c]);
+ dst_ptr[c] = clip_pixel(diff_ptr[c] + dst_ptr[c]);
dst_ptr += dst_stride;
diff_ptr += diff_stride;
- pred_ptr += pred_stride;
}
}
void vp9_recon_b_c(uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr,
int stride) {
- recon(4, 4, pred_ptr, stride, diff_ptr, 16, dst_ptr, stride);
+ assert(pred_ptr == dst_ptr);
+ recon(4, 4, diff_ptr, 16, dst_ptr, stride);
}
void vp9_recon_uv_b_c(uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr,
int stride) {
- recon(4, 4, pred_ptr, stride, diff_ptr, 8, dst_ptr, stride);
+ assert(pred_ptr == dst_ptr);
+ recon(4, 4, diff_ptr, 8, dst_ptr, stride);
}
void vp9_recon4b_c(uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr,
int stride) {
- recon(4, 16, pred_ptr, stride, diff_ptr, 16, dst_ptr, stride);
+ assert(pred_ptr == dst_ptr);
+ recon(4, 16, diff_ptr, 16, dst_ptr, stride);
}
void vp9_recon2b_c(uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr,
int stride) {
- recon(4, 8, pred_ptr, stride, diff_ptr, 8, dst_ptr, stride);
+ assert(pred_ptr == dst_ptr);
+ recon(4, 8, diff_ptr, 8, dst_ptr, stride);
}
-void vp9_recon_sby_s_c(MACROBLOCKD *mb, uint8_t *dst,
- BLOCK_SIZE_TYPE bsize) {
- const int bw = 16 << mb_width_log2(bsize), bh = 16 << mb_height_log2(bsize);
- int x, y;
- const int stride = mb->block[0].dst_stride;
- const int16_t *diff = mb->diff;
-
- for (y = 0; y < bh; y++) {
- for (x = 0; x < bw; x++)
- dst[x] = clip_pixel(dst[x] + diff[x]);
-
- dst += stride;
- diff += bw;
- }
+static void recon_plane(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize, int plane) {
+ const int bw = 4 << (b_width_log2(bsize) - xd->plane[plane].subsampling_x);
+ const int bh = 4 << (b_height_log2(bsize) - xd->plane[plane].subsampling_y);
+ recon(bh, bw,
+ xd->plane[plane].diff, bw,
+ xd->plane[plane].dst.buf, xd->plane[plane].dst.stride);
}
-void vp9_recon_sbuv_s_c(MACROBLOCKD *mb, uint8_t *u_dst, uint8_t *v_dst,
- BLOCK_SIZE_TYPE bsize) {
- const int bwl = mb_width_log2(bsize), bhl = mb_height_log2(bsize);
- const int uoff = (16 * 16) << (bwl + bhl), voff = (uoff * 5) >> 2;
- const int bw = 8 << bwl, bh = 8 << bhl;
- int x, y;
- const int stride = mb->block[16].dst_stride;
- const int16_t *u_diff = mb->diff + uoff;
- const int16_t *v_diff = mb->diff + voff;
-
- for (y = 0; y < bh; y++) {
- for (x = 0; x < bw; x++) {
- u_dst[x] = clip_pixel(u_dst[x] + u_diff[x]);
- v_dst[x] = clip_pixel(v_dst[x] + v_diff[x]);
- }
-
- u_dst += stride;
- v_dst += stride;
- u_diff += bw;
- v_diff += bw;
- }
+void vp9_recon_sby_c(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
+ recon_plane(mb, bsize, 0);
}
-void vp9_recon_mby_c(MACROBLOCKD *xd) {
+void vp9_recon_sbuv_c(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
int i;
- for (i = 0; i < 16; i += 4) {
- BLOCKD *b = &xd->block[i];
-
- vp9_recon4b(*(b->base_dst) + b->dst, b->diff,
- *(b->base_dst) + b->dst, b->dst_stride);
- }
+ for (i = 1; i < MAX_MB_PLANE; i++)
+ recon_plane(mb, bsize, i);
}
-void vp9_recon_mb_c(MACROBLOCKD *xd) {
- int i;
-
- for (i = 0; i < 16; i += 4) {
- BLOCKD *b = &xd->block[i];
- vp9_recon4b(*(b->base_dst) + b->dst, b->diff,
- *(b->base_dst) + b->dst, b->dst_stride);
- }
-
- for (i = 16; i < 24; i += 2) {
- BLOCKD *b = &xd->block[i];
- vp9_recon2b(*(b->base_dst) + b->dst, b->diff,
- *(b->base_dst) + b->dst, b->dst_stride);
- }
+void vp9_recon_sb_c(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
+ vp9_recon_sby(xd, bsize);
+ vp9_recon_sbuv(xd, bsize);
}
diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c
index 64929c1bc..2697bd25c 100644
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -358,9 +358,6 @@ void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
w, h);
}
-/* Like vp9_build_inter_predictor, but takes the full-pel part of the
- * mv separately, and the fractional part as a q4.
- */
void vp9_build_inter_predictor_q4(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int_mv *mv_q4,
@@ -438,182 +435,231 @@ static void build_2x1_inter_predictor_wh(const BLOCKD *d0, const BLOCKD *d1,
}
}
-static void build_2x1_inter_predictor(const BLOCKD *d0, const BLOCKD *d1,
- struct scale_factors *s,
- int block_size, int stride,
- int which_mv, int weight,
- const struct subpix_fn_table *subpix,
- int row, int col) {
- uint8_t *d0_predictor = *(d0->base_dst) + d0->dst;
- uint8_t *d1_predictor = *(d1->base_dst) + d1->dst;
- struct scale_factors * scale = &s[which_mv];
- stride = d0->dst_stride;
-
- assert(d1_predictor - d0_predictor == block_size);
- assert(d1->pre == d0->pre + block_size);
-
- scale->set_scaled_offsets(scale, row, col);
-
- if (d0->bmi.as_mv[which_mv].as_int == d1->bmi.as_mv[which_mv].as_int) {
- uint8_t **base_pre = which_mv ? d0->base_second_pre : d0->base_pre;
-
- vp9_build_inter_predictor(*base_pre + d0->pre,
- d0->pre_stride,
- d0_predictor, stride,
- &d0->bmi.as_mv[which_mv],
- scale,
- 2 * block_size, block_size,
- weight, subpix);
- } else {
- uint8_t **base_pre0 = which_mv ? d0->base_second_pre : d0->base_pre;
- uint8_t **base_pre1 = which_mv ? d1->base_second_pre : d1->base_pre;
-
- vp9_build_inter_predictor(*base_pre0 + d0->pre,
- d0->pre_stride,
- d0_predictor, stride,
- &d0->bmi.as_mv[which_mv],
- scale,
- block_size, block_size,
- weight, subpix);
-
- scale->set_scaled_offsets(scale, row, col + block_size);
+#if !CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
- vp9_build_inter_predictor(*base_pre1 + d1->pre,
- d1->pre_stride,
- d1_predictor, stride,
- &d1->bmi.as_mv[which_mv],
- scale,
- block_size, block_size,
- weight, subpix);
- }
+static INLINE int round_mv_comp_q4(int value) {
+ return (value < 0 ? value - 2 : value + 2) / 4;
}
-static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
- /* If the MV points so far into the UMV border that no visible pixels
- * are used for reconstruction, the subpel part of the MV can be
- * discarded and the MV limited to 16 pixels with equivalent results.
- *
- * This limit kicks in at 19 pixels for the top and left edges, for
- * the 16 pixels plus 3 taps right of the central pixel when subpel
- * filtering. The bottom and right edges use 16 pixels plus 2 pixels
- * left of the central pixel when filtering.
- */
- if (mv->col < (xd->mb_to_left_edge - ((16 + VP9_INTERP_EXTEND) << 3)))
- mv->col = xd->mb_to_left_edge - (16 << 3);
- else if (mv->col > xd->mb_to_right_edge + ((15 + VP9_INTERP_EXTEND) << 3))
- mv->col = xd->mb_to_right_edge + (16 << 3);
-
- if (mv->row < (xd->mb_to_top_edge - ((16 + VP9_INTERP_EXTEND) << 3)))
- mv->row = xd->mb_to_top_edge - (16 << 3);
- else if (mv->row > xd->mb_to_bottom_edge + ((15 + VP9_INTERP_EXTEND) << 3))
- mv->row = xd->mb_to_bottom_edge + (16 << 3);
+static int mi_mv_pred_row_q4(MACROBLOCKD *mb, int off, int idx) {
+ const int temp = mb->mode_info_context->bmi[off + 0].as_mv[idx].as_mv.row +
+ mb->mode_info_context->bmi[off + 1].as_mv[idx].as_mv.row +
+ mb->mode_info_context->bmi[off + 4].as_mv[idx].as_mv.row +
+ mb->mode_info_context->bmi[off + 5].as_mv[idx].as_mv.row;
+ return round_mv_comp_q4(temp);
}
-/* A version of the above function for chroma block MVs.*/
-static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
- const int extend = VP9_INTERP_EXTEND;
-
- mv->col = (2 * mv->col < (xd->mb_to_left_edge - ((16 + extend) << 3))) ?
- (xd->mb_to_left_edge - (16 << 3)) >> 1 : mv->col;
- mv->col = (2 * mv->col > xd->mb_to_right_edge + ((15 + extend) << 3)) ?
- (xd->mb_to_right_edge + (16 << 3)) >> 1 : mv->col;
-
- mv->row = (2 * mv->row < (xd->mb_to_top_edge - ((16 + extend) << 3))) ?
- (xd->mb_to_top_edge - (16 << 3)) >> 1 : mv->row;
- mv->row = (2 * mv->row > xd->mb_to_bottom_edge + ((15 + extend) << 3)) ?
- (xd->mb_to_bottom_edge + (16 << 3)) >> 1 : mv->row;
+static int mi_mv_pred_col_q4(MACROBLOCKD *mb, int off, int idx) {
+ const int temp = mb->mode_info_context->bmi[off + 0].as_mv[idx].as_mv.col +
+ mb->mode_info_context->bmi[off + 1].as_mv[idx].as_mv.col +
+ mb->mode_info_context->bmi[off + 4].as_mv[idx].as_mv.col +
+ mb->mode_info_context->bmi[off + 5].as_mv[idx].as_mv.col;
+ return round_mv_comp_q4(temp);
}
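
A worked sketch of the averaging above (illustration only): the four luma
sub-block MV components covering one chroma block are summed and divided
by four, rounding half away from zero.

static int round_mv_comp_q4_sketch(int v) {
  return (v < 0 ? v - 2 : v + 2) / 4;  /* round half away from zero */
}

static int chroma_mv_row_sketch(const int rows[4]) {
  return round_mv_comp_q4_sketch(rows[0] + rows[1] + rows[2] + rows[3]);
}

/* e.g. rows {5, 6, 6, 7} sum to 24 and yield 6;
 * {-5, -6, -6, -7} sum to -24 and yield -6. */
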
-#if !CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
// TODO(jkoleszar): yet another mv clamping function :-(
MV clamp_mv_to_umv_border_sb(const MV *src_mv,
- int bwl, int bhl,
+ int bwl, int bhl, int ss_x, int ss_y,
int mb_to_left_edge, int mb_to_top_edge,
int mb_to_right_edge, int mb_to_bottom_edge) {
/* If the MV points so far into the UMV border that no visible pixels
* are used for reconstruction, the subpel part of the MV can be
* discarded and the MV limited to 16 pixels with equivalent results.
*/
- const int epel_left = (VP9_INTERP_EXTEND + (4 << bwl)) << 3;
- const int epel_right = epel_left - (1 << 3);
- const int epel_top = (VP9_INTERP_EXTEND + (4 << bhl)) << 3;
- const int epel_bottom = epel_top - (1 << 3);
+ const int spel_left = (VP9_INTERP_EXTEND + (4 << bwl)) << 4;
+ const int spel_right = spel_left - (1 << 4);
+ const int spel_top = (VP9_INTERP_EXTEND + (4 << bhl)) << 4;
+ const int spel_bottom = spel_top - (1 << 4);
MV clamped_mv;
- clamped_mv.col = clamp(src_mv->col,
- mb_to_left_edge - epel_left,
- mb_to_right_edge + epel_right);
- clamped_mv.row = clamp(src_mv->row,
- mb_to_top_edge - epel_top,
- mb_to_bottom_edge + epel_bottom);
+
+ assert(ss_x <= 1);
+ assert(ss_y <= 1);
+ clamped_mv.col = clamp(src_mv->col << (1 - ss_x),
+ (mb_to_left_edge << (1 - ss_x)) - spel_left,
+ (mb_to_right_edge << (1 - ss_x)) + spel_right);
+ clamped_mv.row = clamp(src_mv->row << (1 - ss_y),
+ (mb_to_top_edge << (1 - ss_y)) - spel_top,
+ (mb_to_bottom_edge << (1 - ss_y)) + spel_bottom);
return clamped_mv;
}
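
To make the bounds concrete, a worked example assuming VP9_INTERP_EXTEND is
4 (an assumption; check vp9_reconinter.h in this tree) for a 16x16 luma
block, i.e. bwl = bhl = 2 with ss_x = ss_y = 0. After the << (1 - ss)
shifts, everything is in 1/16-pel units.

static void clamp_bounds_example(void) {
  enum { INTERP_EXTEND = 4 };  /* assumed value of VP9_INTERP_EXTEND */
  const int bwl = 2;                                         /* 4 << 2 = 16 */
  const int spel_left  = (INTERP_EXTEND + (4 << bwl)) << 4;  /* 320 */
  const int spel_right = spel_left - (1 << 4);               /* 304 */
  /* A column MV may therefore reach 320/16 = 20 pixels beyond the left
   * edge and 19 pixels beyond the right edge before being clamped. */
  (void)spel_left;
  (void)spel_right;
}
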
+// TODO(jkoleszar): In principle, nothing has to depend on this, but it's
+// currently required. Some users look at the mi->bmi, some look at the
+// xd->bmi.
+static void duplicate_splitmv_bmi(MACROBLOCKD *xd) {
+ int i;
+
+ for (i = 0; i < 16; i += 2) {
+ xd->block[i + 0].bmi = xd->mode_info_context->bmi[i + 0];
+ xd->block[i + 1].bmi = xd->mode_info_context->bmi[i + 1];
+ }
+}
+
struct build_inter_predictors_args {
MACROBLOCKD *xd;
- uint8_t* dst[MAX_MB_PLANE];
- int dst_stride[MAX_MB_PLANE];
int x;
int y;
+ uint8_t* dst[MAX_MB_PLANE];
+ int dst_stride[MAX_MB_PLANE];
+ uint8_t* pre[2][MAX_MB_PLANE];
+ int pre_stride[2][MAX_MB_PLANE];
};
static void build_inter_predictors(int plane, int block,
BLOCK_SIZE_TYPE bsize,
int pred_w, int pred_h,
void *argv) {
const struct build_inter_predictors_args* const arg = argv;
- const int bwl = pred_w, bw = 4 << bwl;
- const int bhl = pred_h, bh = 4 << bhl;
+ MACROBLOCKD * const xd = arg->xd;
+ const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
+ const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y;
+ const int bh = 4 << bhl, bw = 4 << bwl;
const int x_idx = block & ((1 << bwl) - 1), y_idx = block >> bwl;
const int x = x_idx * 4, y = y_idx * 4;
- MACROBLOCKD * const xd = arg->xd;
const int use_second_ref = xd->mode_info_context->mbmi.second_ref_frame > 0;
int which_mv;
+ assert(x < bw);
+ assert(y < bh);
+ assert(xd->mode_info_context->mbmi.mode == SPLITMV || 4 << pred_w == bw);
+ assert(xd->mode_info_context->mbmi.mode == SPLITMV || 4 << pred_h == bh);
+
for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
- const MV* const mv = (xd->mode_info_context->mbmi.mode == SPLITMV)
- ? &xd->block[block].bmi.as_mv[which_mv].as_mv
- : &xd->mode_info_context->mbmi.mv[which_mv].as_mv;
-
- const uint8_t * const base_pre = which_mv ? xd->second_pre.y_buffer
- : xd->pre.y_buffer;
- const int pre_stride = which_mv ? xd->second_pre.y_stride
- : xd->pre.y_stride;
+ // source
+ const uint8_t * const base_pre = arg->pre[which_mv][plane];
+ const int pre_stride = arg->pre_stride[which_mv][plane];
const uint8_t *const pre = base_pre +
scaled_buffer_offset(x, y, pre_stride, &xd->scale_factor[which_mv]);
struct scale_factors * const scale =
plane == 0 ? &xd->scale_factor[which_mv] : &xd->scale_factor_uv[which_mv];
+ // dest
+ uint8_t *const dst = arg->dst[plane] + arg->dst_stride[plane] * y + x;
+
+ // motion vector
+ const MV *mv;
+ MV split_chroma_mv;
int_mv clamped_mv;
+
+ if (xd->mode_info_context->mbmi.mode == SPLITMV) {
+ if (plane == 0) {
+ mv = &xd->block[block].bmi.as_mv[which_mv].as_mv;
+ } else {
+ const int y_block = (block & 2) * 4 + (block & 1) * 2;
+ split_chroma_mv.row = mi_mv_pred_row_q4(xd, y_block, which_mv);
+ split_chroma_mv.col = mi_mv_pred_col_q4(xd, y_block, which_mv);
+ mv = &split_chroma_mv;
+ }
+ } else {
+ mv = &xd->mode_info_context->mbmi.mv[which_mv].as_mv;
+ }
+
+ /* TODO(jkoleszar): This clamping is done in the incorrect place for the
+ * scaling case. It needs to be done on the scaled MV, not the pre-scaling
+ * MV. Note however that it performs the subsampling aware scaling so
+ * that the result is always q4.
+ */
clamped_mv.as_mv = clamp_mv_to_umv_border_sb(mv, bwl, bhl,
+ xd->plane[plane].subsampling_x,
+ xd->plane[plane].subsampling_y,
xd->mb_to_left_edge,
xd->mb_to_top_edge,
xd->mb_to_right_edge,
xd->mb_to_bottom_edge);
-
scale->set_scaled_offsets(scale, arg->y + y, arg->x + x);
- vp9_build_inter_predictor(pre, pre_stride,
- arg->dst[plane], arg->dst_stride[plane],
- &clamped_mv, &xd->scale_factor[which_mv],
- bw, bh, which_mv, &xd->subpix);
+ vp9_build_inter_predictor_q4(pre, pre_stride,
+ dst, arg->dst_stride[plane],
+ &clamped_mv, &xd->scale_factor[which_mv],
+ 4 << pred_w, 4 << pred_h, which_mv,
+ &xd->subpix);
}
}
void vp9_build_inter_predictors_sby(MACROBLOCKD *xd,
- uint8_t *dst_y,
- int dst_ystride,
int mb_row,
int mb_col,
BLOCK_SIZE_TYPE bsize) {
struct build_inter_predictors_args args = {
- xd, {dst_y, NULL, NULL}, {dst_ystride, 0, 0}, mb_col * 16, mb_row * 16
+ xd, mb_col * 16, mb_row * 16,
+ {xd->plane[0].dst.buf, NULL, NULL}, {xd->plane[0].dst.stride, 0, 0},
+ {{xd->plane[0].pre[0].buf, NULL, NULL},
+ {xd->plane[0].pre[1].buf, NULL, NULL}},
+ {{xd->plane[0].pre[0].stride, 0, 0}, {xd->plane[0].pre[1].stride, 0, 0}},
};
+
+ // TODO(jkoleszar): This is a hack no matter where you put it, but does it
+ // belong here?
+ if (xd->mode_info_context->mbmi.mode == SPLITMV)
+ duplicate_splitmv_bmi(xd);
+
foreach_predicted_block_in_plane(xd, bsize, 0, build_inter_predictors, &args);
}
+void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
+ int mb_row,
+ int mb_col,
+ BLOCK_SIZE_TYPE bsize) {
+ struct build_inter_predictors_args args = {
+ xd, mb_col * 16, mb_row * 16,
+ {NULL, xd->plane[1].dst.buf, xd->plane[2].dst.buf},
+ {0, xd->plane[1].dst.stride, xd->plane[1].dst.stride},
+ {{NULL, xd->plane[1].pre[0].buf, xd->plane[2].pre[0].buf},
+ {NULL, xd->plane[1].pre[1].buf, xd->plane[2].pre[1].buf}},
+ {{0, xd->plane[1].pre[0].stride, xd->plane[1].pre[0].stride},
+ {0, xd->plane[1].pre[1].stride, xd->plane[1].pre[1].stride}},
+ };
+ foreach_predicted_block_uv(xd, bsize, build_inter_predictors, &args);
+}
+void vp9_build_inter_predictors_sb(MACROBLOCKD *xd,
+ int mb_row, int mb_col,
+ BLOCK_SIZE_TYPE bsize) {
+#if CONFIG_COMP_INTERINTRA_PRED
+ uint8_t *const y = xd->plane[0].dst.buf;
+ uint8_t *const u = xd->plane[1].dst.buf;
+ uint8_t *const v = xd->plane[2].dst.buf;
+ const int y_stride = xd->plane[0].dst.stride;
+ const int uv_stride = xd->plane[1].dst.stride;
+#endif
+
+ vp9_build_inter_predictors_sby(xd, mb_row, mb_col, bsize);
+ vp9_build_inter_predictors_sbuv(xd, mb_row, mb_col, bsize);
+
+#if CONFIG_COMP_INTERINTRA_PRED
+ if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
+ if (bsize == BLOCK_SIZE_SB32X32)
+ vp9_build_interintra_32x32_predictors_sb(xd, y, u, v,
+ y_stride, uv_stride);
+ else
+ vp9_build_interintra_64x64_predictors_sb(xd, y, u, v,
+ y_stride, uv_stride);
+ }
#endif
+}
+#endif // !CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
#define AVERAGE_WEIGHT (1 << (2 * CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT))
#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
+static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
+ /* If the MV points so far into the UMV border that no visible pixels
+ * are used for reconstruction, the subpel part of the MV can be
+ * discarded and the MV limited to 16 pixels with equivalent results.
+ *
+ * This limit kicks in at 19 pixels for the top and left edges, for
+ * the 16 pixels plus 3 taps right of the central pixel when subpel
+ * filtering. The bottom and right edges use 16 pixels plus 2 pixels
+ * left of the central pixel when filtering.
+ */
+ if (mv->col < (xd->mb_to_left_edge - ((16 + VP9_INTERP_EXTEND) << 3)))
+ mv->col = xd->mb_to_left_edge - (16 << 3);
+ else if (mv->col > xd->mb_to_right_edge + ((15 + VP9_INTERP_EXTEND) << 3))
+ mv->col = xd->mb_to_right_edge + (16 << 3);
+
+ if (mv->row < (xd->mb_to_top_edge - ((16 + VP9_INTERP_EXTEND) << 3)))
+ mv->row = xd->mb_to_top_edge - (16 << 3);
+ else if (mv->row > xd->mb_to_bottom_edge + ((15 + VP9_INTERP_EXTEND) << 3))
+ mv->row = xd->mb_to_bottom_edge + (16 << 3);
+}
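To make the clamp arithmetic concrete, a worked example follows; this is a sketch only, with VP9_INTERP_EXTEND assumed to be 3 so the numbers line up with the 19-pixel figure in the comment above (all quantities are in 1/8-pel units, hence the << 3 shifts).

/* Sketch: mb_col = 2  =>  mb_to_left_edge = -(2 * 16) << 3 = -256.
 * Clamp threshold: -256 - ((16 + 3) << 3) = -408.
 * An mv->col of -420 trips the first branch and is clamped to
 * -256 - (16 << 3) = -384, i.e. exactly 16 full pixels into the
 * border, with the subpel bits discarded. */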
+
// Whether to use implicit weighting for UV
#define USE_IMPLICIT_WEIGHT_UV
@@ -624,8 +670,8 @@ void vp9_build_inter_predictors_sby(MACROBLOCKD *xd,
static int64_t get_consistency_metric(MACROBLOCKD *xd,
uint8_t *tmp_y, int tmp_ystride) {
int block_size = 16 << xd->mode_info_context->mbmi.sb_type;
- uint8_t *rec_y = xd->dst.y_buffer;
- int rec_ystride = xd->dst.y_stride;
+ uint8_t *rec_y = xd->plane[0].dst.buf;
+ int rec_ystride = xd->plane[0].dst.stride;
int64_t metric = 0;
int i;
if (xd->up_available) {
@@ -844,8 +890,8 @@ static int get_implicit_compoundinter_weight(MACROBLOCKD *xd,
edge[3] = xd->mb_to_right_edge;
clamp_mvs = xd->mode_info_context->mbmi.need_to_clamp_secondmv;
- base_pre = xd->second_pre.y_buffer;
- pre_stride = xd->second_pre.y_stride;
+ base_pre = xd->plane[0].pre[1].buf;
+ pre_stride = xd->plane[0].pre[1].stride;
ymv.as_int = xd->mode_info_context->mbmi.mv[1].as_int;
// First generate the second predictor
scale = &xd->scale_factor[1];
@@ -880,8 +926,8 @@ static int get_implicit_compoundinter_weight(MACROBLOCKD *xd,
metric_2 = get_consistency_metric(xd, tmp_y, tmp_ystride);
clamp_mvs = xd->mode_info_context->mbmi.need_to_clamp_mvs;
- base_pre = xd->pre.y_buffer;
- pre_stride = xd->pre.y_stride;
+ base_pre = xd->plane[0].pre[0].buf;
+ pre_stride = xd->plane[0].pre[0].stride;
ymv.as_int = xd->mode_info_context->mbmi.mv[0].as_int;
// Now generate the first predictor
scale = &xd->scale_factor[0];
@@ -933,8 +979,8 @@ static void build_inter16x16_predictors_mby_w(MACROBLOCKD *xd,
xd->mode_info_context->mbmi.need_to_clamp_secondmv :
xd->mode_info_context->mbmi.need_to_clamp_mvs;
- uint8_t *base_pre = which_mv ? xd->second_pre.y_buffer : xd->pre.y_buffer;
- int pre_stride = which_mv ? xd->second_pre.y_stride : xd->pre.y_stride;
+ uint8_t *base_pre = xd->plane[0].pre[which_mv].buf;
+ int pre_stride = xd->plane[0].pre[which_mv].stride;
int_mv ymv;
struct scale_factors *scale = &xd->scale_factor[which_mv];
@@ -950,9 +996,7 @@ static void build_inter16x16_predictors_mby_w(MACROBLOCKD *xd,
which_mv ? weight : 0, &xd->subpix);
}
}
-#endif
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
static void build_inter16x16_predictors_mbuv_w(MACROBLOCKD *xd,
uint8_t *dst_u,
uint8_t *dst_v,
@@ -968,8 +1012,8 @@ static void build_inter16x16_predictors_mbuv_w(MACROBLOCKD *xd,
which_mv ? xd->mode_info_context->mbmi.need_to_clamp_secondmv
: xd->mode_info_context->mbmi.need_to_clamp_mvs;
uint8_t *uptr, *vptr;
- int pre_stride = which_mv ? xd->second_pre.uv_stride
- : xd->pre.uv_stride;
+ int pre_stride = which_mv ? xd->plane[1].pre[1].stride
+ : xd->plane[1].pre[0].stride;
int_mv mv;
struct scale_factors *scale = &xd->scale_factor_uv[which_mv];
@@ -979,8 +1023,8 @@ static void build_inter16x16_predictors_mbuv_w(MACROBLOCKD *xd,
if (clamp_mvs)
clamp_mv_to_umv_border(&mv.as_mv, xd);
- uptr = (which_mv ? xd->second_pre.u_buffer : xd->pre.u_buffer);
- vptr = (which_mv ? xd->second_pre.v_buffer : xd->pre.v_buffer);
+ uptr = (which_mv ? xd->plane[1].pre[1].buf : xd->plane[1].pre[0].buf);
+ vptr = (which_mv ? xd->plane[2].pre[1].buf : xd->plane[2].pre[0].buf);
scale->set_scaled_offsets(scale, mb_row * 16, mb_col * 16);
@@ -993,68 +1037,6 @@ static void build_inter16x16_predictors_mbuv_w(MACROBLOCKD *xd,
scale, 8, 8, which_mv ? weight : 0, &xd->subpix);
}
}
-
-void vp9_build_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
- int mb_row,
- int mb_col) {
-#ifdef USE_IMPLICIT_WEIGHT_UV
- int weight = get_implicit_compoundinter_weight(xd, mb_row, mb_col);
-#else
- int weight = AVERAGE_WEIGHT;
-#endif
- build_inter16x16_predictors_mbuv_w(xd, dst_u, dst_v, dst_uvstride,
- weight, mb_row, mb_col);
-}
-
-#else
-
-void vp9_build_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
- int mb_row,
- int mb_col) {
- const int use_second_ref = xd->mode_info_context->mbmi.second_ref_frame > 0;
- int which_mv;
-
- for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
- const int clamp_mvs =
- which_mv ? xd->mode_info_context->mbmi.need_to_clamp_secondmv
- : xd->mode_info_context->mbmi.need_to_clamp_mvs;
- uint8_t *uptr, *vptr;
- int pre_stride = which_mv ? xd->second_pre.uv_stride
- : xd->pre.uv_stride;
- int_mv mv;
-
- struct scale_factors *scale = &xd->scale_factor_uv[which_mv];
- mv.as_int = xd->mode_info_context->mbmi.mv[which_mv].as_int;
-
-
- if (clamp_mvs)
- clamp_mv_to_umv_border(&mv.as_mv, xd);
-
- uptr = (which_mv ? xd->second_pre.u_buffer : xd->pre.u_buffer);
- vptr = (which_mv ? xd->second_pre.v_buffer : xd->pre.v_buffer);
-
- scale->set_scaled_offsets(scale, mb_row * 16, mb_col * 16);
-
- vp9_build_inter_predictor_q4(
- uptr, pre_stride, dst_u, dst_uvstride, &mv,
- scale, 8, 8,
- which_mv << (2 * CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT), &xd->subpix);
-
- vp9_build_inter_predictor_q4(
- vptr, pre_stride, dst_v, dst_uvstride, &mv,
- scale, 8, 8,
- which_mv << (2 * CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT), &xd->subpix);
- }
-}
-#endif
-
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
static void build_inter_predictors_sby_w(MACROBLOCKD *x,
uint8_t *dst_y,
int dst_ystride,
@@ -1064,8 +1046,8 @@ static void build_inter_predictors_sby_w(MACROBLOCKD *x,
BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize), bw = 1 << bwl;
const int bhl = mb_height_log2(bsize), bh = 1 << bhl;
- uint8_t *y1 = x->pre.y_buffer;
- uint8_t *y2 = x->second_pre.y_buffer;
+ uint8_t *y1 = x->plane[0].pre[0].buf;
+ uint8_t *y2 = x->plane[0].pre[1].buf;
int edge[4], n;
edge[0] = x->mb_to_top_edge;
@@ -1081,15 +1063,15 @@ static void build_inter_predictors_sby_w(MACROBLOCKD *x,
x->mb_to_left_edge = edge[2] - ((x_idx * 16) << 3);
x->mb_to_right_edge = edge[3] + (((bw - 1 - x_idx) * 16) << 3);
- x->pre.y_buffer = y1 + scaled_buffer_offset(x_idx * 16,
+ x->plane[0].pre[0].buf = y1 + scaled_buffer_offset(x_idx * 16,
y_idx * 16,
- x->pre.y_stride,
+ x->plane[0].pre[0].stride,
&x->scale_factor[0]);
if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- x->second_pre.y_buffer = y2 +
+ x->plane[0].pre[1].buf = y2 +
scaled_buffer_offset(x_idx * 16,
y_idx * 16,
- x->second_pre.y_stride,
+ x->plane[0].pre[1].stride,
&x->scale_factor[1]);
}
build_inter16x16_predictors_mby_w(x,
@@ -1101,25 +1083,24 @@ static void build_inter_predictors_sby_w(MACROBLOCKD *x,
x->mb_to_left_edge = edge[2];
x->mb_to_right_edge = edge[3];
- x->pre.y_buffer = y1;
+ x->plane[0].pre[0].buf = y1;
if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- x->second_pre.y_buffer = y2;
+ x->plane[0].pre[1].buf = y2;
}
}
void vp9_build_inter_predictors_sby(MACROBLOCKD *x,
- uint8_t *dst_y,
- int dst_ystride,
- int mb_row,
- int mb_col,
- BLOCK_SIZE_TYPE bsize) {
+ int mb_row,
+ int mb_col,
+ BLOCK_SIZE_TYPE bsize) {
+ uint8_t * const dst_y = x->plane[0].dst.buf;
+ const int dst_ystride = x->plane[0].dst.stride;
+
int weight = get_implicit_compoundinter_weight(x, mb_row, mb_col);
build_inter_predictors_sby_w(x, dst_y, dst_ystride, weight,
mb_row, mb_col, bsize);
}
-#endif
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
static void build_inter_predictors_sbuv_w(MACROBLOCKD *x,
uint8_t *dst_u,
uint8_t *dst_v,
@@ -1130,8 +1111,8 @@ static void build_inter_predictors_sbuv_w(MACROBLOCKD *x,
BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize), bw = 1 << bwl;
const int bhl = mb_height_log2(bsize), bh = 1 << bhl;
- uint8_t *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
- uint8_t *u2 = x->second_pre.u_buffer, *v2 = x->second_pre.v_buffer;
+ uint8_t *u1 = x->plane[1].pre[0].buf, *v1 = x->plane[2].pre[0].buf;
+ uint8_t *u2 = x->plane[1].pre[1].buf, *v2 = x->plane[2].pre[1].buf;
int edge[4], n;
edge[0] = x->mb_to_top_edge;
@@ -1150,18 +1131,18 @@ static void build_inter_predictors_sbuv_w(MACROBLOCKD *x,
scaled_uv_offset = scaled_buffer_offset(x_idx * 8,
y_idx * 8,
- x->pre.uv_stride,
+ x->plane[1].pre[0].stride,
&x->scale_factor_uv[0]);
- x->pre.u_buffer = u1 + scaled_uv_offset;
- x->pre.v_buffer = v1 + scaled_uv_offset;
+ x->plane[1].pre[0].buf = u1 + scaled_uv_offset;
+ x->plane[2].pre[0].buf = v1 + scaled_uv_offset;
if (x->mode_info_context->mbmi.second_ref_frame > 0) {
scaled_uv_offset = scaled_buffer_offset(x_idx * 8,
y_idx * 8,
- x->second_pre.uv_stride,
+ x->plane[1].pre[1].stride,
&x->scale_factor_uv[1]);
- x->second_pre.u_buffer = u2 + scaled_uv_offset;
- x->second_pre.v_buffer = v2 + scaled_uv_offset;
+ x->plane[1].pre[1].buf = u2 + scaled_uv_offset;
+ x->plane[2].pre[1].buf = v2 + scaled_uv_offset;
}
build_inter16x16_predictors_mbuv_w(x,
@@ -1174,22 +1155,23 @@ static void build_inter_predictors_sbuv_w(MACROBLOCKD *x,
x->mb_to_left_edge = edge[2];
x->mb_to_right_edge = edge[3];
- x->pre.u_buffer = u1;
- x->pre.v_buffer = v1;
+ x->plane[1].pre[0].buf = u1;
+ x->plane[2].pre[0].buf = v1;
if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- x->second_pre.u_buffer = u2;
- x->second_pre.v_buffer = v2;
+ x->plane[1].pre[1].buf = u2;
+ x->plane[2].pre[1].buf = v2;
}
}
void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
int mb_row,
int mb_col,
BLOCK_SIZE_TYPE bsize) {
+ uint8_t *const dst_u = xd->plane[1].dst.buf;
+ uint8_t *const dst_v = xd->plane[2].dst.buf;
+ const int dst_uvstride = xd->plane[1].dst.stride;
+
#ifdef USE_IMPLICIT_WEIGHT_UV
int weight = get_implicit_compoundinter_weight(xd, mb_row, mb_col);
#else
@@ -1199,86 +1181,24 @@ void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
weight, mb_row, mb_col, bsize);
}
-#else
-
-void vp9_build_inter_predictors_sbuv(MACROBLOCKD *x,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
- int mb_row,
- int mb_col,
- BLOCK_SIZE_TYPE bsize) {
- const int bwl = mb_width_log2(bsize), bw = 1 << bwl;
- const int bhl = mb_height_log2(bsize), bh = 1 << bhl;
- uint8_t *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
- uint8_t *u2 = x->second_pre.u_buffer, *v2 = x->second_pre.v_buffer;
- int edge[4], n;
-
- edge[0] = x->mb_to_top_edge;
- edge[1] = x->mb_to_bottom_edge;
- edge[2] = x->mb_to_left_edge;
- edge[3] = x->mb_to_right_edge;
-
- for (n = 0; n < bw * bh; n++) {
- int scaled_uv_offset;
- const int x_idx = n & (bw - 1), y_idx = n >> bwl;
-
- x->mb_to_top_edge = edge[0] - ((y_idx * 16) << 3);
- x->mb_to_bottom_edge = edge[1] + (((bh - 1 - y_idx) * 16) << 3);
- x->mb_to_left_edge = edge[2] - ((x_idx * 16) << 3);
- x->mb_to_right_edge = edge[3] + (((bw - 1 - x_idx) * 16) << 3);
-
- scaled_uv_offset = scaled_buffer_offset(x_idx * 8,
- y_idx * 8,
- x->pre.uv_stride,
- &x->scale_factor_uv[0]);
- x->pre.u_buffer = u1 + scaled_uv_offset;
- x->pre.v_buffer = v1 + scaled_uv_offset;
-
- if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- scaled_uv_offset = scaled_buffer_offset(x_idx * 8,
- y_idx * 8,
- x->second_pre.uv_stride,
- &x->scale_factor_uv[1]);
- x->second_pre.u_buffer = u2 + scaled_uv_offset;
- x->second_pre.v_buffer = v2 + scaled_uv_offset;
- }
-
- vp9_build_inter16x16_predictors_mbuv(x,
- dst_u + y_idx * 8 * dst_uvstride + x_idx * 8,
- dst_v + y_idx * 8 * dst_uvstride + x_idx * 8,
- dst_uvstride, mb_row + y_idx, mb_col + x_idx);
- }
- x->mb_to_top_edge = edge[0];
- x->mb_to_bottom_edge = edge[1];
- x->mb_to_left_edge = edge[2];
- x->mb_to_right_edge = edge[3];
-
- x->pre.u_buffer = u1;
- x->pre.v_buffer = v1;
-
- if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- x->second_pre.u_buffer = u2;
- x->second_pre.v_buffer = v2;
- }
-}
-#endif
-
void vp9_build_inter_predictors_sb(MACROBLOCKD *mb,
int mb_row, int mb_col,
BLOCK_SIZE_TYPE bsize) {
- uint8_t *const y = mb->dst.y_buffer;
- uint8_t *const u = mb->dst.u_buffer;
- uint8_t *const v = mb->dst.v_buffer;
- const int y_stride = mb->dst.y_stride;
- const int uv_stride = mb->dst.uv_stride;
-
- vp9_build_inter_predictors_sby(mb, y, y_stride, mb_row, mb_col, bsize);
- vp9_build_inter_predictors_sbuv(mb, u, v, uv_stride, mb_row, mb_col, bsize);
+#if CONFIG_COMP_INTERINTRA_PRED
+ uint8_t *const y = mb->plane[0].dst.buf;
+ uint8_t *const u = mb->plane[1].dst.buf;
+ uint8_t *const v = mb->plane[2].dst.buf;
+ const int y_stride = mb->plane[0].dst.stride;
+ const int uv_stride = mb->plane[1].dst.stride;
+#endif
+
+ vp9_build_inter_predictors_sby(mb, mb_row, mb_col, bsize);
+ vp9_build_inter_predictors_sbuv(mb, mb_row, mb_col, bsize);
+
#if CONFIG_COMP_INTERINTRA_PRED
if (mb->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
if (bsize == BLOCK_SIZE_SB32X32)
- vp9_build_interintra_32x32_predictors_sb(mb, y, u, v,
+ vp9_build_interintra_32x32_predictors_sb(mb, y, u, v,
y_stride, uv_stride);
else
vp9_build_interintra_64x64_predictors_sb(mb, y, u, v,
@@ -1286,79 +1206,10 @@ void vp9_build_inter_predictors_sb(MACROBLOCKD *mb,
}
#endif
}
-
-static void build_inter4x4_predictors_mb(MACROBLOCKD *xd,
- int mb_row, int mb_col) {
- int i;
- MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
- BLOCKD *blockd = xd->block;
- int which_mv = 0;
- const int use_second_ref = mbmi->second_ref_frame > 0;
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT && defined(USE_IMPLICIT_WEIGHT_SPLITMV)
- int weight = get_implicit_compoundinter_weight_splitmv(xd, mb_row, mb_col);
-#else
- int weight = AVERAGE_WEIGHT;
-#endif
-
- if (xd->mode_info_context->mbmi.partitioning != PARTITIONING_4X4) {
- for (i = 0; i < 16; i += 8) {
- BLOCKD *d0 = &blockd[i];
- BLOCKD *d1 = &blockd[i + 2];
- const int y = i & 8;
-
- blockd[i + 0].bmi = xd->mode_info_context->bmi[i + 0];
- blockd[i + 2].bmi = xd->mode_info_context->bmi[i + 2];
-
- for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
- if (mbmi->need_to_clamp_mvs) {
- clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv[which_mv].as_mv, xd);
- clamp_mv_to_umv_border(&blockd[i + 2].bmi.as_mv[which_mv].as_mv, xd);
- }
-
- build_2x1_inter_predictor(d0, d1, xd->scale_factor, 8, 16, which_mv,
- which_mv ? weight : 0,
- &xd->subpix, mb_row * 16 + y, mb_col * 16);
- }
- }
- } else {
- for (i = 0; i < 16; i += 2) {
- BLOCKD *d0 = &blockd[i];
- BLOCKD *d1 = &blockd[i + 1];
- const int x = (i & 3) * 4;
- const int y = (i >> 2) * 4;
-
- blockd[i + 0].bmi = xd->mode_info_context->bmi[i + 0];
- blockd[i + 1].bmi = xd->mode_info_context->bmi[i + 1];
-
- for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
- build_2x1_inter_predictor(d0, d1, xd->scale_factor, 4, 16, which_mv,
- which_mv ? weight : 0,
- &xd->subpix,
- mb_row * 16 + y, mb_col * 16 + x);
- }
- }
- }
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-#if !defined(USE_IMPLICIT_WEIGHT_UV)
- weight = AVERAGE_WEIGHT;
-#endif
-#endif
- for (i = 16; i < 24; i += 2) {
- BLOCKD *d0 = &blockd[i];
- BLOCKD *d1 = &blockd[i + 1];
- const int x = 4 * (i & 1);
- const int y = ((i - 16) >> 1) * 4;
-
- for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
- build_2x1_inter_predictor(d0, d1, xd->scale_factor_uv, 4, 8, which_mv,
- which_mv ? weight : 0, &xd->subpix,
- mb_row * 8 + y, mb_col * 8 + x);
- }
- }
-}
+#endif // CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
static INLINE int round_mv_comp(int value) {
- return (value < 0 ? value - 4 : value + 4) / 8;
+ return (value < 0 ? value - 2 : value + 2) / 4;
}
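The divisor change (8 to 4) turns the helper into a plain round-to-nearest average of the four summed components used by the callers below, rounding half away from zero. A quick check of the integer arithmetic (a sketch; it relies on C division truncating toward zero):

/* round_mv_comp(7)  == (7 + 2) / 4  ==  2    (1.75  -> 2)
 * round_mv_comp(-7) == (-7 - 2) / 4 == -2    (-1.75 -> -2)
 * round_mv_comp(6)  == (6 + 2) / 4  ==  2    (1.5   -> 2) */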
static int mi_mv_pred_row(MACROBLOCKD *mb, int off, int idx) {
@@ -1377,128 +1228,9 @@ static int mi_mv_pred_col(MACROBLOCKD *mb, int off, int idx) {
return round_mv_comp(temp);
}
-static int b_mv_pred_row(MACROBLOCKD *mb, int off, int idx) {
- BLOCKD *const blockd = mb->block;
- const int temp = blockd[off + 0].bmi.as_mv[idx].as_mv.row +
- blockd[off + 1].bmi.as_mv[idx].as_mv.row +
- blockd[off + 4].bmi.as_mv[idx].as_mv.row +
- blockd[off + 5].bmi.as_mv[idx].as_mv.row;
- return round_mv_comp(temp);
-}
-
-static int b_mv_pred_col(MACROBLOCKD *mb, int off, int idx) {
- BLOCKD *const blockd = mb->block;
- const int temp = blockd[off + 0].bmi.as_mv[idx].as_mv.col +
- blockd[off + 1].bmi.as_mv[idx].as_mv.col +
- blockd[off + 4].bmi.as_mv[idx].as_mv.col +
- blockd[off + 5].bmi.as_mv[idx].as_mv.col;
- return round_mv_comp(temp);
-}
-
-
-static void build_4x4uvmvs(MACROBLOCKD *xd) {
- int i, j;
- BLOCKD *blockd = xd->block;
-
- for (i = 0; i < 2; i++) {
- for (j = 0; j < 2; j++) {
- const int yoffset = i * 8 + j * 2;
- const int uoffset = 16 + i * 2 + j;
- const int voffset = 20 + i * 2 + j;
-
- MV *u = &blockd[uoffset].bmi.as_mv[0].as_mv;
- MV *v = &blockd[voffset].bmi.as_mv[0].as_mv;
- u->row = mi_mv_pred_row(xd, yoffset, 0);
- u->col = mi_mv_pred_col(xd, yoffset, 0);
-
- // if (x->mode_info_context->mbmi.need_to_clamp_mvs)
- clamp_uvmv_to_umv_border(u, xd);
-
- // if (x->mode_info_context->mbmi.need_to_clamp_mvs)
- clamp_uvmv_to_umv_border(u, xd);
-
- v->row = u->row;
- v->col = u->col;
-
- if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
- u = &blockd[uoffset].bmi.as_mv[1].as_mv;
- v = &blockd[voffset].bmi.as_mv[1].as_mv;
- u->row = mi_mv_pred_row(xd, yoffset, 1);
- u->col = mi_mv_pred_col(xd, yoffset, 1);
-
- // if (mbmi->need_to_clamp_mvs)
- clamp_uvmv_to_umv_border(u, xd);
-
- // if (mbmi->need_to_clamp_mvs)
- clamp_uvmv_to_umv_border(u, xd);
-
- v->row = u->row;
- v->col = u->col;
- }
- }
- }
-}
-
-void vp9_build_inter_predictors_mb(MACROBLOCKD *xd,
- int mb_row,
- int mb_col) {
- if (xd->mode_info_context->mbmi.mode != SPLITMV) {
- vp9_build_inter_predictors_sb(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
- } else {
- build_4x4uvmvs(xd);
- build_inter4x4_predictors_mb(xd, mb_row, mb_col);
- }
-}
-
/*encoder only*/
void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
int mb_row, int mb_col) {
- int i, j, weight;
- BLOCKD *const blockd = xd->block;
-
- /* build uv mvs */
- for (i = 0; i < 2; i++) {
- for (j = 0; j < 2; j++) {
- const int yoffset = i * 8 + j * 2;
- const int uoffset = 16 + i * 2 + j;
- const int voffset = 20 + i * 2 + j;
-
- MV *u = &blockd[uoffset].bmi.as_mv[0].as_mv;
- MV *v = &blockd[voffset].bmi.as_mv[0].as_mv;
-
- v->row = u->row = b_mv_pred_row(xd, yoffset, 0);
- v->col = u->col = b_mv_pred_col(xd, yoffset, 0);
-
- if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
- u = &blockd[uoffset].bmi.as_mv[1].as_mv;
- v = &blockd[voffset].bmi.as_mv[1].as_mv;
-
- v->row = u->row = b_mv_pred_row(xd, yoffset, 1);
- v->col = u->col = b_mv_pred_col(xd, yoffset, 1);
- }
- }
- }
-
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT && \
- defined(USE_IMPLICIT_WEIGHT_SPLITMV) && \
- defined(USE_IMPLICIT_WEIGHT_UV)
- weight = get_implicit_compoundinter_weight_splitmv(xd, mb_row, mb_col);
-#else
- weight = AVERAGE_WEIGHT;
-#endif
- for (i = 16; i < 24; i += 2) {
- const int use_second_ref = xd->mode_info_context->mbmi.second_ref_frame > 0;
- const int x = 4 * (i & 1);
- const int y = ((i - 16) >> 1) * 4;
-
- int which_mv;
- BLOCKD *d0 = &blockd[i];
- BLOCKD *d1 = &blockd[i + 1];
-
- for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
- build_2x1_inter_predictor(d0, d1, xd->scale_factor_uv, 4, 8, which_mv,
- which_mv ? weight : 0,
- &xd->subpix, mb_row * 8 + y, mb_col * 8 + x);
- }
- }
+ vp9_build_inter_predictors_sbuv(xd, mb_row, mb_col,
+ BLOCK_SIZE_MB16X16);
}
diff --git a/vp9/common/vp9_reconinter.h b/vp9/common/vp9_reconinter.h
index 38981e9c1..51b705f71 100644
--- a/vp9/common/vp9_reconinter.h
+++ b/vp9/common/vp9_reconinter.h
@@ -16,24 +16,12 @@
struct subpix_fn_table;
-void vp9_build_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
- int mb_row,
- int mb_col);
-
-void vp9_build_inter_predictors_sby(MACROBLOCKD *x,
- uint8_t *dst_y,
- int dst_ystride,
+void vp9_build_inter_predictors_sby(MACROBLOCKD *xd,
int mb_row,
int mb_col,
BLOCK_SIZE_TYPE bsize);
-void vp9_build_inter_predictors_sbuv(MACROBLOCKD *x,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
+void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
int mb_row,
int mb_col,
BLOCK_SIZE_TYPE bsize);
@@ -42,14 +30,6 @@ void vp9_build_inter_predictors_sb(MACROBLOCKD *mb,
int mb_row, int mb_col,
BLOCK_SIZE_TYPE bsize);
-void vp9_build_inter_predictors_mb(MACROBLOCKD *xd,
- int mb_row,
- int mb_col);
-
-void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
- int mb_row,
- int mb_col);
-
void vp9_setup_interp_filters(MACROBLOCKD *xd,
INTERPOLATIONFILTERTYPE filter,
VP9_COMMON *cm);
@@ -91,8 +71,68 @@ static int scaled_buffer_offset(int x_offset,
int y_offset,
int stride,
const struct scale_factors *scale) {
- return scale->scale_value_y(y_offset, scale) * stride +
- scale->scale_value_x(x_offset, scale);
+ if (scale)
+ return scale->scale_value_y(y_offset, scale) * stride +
+ scale->scale_value_x(x_offset, scale);
+ return y_offset * stride + x_offset;
+}
+
+static void setup_pred_plane(struct buf_2d *dst,
+ uint8_t *src, int stride,
+ int mb_row, int mb_col,
+ const struct scale_factors *scale,
+ int subsampling_x, int subsampling_y) {
+ const int x = (16 * mb_col) >> subsampling_x;
+ const int y = (16 * mb_row) >> subsampling_y;
+ dst->buf = src + scaled_buffer_offset(x, y, stride, scale);
+ dst->stride = stride;
+}
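A small worked example of the offset arithmetic may help; the values are hypothetical, and scale == NULL exercises the new unscaled path added above.

/* Unscaled 4:2:0 chroma plane (subsampling_x == subsampling_y == 1),
 * mb_row = 3, mb_col = 5, stride = 320:
 *   x = (16 * 5) >> 1 = 40,  y = (16 * 3) >> 1 = 24
 *   dst->buf = src + 24 * 320 + 40
 * With a non-NULL scale, x and y are first mapped through
 * scale->scale_value_x/y() before the same stride arithmetic. */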
+
+// TODO(jkoleszar): audit all uses of this that don't set mb_row, mb_col
+static void setup_dst_planes(MACROBLOCKD *xd,
+ const YV12_BUFFER_CONFIG *src,
+ int mb_row, int mb_col) {
+ setup_pred_plane(&xd->plane[0].dst,
+ src->y_buffer, src->y_stride,
+ mb_row, mb_col, NULL,
+ xd->plane[0].subsampling_x, xd->plane[0].subsampling_y);
+ setup_pred_plane(&xd->plane[1].dst,
+ src->u_buffer, src->uv_stride,
+ mb_row, mb_col, NULL,
+ xd->plane[1].subsampling_x, xd->plane[1].subsampling_y);
+ setup_pred_plane(&xd->plane[2].dst,
+ src->v_buffer, src->uv_stride,
+ mb_row, mb_col, NULL,
+ xd->plane[2].subsampling_x, xd->plane[2].subsampling_y);
+}
+
+static void setup_pre_planes(MACROBLOCKD *xd,
+ const YV12_BUFFER_CONFIG *src0,
+ const YV12_BUFFER_CONFIG *src1,
+ int mb_row, int mb_col,
+ const struct scale_factors *scale,
+ const struct scale_factors *scale_uv) {
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ const YV12_BUFFER_CONFIG *src = i ? src1 : src0;
+
+ if (!src)
+ continue;
+
+ setup_pred_plane(&xd->plane[0].pre[i],
+ src->y_buffer, src->y_stride,
+ mb_row, mb_col, scale ? scale + i : NULL,
+ xd->plane[0].subsampling_x, xd->plane[0].subsampling_y);
+ setup_pred_plane(&xd->plane[1].pre[i],
+ src->u_buffer, src->uv_stride,
+ mb_row, mb_col, scale_uv ? scale_uv + i : NULL,
+ xd->plane[1].subsampling_x, xd->plane[1].subsampling_y);
+ setup_pred_plane(&xd->plane[2].pre[i],
+ src->v_buffer, src->uv_stride,
+ mb_row, mb_col, scale_uv ? scale_uv + i : NULL,
+ xd->plane[2].subsampling_x, xd->plane[2].subsampling_y);
+ }
}
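For orientation, this is the call pattern the decoder adopts later in this change: each reference is wired up in a separate call, with NULL skipping the slot that is not being set up.

/* First reference:
 *   setup_pre_planes(xd, &cm->yv12_fb[ref_fb_idx], NULL,
 *                    mb_row, mb_col, xd->scale_factor, xd->scale_factor_uv);
 * Second reference, when present (NULL leaves pre[0] untouched):
 *   setup_pre_planes(xd, NULL, &cm->yv12_fb[second_ref_fb_idx],
 *                    mb_row, mb_col, xd->scale_factor, xd->scale_factor_uv);
 */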
static void setup_pred_block(YV12_BUFFER_CONFIG *dst,
diff --git a/vp9/common/vp9_reconintra.c b/vp9/common/vp9_reconintra.c
index 88c3f191e..6caebd784 100644
--- a/vp9/common/vp9_reconintra.c
+++ b/vp9/common/vp9_reconintra.c
@@ -269,15 +269,6 @@ static void corner_predictor(uint8_t *ypred_ptr, int y_stride, int n,
}
}
-void vp9_recon_intra_mbuv(MACROBLOCKD *xd) {
- int i;
- for (i = 16; i < 24; i += 2) {
- BLOCKD *b = &xd->block[i];
- vp9_recon2b(*(b->base_dst) + b->dst, b->diff,
- *(b->base_dst) + b->dst, b->dst_stride);
- }
-}
-
static INLINE int log2_minus_1(int n) {
switch (n) {
case 4: return 1;
@@ -609,7 +600,7 @@ void vp9_build_interintra_16x16_predictors_mby(MACROBLOCKD *xd,
int ystride) {
uint8_t intrapredictor[256];
vp9_build_intra_predictors(
- xd->dst.y_buffer, xd->dst.y_stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
intrapredictor, 16,
xd->mode_info_context->mbmi.interintra_mode, 16, 16,
xd->up_available, xd->left_available, xd->right_available);
@@ -624,12 +615,12 @@ void vp9_build_interintra_16x16_predictors_mbuv(MACROBLOCKD *xd,
uint8_t uintrapredictor[64];
uint8_t vintrapredictor[64];
vp9_build_intra_predictors(
- xd->dst.u_buffer, xd->dst.uv_stride,
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
uintrapredictor, 8,
xd->mode_info_context->mbmi.interintra_uv_mode, 8, 8,
xd->up_available, xd->left_available, xd->right_available);
vp9_build_intra_predictors(
- xd->dst.v_buffer, xd->dst.uv_stride,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
vintrapredictor, 8,
xd->mode_info_context->mbmi.interintra_uv_mode, 8, 8,
xd->up_available, xd->left_available, xd->right_available);
@@ -644,7 +635,7 @@ void vp9_build_interintra_32x32_predictors_sby(MACROBLOCKD *xd,
int ystride) {
uint8_t intrapredictor[1024];
vp9_build_intra_predictors(
- xd->dst.y_buffer, xd->dst.y_stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
intrapredictor, 32,
xd->mode_info_context->mbmi.interintra_mode, 32, 32,
xd->up_available, xd->left_available, xd->right_available);
@@ -659,12 +650,12 @@ void vp9_build_interintra_32x32_predictors_sbuv(MACROBLOCKD *xd,
uint8_t uintrapredictor[256];
uint8_t vintrapredictor[256];
vp9_build_intra_predictors(
- xd->dst.u_buffer, xd->dst.uv_stride,
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
uintrapredictor, 16,
xd->mode_info_context->mbmi.interintra_uv_mode, 16, 16,
xd->up_available, xd->left_available, xd->right_available);
vp9_build_intra_predictors(
- xd->dst.v_buffer, xd->dst.uv_stride,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
vintrapredictor, 16,
xd->mode_info_context->mbmi.interintra_uv_mode, 16, 16,
xd->up_available, xd->left_available, xd->right_available);
@@ -689,7 +680,7 @@ void vp9_build_interintra_64x64_predictors_sby(MACROBLOCKD *xd,
int ystride) {
uint8_t intrapredictor[4096];
const int mode = xd->mode_info_context->mbmi.interintra_mode;
- vp9_build_intra_predictors(xd->dst.y_buffer, xd->dst.y_stride,
+ vp9_build_intra_predictors(xd->plane[0].dst.buf, xd->plane[0].dst.stride,
intrapredictor, 64, mode, 64, 64,
xd->up_available, xd->left_available,
xd->right_available);
@@ -704,11 +695,11 @@ void vp9_build_interintra_64x64_predictors_sbuv(MACROBLOCKD *xd,
uint8_t uintrapredictor[1024];
uint8_t vintrapredictor[1024];
const int mode = xd->mode_info_context->mbmi.interintra_uv_mode;
- vp9_build_intra_predictors(xd->dst.u_buffer, xd->dst.uv_stride,
+ vp9_build_intra_predictors(xd->plane[1].dst.buf, xd->plane[1].dst.stride,
uintrapredictor, 32, mode, 32, 32,
xd->up_available, xd->left_available,
xd->right_available);
- vp9_build_intra_predictors(xd->dst.v_buffer, xd->dst.uv_stride,
+ vp9_build_intra_predictors(xd->plane[2].dst.buf, xd->plane[1].dst.stride,
vintrapredictor, 32, mode, 32, 32,
xd->up_available, xd->left_available,
xd->right_available);
@@ -734,8 +725,8 @@ void vp9_build_intra_predictors_sby_s(MACROBLOCKD *xd,
const int bwl = b_width_log2(bsize), bw = 4 << bwl;
const int bhl = b_height_log2(bsize), bh = 4 << bhl;
- vp9_build_intra_predictors(xd->dst.y_buffer, xd->dst.y_stride,
- xd->dst.y_buffer, xd->dst.y_stride,
+ vp9_build_intra_predictors(xd->plane[0].dst.buf, xd->plane[0].dst.stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
xd->mode_info_context->mbmi.mode,
bw, bh,
xd->up_available, xd->left_available,
@@ -747,13 +738,13 @@ void vp9_build_intra_predictors_sbuv_s(MACROBLOCKD *xd,
const int bwl = b_width_log2(bsize) - 1, bw = 4 << bwl;
const int bhl = b_height_log2(bsize) - 1, bh = 4 << bhl;
- vp9_build_intra_predictors(xd->dst.u_buffer, xd->dst.uv_stride,
- xd->dst.u_buffer, xd->dst.uv_stride,
+ vp9_build_intra_predictors(xd->plane[1].dst.buf, xd->plane[1].dst.stride,
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
xd->mode_info_context->mbmi.uv_mode,
bw, bh, xd->up_available,
xd->left_available, xd->right_available);
- vp9_build_intra_predictors(xd->dst.v_buffer, xd->dst.uv_stride,
- xd->dst.v_buffer, xd->dst.uv_stride,
+ vp9_build_intra_predictors(xd->plane[2].dst.buf, xd->plane[1].dst.stride,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
xd->mode_info_context->mbmi.uv_mode,
bw, bh, xd->up_available,
xd->left_available, xd->right_available);
diff --git a/vp9/common/vp9_reconintra.h b/vp9/common/vp9_reconintra.h
index b97b6089d..1e0cfa42d 100644
--- a/vp9/common/vp9_reconintra.h
+++ b/vp9/common/vp9_reconintra.h
@@ -14,8 +14,6 @@
#include "vpx/vpx_integer.h"
#include "vp9/common/vp9_blockd.h"
-void vp9_recon_intra_mbuv(MACROBLOCKD *xd);
-
B_PREDICTION_MODE vp9_find_dominant_direction(uint8_t *ptr,
int stride, int n,
int tx, int ty);
diff --git a/vp9/common/vp9_reconintra4x4.c b/vp9/common/vp9_reconintra4x4.c
index a0700010b..7f81b051b 100644
--- a/vp9/common/vp9_reconintra4x4.c
+++ b/vp9/common/vp9_reconintra4x4.c
@@ -258,15 +258,22 @@ void vp9_intra4x4_predict(MACROBLOCKD *xd,
switch (b_mode) {
case B_DC_PRED: {
- int expected_dc = 0;
-
- for (i = 0; i < 4; i++) {
- expected_dc += above[i];
- expected_dc += left[i];
+ int expected_dc = 128;
+ if (have_top || have_left) {
+ int average = 0;
+ int count = 0;
+ if (have_top) {
+ for (i = 0; i < 4; i++)
+ average += above[i];
+ count += 4;
+ }
+ if (have_left) {
+ for (i = 0; i < 4; i++)
+ average += left[i];
+ count += 4;
+ }
+ expected_dc = (average + (count >> 1)) / count;
}
-
- expected_dc = ROUND_POWER_OF_TWO(expected_dc, 3);
-
for (r = 0; r < 4; r++) {
for (c = 0; c < 4; c++)
predictor[c] = expected_dc;
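A worked example of the new edge-aware DC average (a sketch; the pixel values are made up):

/* have_top = 1, have_left = 0, above[] = {10, 12, 14, 16}:
 *   average = 52, count = 4
 *   expected_dc = (52 + (4 >> 1)) / 4 = 54 / 4 = 13
 * With neither edge available, expected_dc stays at the 128 default
 * rather than averaging out-of-frame data. */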
diff --git a/vp9/common/vp9_rtcd_defs.sh b/vp9/common/vp9_rtcd_defs.sh
index f9f2395f3..1f838a574 100644
--- a/vp9/common/vp9_rtcd_defs.sh
+++ b/vp9/common/vp9_rtcd_defs.sh
@@ -78,17 +78,14 @@ prototype void vp9_recon4b "uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_p
specialize vp9_recon4b
# specialize vp9_recon4b sse2
-prototype void vp9_recon_mb "struct macroblockd *x"
-specialize vp9_recon_mb
+prototype void vp9_recon_sb "struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize"
+specialize vp9_recon_sb
-prototype void vp9_recon_mby "struct macroblockd *x"
-specialize vp9_recon_mby
+prototype void vp9_recon_sby "struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize"
+specialize vp9_recon_sby
-prototype void vp9_recon_sby_s "struct macroblockd *x, uint8_t *dst, enum BLOCK_SIZE_TYPE bsize"
-specialize vp9_recon_sby_s
-
-prototype void vp9_recon_sbuv_s "struct macroblockd *x, uint8_t *udst, uint8_t *vdst, enum BLOCK_SIZE_TYPE bsize"
-specialize void vp9_recon_sbuv_s
+prototype void vp9_recon_sbuv "struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize"
+specialize vp9_recon_sbuv
prototype void vp9_build_intra_predictors "uint8_t *src, int src_stride, uint8_t *pred, int y_stride, int mode, int bw, int bh, int up_available, int left_available, int right_available"
specialize void vp9_build_intra_predictors
diff --git a/vp9/common/x86/vp9_asm_stubs.c b/vp9/common/x86/vp9_asm_stubs.c
index 310f8ed24..2b66834a7 100644
--- a/vp9/common/x86/vp9_asm_stubs.c
+++ b/vp9/common/x86/vp9_asm_stubs.c
@@ -278,43 +278,20 @@ void vp9_convolve8_ssse3(const uint8_t *src, int src_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
- DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 16*71);
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64*71);
+ assert(w <= 64);
assert(h <= 64);
-
- if (x_step_q4 == 16 && y_step_q4 == 16 &&
- filter_x[3] != 128 && filter_y[3] != 128) {
- if (w == 16) {
- vp9_filter_block1d16_h8_ssse3(src - 3 * src_stride, src_stride,
- fdata2, 16,
- h + 7, filter_x);
- vp9_filter_block1d16_v8_ssse3(fdata2, 16,
- dst, dst_stride,
- h, filter_y);
- return;
- }
- if (w == 8) {
- vp9_filter_block1d8_h8_ssse3(src - 3 * src_stride, src_stride,
- fdata2, 16,
- h + 7, filter_x);
- vp9_filter_block1d8_v8_ssse3(fdata2, 16,
- dst, dst_stride,
- h, filter_y);
- return;
- }
- if (w == 4) {
- vp9_filter_block1d4_h8_ssse3(src - 3 * src_stride, src_stride,
- fdata2, 16,
- h + 7, filter_x);
- vp9_filter_block1d4_v8_ssse3(fdata2, 16,
- dst, dst_stride,
- h, filter_y);
- return;
- }
+ if (x_step_q4 == 16 && y_step_q4 == 16) {
+ vp9_convolve8_horiz_ssse3(src - 3 * src_stride, src_stride, fdata2, 64,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h + 7);
+ vp9_convolve8_vert_ssse3(fdata2 + 3 * 64, 64, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h);
+ } else {
+ vp9_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h);
}
- vp9_convolve8_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h);
}
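The intermediate buffer grows from 16*71 to 64*71 because the wrapper now covers widths up to 64; the row count follows from the 8-tap filter geometry:

/* rows needed = h + (8-tap support - 1) = 64 + 7 = 71
 * The horizontal pass starts 3 rows above the block (src - 3 * src_stride)
 * and produces h + 7 filtered rows; the vertical pass then reads from
 * fdata2 + 3 * 64, re-centered on the block's first output row. */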
void vp9_convolve8_avg_ssse3(const uint8_t *src, int src_stride,
@@ -322,42 +299,20 @@ void vp9_convolve8_avg_ssse3(const uint8_t *src, int src_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
- DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 16*71);
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64*71);
+ assert(w <= 64);
assert(h <= 64);
-
- if (x_step_q4 == 16 && y_step_q4 == 16 &&
- filter_x[3] != 128 && filter_y[3] != 128) {
- if (w == 16) {
- vp9_filter_block1d16_h8_ssse3(src - 3 * src_stride, src_stride,
- fdata2, 16,
- h + 7, filter_x);
- vp9_filter_block1d16_v8_avg_ssse3(fdata2, 16,
- dst, dst_stride,
- h, filter_y);
- return;
- }
- if (w == 8) {
- vp9_filter_block1d8_h8_ssse3(src - 3 * src_stride, src_stride,
- fdata2, 16,
- h + 7, filter_x);
- vp9_filter_block1d8_v8_avg_ssse3(fdata2, 16,
- dst, dst_stride,
- h, filter_y);
- return;
- }
- if (w == 4) {
- vp9_filter_block1d4_h8_ssse3(src - 3 * src_stride, src_stride,
- fdata2, 16,
- h + 7, filter_x);
- vp9_filter_block1d4_v8_avg_ssse3(fdata2, 16,
- dst, dst_stride,
- h, filter_y);
- return;
- }
+ if (x_step_q4 == 16 && y_step_q4 == 16) {
+ vp9_convolve8_horiz_ssse3(src - 3 * src_stride, src_stride, fdata2, 64,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h + 7);
+ vp9_convolve8_avg_vert_ssse3(fdata2 + 3 * 64, 64, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h);
+ } else {
+ vp9_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h);
}
- vp9_convolve8_avg_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h);
}
#endif
diff --git a/vp9/common/x86/vp9_recon_wrapper_sse2.c b/vp9/common/x86/vp9_recon_wrapper_sse2.c
index 12d2f970c..97148fbb8 100644
--- a/vp9/common/x86/vp9_recon_wrapper_sse2.c
+++ b/vp9/common/x86/vp9_recon_wrapper_sse2.c
@@ -35,7 +35,7 @@ static void build_intra_predictors_mbuv_x86(MACROBLOCKD *xd,
build_intra_pred_mbuv_fn_t ho_fn) {
int mode = xd->mode_info_context->mbmi.uv_mode;
build_intra_pred_mbuv_fn_t fn;
- int src_stride = xd->dst.uv_stride;
+ int src_stride = xd->plane[1].dst.stride;
switch (mode) {
case V_PRED:
@@ -68,34 +68,34 @@ static void build_intra_predictors_mbuv_x86(MACROBLOCKD *xd,
return;
}
- fn(dst_u, dst_stride, xd->dst.u_buffer, src_stride);
- fn(dst_v, dst_stride, xd->dst.v_buffer, src_stride);
+ fn(dst_u, dst_stride, xd->plane[1].dst.buf, src_stride);
+ fn(dst_v, dst_stride, xd->plane[2].dst.buf, src_stride);
}
void vp9_build_intra_predictors_mbuv_sse2(MACROBLOCKD *xd) {
- build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
- xd->dst.v_buffer, xd->dst.uv_stride,
+ build_intra_predictors_mbuv_x86(xd, xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
vp9_intra_pred_uv_tm_sse2,
vp9_intra_pred_uv_ho_mmx2);
}
void vp9_build_intra_predictors_mbuv_ssse3(MACROBLOCKD *xd) {
- build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
- xd->dst.v_buffer, xd->dst.uv_stride,
+ build_intra_predictors_mbuv_x86(xd, xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
vp9_intra_pred_uv_tm_ssse3,
vp9_intra_pred_uv_ho_ssse3);
}
void vp9_build_intra_predictors_mbuv_s_sse2(MACROBLOCKD *xd) {
- build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
- xd->dst.v_buffer, xd->dst.uv_stride,
+ build_intra_predictors_mbuv_x86(xd, xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
vp9_intra_pred_uv_tm_sse2,
vp9_intra_pred_uv_ho_mmx2);
}
void vp9_build_intra_predictors_mbuv_s_ssse3(MACROBLOCKD *xd) {
- build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
- xd->dst.v_buffer, xd->dst.uv_stride,
+ build_intra_predictors_mbuv_x86(xd, xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
vp9_intra_pred_uv_tm_ssse3,
vp9_intra_pred_uv_ho_ssse3);
}
diff --git a/vp9/decoder/vp9_dboolhuff.c b/vp9/decoder/vp9_dboolhuff.c
index 390a68475..9921ea727 100644
--- a/vp9/decoder/vp9_dboolhuff.c
+++ b/vp9/decoder/vp9_dboolhuff.c
@@ -13,25 +13,25 @@
#include "vp9/decoder/vp9_dboolhuff.h"
-int vp9_start_decode(BOOL_DECODER *br, const uint8_t *buffer, size_t size) {
- br->buffer_end = buffer + size;
- br->buffer = buffer;
- br->value = 0;
- br->count = -8;
- br->range = 255;
+int vp9_reader_init(vp9_reader *r, const uint8_t *buffer, size_t size) {
+ r->buffer_end = buffer + size;
+ r->buffer = buffer;
+ r->value = 0;
+ r->count = -8;
+ r->range = 255;
if (size && !buffer)
return 1;
- vp9_reader_fill(br);
+ vp9_reader_fill(r);
return 0;
}
-void vp9_reader_fill(BOOL_DECODER *br) {
- const uint8_t *const buffer_end = br->buffer_end;
- const uint8_t *buffer = br->buffer;
- VP9_BD_VALUE value = br->value;
- int count = br->count;
+void vp9_reader_fill(vp9_reader *r) {
+ const uint8_t *const buffer_end = r->buffer_end;
+ const uint8_t *buffer = r->buffer;
+ VP9_BD_VALUE value = r->value;
+ int count = r->count;
int shift = VP9_BD_VALUE_SIZE - 8 - (count + 8);
int loop_end = 0;
const int bits_left = (int)((buffer_end - buffer)*CHAR_BIT);
@@ -50,8 +50,17 @@ void vp9_reader_fill(BOOL_DECODER *br) {
}
}
- br->buffer = buffer;
- br->value = value;
- br->count = count;
+ r->buffer = buffer;
+ r->value = value;
+ r->count = count;
+}
+
+const uint8_t *vp9_reader_find_end(vp9_reader *r) {
+ // Find the end of the coded buffer
+ while (r->count > CHAR_BIT && r->count < VP9_BD_VALUE_SIZE) {
+ r->count -= CHAR_BIT;
+ r->buffer--;
+ }
+ return r->buffer;
}
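A minimal usage sketch of the renamed reader API (the input buffer, loop condition, and error handler are hypothetical; the entry points are the ones declared in vp9_dboolhuff.h below):

vp9_reader r;
if (vp9_reader_init(&r, data, data_size))
  return -1;                                // NULL buffer with nonzero size
while (more_syntax_elements) {              // hypothetical
  const int flag = vp9_read_bit(&r);        // one bit at probability 1/2
  const int bits = vp9_read_literal(&r, 4); // 4 raw bits, MSB first
}
if (vp9_reader_has_error(&r))               // decoded past the buffer end
  handle_corrupt_frame();                   // hypothetical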
diff --git a/vp9/decoder/vp9_dboolhuff.h b/vp9/decoder/vp9_dboolhuff.h
index dab330c02..b50aa35fd 100644
--- a/vp9/decoder/vp9_dboolhuff.h
+++ b/vp9/decoder/vp9_dboolhuff.h
@@ -33,24 +33,17 @@ typedef struct {
VP9_BD_VALUE value;
int count;
unsigned int range;
-} BOOL_DECODER;
+} vp9_reader;
DECLARE_ALIGNED(16, extern const uint8_t, vp9_norm[256]);
-int vp9_start_decode(BOOL_DECODER *br, const uint8_t *buffer, size_t size);
+int vp9_reader_init(vp9_reader *r, const uint8_t *buffer, size_t size);
-void vp9_reader_fill(BOOL_DECODER *br);
+void vp9_reader_fill(vp9_reader *r);
-static INLINE const uint8_t *vp9_reader_find_end(BOOL_DECODER *br) {
- // Find the end of the coded buffer
- while (br->count > CHAR_BIT && br->count < VP9_BD_VALUE_SIZE) {
- br->count -= CHAR_BIT;
- br->buffer--;
- }
- return br->buffer;
-}
+const uint8_t *vp9_reader_find_end(vp9_reader *r);
-static int vp9_read(BOOL_DECODER *br, int probability) {
+static int vp9_read(vp9_reader *br, int probability) {
unsigned int bit = 0;
VP9_BD_VALUE value;
VP9_BD_VALUE bigsplit;
@@ -87,21 +80,20 @@ static int vp9_read(BOOL_DECODER *br, int probability) {
return bit;
}
-static int vp9_read_bit(BOOL_DECODER *r) {
+static int vp9_read_bit(vp9_reader *r) {
return vp9_read(r, 128); // vp9_prob_half
}
-static int vp9_read_literal(BOOL_DECODER *br, int bits) {
+static int vp9_read_literal(vp9_reader *br, int bits) {
int z = 0, bit;
- for (bit = bits - 1; bit >= 0; bit--) {
+ for (bit = bits - 1; bit >= 0; bit--)
z |= vp9_read_bit(br) << bit;
- }
return z;
}
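For reference, the literal read assembles bits most-significant first:

/* Reading 3 bits b2, b1, b0 yields z = (b2 << 2) | (b1 << 1) | b0,
 * so a literal is stored in the bitstream MSB first. */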
-static int bool_error(BOOL_DECODER *br) {
+static int vp9_reader_has_error(vp9_reader *r) {
// Check if we have reached the end of the buffer.
//
// Variable 'count' stores the number of bits in the 'value' buffer, minus
@@ -116,7 +108,7 @@ static int bool_error(BOOL_DECODER *br) {
//
// 1 if we have tried to decode bits after the end of stream was encountered.
// 0 No error.
- return br->count > VP9_BD_VALUE_SIZE && br->count < VP9_LOTS_OF_BITS;
+ return r->count > VP9_BD_VALUE_SIZE && r->count < VP9_LOTS_OF_BITS;
}
#endif // VP9_DECODER_VP9_DBOOLHUFF_H_
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index 6478a8ebc..0df2e079f 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -73,14 +73,10 @@ static MB_PREDICTION_MODE read_uv_mode(vp9_reader *r, const vp9_prob *p) {
return (MB_PREDICTION_MODE)treed_read(r, vp9_uv_mode_tree, p);
}
-// This function reads the current macroblock's segment id from the bitstream
-// It should only be called if a segment map update is indicated.
-static void read_mb_segid(vp9_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *xd) {
- if (xd->segmentation_enabled && xd->update_mb_segmentation_map) {
- const vp9_prob *const p = xd->mb_segment_tree_probs;
- mi->segment_id = vp9_read(r, p[0]) ? 2 + vp9_read(r, p[2])
- : vp9_read(r, p[1]);
- }
+static int read_mb_segid(vp9_reader *r, MACROBLOCKD *xd) {
+ const vp9_prob *const p = xd->mb_segment_tree_probs;
+ return vp9_read(r, p[0]) ? 2 + vp9_read(r, p[2])
+ : vp9_read(r, p[1]);
}
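The refactored helper now returns the id instead of writing through the mode-info pointer; the two-level tree it reads is small enough to spell out:

/* vp9_read(r, p[0]) == 0:  id = vp9_read(r, p[1])      -> 0 or 1
 * vp9_read(r, p[0]) == 1:  id = 2 + vp9_read(r, p[2])  -> 2 or 3
 * i.e. p[0] splits {0,1} from {2,3}; p[1] or p[2] picks within the pair. */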
// This function reads the current macroblock's segment id from the bitstream
@@ -88,8 +84,8 @@ static void read_mb_segid(vp9_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *xd) {
static int read_mb_segid_except(vp9_reader *r,
VP9_COMMON *cm, MACROBLOCKD *xd,
int mb_row, int mb_col) {
- const int mb_index = mb_row * cm->mb_cols + mb_col;
- const int pred_seg_id = vp9_get_pred_mb_segid(cm, xd, mb_index);
+ const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
+ const int pred_seg_id = vp9_get_pred_mb_segid(cm, sb_type, mb_row, mb_col);
const vp9_prob *const p = xd->mb_segment_tree_probs;
const vp9_prob prob = xd->mb_segment_mispred_tree_probs[pred_seg_id];
@@ -98,21 +94,27 @@ static int read_mb_segid_except(vp9_reader *r,
: (pred_seg_id >= 2 ? vp9_read(r, p[1]) : (pred_seg_id == 0));
}
-#if CONFIG_NEW_MVREF
-int vp9_read_mv_ref_id(vp9_reader *r, vp9_prob *ref_id_probs) {
- int ref_index = 0;
+static void set_segment_id(VP9_COMMON *cm, MB_MODE_INFO *mbmi,
+ int mb_row, int mb_col, int segment_id) {
+ const int mb_index = mb_row * cm->mb_cols + mb_col;
+ const BLOCK_SIZE_TYPE sb_type = mbmi->sb_type;
+ if (sb_type) {
+ const int bw = 1 << mb_width_log2(sb_type);
+ const int bh = 1 << mb_height_log2(sb_type);
+ const int ymbs = MIN(cm->mb_rows - mb_row, bh);
+ const int xmbs = MIN(cm->mb_cols - mb_col, bw);
+ int x, y;
- if (vp9_read(r, ref_id_probs[0])) {
- ref_index++;
- if (vp9_read(r, ref_id_probs[1])) {
- ref_index++;
- if (vp9_read(r, ref_id_probs[2]))
- ref_index++;
+ for (y = 0; y < ymbs; y++) {
+ for (x = 0; x < xmbs; x++) {
+ const int index = mb_index + (y * cm->mb_cols + x);
+ cm->last_frame_seg_map[index] = segment_id;
+ }
}
+ } else {
+ cm->last_frame_seg_map[mb_index] = segment_id;
}
- return ref_index;
}
-#endif
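The consolidated helper clips its write region at the frame edge; a worked example (frame dimensions hypothetical):

/* A 64x64 superblock (bw = bh = 4) at mb_row = 14, mb_col = 18 in a
 * frame of 16x20 macroblocks:
 *   ymbs = MIN(16 - 14, 4) = 2,  xmbs = MIN(20 - 18, 4) = 2
 * Only the 2x2 corner of macroblocks that lies inside the frame is
 * written to cm->last_frame_seg_map. */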
extern const int vp9_i8x8_block[4];
static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
@@ -121,37 +123,21 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
const int mis = cm->mode_info_stride;
- const int map_index = mb_row * cm->mb_cols + mb_col;
m->mbmi.ref_frame = INTRA_FRAME;
- // Read the Macroblock segmentation map if it is being updated explicitly
- // this frame (reset to 0 by default).
+ // Read segmentation map if it is being updated explicitly this frame
m->mbmi.segment_id = 0;
- if (xd->update_mb_segmentation_map) {
- read_mb_segid(r, &m->mbmi, xd);
- if (m->mbmi.sb_type) {
- const int bw = 1 << mb_width_log2(m->mbmi.sb_type);
- const int bh = 1 << mb_height_log2(m->mbmi.sb_type);
- const int ymbs = MIN(cm->mb_rows - mb_row, bh);
- const int xmbs = MIN(cm->mb_cols - mb_col, bw);
- int x, y;
-
- for (y = 0; y < ymbs; y++) {
- for (x = 0; x < xmbs; x++) {
- const int index = y * cm->mb_cols + x;
- cm->last_frame_seg_map[map_index + index] = m->mbmi.segment_id;
- }
- }
- } else {
- cm->last_frame_seg_map[map_index] = m->mbmi.segment_id;
- }
+ if (xd->segmentation_enabled && xd->update_mb_segmentation_map) {
+ m->mbmi.segment_id = read_mb_segid(r, xd);
+ set_segment_id(cm, &m->mbmi, mb_row, mb_col, m->mbmi.segment_id);
}
- m->mbmi.mb_skip_coeff = vp9_segfeature_active(&pbi->mb, m->mbmi.segment_id,
+ m->mbmi.mb_skip_coeff = vp9_segfeature_active(xd, m->mbmi.segment_id,
SEG_LVL_SKIP);
if (!m->mbmi.mb_skip_coeff)
m->mbmi.mb_skip_coeff = vp9_read(r, vp9_get_pred_prob(cm, xd, PRED_MBSKIP));
+ // luma mode
m->mbmi.mode = m->mbmi.sb_type ?
read_kf_sb_ymode(r, cm->sb_kf_ymode_prob[cm->kf_ymode_probs_index]):
read_kf_mb_ymode(r, cm->kf_ymode_prob[cm->kf_ymode_probs_index]);
@@ -159,19 +145,19 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
m->mbmi.ref_frame = INTRA_FRAME;
if (m->mbmi.mode == I4X4_PRED) {
- int i = 0;
- do {
+ int i;
+ for (i = 0; i < 16; ++i) {
const B_PREDICTION_MODE a = above_block_mode(m, i, mis);
const B_PREDICTION_MODE l = xd->left_available || (i & 3) ?
left_block_mode(m, i) : B_DC_PRED;
m->bmi[i].as_mode.first = read_kf_bmode(r, cm->kf_bmode_prob[a][l]);
- } while (++i < 16);
+ }
}
if (m->mbmi.mode == I8X8_PRED) {
int i;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 4; ++i) {
const int ib = vp9_i8x8_block[i];
const int mode8x8 = read_i8x8_mode(r, cm->fc.i8x8_mode_prob);
@@ -180,7 +166,10 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
m->bmi[ib + 4].as_mode.first = mode8x8;
m->bmi[ib + 5].as_mode.first = mode8x8;
}
- } else {
+ }
+
+ // chroma mode
+ if (m->mbmi.mode != I8X8_PRED) {
m->mbmi.uv_mode = read_uv_mode(r, cm->kf_uv_mode_prob[m->mbmi.mode]);
}
@@ -517,12 +506,10 @@ static void mb_mode_mv_init(VP9D_COMP *pbi, vp9_reader *r) {
for (i = 0; i < VP9_I32X32_MODES - 1; ++i)
cm->fc.sb_ymode_prob[i] = vp9_read_prob(r);
- for (j = 0; j < PARTITION_PLANES; j++) {
- if (vp9_read_bit(r)) {
+ for (j = 0; j < PARTITION_PLANES; j++)
+ if (vp9_read_bit(r))
for (i = 0; i < PARTITION_TYPES - 1; i++)
cm->fc.partition_prob[j][i] = vp9_read_prob(r);
- }
- }
read_nmvprobs(r, nmvc, xd->allow_high_precision_mv);
}
@@ -531,79 +518,40 @@ static void mb_mode_mv_init(VP9D_COMP *pbi, vp9_reader *r) {
// This function either reads the segment id for the current macroblock from
// the bitstream or, if the value is temporally predicted, uses the predicted
// value.
-static void read_mb_segment_id(VP9D_COMP *pbi,
- int mb_row, int mb_col,
- vp9_reader *r) {
+static int read_mb_segment_id(VP9D_COMP *pbi, int mb_row, int mb_col,
+ vp9_reader *r) {
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
MODE_INFO *const mi = xd->mode_info_context;
MB_MODE_INFO *const mbmi = &mi->mbmi;
- const int mb_index = mb_row * cm->mb_cols + mb_col;
- if (xd->segmentation_enabled) {
- if (xd->update_mb_segmentation_map) {
- // Is temporal coding of the segment id for this mb enabled.
- if (cm->temporal_update) {
- // Get the context based probability for reading the
- // prediction status flag
- vp9_prob pred_prob = vp9_get_pred_prob(cm, xd, PRED_SEG_ID);
-
- // Read the prediction status flag
- unsigned char seg_pred_flag = vp9_read(r, pred_prob);
-
- // Store the prediction flag.
- vp9_set_pred_flag(xd, PRED_SEG_ID, seg_pred_flag);
-
- // If the value is flagged as correctly predicted
- // then use the predicted value, otherwise decode it explicitly
- mbmi->segment_id = seg_pred_flag ?
- vp9_get_pred_mb_segid(cm, xd, mb_index) :
- read_mb_segid_except(r, cm, xd, mb_row, mb_col);
- } else {
- // Normal unpredicted coding mode
- read_mb_segid(r, mbmi, xd);
- }
+ if (!xd->segmentation_enabled)
+ return 0; // Default for disabled segmentation
- if (mbmi->sb_type) {
- const int bw = 1 << mb_width_log2(mbmi->sb_type);
- const int bh = 1 << mb_height_log2(mbmi->sb_type);
- const int ymbs = MIN(cm->mb_rows - mb_row, bh);
- const int xmbs = MIN(cm->mb_cols - mb_col, bw);
- int x, y;
-
- for (y = 0; y < ymbs; y++) {
- for (x = 0; x < xmbs; x++) {
- const int index = y * cm->mb_cols + x;
- cm->last_frame_seg_map[mb_index + index] = mbmi->segment_id;
- }
- }
- } else {
- cm->last_frame_seg_map[mb_index] = mbmi->segment_id;
- }
+ if (xd->update_mb_segmentation_map) {
+ int segment_id;
+
+ if (cm->temporal_update) {
+ // Temporal coding of the segment id for this mb is enabled.
+ // Get the context based probability for reading the
+ // prediction status flag
+ const vp9_prob pred_prob = vp9_get_pred_prob(cm, xd, PRED_SEG_ID);
+ const int pred_flag = vp9_read(r, pred_prob);
+ vp9_set_pred_flag(xd, PRED_SEG_ID, pred_flag);
+
+ // If the value is flagged as correctly predicted
+ // then use the predicted value, otherwise decode it explicitly
+ segment_id = pred_flag ? vp9_get_pred_mb_segid(cm, mbmi->sb_type,
+ mb_row, mb_col)
+ : read_mb_segid_except(r, cm, xd, mb_row, mb_col);
} else {
- if (mbmi->sb_type) {
- const int bw = 1 << mb_width_log2(mbmi->sb_type);
- const int bh = 1 << mb_height_log2(mbmi->sb_type);
- const int ymbs = MIN(cm->mb_rows - mb_row, bh);
- const int xmbs = MIN(cm->mb_cols - mb_col, bw);
- unsigned segment_id = -1;
- int x, y;
-
- for (y = 0; y < ymbs; y++) {
- for (x = 0; x < xmbs; x++) {
- segment_id = MIN(segment_id,
- cm->last_frame_seg_map[mb_index + x + y * cm->mb_cols]);
- }
- }
- mbmi->segment_id = segment_id;
- } else {
- mbmi->segment_id = cm->last_frame_seg_map[mb_index];
- }
+ segment_id = read_mb_segid(r, xd); // Normal unpredicted coding mode
}
+
+    set_segment_id(cm, mbmi, mb_row, mb_col, segment_id);  // Side effect:
+                                                // updates last_frame_seg_map
+ return segment_id;
} else {
- // The encoder explicitly sets the segment_id to 0
- // when segmentation is disabled
- mbmi->segment_id = 0;
+ return vp9_get_pred_mb_segid(cm, mbmi->sb_type, mb_row, mb_col);
}
}
@@ -678,7 +626,7 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mb_to_right_edge = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;
// Read the macroblock segment id.
- read_mb_segment_id(pbi, mb_row, mb_col, r);
+ mbmi->segment_id = read_mb_segment_id(pbi, mb_row, mb_col, r);
mbmi->mb_skip_coeff = vp9_segfeature_active(xd, mbmi->segment_id,
SEG_LVL_SKIP);
@@ -696,20 +644,14 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
const MV_REFERENCE_FRAME ref_frame = mbmi->ref_frame;
struct scale_factors *sf0 = &xd->scale_factor[0];
- struct scale_factors *sf_uv0 = &xd->scale_factor_uv[0];
*sf0 = cm->active_ref_scale[mbmi->ref_frame - 1];
{
- const int use_prev_in_find_best_ref = sf0->x_num == sf0->x_den &&
- sf0->y_num == sf0->y_den &&
- !cm->error_resilient_mode &&
- !cm->frame_parallel_decoding_mode;
-
// Select the appropriate reference frame for this MB
const int ref_fb_idx = cm->active_ref_idx[ref_frame - 1];
- setup_pred_block(&xd->pre, &cm->yv12_fb[ref_fb_idx],
- mb_row, mb_col, sf0, sf_uv0);
+ setup_pre_planes(xd, &cm->yv12_fb[ref_fb_idx], NULL,
+ mb_row, mb_col, xd->scale_factor, xd->scale_factor_uv);
#ifdef DEC_DEBUG
if (dec_debug)
@@ -733,9 +675,6 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
if (mbmi->mode != ZEROMV) {
vp9_find_best_ref_mvs(xd,
- use_prev_in_find_best_ref ? xd->pre.y_buffer
- : NULL,
- xd->pre.y_stride,
mbmi->ref_mvs[ref_frame],
&nearest, &nearby);
@@ -772,16 +711,11 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
if (mbmi->second_ref_frame > 0) {
const MV_REFERENCE_FRAME second_ref_frame = mbmi->second_ref_frame;
struct scale_factors *sf1 = &xd->scale_factor[1];
- struct scale_factors *sf_uv1 = &xd->scale_factor_uv[1];
- const int use_prev_in_find_best_ref = sf1->x_num == sf1->x_den &&
- sf1->y_num == sf1->y_den &&
- !cm->error_resilient_mode &&
- !cm->frame_parallel_decoding_mode;
const int second_ref_fb_idx = cm->active_ref_idx[second_ref_frame - 1];
*sf1 = cm->active_ref_scale[second_ref_frame - 1];
- setup_pred_block(&xd->second_pre, &cm->yv12_fb[second_ref_fb_idx],
- mb_row, mb_col, sf1, sf_uv1);
+ setup_pre_planes(xd, NULL, &cm->yv12_fb[second_ref_fb_idx],
+ mb_row, mb_col, xd->scale_factor, xd->scale_factor_uv);
vp9_find_mv_refs(cm, xd, mi,
use_prev_in_find_mv_refs ? prev_mi : NULL,
@@ -790,9 +724,6 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
if (mbmi->mode != ZEROMV) {
vp9_find_best_ref_mvs(xd,
- use_prev_in_find_best_ref ?
- xd->second_pre.y_buffer : NULL,
- xd->second_pre.y_stride,
mbmi->ref_mvs[second_ref_frame],
&nearest_second,
&nearby_second);
@@ -829,29 +760,6 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
#endif
}
-#if CONFIG_NEW_MVREF
- // if ((mbmi->mode == NEWMV) || (mbmi->mode == SPLITMV))
- if (mbmi->mode == NEWMV) {
- int best_index;
- MV_REFERENCE_FRAME ref_frame = mbmi->ref_frame;
-
- // Encode the index of the choice.
- best_index =
- vp9_read_mv_ref_id(r, xd->mb_mv_ref_probs[ref_frame]);
-
- best_mv.as_int = mbmi->ref_mvs[ref_frame][best_index].as_int;
-
- if (mbmi->second_ref_frame > 0) {
- ref_frame = mbmi->second_ref_frame;
-
- // Encode the index of the choice.
- best_index =
- vp9_read_mv_ref_id(r, xd->mb_mv_ref_probs[ref_frame]);
- best_mv_second.as_int = mbmi->ref_mvs[ref_frame][best_index].as_int;
- }
- }
-#endif
-
mbmi->uv_mode = DC_PRED;
switch (mbmi->mode) {
case SPLITMV: {
@@ -1108,258 +1016,6 @@ void vp9_decode_mode_mvs_init(VP9D_COMP* const pbi, vp9_reader *r) {
mb_mode_mv_init(pbi, r);
}
-#if CONFIG_CODE_NONZEROCOUNT
-static uint16_t read_nzc(VP9_COMMON *const cm,
- int nzc_context,
- TX_SIZE tx_size,
- int ref,
- int type,
- vp9_reader *r) {
- int c, e;
- uint16_t nzc;
- if (!get_nzc_used(tx_size)) return 0;
- if (tx_size == TX_32X32) {
- c = treed_read(r, vp9_nzc32x32_tree,
- cm->fc.nzc_probs_32x32[nzc_context][ref][type]);
- cm->fc.nzc_counts_32x32[nzc_context][ref][type][c]++;
- } else if (tx_size == TX_16X16) {
- c = treed_read(r, vp9_nzc16x16_tree,
- cm->fc.nzc_probs_16x16[nzc_context][ref][type]);
- cm->fc.nzc_counts_16x16[nzc_context][ref][type][c]++;
- } else if (tx_size == TX_8X8) {
- c = treed_read(r, vp9_nzc8x8_tree,
- cm->fc.nzc_probs_8x8[nzc_context][ref][type]);
- cm->fc.nzc_counts_8x8[nzc_context][ref][type][c]++;
- } else if (tx_size == TX_4X4) {
- c = treed_read(r, vp9_nzc4x4_tree,
- cm->fc.nzc_probs_4x4[nzc_context][ref][type]);
- cm->fc.nzc_counts_4x4[nzc_context][ref][type][c]++;
- } else {
- assert(0);
- }
- nzc = vp9_basenzcvalue[c];
- if ((e = vp9_extranzcbits[c])) {
- int x = 0;
- while (e--) {
- int b = vp9_read(
- r, cm->fc.nzc_pcat_probs[nzc_context][c - NZC_TOKENS_NOEXTRA][e]);
- x |= (b << e);
- cm->fc.nzc_pcat_counts[nzc_context][c - NZC_TOKENS_NOEXTRA][e][b]++;
- }
- nzc += x;
- }
- if (tx_size == TX_32X32)
- assert(nzc <= 1024);
- else if (tx_size == TX_16X16)
- assert(nzc <= 256);
- else if (tx_size == TX_8X8)
- assert(nzc <= 64);
- else if (tx_size == TX_4X4)
- assert(nzc <= 16);
- return nzc;
-}
-
-static void read_nzcs_sb64(VP9_COMMON *const cm,
- MACROBLOCKD* xd,
- int mb_row,
- int mb_col,
- vp9_reader *r) {
- MODE_INFO *m = xd->mode_info_context;
- MB_MODE_INFO *const mi = &m->mbmi;
- int j, nzc_context;
- const int ref = m->mbmi.ref_frame != INTRA_FRAME;
-
- assert(mb_col == get_mb_col(xd));
- assert(mb_row == get_mb_row(xd));
-
- vpx_memset(m->mbmi.nzcs, 0, 384 * sizeof(m->mbmi.nzcs[0]));
-
- if (mi->mb_skip_coeff)
- return;
-
- switch (mi->txfm_size) {
- case TX_32X32:
- for (j = 0; j < 256; j += 64) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_32X32, ref, 0, r);
- }
- for (j = 256; j < 384; j += 64) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_32X32, ref, 1, r);
- }
- break;
-
- case TX_16X16:
- for (j = 0; j < 256; j += 16) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_16X16, ref, 0, r);
- }
- for (j = 256; j < 384; j += 16) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_16X16, ref, 1, r);
- }
- break;
-
- case TX_8X8:
- for (j = 0; j < 256; j += 4) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_8X8, ref, 0, r);
- }
- for (j = 256; j < 384; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_8X8, ref, 1, r);
- }
- break;
-
- case TX_4X4:
- for (j = 0; j < 256; ++j) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_4X4, ref, 0, r);
- }
- for (j = 256; j < 384; ++j) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_4X4, ref, 1, r);
- }
- break;
-
- default:
- break;
- }
-}
-
-static void read_nzcs_sb32(VP9_COMMON *const cm,
- MACROBLOCKD* xd,
- int mb_row,
- int mb_col,
- vp9_reader *r) {
- MODE_INFO *m = xd->mode_info_context;
- MB_MODE_INFO *const mi = &m->mbmi;
- int j, nzc_context;
- const int ref = m->mbmi.ref_frame != INTRA_FRAME;
-
- assert(mb_col == get_mb_col(xd));
- assert(mb_row == get_mb_row(xd));
-
- vpx_memset(m->mbmi.nzcs, 0, 384 * sizeof(m->mbmi.nzcs[0]));
-
- if (mi->mb_skip_coeff)
- return;
-
- switch (mi->txfm_size) {
- case TX_32X32:
- for (j = 0; j < 64; j += 64) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_32X32, ref, 0, r);
- }
- for (j = 64; j < 96; j += 16) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_16X16, ref, 1, r);
- }
- break;
-
- case TX_16X16:
- for (j = 0; j < 64; j += 16) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_16X16, ref, 0, r);
- }
- for (j = 64; j < 96; j += 16) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_16X16, ref, 1, r);
- }
- break;
-
- case TX_8X8:
- for (j = 0; j < 64; j += 4) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_8X8, ref, 0, r);
- }
- for (j = 64; j < 96; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_8X8, ref, 1, r);
- }
- break;
-
- case TX_4X4:
- for (j = 0; j < 64; ++j) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_4X4, ref, 0, r);
- }
- for (j = 64; j < 96; ++j) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_4X4, ref, 1, r);
- }
- break;
-
- default:
- break;
- }
-}
-
-static void read_nzcs_mb16(VP9_COMMON *const cm,
- MACROBLOCKD* xd,
- int mb_row,
- int mb_col,
- vp9_reader *r) {
- MODE_INFO *m = xd->mode_info_context;
- MB_MODE_INFO *const mi = &m->mbmi;
- int j, nzc_context;
- const int ref = m->mbmi.ref_frame != INTRA_FRAME;
-
- assert(mb_col == get_mb_col(xd));
- assert(mb_row == get_mb_row(xd));
-
- vpx_memset(m->mbmi.nzcs, 0, 384 * sizeof(m->mbmi.nzcs[0]));
-
- if (mi->mb_skip_coeff)
- return;
-
- switch (mi->txfm_size) {
- case TX_16X16:
- for (j = 0; j < 16; j += 16) {
- nzc_context = vp9_get_nzc_context_y_mb16(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_16X16, ref, 0, r);
- }
- for (j = 16; j < 24; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_8X8, ref, 1, r);
- }
- break;
-
- case TX_8X8:
- for (j = 0; j < 16; j += 4) {
- nzc_context = vp9_get_nzc_context_y_mb16(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_8X8, ref, 0, r);
- }
- if (mi->mode == I8X8_PRED || mi->mode == SPLITMV) {
- for (j = 16; j < 24; ++j) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_4X4, ref, 1, r);
- }
- } else {
- for (j = 16; j < 24; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_8X8, ref, 1, r);
- }
- }
- break;
-
- case TX_4X4:
- for (j = 0; j < 16; ++j) {
- nzc_context = vp9_get_nzc_context_y_mb16(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_4X4, ref, 0, r);
- }
- for (j = 16; j < 24; ++j) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- m->mbmi.nzcs[j] = read_nzc(cm, nzc_context, TX_4X4, ref, 1, r);
- }
- break;
-
- default:
- break;
- }
-}
-#endif // CONFIG_CODE_NONZEROCOUNT
-
void vp9_decode_mb_mode_mv(VP9D_COMP* const pbi,
MACROBLOCKD* const xd,
int mb_row,
@@ -1378,14 +1034,6 @@ void vp9_decode_mb_mode_mv(VP9D_COMP* const pbi,
mi->mbmi.ref_frame - 1, mi->mbmi.second_ref_frame - 1,
cm->active_ref_scale);
}
-#if CONFIG_CODE_NONZEROCOUNT
- if (mbmi->sb_type == BLOCK_SIZE_SB64X64)
- read_nzcs_sb64(cm, xd, mb_row, mb_col, r);
- else if (mbmi->sb_type == BLOCK_SIZE_SB32X32)
- read_nzcs_sb32(cm, xd, mb_row, mb_col, r);
- else
- read_nzcs_mb16(cm, xd, mb_row, mb_col, r);
-#endif // CONFIG_CODE_NONZEROCOUNT
if (mbmi->sb_type) {
const int bw = 1 << mb_width_log2(mbmi->sb_type);
diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c
index 4af921872..a16b6108a 100644
--- a/vp9/decoder/vp9_decodframe.c
+++ b/vp9/decoder/vp9_decodframe.c
@@ -86,31 +86,31 @@ static int inv_recenter_nonneg(int v, int m) {
return m - ((v + 1) >> 1);
}
-static int decode_uniform(BOOL_DECODER *br, int n) {
+static int decode_uniform(vp9_reader *r, int n) {
int v;
const int l = get_unsigned_bits(n);
const int m = (1 << l) - n;
if (!l)
return 0;
- v = vp9_read_literal(br, l - 1);
- return v < m ? v : (v << 1) - m + vp9_read_bit(br);
+ v = vp9_read_literal(r, l - 1);
+ return v < m ? v : (v << 1) - m + vp9_read_bit(r);
}
-static int decode_term_subexp(BOOL_DECODER *br, int k, int num_syms) {
+static int decode_term_subexp(vp9_reader *r, int k, int num_syms) {
int i = 0, mk = 0, word;
while (1) {
const int b = i ? k + i - 1 : k;
const int a = 1 << b;
if (num_syms <= mk + 3 * a) {
- word = decode_uniform(br, num_syms - mk) + mk;
+ word = decode_uniform(r, num_syms - mk) + mk;
break;
} else {
- if (vp9_read_bit(br)) {
+ if (vp9_read_bit(r)) {
i++;
mk += a;
} else {
- word = vp9_read_literal(br, b) + mk;
+ word = vp9_read_literal(r, b) + mk;
break;
}
}
@@ -118,11 +118,11 @@ static int decode_term_subexp(BOOL_DECODER *br, int k, int num_syms) {
return word;
}
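
decode_term_subexp above reads escalating magnitude classes of b = k, k, k+1, ... bits until the remaining symbol range fits, at which point decode_uniform spends the final bits without waste: for n symbols and l = ceil(log2(n)), the first m = 2^l - n symbols cost l - 1 bits and the rest cost l. A minimal standalone model of that uniform code, with a stubbed bit source in place of the vp9_reader (the helper names here are illustrative, not part of the codec):

#include <stdio.h>

/* Bit source stub; vp9_read_bit/vp9_read_literal on a real vp9_reader
 * behave analogously. */
static const int bits[] = { 1, 0 };
static int pos;

static int read_bit(void) { return bits[pos++]; }

static int read_literal(int n) {           /* n bits, MSB first */
  int v = 0;
  while (n--)
    v = (v << 1) | read_bit();
  return v;
}

static int get_unsigned_bits(int n) {      /* ceil(log2(n)) */
  int l = 0;
  while (n > (1 << l))
    ++l;
  return l;
}

/* Uniform code over n symbols: the first m = 2^l - n symbols take l - 1
 * bits, the remaining ones take l bits. */
static int decode_uniform_model(int n) {
  const int l = get_unsigned_bits(n);
  const int m = (1 << l) - n;
  int v;
  if (!l)
    return 0;
  v = read_literal(l - 1);
  return v < m ? v : (v << 1) - m + read_bit();
}

int main(void) {
  /* n = 3: l = 2, m = 1, so symbol 0 costs one bit, symbols 1 and 2 two. */
  printf("%d\n", decode_uniform_model(3));  /* consumes "10" -> symbol 1 */
  return 0;
}

With n = 3 the model gives l = 2 and m = 1: symbol 0 costs a single bit while symbols 1 and 2 cost two, which is exactly the shortened-codeword property the terminated subexponential code relies on for its final range.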
-static int decode_unsigned_max(BOOL_DECODER *br, int max) {
+static int decode_unsigned_max(vp9_reader *r, int max) {
int data = 0, bit = 0, lmax = max;
while (lmax) {
- data |= vp9_read_bit(br) << bit++;
+ data |= vp9_read_bit(r) << bit++;
lmax >>= 1;
}
return data > max ? max : data;
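
decode_unsigned_max consumes one raw bit per iteration of its lmax >>= 1 loop, i.e. floor(log2(max)) + 1 bits LSB-first, then clamps the result to max. A quick standalone check of that bit count (the helper name is hypothetical):

#include <stdio.h>

/* Number of raw bits decode_unsigned_max consumes: one per iteration
 * of its lmax >>= 1 loop, i.e. floor(log2(max)) + 1. */
static int bits_for_max(int max) {
  int n = 0;
  while (max) {
    ++n;
    max >>= 1;
  }
  return n;
}

int main(void) {
  printf("%d\n", bits_for_max(10));  /* 4: reads cover 0..15, 11..15 clamp to 10 */
  return 0;
}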
@@ -154,8 +154,8 @@ static int inv_remap_prob(int v, int m) {
}
}
-static vp9_prob read_prob_diff_update(vp9_reader *const bc, int oldp) {
- int delp = decode_term_subexp(bc, SUBEXP_PARAM, 255);
+static vp9_prob read_prob_diff_update(vp9_reader *r, int oldp) {
+ int delp = decode_term_subexp(r, SUBEXP_PARAM, 255);
return (vp9_prob)inv_remap_prob(delp, oldp);
}
@@ -220,51 +220,23 @@ static void mb_init_dequantizer(VP9D_COMP *pbi, MACROBLOCKD *mb) {
}
}
-#if CONFIG_CODE_NONZEROCOUNT
-static void propagate_nzcs(VP9_COMMON *cm, MACROBLOCKD *xd) {
- MODE_INFO *m = xd->mode_info_context;
- BLOCK_SIZE_TYPE sb_type = m->mbmi.sb_type;
- const int mis = cm->mode_info_stride;
- int n;
- if (sb_type == BLOCK_SIZE_SB64X64) {
- for (n = 0; n < 16; ++n) {
- int i = n >> 2;
- int j = n & 3;
- if (i == 0 && j == 0) continue;
- vpx_memcpy((m + j + mis * i)->mbmi.nzcs, m->mbmi.nzcs,
- 384 * sizeof(m->mbmi.nzcs[0]));
- }
- } else if (sb_type == BLOCK_SIZE_SB32X32) {
- for (n = 0; n < 4; ++n) {
- int i = n >> 1;
- int j = n & 1;
- if (i == 0 && j == 0) continue;
- vpx_memcpy((m + j + mis * i)->mbmi.nzcs, m->mbmi.nzcs,
- 384 * sizeof(m->mbmi.nzcs[0]));
- }
- }
-}
-#endif
-
-static void decode_16x16(VP9D_COMP *pbi, MACROBLOCKD *xd,
- BOOL_DECODER* const bc) {
+static void decode_16x16(MACROBLOCKD *xd) {
const TX_TYPE tx_type = get_tx_type_16x16(xd, 0);
vp9_dequant_iht_add_16x16_c(tx_type, xd->plane[0].qcoeff,
- xd->block[0].dequant, xd->dst.y_buffer,
- xd->dst.y_stride, xd->plane[0].eobs[0]);
+ xd->block[0].dequant, xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride, xd->plane[0].eobs[0]);
vp9_dequant_idct_add_8x8(xd->plane[1].qcoeff, xd->block[16].dequant,
- xd->dst.u_buffer, xd->dst.uv_stride,
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
xd->plane[1].eobs[0]);
vp9_dequant_idct_add_8x8(xd->plane[2].qcoeff, xd->block[20].dequant,
- xd->dst.v_buffer, xd->dst.uv_stride,
+                           xd->plane[2].dst.buf, xd->plane[2].dst.stride,
xd->plane[2].eobs[0]);
}
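
decode_16x16 is the first of many call sites converted from the monolithic xd->dst y/u/v buffers to the per-plane struct buf_2d introduced in vp9_blockd.h, which makes plane handling indexable instead of hand-unrolled. A minimal sketch of the pattern under that assumption (the buffer sizes and the clear operation are illustrative, not codec code):

#include <stdint.h>
#include <string.h>

/* Mirrors struct buf_2d from vp9_blockd.h. */
struct buf_2d {
  uint8_t *buf;
  int stride;
};

/* Clear an 8x8 block at (x, y) in all three planes.  With per-plane
 * buffers the Y/U/V cases collapse into one loop instead of three
 * near-identical call sites. */
static void clear_8x8_all_planes(struct buf_2d dst[3], int x, int y) {
  int p, row;
  for (p = 0; p < 3; ++p)
    for (row = 0; row < 8; ++row)
      memset(dst[p].buf + (y + row) * dst[p].stride + x, 0, 8);
}

int main(void) {
  static uint8_t y[64 * 64], u[32 * 32], v[32 * 32];
  struct buf_2d dst[3] = { { y, 64 }, { u, 32 }, { v, 32 } };
  clear_8x8_all_planes(dst, 8, 8);
  return 0;
}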
-static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
- BOOL_DECODER* const bc) {
+static void decode_8x8(MACROBLOCKD *xd) {
const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
// luma
// if the first one is DCT_DCT assume all the rest are as well
@@ -277,7 +249,7 @@ static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
int16_t *q = BLOCK_OFFSET(xd->plane[0].qcoeff, idx, 16);
int16_t *dq = xd->block[0].dequant;
uint8_t *dst = *(xd->block[ib].base_dst) + xd->block[ib].dst;
- int stride = xd->dst.y_stride;
+ int stride = xd->plane[0].dst.stride;
if (mode == I8X8_PRED) {
BLOCKD *b = &xd->block[ib];
int i8x8mode = b->bmi.as_mode.first;
@@ -289,8 +261,8 @@ static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
}
} else {
vp9_dequant_idct_add_y_block_8x8(xd->plane[0].qcoeff,
- xd->block[0].dequant, xd->dst.y_buffer,
- xd->dst.y_stride, xd);
+ xd->block[0].dequant, xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride, xd);
}
// chroma
@@ -317,16 +289,16 @@ static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
}
} else if (mode == SPLITMV) {
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
- xd->dst.u_buffer, xd->dst.uv_stride, xd->plane[1].eobs);
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride, xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
- xd->dst.v_buffer, xd->dst.uv_stride, xd->plane[2].eobs);
+      xd->plane[2].dst.buf, xd->plane[2].dst.stride, xd->plane[2].eobs);
} else {
vp9_dequant_idct_add_8x8(xd->plane[1].qcoeff, xd->block[16].dequant,
- xd->dst.u_buffer, xd->dst.uv_stride,
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
xd->plane[1].eobs[0]);
vp9_dequant_idct_add_8x8(xd->plane[2].qcoeff, xd->block[16].dequant,
- xd->dst.v_buffer, xd->dst.uv_stride,
+                             xd->plane[2].dst.buf, xd->plane[2].dst.stride,
xd->plane[2].eobs[0]);
}
}
@@ -347,8 +319,7 @@ static INLINE void dequant_add_y(MACROBLOCKD *xd, TX_TYPE tx_type, int idx) {
}
-static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
- BOOL_DECODER* const bc) {
+static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd, vp9_reader *r) {
TX_TYPE tx_type;
int i = 0;
const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
@@ -386,7 +357,7 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->mode_info_context->bmi[i].as_mode.context = b->bmi.as_mode.context =
vp9_find_bpred_context(xd, b);
if (!xd->mode_info_context->mbmi.mb_skip_coeff)
- vp9_decode_coefs_4x4(pbi, xd, bc, PLANE_TYPE_Y_WITH_DC, i);
+ vp9_decode_coefs_4x4(pbi, xd, r, PLANE_TYPE_Y_WITH_DC, i);
#endif
vp9_intra4x4_predict(xd, b, b_mode, *(b->base_dst) + b->dst,
b->dst_stride);
@@ -395,31 +366,31 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
}
#if CONFIG_NEWBINTRAMODES
if (!xd->mode_info_context->mbmi.mb_skip_coeff)
- vp9_decode_mb_tokens_4x4_uv(pbi, xd, bc);
+ vp9_decode_mb_tokens_4x4_uv(pbi, xd, r);
#endif
vp9_build_intra_predictors_sbuv_s(xd, BLOCK_SIZE_MB16X16);
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
- xd->dst.u_buffer, xd->dst.uv_stride, xd->plane[1].eobs);
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride, xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
- xd->dst.v_buffer, xd->dst.uv_stride, xd->plane[2].eobs);
+      xd->plane[2].dst.buf, xd->plane[2].dst.stride, xd->plane[2].eobs);
} else if (mode == SPLITMV || get_tx_type_4x4(xd, 0) == DCT_DCT) {
xd->itxm_add_y_block(xd->plane[0].qcoeff,
xd->block[0].dequant,
- xd->dst.y_buffer, xd->dst.y_stride, xd);
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride, xd);
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
- xd->dst.u_buffer, xd->dst.uv_stride, xd->plane[1].eobs);
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride, xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
- xd->dst.v_buffer, xd->dst.uv_stride, xd->plane[2].eobs);
+      xd->plane[2].dst.buf, xd->plane[2].dst.stride, xd->plane[2].eobs);
} else {
for (i = 0; i < 16; i++) {
tx_type = get_tx_type_4x4(xd, i);
dequant_add_y(xd, tx_type, i);
}
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
- xd->dst.u_buffer, xd->dst.uv_stride,
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
- xd->dst.v_buffer, xd->dst.uv_stride,
+                  xd->plane[2].dst.buf, xd->plane[2].dst.stride,
xd->plane[2].eobs);
}
}
@@ -433,10 +404,11 @@ static INLINE void decode_sby_32x32(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < y_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> bwl;
- const int y_offset = (y_idx * 32) * mb->dst.y_stride + (x_idx * 32);
+ const int y_offset = (y_idx * 32) * mb->plane[0].dst.stride + (x_idx * 32);
vp9_dequant_idct_add_32x32(BLOCK_OFFSET(mb->plane[0].qcoeff, n, 1024),
mb->block[0].dequant ,
- mb->dst.y_buffer + y_offset, mb->dst.y_stride,
+ mb->plane[0].dst.buf + y_offset,
+ mb->plane[0].dst.stride,
mb->plane[0].eobs[n * 64]);
}
}
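
Each of these decode_sb*_NxN loops maps block index n to a pixel offset via x_idx = n & (bw - 1), y_idx = n >> bwl, offset = (y_idx * N) * stride + x_idx * N, i.e. a raster scan of NxN blocks inside the superblock. A standalone check of that mapping for 32x32 blocks in a 64x64 superblock (the stride value is illustrative):

#include <stdio.h>

/* Raster mapping used by the decode_sb*_NxN loops: block n in a
 * superblock bw = 1 << bwl blocks wide lands at pixel offset
 * (y_idx * size) * stride + x_idx * size. */
int main(void) {
  const int bwl = 1, bw = 1 << bwl;   /* 64x64 superblock of 32x32 blocks */
  const int size = 32, stride = 128;  /* stride is illustrative */
  int n;
  for (n = 0; n < bw * bw; ++n) {
    const int x_idx = n & (bw - 1);
    const int y_idx = n >> bwl;
    printf("block %d -> offset %d\n", n, (y_idx * size) * stride + x_idx * size);
  }
  return 0;  /* prints offsets 0, 32, 4096, 4128 */
}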
@@ -449,15 +421,18 @@ static INLINE void decode_sbuv_32x32(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < uv_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> (bwl - 1);
- const int uv_offset = (y_idx * 32) * mb->dst.uv_stride + (x_idx * 32);
+ const int uv_offset = (y_idx * 32) * mb->plane[1].dst.stride +
+ (x_idx * 32);
vp9_dequant_idct_add_32x32(BLOCK_OFFSET(mb->plane[1].qcoeff, n, 1024),
mb->block[16].dequant,
- mb->dst.u_buffer + uv_offset,
- mb->dst.uv_stride, mb->plane[1].eobs[n * 64]);
+ mb->plane[1].dst.buf + uv_offset,
+ mb->plane[1].dst.stride,
+ mb->plane[1].eobs[n * 64]);
vp9_dequant_idct_add_32x32(BLOCK_OFFSET(mb->plane[2].qcoeff, n, 1024),
mb->block[20].dequant,
- mb->dst.v_buffer + uv_offset,
- mb->dst.uv_stride, mb->plane[2].eobs[n * 64]);
+ mb->plane[2].dst.buf + uv_offset,
+                               mb->plane[2].dst.stride,
+ mb->plane[2].eobs[n * 64]);
}
}
@@ -470,14 +445,14 @@ static INLINE void decode_sby_16x16(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < y_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> bwl;
- const int y_offset = (y_idx * 16) * mb->dst.y_stride + (x_idx * 16);
+ const int y_offset = (y_idx * 16) * mb->plane[0].dst.stride + (x_idx * 16);
const TX_TYPE tx_type = get_tx_type_16x16(mb,
(y_idx * (4 * bw) + x_idx) * 4);
vp9_dequant_iht_add_16x16_c(tx_type,
BLOCK_OFFSET(mb->plane[0].qcoeff, n, 256),
mb->block[0].dequant,
- mb->dst.y_buffer + y_offset,
- mb->dst.y_stride,
+ mb->plane[0].dst.buf + y_offset,
+ mb->plane[0].dst.stride,
mb->plane[0].eobs[n * 16]);
}
}
@@ -493,14 +468,16 @@ static INLINE void decode_sbuv_16x16(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < uv_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> (bwl - 1);
- const int uv_offset = (y_idx * 16) * mb->dst.uv_stride + (x_idx * 16);
+ const int uv_offset = (y_idx * 16) * mb->plane[1].dst.stride + (x_idx * 16);
vp9_dequant_idct_add_16x16(BLOCK_OFFSET(mb->plane[1].qcoeff, n, 256),
mb->block[16].dequant,
- mb->dst.u_buffer + uv_offset, mb->dst.uv_stride,
+ mb->plane[1].dst.buf + uv_offset,
+ mb->plane[1].dst.stride,
mb->plane[1].eobs[n * 16]);
vp9_dequant_idct_add_16x16(BLOCK_OFFSET(mb->plane[2].qcoeff, n, 256),
mb->block[20].dequant,
- mb->dst.v_buffer + uv_offset, mb->dst.uv_stride,
+ mb->plane[2].dst.buf + uv_offset,
+                             mb->plane[2].dst.stride,
mb->plane[2].eobs[n * 16]);
}
}
@@ -515,14 +492,15 @@ static INLINE void decode_sby_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < y_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> bwl;
- const int y_offset = (y_idx * 8) * xd->dst.y_stride + (x_idx * 8);
+ const int y_offset = (y_idx * 8) * xd->plane[0].dst.stride + (x_idx * 8);
const TX_TYPE tx_type = get_tx_type_8x8(xd,
(y_idx * (2 * bw) + x_idx) * 2);
vp9_dequant_iht_add_8x8_c(tx_type,
BLOCK_OFFSET(xd->plane[0].qcoeff, n, 64),
xd->block[0].dequant,
- xd->dst.y_buffer + y_offset, xd->dst.y_stride,
+ xd->plane[0].dst.buf + y_offset,
+ xd->plane[0].dst.stride,
xd->plane[0].eobs[n * 4]);
}
}
@@ -537,14 +515,16 @@ static INLINE void decode_sbuv_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < uv_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> (bwl - 1);
- const int uv_offset = (y_idx * 8) * xd->dst.uv_stride + (x_idx * 8);
+ const int uv_offset = (y_idx * 8) * xd->plane[1].dst.stride + (x_idx * 8);
vp9_dequant_idct_add_8x8(BLOCK_OFFSET(xd->plane[1].qcoeff, n, 64),
xd->block[16].dequant,
- xd->dst.u_buffer + uv_offset, xd->dst.uv_stride,
+ xd->plane[1].dst.buf + uv_offset,
+ xd->plane[1].dst.stride,
xd->plane[1].eobs[n * 4]);
vp9_dequant_idct_add_8x8(BLOCK_OFFSET(xd->plane[2].qcoeff, n, 64),
xd->block[20].dequant,
- xd->dst.v_buffer + uv_offset, xd->dst.uv_stride,
+ xd->plane[2].dst.buf + uv_offset,
+                           xd->plane[2].dst.stride,
xd->plane[2].eobs[n * 4]);
}
}
@@ -558,18 +538,19 @@ static INLINE void decode_sby_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < y_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> bwl;
- const int y_offset = (y_idx * 4) * xd->dst.y_stride + (x_idx * 4);
+ const int y_offset = (y_idx * 4) * xd->plane[0].dst.stride + (x_idx * 4);
const TX_TYPE tx_type = get_tx_type_4x4(xd, n);
if (tx_type == DCT_DCT) {
xd->itxm_add(BLOCK_OFFSET(xd->plane[0].qcoeff, n, 16),
xd->block[0].dequant,
- xd->dst.y_buffer + y_offset, xd->dst.y_stride,
+ xd->plane[0].dst.buf + y_offset, xd->plane[0].dst.stride,
xd->plane[0].eobs[n]);
} else {
vp9_dequant_iht_add_c(tx_type,
BLOCK_OFFSET(xd->plane[0].qcoeff, n, 16),
- xd->block[0].dequant, xd->dst.y_buffer + y_offset,
- xd->dst.y_stride, xd->plane[0].eobs[n]);
+ xd->block[0].dequant,
+ xd->plane[0].dst.buf + y_offset,
+ xd->plane[0].dst.stride, xd->plane[0].eobs[n]);
}
}
}
@@ -583,13 +564,15 @@ static INLINE void decode_sbuv_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < uv_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> (bwl - 1);
- const int uv_offset = (y_idx * 4) * xd->dst.uv_stride + (x_idx * 4);
+ const int uv_offset = (y_idx * 4) * xd->plane[1].dst.stride + (x_idx * 4);
xd->itxm_add(BLOCK_OFFSET(xd->plane[1].qcoeff, n, 16),
xd->block[16].dequant,
- xd->dst.u_buffer + uv_offset, xd->dst.uv_stride, xd->plane[1].eobs[n]);
+ xd->plane[1].dst.buf + uv_offset, xd->plane[1].dst.stride,
+ xd->plane[1].eobs[n]);
xd->itxm_add(BLOCK_OFFSET(xd->plane[2].qcoeff, n, 16),
xd->block[20].dequant,
- xd->dst.v_buffer + uv_offset, xd->dst.uv_stride, xd->plane[2].eobs[n]);
+                 xd->plane[2].dst.buf + uv_offset, xd->plane[2].dst.stride,
+ xd->plane[2].eobs[n]);
}
}
@@ -612,7 +595,7 @@ static void decode_sb_16x16(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
}
static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col,
- BOOL_DECODER* const bc, BLOCK_SIZE_TYPE bsize) {
+ vp9_reader *r, BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize), bhl = mb_height_log2(bsize);
const int bw = 1 << bwl, bh = 1 << bhl;
int n, eobtotal;
@@ -635,16 +618,13 @@ static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col,
if (mi->mbmi.mb_skip_coeff) {
vp9_reset_sb_tokens_context(xd, bsize);
-#if CONFIG_CODE_NONZEROCOUNT
- vpx_memset(mi->mbmi.nzcs, 0, 384 * sizeof(mi->mbmi.nzcs[0]));
-#endif
} else {
// re-initialize macroblock dequantizer before detokenization
if (xd->segmentation_enabled)
mb_init_dequantizer(pbi, xd);
// dequantization and idct
- eobtotal = vp9_decode_tokens(pbi, xd, bc, bsize);
+ eobtotal = vp9_decode_tokens(pbi, xd, r, bsize);
if (eobtotal == 0) { // skip loopfilter
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
@@ -672,17 +652,13 @@ static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col,
}
}
}
-
-#if CONFIG_CODE_NONZEROCOUNT
- propagate_nzcs(&pbi->common, xd);
-#endif
}
 // TODO(jingning): Need to merge SB and MB decoding. The MB decoding currently
 // requires special handling for the I8X8, B_PRED, and SPLITMV modes.
static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
int mb_row, int mb_col,
- BOOL_DECODER* const bc) {
+ vp9_reader *r) {
int eobtotal = 0;
const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
const int tx_size = xd->mode_info_context->mbmi.txfm_size;
@@ -708,7 +684,7 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->mode_info_context->mbmi.mode, tx_size,
xd->mode_info_context->mbmi.interp_filter);
#endif
- vp9_build_inter_predictors_mb(xd, mb_row, mb_col);
+ vp9_build_inter_predictors_sb(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
}
if (xd->mode_info_context->mbmi.mb_skip_coeff) {
@@ -718,11 +694,11 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
if (xd->segmentation_enabled)
mb_init_dequantizer(pbi, xd);
- if (!bool_error(bc)) {
+ if (!vp9_reader_has_error(r)) {
#if CONFIG_NEWBINTRAMODES
if (mode != I4X4_PRED)
#endif
- eobtotal = vp9_decode_tokens(pbi, xd, bc, BLOCK_SIZE_MB16X16);
+ eobtotal = vp9_decode_tokens(pbi, xd, r, BLOCK_SIZE_MB16X16);
}
}
@@ -730,7 +706,7 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
mode != I4X4_PRED &&
mode != SPLITMV &&
mode != I8X8_PRED &&
- !bool_error(bc)) {
+ !vp9_reader_has_error(r)) {
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
} else {
#if 0 // def DEC_DEBUG
@@ -739,11 +715,11 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
#endif
if (tx_size == TX_16X16) {
- decode_16x16(pbi, xd, bc);
+ decode_16x16(xd);
} else if (tx_size == TX_8X8) {
- decode_8x8(pbi, xd, bc);
+ decode_8x8(xd);
} else {
- decode_4x4(pbi, xd, bc);
+ decode_4x4(pbi, xd, r);
}
}
@@ -761,21 +737,21 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
printf("final y\n");
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j++)
- printf("%3d ", xd->dst.y_buffer[i * xd->dst.y_stride + j]);
+ printf("%3d ", xd->plane[0].dst.buf[i * xd->plane[0].dst.stride + j]);
printf("\n");
}
printf("\n");
printf("final u\n");
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++)
- printf("%3d ", xd->dst.u_buffer[i * xd->dst.uv_stride + j]);
+ printf("%3d ", xd->plane[1].dst.buf[i * xd->plane[1].dst.stride + j]);
printf("\n");
}
printf("\n");
printf("final v\n");
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++)
- printf("%3d ", xd->dst.v_buffer[i * xd->dst.uv_stride + j]);
+      printf("%3d ", xd->plane[2].dst.buf[i * xd->plane[2].dst.stride + j]);
printf("\n");
}
fflush(stdout);
@@ -823,9 +799,9 @@ static void set_offsets(VP9D_COMP *pbi, BLOCK_SIZE_TYPE bsize,
set_mb_row(cm, xd, mb_row, bh);
set_mb_col(cm, xd, mb_col, bw);
- xd->dst.y_buffer = dst_fb->y_buffer + recon_yoffset;
- xd->dst.u_buffer = dst_fb->u_buffer + recon_uvoffset;
- xd->dst.v_buffer = dst_fb->v_buffer + recon_uvoffset;
+ xd->plane[0].dst.buf = dst_fb->y_buffer + recon_yoffset;
+ xd->plane[1].dst.buf = dst_fb->u_buffer + recon_uvoffset;
+ xd->plane[2].dst.buf = dst_fb->v_buffer + recon_uvoffset;
}
static void set_refs(VP9D_COMP *pbi, int mb_row, int mb_col) {
@@ -839,8 +815,8 @@ static void set_refs(VP9D_COMP *pbi, int mb_row, int mb_col) {
const YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[fb_idx];
xd->scale_factor[0] = cm->active_ref_scale[mbmi->ref_frame - 1];
xd->scale_factor_uv[0] = cm->active_ref_scale[mbmi->ref_frame - 1];
- setup_pred_block(&xd->pre, cfg, mb_row, mb_col,
- &xd->scale_factor[0], &xd->scale_factor_uv[0]);
+ setup_pre_planes(xd, cfg, NULL, mb_row, mb_col,
+ xd->scale_factor, xd->scale_factor_uv);
xd->corrupted |= cfg->corrupted;
if (mbmi->second_ref_frame > INTRA_FRAME) {
@@ -849,8 +825,8 @@ static void set_refs(VP9D_COMP *pbi, int mb_row, int mb_col) {
const YV12_BUFFER_CONFIG *second_cfg = &cm->yv12_fb[second_fb_idx];
xd->scale_factor[1] = cm->active_ref_scale[mbmi->second_ref_frame - 1];
xd->scale_factor_uv[1] = cm->active_ref_scale[mbmi->second_ref_frame - 1];
- setup_pred_block(&xd->second_pre, second_cfg, mb_row, mb_col,
- &xd->scale_factor[1], &xd->scale_factor_uv[1]);
+ setup_pre_planes(xd, NULL, second_cfg, mb_row, mb_col,
+ xd->scale_factor, xd->scale_factor_uv);
xd->corrupted |= second_cfg->corrupted;
}
}
@@ -870,7 +846,7 @@ static void decode_modes_b(VP9D_COMP *pbi, int mb_row, int mb_col,
else
decode_mb(pbi, xd, mb_row, mb_col, r);
- xd->corrupted |= bool_error(r);
+ xd->corrupted |= vp9_reader_has_error(r);
}
static void decode_modes_sb(VP9D_COMP *pbi, int mb_row, int mb_col,
@@ -961,7 +937,7 @@ static void setup_token_decoder(VP9D_COMP *pbi,
"Truncated packet or corrupt partition "
"%d length", 1);
- if (vp9_start_decode(r, data, partition_size))
+ if (vp9_reader_init(r, data, partition_size))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder %d", 1);
}
@@ -986,45 +962,35 @@ static void init_frame(VP9D_COMP *pbi) {
xd->corrupted = 0;
}
-#if CONFIG_CODE_NONZEROCOUNT
-static void read_nzc_probs_common(VP9_COMMON *cm,
- BOOL_DECODER* const bc,
+#if CONFIG_CODE_ZEROGROUP
+static void read_zpc_probs_common(VP9_COMMON *cm,
+ vp9_reader* bc,
TX_SIZE tx_size) {
- int c, r, b, t;
- int tokens, nodes;
- vp9_prob *nzc_probs;
- vp9_prob upd;
-
- if (!get_nzc_used(tx_size)) return;
+ int r, b, p, n;
+ vp9_zpc_probs *zpc_probs;
+ vp9_prob upd = ZPC_UPDATE_PROB;
+ if (!get_zpc_used(tx_size)) return;
if (!vp9_read_bit(bc)) return;
if (tx_size == TX_32X32) {
- tokens = NZC32X32_TOKENS;
- nzc_probs = cm->fc.nzc_probs_32x32[0][0][0];
- upd = NZC_UPDATE_PROB_32X32;
+ zpc_probs = &cm->fc.zpc_probs_32x32;
} else if (tx_size == TX_16X16) {
- tokens = NZC16X16_TOKENS;
- nzc_probs = cm->fc.nzc_probs_16x16[0][0][0];
- upd = NZC_UPDATE_PROB_16X16;
+ zpc_probs = &cm->fc.zpc_probs_16x16;
} else if (tx_size == TX_8X8) {
- tokens = NZC8X8_TOKENS;
- nzc_probs = cm->fc.nzc_probs_8x8[0][0][0];
- upd = NZC_UPDATE_PROB_8X8;
+ zpc_probs = &cm->fc.zpc_probs_8x8;
} else {
- tokens = NZC4X4_TOKENS;
- nzc_probs = cm->fc.nzc_probs_4x4[0][0][0];
- upd = NZC_UPDATE_PROB_4X4;
+ zpc_probs = &cm->fc.zpc_probs_4x4;
}
- nodes = tokens - 1;
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (r = 0; r < REF_TYPES; ++r) {
- for (b = 0; b < BLOCK_TYPES; ++b) {
- int offset = c * REF_TYPES * BLOCK_TYPES + r * BLOCK_TYPES + b;
- int offset_nodes = offset * nodes;
- for (t = 0; t < nodes; ++t) {
- vp9_prob *p = &nzc_probs[offset_nodes + t];
+ for (r = 0; r < REF_TYPES; ++r) {
+ for (b = 0; b < ZPC_BANDS; ++b) {
+ for (p = 0; p < ZPC_PTOKS; ++p) {
+ for (n = 0; n < ZPC_NODES; ++n) {
+ vp9_prob *q = &(*zpc_probs)[r][b][p][n];
+#if USE_ZPC_EXTRA == 0
+ if (n == 1) continue;
+#endif
if (vp9_read(bc, upd)) {
- *p = read_prob_diff_update(bc, *p);
+ *q = read_prob_diff_update(bc, *q);
}
}
}
@@ -1032,45 +998,20 @@ static void read_nzc_probs_common(VP9_COMMON *cm,
}
}
-static void read_nzc_pcat_probs(VP9_COMMON *cm, BOOL_DECODER* const bc) {
- int c, t, b;
- vp9_prob upd = NZC_UPDATE_PROB_PCAT;
- if (!(get_nzc_used(TX_4X4) || get_nzc_used(TX_8X8) ||
- get_nzc_used(TX_16X16) || get_nzc_used(TX_32X32)))
- return;
- if (!vp9_read_bit(bc)) {
- return;
- }
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (t = 0; t < NZC_TOKENS_EXTRA; ++t) {
- int bits = vp9_extranzcbits[t + NZC_TOKENS_NOEXTRA];
- for (b = 0; b < bits; ++b) {
- vp9_prob *p = &cm->fc.nzc_pcat_probs[c][t][b];
- if (vp9_read(bc, upd)) {
- *p = read_prob_diff_update(bc, *p);
- }
- }
- }
- }
-}
-
-static void read_nzc_probs(VP9_COMMON *cm,
- BOOL_DECODER* const bc) {
- read_nzc_probs_common(cm, bc, TX_4X4);
+static void read_zpc_probs(VP9_COMMON *cm,
+ vp9_reader* bc) {
+ read_zpc_probs_common(cm, bc, TX_4X4);
if (cm->txfm_mode != ONLY_4X4)
- read_nzc_probs_common(cm, bc, TX_8X8);
+ read_zpc_probs_common(cm, bc, TX_8X8);
if (cm->txfm_mode > ALLOW_8X8)
- read_nzc_probs_common(cm, bc, TX_16X16);
+ read_zpc_probs_common(cm, bc, TX_16X16);
if (cm->txfm_mode > ALLOW_16X16)
- read_nzc_probs_common(cm, bc, TX_32X32);
-#ifdef NZC_PCAT_UPDATE
- read_nzc_pcat_probs(cm, bc);
-#endif
+ read_zpc_probs_common(cm, bc, TX_32X32);
}
-#endif // CONFIG_CODE_NONZEROCOUNT
+#endif // CONFIG_CODE_ZEROGROUP
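
read_zpc_probs_common above uses the same two-level gating as the other probability readers: one bit enables the whole table pass, and each entry then carries its own update flag ahead of a differential value. A generic standalone model of that layout, with a plain integer stream standing in for the bool decoder and a small table standing in for the zpc arrays:

#include <stdio.h>

#define ROWS 2
#define COLS 3

/* Flag-gated table update: one bit enables the pass, then each entry is
 * conditionally replaced.  next() stands in for vp9_read(bc, upd) and
 * read_prob_diff_update. */
static const int stream[] = { 1, 0, 1, 42, 0, 0, 1, 7, 0 };
static int pos;

static int next(void) { return stream[pos++]; }

int main(void) {
  int table[ROWS][COLS] = { { 1, 2, 3 }, { 4, 5, 6 } };
  int r, c;
  if (next()) {                 /* table-level update flag */
    for (r = 0; r < ROWS; ++r)
      for (c = 0; c < COLS; ++c)
        if (next())             /* per-entry update flag */
          table[r][c] = next(); /* replacement value */
  }
  for (r = 0; r < ROWS; ++r)
    for (c = 0; c < COLS; ++c)
      printf("%d ", table[r][c]);
  printf("\n");                 /* 1 42 3 4 7 6 */
  return 0;
}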
static void read_coef_probs_common(VP9D_COMP *pbi,
- BOOL_DECODER* const bc,
+ vp9_reader *r,
vp9_coeff_probs *coef_probs,
TX_SIZE tx_size) {
#if CONFIG_MODELCOEFPROB && MODEL_BASED_UPDATE
@@ -1081,24 +1022,20 @@ static void read_coef_probs_common(VP9D_COMP *pbi,
int i, j, k, l, m;
- if (vp9_read_bit(bc)) {
+ if (vp9_read_bit(r)) {
for (i = 0; i < BLOCK_TYPES; i++) {
for (j = 0; j < REF_TYPES; j++) {
for (k = 0; k < COEF_BANDS; k++) {
for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
-#if CONFIG_CODE_NONZEROCOUNT
- const int mstart = get_nzc_used(tx_size);
-#else
const int mstart = 0;
-#endif
if (l >= 3 && k == 0)
continue;
for (m = mstart; m < entropy_nodes_update; m++) {
vp9_prob *const p = coef_probs[i][j][k][l] + m;
- if (vp9_read(bc, vp9_coef_update_prob[m])) {
- *p = read_prob_diff_update(bc, *p);
+ if (vp9_read(r, vp9_coef_update_prob[m])) {
+ *p = read_prob_diff_update(r, *p);
#if CONFIG_MODELCOEFPROB && MODEL_BASED_UPDATE
if (m == UNCONSTRAINED_NODES - 1)
vp9_get_model_distribution(*p, coef_probs[i][j][k][l], i, j);
@@ -1112,19 +1049,20 @@ static void read_coef_probs_common(VP9D_COMP *pbi,
}
}
-static void read_coef_probs(VP9D_COMP *pbi, BOOL_DECODER* const bc) {
- VP9_COMMON *const pc = &pbi->common;
+static void read_coef_probs(VP9D_COMP *pbi, vp9_reader *r) {
+ const TXFM_MODE mode = pbi->common.txfm_mode;
+ FRAME_CONTEXT *const fc = &pbi->common.fc;
- read_coef_probs_common(pbi, bc, pc->fc.coef_probs_4x4, TX_4X4);
+ read_coef_probs_common(pbi, r, fc->coef_probs_4x4, TX_4X4);
- if (pbi->common.txfm_mode != ONLY_4X4)
- read_coef_probs_common(pbi, bc, pc->fc.coef_probs_8x8, TX_8X8);
+ if (mode != ONLY_4X4)
+ read_coef_probs_common(pbi, r, fc->coef_probs_8x8, TX_8X8);
- if (pbi->common.txfm_mode > ALLOW_8X8)
- read_coef_probs_common(pbi, bc, pc->fc.coef_probs_16x16, TX_16X16);
+ if (mode > ALLOW_8X8)
+ read_coef_probs_common(pbi, r, fc->coef_probs_16x16, TX_16X16);
- if (pbi->common.txfm_mode > ALLOW_16X16)
- read_coef_probs_common(pbi, bc, pc->fc.coef_probs_32x32, TX_32X32);
+ if (mode > ALLOW_16X16)
+ read_coef_probs_common(pbi, r, fc->coef_probs_32x32, TX_32X32);
}
static void update_frame_size(VP9D_COMP *pbi) {
@@ -1149,42 +1087,43 @@ static void update_frame_size(VP9D_COMP *pbi) {
static void setup_segmentation(VP9_COMMON *pc, MACROBLOCKD *xd, vp9_reader *r) {
int i, j;
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+
xd->segmentation_enabled = vp9_read_bit(r);
if (xd->segmentation_enabled) {
- // Read whether or not the segmentation map is being explicitly updated
- // this frame.
+ // Segmentation map update
xd->update_mb_segmentation_map = vp9_read_bit(r);
-
if (xd->update_mb_segmentation_map) {
- // Which macro block level features are enabled. Read the probs used to
- // decode the segment id for each macro block.
for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
- xd->mb_segment_tree_probs[i] = vp9_read_bit(r) ? vp9_read_prob(r) : 255;
+ xd->mb_segment_tree_probs[i] = vp9_read_bit(r) ? vp9_read_prob(r)
+ : MAX_PROB;
- // Read the prediction probs needed to decode the segment id
pc->temporal_update = vp9_read_bit(r);
if (pc->temporal_update) {
const vp9_prob *p = xd->mb_segment_tree_probs;
- vp9_prob *p_mod = xd->mb_segment_mispred_tree_probs;
+ vp9_prob *mispred_p = xd->mb_segment_mispred_tree_probs;
const int c0 = p[0] * p[1];
const int c1 = p[0] * (256 - p[1]);
const int c2 = (256 - p[0]) * p[2];
const int c3 = (256 - p[0]) * (256 - p[2]);
- p_mod[0] = get_binary_prob(c1, c2 + c3);
- p_mod[1] = get_binary_prob(c0, c2 + c3);
- p_mod[2] = get_binary_prob(c0 + c1, c3);
- p_mod[3] = get_binary_prob(c0 + c1, c2);
+ mispred_p[0] = get_binary_prob(c1, c2 + c3);
+ mispred_p[1] = get_binary_prob(c0, c2 + c3);
+ mispred_p[2] = get_binary_prob(c0 + c1, c3);
+ mispred_p[3] = get_binary_prob(c0 + c1, c2);
for (i = 0; i < PREDICTION_PROBS; i++)
- pc->segment_pred_probs[i] = vp9_read_bit(r) ? vp9_read_prob(r) : 255;
+ pc->segment_pred_probs[i] = vp9_read_bit(r) ? vp9_read_prob(r)
+ : MAX_PROB;
} else {
for (i = 0; i < PREDICTION_PROBS; i++)
- pc->segment_pred_probs[i] = 255;
+ pc->segment_pred_probs[i] = MAX_PROB;
}
}
+ // Segmentation data update
xd->update_mb_segmentation_data = vp9_read_bit(r);
if (xd->update_mb_segmentation_data) {
xd->mb_segment_abs_delta = vp9_read_bit(r);
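
The mispred probabilities computed in the hunk above treat c0..c3 as unnormalized weights of segments 0..3 under the tree probs p[0..2]; mispred_p[k] is then the first binary split over the three segments that remain when the (mis)predicted segment k is ruled out. A sketch of that computation, where get_binary_prob is an approximate stand-in for the codec's helper:

#include <stdio.h>

/* Approximate stand-in for the codec's get_binary_prob helper. */
static int get_binary_prob(int n0, int n1) {
  return n0 + n1 ? (255 * n0 + (n0 + n1) / 2) / (n0 + n1) : 128;
}

int main(void) {
  const int p[3] = { 128, 128, 128 };          /* example tree probs */
  const int c0 = p[0] * p[1];                  /* weight of segment 0 */
  const int c1 = p[0] * (256 - p[1]);          /* weight of segment 1 */
  const int c2 = (256 - p[0]) * p[2];          /* weight of segment 2 */
  const int c3 = (256 - p[0]) * (256 - p[2]);  /* weight of segment 3 */
  /* If segment k was predicted but wrong, the remaining mass is split
   * first as {next segment} vs {rest}, giving the four probabilities. */
  printf("%d %d %d %d\n",
         get_binary_prob(c1, c2 + c3), get_binary_prob(c0, c2 + c3),
         get_binary_prob(c0 + c1, c3), get_binary_prob(c0 + c1, c2));
  return 0;
}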
@@ -1343,7 +1282,7 @@ static const uint8_t *setup_frame_size(VP9D_COMP *pbi, int scaling_active,
return data;
}
-static void update_frame_context(VP9D_COMP *pbi, vp9_reader *r) {
+static void update_frame_context(VP9D_COMP *pbi) {
FRAME_CONTEXT *const fc = &pbi->common.fc;
vp9_copy(fc->pre_coef_probs_4x4, fc->coef_probs_4x4);
@@ -1381,29 +1320,22 @@ static void update_frame_context(VP9D_COMP *pbi, vp9_reader *r) {
vp9_zero(fc->interintra_counts);
#endif
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_copy(fc->pre_nzc_probs_4x4, fc->nzc_probs_4x4);
- vp9_copy(fc->pre_nzc_probs_8x8, fc->nzc_probs_8x8);
- vp9_copy(fc->pre_nzc_probs_16x16, fc->nzc_probs_16x16);
- vp9_copy(fc->pre_nzc_probs_32x32, fc->nzc_probs_32x32);
- vp9_copy(fc->pre_nzc_pcat_probs, fc->nzc_pcat_probs);
-
- vp9_zero(fc->nzc_counts_4x4);
- vp9_zero(fc->nzc_counts_8x8);
- vp9_zero(fc->nzc_counts_16x16);
- vp9_zero(fc->nzc_counts_32x32);
- vp9_zero(fc->nzc_pcat_counts);
-#endif
+#if CONFIG_CODE_ZEROGROUP
+ vp9_copy(fc->pre_zpc_probs_4x4, fc->zpc_probs_4x4);
+ vp9_copy(fc->pre_zpc_probs_8x8, fc->zpc_probs_8x8);
+ vp9_copy(fc->pre_zpc_probs_16x16, fc->zpc_probs_16x16);
+ vp9_copy(fc->pre_zpc_probs_32x32, fc->zpc_probs_32x32);
- read_coef_probs(pbi, r);
-#if CONFIG_CODE_NONZEROCOUNT
- read_nzc_probs(&pbi->common, r);
+ vp9_zero(fc->zpc_counts_4x4);
+ vp9_zero(fc->zpc_counts_8x8);
+ vp9_zero(fc->zpc_counts_16x16);
+ vp9_zero(fc->zpc_counts_32x32);
#endif
}
static void decode_tiles(VP9D_COMP *pbi,
const uint8_t *data, int first_partition_size,
- BOOL_DECODER *header_bc, BOOL_DECODER *residual_bc) {
+ vp9_reader *header_bc, vp9_reader *residual_bc) {
VP9_COMMON *const pc = &pbi->common;
const uint8_t *data_ptr = data + first_partition_size;
@@ -1429,7 +1361,7 @@ static void decode_tiles(VP9D_COMP *pbi,
if (pbi->oxcf.inv_tile_order) {
const int n_cols = pc->tile_columns;
const uint8_t *data_ptr2[4][1 << 6];
- BOOL_DECODER UNINITIALIZED_IS_SAFE(bc_bak);
+ vp9_reader UNINITIALIZED_IS_SAFE(bc_bak);
    // Pre-initialize the offsets; we're going to read in inverse order.
data_ptr2[0][0] = data_ptr;
@@ -1483,7 +1415,7 @@ static void decode_tiles(VP9D_COMP *pbi,
}
int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
- BOOL_DECODER header_bc, residual_bc;
+ vp9_reader header_bc, residual_bc;
VP9_COMMON *const pc = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
const uint8_t *data = pbi->source;
@@ -1541,7 +1473,7 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
pc->width, pc->height,
VP9BORDERINPIXELS);
- if (vp9_start_decode(&header_bc, data, first_partition_size))
+ if (vp9_reader_init(&header_bc, data, first_partition_size))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder 0");
@@ -1549,17 +1481,7 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
pc->clamp_type = (CLAMP_TYPE)vp9_read_bit(&header_bc);
pc->error_resilient_mode = vp9_read_bit(&header_bc);
- setup_segmentation(pc, xd, &header_bc);
-
- setup_pred_probs(pc, &header_bc);
-
xd->lossless = vp9_read_bit(&header_bc);
- pc->txfm_mode = xd->lossless ? ONLY_4X4 : read_txfm_mode(&header_bc);
- if (pc->txfm_mode == TX_MODE_SELECT) {
- pc->prob_tx[0] = vp9_read_prob(&header_bc);
- pc->prob_tx[1] = vp9_read_prob(&header_bc);
- pc->prob_tx[2] = vp9_read_prob(&header_bc);
- }
setup_loopfilter(pc, xd, &header_bc);
@@ -1625,6 +1547,17 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
vpx_memcpy(&pc->fc, &pc->frame_contexts[pc->frame_context_idx],
sizeof(pc->fc));
+ setup_segmentation(pc, xd, &header_bc);
+
+ setup_pred_probs(pc, &header_bc);
+
+ pc->txfm_mode = xd->lossless ? ONLY_4X4 : read_txfm_mode(&header_bc);
+ if (pc->txfm_mode == TX_MODE_SELECT) {
+ pc->prob_tx[0] = vp9_read_prob(&header_bc);
+ pc->prob_tx[1] = vp9_read_prob(&header_bc);
+ pc->prob_tx[2] = vp9_read_prob(&header_bc);
+ }
+
// Read inter mode probability context updates
if (pc->frame_type != KEY_FRAME) {
int i, j;
@@ -1639,25 +1572,6 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
}
#endif
-#if CONFIG_NEW_MVREF
- // If Key frame reset mv ref id probabilities to defaults
- if (pc->frame_type != KEY_FRAME) {
- // Read any mv_ref index probability updates
- int i, j;
-
- for (i = 0; i < MAX_REF_FRAMES; ++i) {
- // Skip the dummy entry for intra ref frame.
- if (i == INTRA_FRAME)
- continue;
-
- // Read any updates to probabilities
- for (j = 0; j < MAX_MV_REF_CANDIDATES - 1; ++j)
- if (vp9_read(&header_bc, VP9_MVREF_UPDATE_PROB))
- xd->mb_mv_ref_probs[i][j] = vp9_read_prob(&header_bc);
- }
- }
-#endif
-
if (0) {
FILE *z = fopen("decodestats.stt", "a");
fprintf(z, "%6d F:%d,R:%d,Q:%d\n",
@@ -1668,13 +1582,17 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
fclose(z);
}
- update_frame_context(pbi, &header_bc);
+ update_frame_context(pbi);
+
+ read_coef_probs(pbi, &header_bc);
+#if CONFIG_CODE_ZEROGROUP
+ read_zpc_probs(&pbi->common, &header_bc);
+#endif
// Initialize xd pointers. Any reference should do for xd->pre, so use 0.
- vpx_memcpy(&xd->pre, &pc->yv12_fb[pc->active_ref_idx[0]],
- sizeof(YV12_BUFFER_CONFIG));
- vpx_memcpy(&xd->dst, &pc->yv12_fb[pc->new_fb_idx],
- sizeof(YV12_BUFFER_CONFIG));
+ setup_pre_planes(xd, &pc->yv12_fb[pc->active_ref_idx[0]], NULL,
+ 0, 0, NULL, NULL);
+ setup_dst_planes(xd, &pc->yv12_fb[pc->new_fb_idx], 0, 0);
// Create the segmentation map structure and set to 0
if (!pc->last_frame_seg_map)
@@ -1706,7 +1624,7 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
// Collect information about decoder corruption.
// 1. Check first boolean decoder for errors.
// 2. Check the macroblock information
- pc->yv12_fb[pc->new_fb_idx].corrupted = bool_error(&header_bc) |
+ pc->yv12_fb[pc->new_fb_idx].corrupted = vp9_reader_has_error(&header_bc) |
corrupt_tokens;
if (!pbi->decoded_key_frame) {
@@ -1719,8 +1637,8 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
if (!pc->error_resilient_mode && !pc->frame_parallel_decoding_mode) {
vp9_adapt_coef_probs(pc);
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_adapt_nzc_probs(pc);
+#if CONFIG_CODE_ZEROGROUP
+ vp9_adapt_zpc_probs(pc);
#endif
}
diff --git a/vp9/decoder/vp9_detokenize.c b/vp9/decoder/vp9_detokenize.c
index b3a6927c2..18ef51ace 100644
--- a/vp9/decoder/vp9_detokenize.c
+++ b/vp9/decoder/vp9_detokenize.c
@@ -60,25 +60,29 @@ static const vp9_prob cat6_prob[15] = {
DECLARE_ALIGNED(16, extern const uint8_t, vp9_norm[256]);
+#if CONFIG_CODE_ZEROGROUP
+#define ZEROGROUP_ADVANCE() \
+ do { \
+ token_cache[scan[c]] = ZERO_TOKEN; \
+ is_last_zero[o] = 1; \
+ c++; \
+ } while (0)
+#define INCREMENT_COUNT(token) \
+ do { \
+ coef_counts[type][ref][get_coef_band(scan, txfm_size, c)] \
+ [pt][token]++; \
+ token_cache[scan[c]] = token; \
+ is_last_zero[o] = (token == ZERO_TOKEN); \
+ } while (0)
+#else
#define INCREMENT_COUNT(token) \
do { \
coef_counts[type][ref][get_coef_band(scan, txfm_size, c)] \
[pt][token]++; \
- token_cache[c] = token; \
- pt = vp9_get_coef_context(scan, nb, pad, token_cache, \
- c + 1, default_eob); \
+ token_cache[scan[c]] = token; \
} while (0)
+#endif
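
After this change token_cache is indexed by raster position scan[c] rather than by scan order, and the coefficient context is recomputed once at the top of the decode loop instead of inside every macro. A standalone sketch of the kind of neighbor lookup this layout enables; the neighbor pairing and averaging rule are illustrative stand-ins for vp9_get_coef_context:

#include <stdint.h>
#include <stdio.h>

/* token_cache is indexed by raster position, so the context of
 * coefficient c can be an average of two already-decoded neighbors. */
static int coef_context(const int *nb, const uint8_t *token_cache, int c) {
  return (1 + token_cache[nb[2 * c]] + token_cache[nb[2 * c + 1]]) >> 1;
}

int main(void) {
  const uint8_t token_cache[16] = { 2, 1, 0 };      /* tokens already seen */
  const int nb[] = { 0, 0, 0, 0, 0, 1, 1, 2 };      /* neighbors of c = 0..3 */
  printf("%d\n", coef_context(nb, token_cache, 3)); /* (1 + 1 + 0) >> 1 = 1 */
  return 0;
}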
-#if CONFIG_CODE_NONZEROCOUNT
-#define WRITE_COEF_CONTINUE(val, token) \
- { \
- qcoeff_ptr[scan[c]] = vp9_read_and_apply_sign(r, val); \
- INCREMENT_COUNT(token); \
- c++; \
- nzc++; \
- continue; \
- }
-#else
#define WRITE_COEF_CONTINUE(val, token) \
{ \
qcoeff_ptr[scan[c]] = vp9_read_and_apply_sign(r, val); \
@@ -86,7 +90,12 @@ DECLARE_ALIGNED(16, extern const uint8_t, vp9_norm[256]);
c++; \
continue; \
}
-#endif // CONFIG_CODE_NONZEROCOUNT
+
+#define WRITE_COEF_ONE() \
+{ \
+  qcoeff_ptr[scan[c]] = vp9_read_and_apply_sign(r, 1); \
+ INCREMENT_COUNT(ONE_TOKEN); \
+}
#define ADJUST_COEF(prob, bits_count) \
do { \
@@ -108,14 +117,21 @@ static int decode_coefs(VP9D_COMP *dx, const MACROBLOCKD *xd,
vp9_prob *prob;
vp9_coeff_count *coef_counts;
const int ref = xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME;
-#if CONFIG_CODE_NONZEROCOUNT
- const int nzc_used = get_nzc_used(txfm_size);
- uint16_t nzc = 0;
- uint16_t nzc_expected =
- nzc_used ? xd->mode_info_context->mbmi.nzcs[block_idx] : 0;
+ TX_TYPE tx_type = DCT_DCT;
+#if CONFIG_CODE_ZEROGROUP
+ int is_eoo[3] = {0, 0, 0};
+ int is_last_zero[3] = {0, 0, 0};
+ int o, rc;
+ vp9_zpc_probs *zpc_probs;
+ vp9_zpc_count *zpc_count;
+ vp9_prob *zprobs;
+ int eoo = 0, use_eoo;
#endif
const int *scan, *nb;
uint8_t token_cache[1024];
+#if CONFIG_CODE_ZEROGROUP
+ vpx_memset(token_cache, UNKNOWN_TOKEN, sizeof(token_cache));
+#endif
if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
aidx = vp9_block2above_sb64[txfm_size][block_idx];
@@ -147,8 +163,8 @@ static int decode_coefs(VP9D_COMP *dx, const MACROBLOCKD *xd,
switch (txfm_size) {
default:
case TX_4X4: {
- const TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
- get_tx_type_4x4(xd, block_idx) : DCT_DCT;
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_4x4(xd, block_idx) : DCT_DCT;
switch (tx_type) {
default:
scan = vp9_default_zig_zag1d_4x4;
@@ -165,6 +181,10 @@ static int decode_coefs(VP9D_COMP *dx, const MACROBLOCKD *xd,
coef_probs = fc->coef_probs_4x4;
coef_counts = fc->coef_counts_4x4;
default_eob = 16;
+#if CONFIG_CODE_ZEROGROUP
+ zpc_probs = &(fc->zpc_probs_4x4);
+ zpc_count = &(fc->zpc_counts_4x4);
+#endif
break;
}
case TX_8X8: {
@@ -172,8 +192,8 @@ static int decode_coefs(VP9D_COMP *dx, const MACROBLOCKD *xd,
const int sz = 3 + mb_width_log2(sb_type);
const int x = block_idx & ((1 << sz) - 1);
const int y = block_idx - x;
- const TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
- get_tx_type_8x8(xd, y + (x >> 1)) : DCT_DCT;
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_8x8(xd, y + (x >> 1)) : DCT_DCT;
switch (tx_type) {
default:
scan = vp9_default_zig_zag1d_8x8;
@@ -190,6 +210,10 @@ static int decode_coefs(VP9D_COMP *dx, const MACROBLOCKD *xd,
above_ec = (A0[aidx] + A0[aidx + 1]) != 0;
left_ec = (L0[lidx] + L0[lidx + 1]) != 0;
default_eob = 64;
+#if CONFIG_CODE_ZEROGROUP
+ zpc_probs = &(fc->zpc_probs_8x8);
+ zpc_count = &(fc->zpc_counts_8x8);
+#endif
break;
}
case TX_16X16: {
@@ -197,8 +221,8 @@ static int decode_coefs(VP9D_COMP *dx, const MACROBLOCKD *xd,
const int sz = 4 + mb_width_log2(sb_type);
const int x = block_idx & ((1 << sz) - 1);
const int y = block_idx - x;
- const TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
- get_tx_type_16x16(xd, y + (x >> 2)) : DCT_DCT;
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_16x16(xd, y + (x >> 2)) : DCT_DCT;
switch (tx_type) {
default:
scan = vp9_default_zig_zag1d_16x16;
@@ -222,6 +246,10 @@ static int decode_coefs(VP9D_COMP *dx, const MACROBLOCKD *xd,
left_ec = (L0[lidx] + L0[lidx + 1] + L0[lidx + 2] + L0[lidx + 3]) != 0;
}
default_eob = 256;
+#if CONFIG_CODE_ZEROGROUP
+ zpc_probs = &(fc->zpc_probs_16x16);
+ zpc_count = &(fc->zpc_counts_16x16);
+#endif
break;
}
case TX_32X32:
@@ -248,6 +276,10 @@ static int decode_coefs(VP9D_COMP *dx, const MACROBLOCKD *xd,
L1[lidx] + L1[lidx + 1] + L1[lidx + 2] + L1[lidx + 3]) != 0;
}
default_eob = 1024;
+#if CONFIG_CODE_ZEROGROUP
+ zpc_probs = &fc->zpc_probs_32x32;
+ zpc_count = &fc->zpc_counts_32x32;
+#endif
break;
}
@@ -256,35 +288,67 @@ static int decode_coefs(VP9D_COMP *dx, const MACROBLOCKD *xd,
while (1) {
int val;
+ int band;
const uint8_t *cat6 = cat6_prob;
-
if (c >= seg_eob)
break;
-#if CONFIG_CODE_NONZEROCOUNT
- if (nzc_used && nzc == nzc_expected)
+ if (c)
+ pt = vp9_get_coef_context(scan, nb, pad, token_cache,
+ c, default_eob);
+ band = get_coef_band(scan, txfm_size, c);
+ prob = coef_probs[type][ref][band][pt];
+ fc->eob_branch_counts[txfm_size][type][ref][band][pt]++;
+ if (!vp9_read(r, prob[EOB_CONTEXT_NODE]))
break;
+#if CONFIG_CODE_ZEROGROUP
+ rc = scan[c];
+ o = vp9_get_orientation(rc, txfm_size);
+ if (token_cache[rc] == ZERO_TOKEN || is_eoo[o]) {
+ coef_counts[type][ref][band][pt][ZERO_TOKEN]++;
+ ZEROGROUP_ADVANCE();
+ goto SKIP_START;
+ }
#endif
- prob = coef_probs[type][ref][get_coef_band(scan, txfm_size, c)][pt];
- fc->eob_branch_counts[txfm_size][type][ref]
- [get_coef_band(scan, txfm_size, c)][pt]++;
-#if CONFIG_CODE_NONZEROCOUNT
- if (!nzc_used)
-#endif
- if (!vp9_read(r, prob[EOB_CONTEXT_NODE]))
- break;
+
SKIP_START:
if (c >= seg_eob)
break;
-#if CONFIG_CODE_NONZEROCOUNT
- if (nzc_used && nzc == nzc_expected)
- break;
- // decode zero node only if there are zeros left
- if (!nzc_used || seg_eob - nzc_expected - c + nzc > 0)
+ if (c)
+ pt = vp9_get_coef_context(scan, nb, pad, token_cache,
+ c, default_eob);
+ band = get_coef_band(scan, txfm_size, c);
+ prob = coef_probs[type][ref][band][pt];
+#if CONFIG_CODE_ZEROGROUP
+ rc = scan[c];
+ o = vp9_get_orientation(rc, txfm_size);
+ if (token_cache[rc] == ZERO_TOKEN || is_eoo[o]) {
+ ZEROGROUP_ADVANCE();
+ goto SKIP_START;
+ }
+ zprobs = (*zpc_probs)[ref]
+ [coef_to_zpc_band(band)]
+ [coef_to_zpc_ptok(pt)];
#endif
if (!vp9_read(r, prob[ZERO_CONTEXT_NODE])) {
+#if CONFIG_CODE_ZEROGROUP
+ eoo = 0;
+#if USE_ZPC_EOORIENT == 1
+ use_eoo = vp9_use_eoo(c, seg_eob, scan, txfm_size, is_last_zero, is_eoo);
+#else
+ use_eoo = 0;
+#endif
+ if (use_eoo) {
+ eoo = !vp9_read(r, zprobs[0]);
+ ++(*zpc_count)[ref]
+ [coef_to_zpc_band(band)]
+ [coef_to_zpc_ptok(pt)][0][!eoo];
+ if (eoo) {
+ is_eoo[o] = 1;
+ }
+ }
+#endif
INCREMENT_COUNT(ZERO_TOKEN);
++c;
- prob = coef_probs[type][ref][get_coef_band(scan, txfm_size, c)][pt];
goto SKIP_START;
}
// ONE_CONTEXT_NODE_0_
@@ -347,18 +411,9 @@ SKIP_START:
WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY6);
}
-#if CONFIG_CODE_NONZEROCOUNT
- if (!nzc_used)
-#endif
- if (c < seg_eob)
- coef_counts[type][ref][get_coef_band(scan, txfm_size, c)]
- [pt][DCT_EOB_TOKEN]++;
-#if CONFIG_CODE_NONZEROCOUNT
- if (!nzc_used)
- xd->mode_info_context->mbmi.nzcs[block_idx] = nzc;
- else
- assert(nzc == nzc_expected);
-#endif
+ if (c < seg_eob)
+ coef_counts[type][ref][get_coef_band(scan, txfm_size, c)]
+ [pt][DCT_EOB_TOKEN]++;
A0[aidx] = L0[lidx] = c > 0;
if (txfm_size >= TX_8X8) {
diff --git a/vp9/decoder/vp9_treereader.h b/vp9/decoder/vp9_treereader.h
index c9832e11d..4535688ea 100644
--- a/vp9/decoder/vp9_treereader.h
+++ b/vp9/decoder/vp9_treereader.h
@@ -15,8 +15,6 @@
#include "vp9/common/vp9_treecoder.h"
#include "vp9/decoder/vp9_dboolhuff.h"
-typedef BOOL_DECODER vp9_reader;
-
#define vp9_read_prob(r) ((vp9_prob)vp9_read_literal(r, 8))
#define vp9_read_and_apply_sign(r, value) (vp9_read_bit(r) ? -(value) : (value))
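
A standalone model of the two macros that remain here: vp9_read_prob pulls an 8-bit literal, and vp9_read_and_apply_sign reads a single bit that negates the magnitude when set (the array-backed bit source stands in for a vp9_reader):

#include <stdio.h>

static const int stream[] = { 1, 0, 1, 0, 0, 1, 1, 0, 1 };
static int pos;

static int read_bits(int n) {    /* n bits, MSB first */
  int v = 0;
  while (n--)
    v = (v << 1) | stream[pos++];
  return v;
}

int main(void) {
  const int prob = read_bits(8);                 /* vp9_read_prob */
  const int mag = 3;
  const int val = read_bits(1) ? -mag : mag;     /* vp9_read_and_apply_sign */
  printf("prob=%d val=%d\n", prob, val);         /* prob=166 val=-3 */
  return 0;
}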
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index bcfbd6094..e94f616c3 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -50,21 +50,15 @@ vp9_coeff_stats tree_update_hist_32x32[BLOCK_TYPES];
extern unsigned int active_section;
#endif
-#if CONFIG_CODE_NONZEROCOUNT
-#ifdef NZC_STATS
-unsigned int nzc_stats_4x4[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC4X4_TOKENS];
-unsigned int nzc_stats_8x8[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC8X8_TOKENS];
-unsigned int nzc_stats_16x16[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC16X16_TOKENS];
-unsigned int nzc_stats_32x32[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC32X32_TOKENS];
-unsigned int nzc_pcat_stats[MAX_NZC_CONTEXTS][NZC_TOKENS_EXTRA]
- [NZC_BITS_EXTRA][2];
-void init_nzcstats();
-void update_nzcstats(VP9_COMMON *const cm);
-void print_nzcstats();
+#if CONFIG_CODE_ZEROGROUP
+#ifdef ZPC_STATS
+vp9_zpc_count zpc_stats_4x4;
+vp9_zpc_count zpc_stats_8x8;
+vp9_zpc_count zpc_stats_16x16;
+vp9_zpc_count zpc_stats_32x32;
+void init_zpcstats();
+void update_zpcstats(VP9_COMMON *const cm);
+void print_zpcstats();
#endif
#endif
@@ -277,55 +271,6 @@ static void update_inter_mode_probs(VP9_COMMON *cm,
}
}
-#if CONFIG_NEW_MVREF
-static void update_mv_ref_probs(VP9_COMP *cpi,
- int mvref_probs[MAX_REF_FRAMES]
- [MAX_MV_REF_CANDIDATES-1]) {
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
- int rf; // Reference frame
- int ref_c; // Motion reference candidate
- int node; // Probability node index
-
- for (rf = 0; rf < MAX_REF_FRAMES; ++rf) {
- int count = 0;
-
- // Skip the dummy entry for intra ref frame.
- if (rf == INTRA_FRAME) {
- continue;
- }
-
- // Sum the counts for all candidates
- for (ref_c = 0; ref_c < MAX_MV_REF_CANDIDATES; ++ref_c) {
- count += cpi->mb_mv_ref_count[rf][ref_c];
- }
-
- // Calculate the tree node probabilities
- for (node = 0; node < MAX_MV_REF_CANDIDATES-1; ++node) {
- int new_prob, old_cost, new_cost;
- unsigned int branch_cnts[2];
-
- // How many hits on each branch at this node
- branch_cnts[0] = cpi->mb_mv_ref_count[rf][node];
- branch_cnts[1] = count - cpi->mb_mv_ref_count[rf][node];
-
- // Work out cost of coding branches with the old and optimal probability
- old_cost = cost_branch256(branch_cnts, xd->mb_mv_ref_probs[rf][node]);
- new_prob = get_prob(branch_cnts[0], count);
- new_cost = cost_branch256(branch_cnts, new_prob);
-
- // Take current 0 branch cases out of residual count
- count -= cpi->mb_mv_ref_count[rf][node];
-
- if ((new_cost + VP9_MV_REF_UPDATE_COST) <= old_cost) {
- mvref_probs[rf][node] = new_prob;
- } else {
- mvref_probs[rf][node] = xd->mb_mv_ref_probs[rf][node];
- }
- }
- }
-}
-#endif
-
static void write_ymode(vp9_writer *bc, int m, const vp9_prob *p) {
write_token(bc, vp9_ymode_tree, p, vp9_ymode_encodings + m);
}
@@ -469,40 +414,58 @@ static void pack_mb_tokens(vp9_writer* const bc,
TOKENEXTRA *p = *tp;
while (p < stop) {
- const int t = p->Token;
+ const int t = p->token;
const struct vp9_token *const a = vp9_coef_encodings + t;
- const vp9_extra_bit_struct *const b = vp9_extra_bits + t;
+ const vp9_extra_bit *const b = vp9_extra_bits + t;
int i = 0;
const unsigned char *pp = p->context_tree;
int v = a->value;
int n = a->len;
+ int ncount = n;
if (t == EOSB_TOKEN)
{
++p;
break;
}
+ assert(pp != 0);
+#if CONFIG_CODE_ZEROGROUP
+ if (t == ZPC_ISOLATED || t == ZPC_EOORIENT) {
+ assert((p - 1)->token == ZERO_TOKEN);
+ encode_bool(bc, t == ZPC_ISOLATED, *pp);
+ ++p;
+ continue;
+ } else if (p->skip_coef_val) {
+ assert(p->skip_eob_node == 0);
+ assert(t == DCT_EOB_TOKEN || t == ZERO_TOKEN);
+ encode_bool(bc, t == ZERO_TOKEN, *pp);
+ ++p;
+ continue;
+ }
+#endif
/* skip one or two nodes */
if (p->skip_eob_node) {
n -= p->skip_eob_node;
i = 2 * p->skip_eob_node;
+ ncount -= p->skip_eob_node;
}
do {
const int bb = (v >> --n) & 1;
vp9_write(bc, bb, pp[i >> 1]);
i = vp9_coef_tree[i + bb];
- } while (n);
+ ncount--;
+ } while (n && ncount);
if (b->base_val) {
- const int e = p->Extra, L = b->Len;
+ const int e = p->extra, l = b->len;
- if (L) {
+ if (l) {
const unsigned char *pp = b->prob;
int v = e >> 1;
- int n = L; /* number of bits in v, assumed nonzero */
+ int n = l; /* number of bits in v, assumed nonzero */
int i = 0;
do {
@@ -562,38 +525,6 @@ static void write_nmv(VP9_COMP *cpi, vp9_writer *bc,
vp9_encode_nmv_fp(bc, &e, &ref->as_mv, nmvc, usehp);
}
-#if CONFIG_NEW_MVREF
-static void vp9_write_mv_ref_id(vp9_writer *w,
- vp9_prob * ref_id_probs,
- int mv_ref_id) {
- // Encode the index for the MV reference.
- switch (mv_ref_id) {
- case 0:
- vp9_write(w, 0, ref_id_probs[0]);
- break;
- case 1:
- vp9_write(w, 1, ref_id_probs[0]);
- vp9_write(w, 0, ref_id_probs[1]);
- break;
- case 2:
- vp9_write(w, 1, ref_id_probs[0]);
- vp9_write(w, 1, ref_id_probs[1]);
- vp9_write(w, 0, ref_id_probs[2]);
- break;
- case 3:
- vp9_write(w, 1, ref_id_probs[0]);
- vp9_write(w, 1, ref_id_probs[1]);
- vp9_write(w, 1, ref_id_probs[2]);
- break;
-
- // TRAP.. This should not happen
- default:
- assert(0);
- break;
- }
-}
-#endif
-
// This function writes the current macroblock's segment id to the bitstream.
// It should only be called if a segment map update is indicated.
static void write_mb_segid(vp9_writer *bc,
@@ -635,9 +566,9 @@ static void write_mb_segid_except(VP9_COMMON *cm,
const MACROBLOCKD *xd,
int mb_row, int mb_col) {
// Encode the MB segment id.
- int seg_id = mi->segment_id;
- int pred_seg_id = vp9_get_pred_mb_segid(cm, xd,
- mb_row * cm->mb_cols + mb_col);
+ const int seg_id = mi->segment_id;
+ const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
+ const int pred_seg_id = vp9_get_pred_mb_segid(cm, sb_type, mb_row, mb_col);
const vp9_prob *p = xd->mb_segment_tree_probs;
const vp9_prob p1 = xd->mb_segment_mispred_tree_probs[pred_seg_id];
@@ -901,22 +832,6 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
}
#endif
-#if CONFIG_NEW_MVREF
- // if ((mode == NEWMV) || (mode == SPLITMV)) {
- if (mode == NEWMV) {
- // Encode the index of the choice.
- vp9_write_mv_ref_id(bc,
- xd->mb_mv_ref_probs[rf], mi->best_index);
-
- if (mi->second_ref_frame > 0) {
- // Encode the index of the choice.
- vp9_write_mv_ref_id(
- bc, xd->mb_mv_ref_probs[mi->second_ref_frame],
- mi->best_second_index);
- }
- }
-#endif
-
switch (mode) { /* new, split require MVs */
case NEWMV:
#ifdef ENTROPY_STATS
@@ -1082,328 +997,59 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
}
}
-#if CONFIG_CODE_NONZEROCOUNT
-static void write_nzc(VP9_COMP *const cpi,
- uint16_t nzc,
- int nzc_context,
- TX_SIZE tx_size,
- int ref,
- int type,
- vp9_writer* const bc) {
- VP9_COMMON *const cm = &cpi->common;
- int c, e;
- // if (!cpi->dummy_packing && cm->current_video_frame == 27)
- // printf("nzc: %d, tx_size: %d\n", nzc, tx_size);
- if (!get_nzc_used(tx_size)) return;
- c = codenzc(nzc);
- if (tx_size == TX_32X32) {
- write_token(bc, vp9_nzc32x32_tree,
- cm->fc.nzc_probs_32x32[nzc_context][ref][type],
- vp9_nzc32x32_encodings + c);
- // cm->fc.nzc_counts_32x32[nzc_context][ref][type][c]++;
- } else if (tx_size == TX_16X16) {
- write_token(bc, vp9_nzc16x16_tree,
- cm->fc.nzc_probs_16x16[nzc_context][ref][type],
- vp9_nzc16x16_encodings + c);
- // cm->fc.nzc_counts_16x16[nzc_context][ref][type][c]++;
- } else if (tx_size == TX_8X8) {
- write_token(bc, vp9_nzc8x8_tree,
- cm->fc.nzc_probs_8x8[nzc_context][ref][type],
- vp9_nzc8x8_encodings + c);
- // cm->fc.nzc_counts_8x8[nzc_context][ref][type][c]++;
- } else if (tx_size == TX_4X4) {
- write_token(bc, vp9_nzc4x4_tree,
- cm->fc.nzc_probs_4x4[nzc_context][ref][type],
- vp9_nzc4x4_encodings + c);
- // cm->fc.nzc_counts_4x4[nzc_context][ref][type][c]++;
- } else {
- assert(0);
- }
-
- if ((e = vp9_extranzcbits[c])) {
- int x = nzc - vp9_basenzcvalue[c];
- while (e--) {
- int b = (x >> e) & 1;
- vp9_write(bc, b,
- cm->fc.nzc_pcat_probs[nzc_context][c - NZC_TOKENS_NOEXTRA][e]);
- // cm->fc.nzc_pcat_counts[nzc_context][c - NZC_TOKENS_NOEXTRA][e][b]++;
- }
- }
-}
-
-static void write_nzcs_sb64(VP9_COMP *cpi,
- MACROBLOCKD *xd,
- int mb_row,
- int mb_col,
- vp9_writer* const bc) {
- VP9_COMMON *const cm = &cpi->common;
- MODE_INFO *m = xd->mode_info_context;
- MB_MODE_INFO *const mi = &m->mbmi;
- int j, nzc_context;
- const int ref = m->mbmi.ref_frame != INTRA_FRAME;
-
- assert(mb_col == get_mb_col(xd));
- assert(mb_row == get_mb_row(xd));
-
- if (mi->mb_skip_coeff)
- return;
-
- switch (mi->txfm_size) {
- case TX_32X32:
- for (j = 0; j < 256; j += 64) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_32X32, ref, 0, bc);
- }
- for (j = 256; j < 384; j += 64) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_32X32, ref, 1, bc);
- }
- break;
-
- case TX_16X16:
- for (j = 0; j < 256; j += 16) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 0, bc);
- }
- for (j = 256; j < 384; j += 16) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 1, bc);
- }
- break;
-
- case TX_8X8:
- for (j = 0; j < 256; j += 4) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 0, bc);
- }
- for (j = 256; j < 384; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 1, bc);
- }
- break;
-
- case TX_4X4:
- for (j = 0; j < 256; ++j) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 0, bc);
- }
- for (j = 256; j < 384; ++j) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 1, bc);
- }
- break;
-
- default:
- break;
- }
-}
-
-static void write_nzcs_sb32(VP9_COMP *cpi,
- MACROBLOCKD *xd,
- int mb_row,
- int mb_col,
- vp9_writer* const bc) {
- VP9_COMMON *const cm = &cpi->common;
- MODE_INFO *m = xd->mode_info_context;
- MB_MODE_INFO *const mi = &m->mbmi;
- int j, nzc_context;
- const int ref = m->mbmi.ref_frame != INTRA_FRAME;
-
- assert(mb_col == get_mb_col(xd));
- assert(mb_row == get_mb_row(xd));
-
- if (mi->mb_skip_coeff)
- return;
-
- switch (mi->txfm_size) {
- case TX_32X32:
- for (j = 0; j < 64; j += 64) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_32X32, ref, 0, bc);
- }
- for (j = 64; j < 96; j += 16) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 1, bc);
- }
- break;
-
- case TX_16X16:
- for (j = 0; j < 64; j += 16) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 0, bc);
- }
- for (j = 64; j < 96; j += 16) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 1, bc);
- }
- break;
-
- case TX_8X8:
- for (j = 0; j < 64; j += 4) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 0, bc);
- }
- for (j = 64; j < 96; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 1, bc);
- }
- break;
-
- case TX_4X4:
- for (j = 0; j < 64; ++j) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 0, bc);
- }
- for (j = 64; j < 96; ++j) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 1, bc);
- }
- break;
-
- default:
- break;
- }
-}
-
-static void write_nzcs_mb16(VP9_COMP *cpi,
- MACROBLOCKD *xd,
- int mb_row,
- int mb_col,
- vp9_writer* const bc) {
- VP9_COMMON *const cm = &cpi->common;
- MODE_INFO *m = xd->mode_info_context;
- MB_MODE_INFO *const mi = &m->mbmi;
- int j, nzc_context;
- const int ref = m->mbmi.ref_frame != INTRA_FRAME;
-
- assert(mb_col == get_mb_col(xd));
- assert(mb_row == get_mb_row(xd));
-
- if (mi->mb_skip_coeff)
- return;
-
- switch (mi->txfm_size) {
- case TX_16X16:
- for (j = 0; j < 16; j += 16) {
- nzc_context = vp9_get_nzc_context_y_mb16(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 0, bc);
- }
- for (j = 16; j < 24; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 1, bc);
- }
- break;
-
- case TX_8X8:
- for (j = 0; j < 16; j += 4) {
- nzc_context = vp9_get_nzc_context_y_mb16(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 0, bc);
- }
- if (mi->mode == I8X8_PRED || mi->mode == SPLITMV) {
- for (j = 16; j < 24; ++j) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 1, bc);
- }
- } else {
- for (j = 16; j < 24; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 1, bc);
- }
- }
- break;
-
- case TX_4X4:
- for (j = 0; j < 16; ++j) {
- nzc_context = vp9_get_nzc_context_y_mb16(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 0, bc);
- }
- for (j = 16; j < 24; ++j) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- write_nzc(cpi, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 1, bc);
- }
- break;
-
- default:
- break;
- }
-}
-
-#ifdef NZC_STATS
-void init_nzcstats() {
- vp9_zero(nzc_stats_4x4);
- vp9_zero(nzc_stats_8x8);
- vp9_zero(nzc_stats_16x16);
- vp9_zero(nzc_stats_32x32);
- vp9_zero(nzc_pcat_stats);
-}
-
-void update_nzcstats(VP9_COMMON *const cm) {
- int c, r, b, t;
-
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (r = 0; r < REF_TYPES; ++r) {
- for (b = 0; b < BLOCK_TYPES; ++b) {
- for (t = 0; t < NZC4X4_TOKENS; ++t) {
- nzc_stats_4x4[c][r][b][t] += cm->fc.nzc_counts_4x4[c][r][b][t];
- }
- }
- }
- }
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (r = 0; r < REF_TYPES; ++r) {
- for (b = 0; b < BLOCK_TYPES; ++b) {
- for (t = 0; t < NZC8X8_TOKENS; ++t) {
- nzc_stats_8x8[c][r][b][t] += cm->fc.nzc_counts_8x8[c][r][b][t];
- }
- }
- }
- }
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (r = 0; r < REF_TYPES; ++r) {
- for (b = 0; b < BLOCK_TYPES; ++b) {
- for (t = 0; t < NZC16X16_TOKENS; ++t) {
- nzc_stats_16x16[c][r][b][t] += cm->fc.nzc_counts_16x16[c][r][b][t];
- }
- }
- }
- }
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (r = 0; r < REF_TYPES; ++r) {
- for (b = 0; b < BLOCK_TYPES; ++b) {
- for (t = 0; t < NZC32X32_TOKENS; ++t) {
- nzc_stats_32x32[c][r][b][t] += cm->fc.nzc_counts_32x32[c][r][b][t];
+#if CONFIG_CODE_ZEROGROUP
+#ifdef ZPC_STATS
+void init_zpcstats() {
+ vp9_zero(zpc_stats_4x4);
+ vp9_zero(zpc_stats_8x8);
+ vp9_zero(zpc_stats_16x16);
+ vp9_zero(zpc_stats_32x32);
+}
+
+void update_zpcstats(VP9_COMMON *const cm) {
+ int r, b, p, n;
+ for (r = 0; r < REF_TYPES; ++r) {
+ for (b = 0; b < ZPC_BANDS; ++b) {
+ for (p = 0; p < ZPC_PTOKS; ++p) {
+ for (n = 0; n < ZPC_NODES; ++n) {
+ zpc_stats_4x4[r][b][p][n][0] += cm->fc.zpc_counts_4x4[r][b][p][n][0];
+ zpc_stats_4x4[r][b][p][n][1] += cm->fc.zpc_counts_4x4[r][b][p][n][1];
+ zpc_stats_8x8[r][b][p][n][0] += cm->fc.zpc_counts_8x8[r][b][p][n][0];
+ zpc_stats_8x8[r][b][p][n][1] += cm->fc.zpc_counts_8x8[r][b][p][n][1];
+ zpc_stats_16x16[r][b][p][n][0] +=
+ cm->fc.zpc_counts_16x16[r][b][p][n][0];
+ zpc_stats_16x16[r][b][p][n][1] +=
+ cm->fc.zpc_counts_16x16[r][b][p][n][1];
+ zpc_stats_32x32[r][b][p][n][0] +=
+ cm->fc.zpc_counts_32x32[r][b][p][n][0];
+ zpc_stats_32x32[r][b][p][n][1] +=
+ cm->fc.zpc_counts_32x32[r][b][p][n][1];
}
}
}
}
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (t = 0; t < NZC_TOKENS_EXTRA; ++t) {
- int bits = vp9_extranzcbits[t + NZC_TOKENS_NOEXTRA];
- for (b = 0; b < bits; ++b) {
- nzc_pcat_stats[c][t][b][0] += cm->fc.nzc_pcat_counts[c][t][b][0];
- nzc_pcat_stats[c][t][b][1] += cm->fc.nzc_pcat_counts[c][t][b][1];
- }
- }
- }
}
-void print_nzcstats() {
- int c, r, b, t;
+void print_zpcstats() {
+ int r, b, p, n;
FILE *f;
printf(
- "static const unsigned int default_nzc_counts_4x4[MAX_NZC_CONTEXTS]\n"
- " [REF_TYPES]\n"
- " [BLOCK_TYPES]\n"
- " [NZC4X4_TOKENS] = {\n");
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
+ "static const unsigned int default_zpc_probs_4x4[REF_TYPES]\n"
+ " [ZPC_BANDS]\n"
+ " [ZPC_PTOKS]\n"
+ " [ZPC_NODES] = {\n");
+ for (r = 0; r < REF_TYPES; ++r) {
printf(" {\n");
- for (r = 0; r < REF_TYPES; ++r) {
+ for (b = 0; b < ZPC_BANDS; ++b) {
printf(" {\n");
- for (b = 0; b < BLOCK_TYPES; ++b) {
+ for (p = 0; p < ZPC_PTOKS; ++p) {
printf(" {");
- for (t = 0; t < NZC4X4_TOKENS; ++t) {
- printf(" %-3d,", nzc_stats_4x4[c][r][b][t]);
+ for (n = 0; n < ZPC_NODES; ++n) {
+ vp9_prob prob = get_binary_prob(zpc_stats_4x4[r][b][p][n][0],
+ zpc_stats_4x4[r][b][p][n][1]);
+ printf(" %-3d [%d/%d],", prob, zpc_stats_4x4[r][b][p][n][0],
+ zpc_stats_4x4[r][b][p][n][1]);
}
printf(" },\n");
}
@@ -1412,20 +1058,22 @@ void print_nzcstats() {
printf(" },\n");
}
printf("};\n");
-
printf(
- "static const unsigned int default_nzc_counts_8x8[MAX_NZC_CONTEXTS]\n"
- " [REF_TYPES]\n"
- " [BLOCK_TYPES]\n"
- " [NZC8X8_TOKENS] = {\n");
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
+ "static const unsigned int default_zpc_probs_8x8[REF_TYPES]\n"
+ " [ZPC_BANDS]\n"
+ " [ZPC_PTOKS]\n"
+ " [ZPC_NODES] = {\n");
+ for (r = 0; r < REF_TYPES; ++r) {
printf(" {\n");
- for (r = 0; r < REF_TYPES; ++r) {
+ for (b = 0; b < ZPC_BANDS; ++b) {
printf(" {\n");
- for (b = 0; b < BLOCK_TYPES; ++b) {
+ for (p = 0; p < ZPC_PTOKS; ++p) {
printf(" {");
- for (t = 0; t < NZC8X8_TOKENS; ++t) {
- printf(" %-3d,", nzc_stats_8x8[c][r][b][t]);
+ for (n = 0; n < ZPC_NODES; ++n) {
+ vp9_prob prob = get_binary_prob(zpc_stats_8x8[r][b][p][n][0],
+ zpc_stats_8x8[r][b][p][n][1]);
+ printf(" %-3d [%d/%d],", prob, zpc_stats_8x8[r][b][p][n][0],
+ zpc_stats_8x8[r][b][p][n][1]);
}
printf(" },\n");
}
@@ -1434,21 +1082,22 @@ void print_nzcstats() {
printf(" },\n");
}
printf("};\n");
-
printf(
- "static const unsigned int default_nzc_counts_16x16[MAX_NZC_CONTEXTS]\n"
- " [REF_TYPES]\n"
- " [BLOCK_TYPES]\n"
- " [NZC16X16_TOKENS] = {"
- "\n");
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
+ "static const unsigned int default_zpc_probs_16x16[REF_TYPES]\n"
+ " [ZPC_BANDS]\n"
+ " [ZPC_PTOKS]\n"
+ " [ZPC_NODES] = {\n");
+ for (r = 0; r < REF_TYPES; ++r) {
printf(" {\n");
- for (r = 0; r < REF_TYPES; ++r) {
+ for (b = 0; b < ZPC_BANDS; ++b) {
printf(" {\n");
- for (b = 0; b < BLOCK_TYPES; ++b) {
+ for (p = 0; p < ZPC_PTOKS; ++p) {
printf(" {");
- for (t = 0; t < NZC16X16_TOKENS; ++t) {
- printf(" %-3d,", nzc_stats_16x16[c][r][b][t]);
+ for (n = 0; n < ZPC_NODES; ++n) {
+ vp9_prob prob = get_binary_prob(zpc_stats_16x16[r][b][p][n][0],
+ zpc_stats_16x16[r][b][p][n][1]);
+ printf(" %-3d [%d/%d],", prob, zpc_stats_16x16[r][b][p][n][0],
+ zpc_stats_16x16[r][b][p][n][1]);
}
printf(" },\n");
}
@@ -1457,21 +1106,22 @@ void print_nzcstats() {
printf(" },\n");
}
printf("};\n");
-
printf(
- "static const unsigned int default_nzc_counts_32x32[MAX_NZC_CONTEXTS]\n"
- " [REF_TYPES]\n"
- " [BLOCK_TYPES]\n"
- " [NZC32X32_TOKENS] = {"
- "\n");
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
+ "static const unsigned int default_zpc_probs_32x32[REF_TYPES]\n"
+ " [ZPC_BANDS]\n"
+ " [ZPC_PTOKS]\n"
+ " [ZPC_NODES] = {\n");
+ for (r = 0; r < REF_TYPES; ++r) {
printf(" {\n");
- for (r = 0; r < REF_TYPES; ++r) {
+ for (b = 0; b < ZPC_BANDS; ++b) {
printf(" {\n");
- for (b = 0; b < BLOCK_TYPES; ++b) {
+ for (p = 0; p < ZPC_PTOKS; ++p) {
printf(" {");
- for (t = 0; t < NZC32X32_TOKENS; ++t) {
- printf(" %-3d,", nzc_stats_32x32[c][r][b][t]);
+ for (n = 0; n < ZPC_NODES; ++n) {
+ vp9_prob prob = get_binary_prob(zpc_stats_32x32[r][b][p][n][0],
+ zpc_stats_32x32[r][b][p][n][1]);
+ printf(" %-3d [%d/%d],", prob, zpc_stats_32x32[r][b][p][n][0],
+ zpc_stats_32x32[r][b][p][n][1]);
}
printf(" },\n");
}
@@ -1481,162 +1131,15 @@ void print_nzcstats() {
}
printf("};\n");
- printf(
- "static const vp9_prob default_nzc_pcat_counts[MAX_NZC_CONTEXTS]\n"
- " [NZC_TOKENS_EXTRA]\n"
- " [NZC_BITS_EXTRA] = {\n");
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- printf(" {\n");
- for (t = 0; t < NZC_TOKENS_EXTRA; ++t) {
- printf(" {");
- for (b = 0; b < NZC_BITS_EXTRA; ++b) {
- printf(" %d/%d,",
- nzc_pcat_stats[c][t][b][0], nzc_pcat_stats[c][t][b][1]);
- }
- printf(" },\n");
- }
- printf(" },\n");
- }
- printf("};\n");
-
- printf(
- "static const vp9_prob default_nzc_probs_4x4[MAX_NZC_CONTEXTS]\n"
- " [REF_TYPES]\n"
- " [BLOCK_TYPES]\n"
- " [NZC4X4_TOKENS] = {\n");
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- printf(" {\n");
- for (r = 0; r < REF_TYPES; ++r) {
- printf(" {\n");
- for (b = 0; b < BLOCK_TYPES; ++b) {
- vp9_prob probs[NZC4X4_NODES];
- unsigned int branch_ct[NZC4X4_NODES][2];
- vp9_tree_probs_from_distribution(vp9_nzc4x4_tree,
- probs, branch_ct,
- nzc_stats_4x4[c][r][b], 0);
- printf(" {");
- for (t = 0; t < NZC4X4_NODES; ++t) {
- printf(" %-3d,", probs[t]);
- }
- printf(" },\n");
- }
- printf(" },\n");
- }
- printf(" },\n");
- }
- printf("};\n");
-
- printf(
- "static const vp9_prob default_nzc_probs_8x8[MAX_NZC_CONTEXTS]\n"
- " [REF_TYPES]\n"
- " [BLOCK_TYPES]\n"
- " [NZC8X8_TOKENS] = {\n");
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- printf(" {\n");
- for (r = 0; r < REF_TYPES; ++r) {
- printf(" {\n");
- for (b = 0; b < BLOCK_TYPES; ++b) {
- vp9_prob probs[NZC8X8_NODES];
- unsigned int branch_ct[NZC8X8_NODES][2];
- vp9_tree_probs_from_distribution(vp9_nzc8x8_tree,
- probs, branch_ct,
- nzc_stats_8x8[c][r][b], 0);
- printf(" {");
- for (t = 0; t < NZC8X8_NODES; ++t) {
- printf(" %-3d,", probs[t]);
- }
- printf(" },\n");
- }
- printf(" },\n");
- }
- printf(" },\n");
- }
- printf("};\n");
-
- printf(
- "static const vp9_prob default_nzc_probs_16x16[MAX_NZC_CONTEXTS]\n"
- " [REF_TYPES]\n"
- " [BLOCK_TYPES]\n"
- " [NZC16X16_TOKENS] = {\n");
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- printf(" {\n");
- for (r = 0; r < REF_TYPES; ++r) {
- printf(" {\n");
- for (b = 0; b < BLOCK_TYPES; ++b) {
- vp9_prob probs[NZC16X16_NODES];
- unsigned int branch_ct[NZC16X16_NODES][2];
- vp9_tree_probs_from_distribution(vp9_nzc16x16_tree,
- probs, branch_ct,
- nzc_stats_16x16[c][r][b], 0);
- printf(" {");
- for (t = 0; t < NZC16X16_NODES; ++t) {
- printf(" %-3d,", probs[t]);
- }
- printf(" },\n");
- }
- printf(" },\n");
- }
- printf(" },\n");
- }
- printf("};\n");
-
- printf(
- "static const vp9_prob default_nzc_probs_32x32[MAX_NZC_CONTEXTS]\n"
- " [REF_TYPES]\n"
- " [BLOCK_TYPES]\n"
- " [NZC32X32_TOKENS] = {\n");
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- printf(" {\n");
- for (r = 0; r < REF_TYPES; ++r) {
- printf(" {\n");
- for (b = 0; b < BLOCK_TYPES; ++b) {
- vp9_prob probs[NZC32X32_NODES];
- unsigned int branch_ct[NZC32X32_NODES][2];
- vp9_tree_probs_from_distribution(vp9_nzc32x32_tree,
- probs, branch_ct,
- nzc_stats_32x32[c][r][b], 0);
- printf(" {");
- for (t = 0; t < NZC32X32_NODES; ++t) {
- printf(" %-3d,", probs[t]);
- }
- printf(" },\n");
- }
- printf(" },\n");
- }
- printf(" },\n");
- }
- printf("};\n");
-
- printf(
- "static const vp9_prob default_nzc_pcat_probs[MAX_NZC_CONTEXTS]\n"
- " [NZC_TOKENS_EXTRA]\n"
- " [NZC_BITS_EXTRA] = {\n");
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- printf(" {\n");
- for (t = 0; t < NZC_TOKENS_EXTRA; ++t) {
- printf(" {");
- for (b = 0; b < NZC_BITS_EXTRA; ++b) {
- vp9_prob prob = get_binary_prob(nzc_pcat_stats[c][t][b][0],
- nzc_pcat_stats[c][t][b][1]);
- printf(" %-3d,", prob);
- }
- printf(" },\n");
- }
- printf(" },\n");
- }
- printf("};\n");
-
- f = fopen("nzcstats.bin", "wb");
- fwrite(nzc_stats_4x4, sizeof(nzc_stats_4x4), 1, f);
- fwrite(nzc_stats_8x8, sizeof(nzc_stats_8x8), 1, f);
- fwrite(nzc_stats_16x16, sizeof(nzc_stats_16x16), 1, f);
- fwrite(nzc_stats_32x32, sizeof(nzc_stats_32x32), 1, f);
- fwrite(nzc_pcat_stats, sizeof(nzc_pcat_stats), 1, f);
+ f = fopen("zpcstats.bin", "wb");
+ fwrite(zpc_stats_4x4, sizeof(zpc_stats_4x4), 1, f);
+ fwrite(zpc_stats_8x8, sizeof(zpc_stats_8x8), 1, f);
+ fwrite(zpc_stats_16x16, sizeof(zpc_stats_16x16), 1, f);
+ fwrite(zpc_stats_32x32, sizeof(zpc_stats_32x32), 1, f);
fclose(f);
}
#endif
-
-#endif // CONFIG_CODE_NONZEROCOUNT
+#endif // CONFIG_CODE_ZEROGROUP
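
The stats dumps and the probability updates below both lean on get_binary_prob() to turn a pair of branch counts into an 8-bit probability. A minimal sketch of that mapping, assuming the usual libvpx clamp to [1, 255] (an illustrative reimplementation, not the tree-coder source):

// Minimal sketch of the count-to-probability mapping used above
// (illustrative; assumes the standard [1, 255] clamp).
typedef unsigned char vp9_prob_sketch;

static vp9_prob_sketch clip_prob_sketch(int p) {
  return (vp9_prob_sketch)(p > 255 ? 255 : (p < 1 ? 1 : p));
}

static vp9_prob_sketch get_prob_sketch(int num, int den) {
  // Q8 probability with round-to-nearest; 128 (= 1/2) when there is no data.
  return den == 0 ? 128 : clip_prob_sketch((num * 256 + (den >> 1)) / den);
}

// Probability of the zero branch given counts of the two outcomes.
static vp9_prob_sketch get_binary_prob_sketch(int n0, int n1) {
  return get_prob_sketch(n0, n0 + n1);
}
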
static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
TOKENEXTRA **tok, TOKENEXTRA *tok_end,
@@ -1660,14 +1163,6 @@ static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
active_section = 1;
#endif
}
-#if CONFIG_CODE_NONZEROCOUNT
- if (m->mbmi.sb_type == BLOCK_SIZE_SB64X64)
- write_nzcs_sb64(cpi, xd, mb_row, mb_col, bc);
- else if (m->mbmi.sb_type == BLOCK_SIZE_SB32X32)
- write_nzcs_sb32(cpi, xd, mb_row, mb_col, bc);
- else
- write_nzcs_mb16(cpi, xd, mb_row, mb_col, bc);
-#endif
assert(*tok < tok_end);
pack_mb_tokens(bc, tok, tok_end);
@@ -1876,92 +1371,64 @@ static void build_coeff_contexts(VP9_COMP *cpi) {
cpi->frame_branch_ct_32x32, BLOCK_TYPES);
}
-#if CONFIG_CODE_NONZEROCOUNT
-static void update_nzc_probs_common(VP9_COMP* cpi,
+#if CONFIG_CODE_ZEROGROUP
+static void update_zpc_probs_common(VP9_COMP* cpi,
vp9_writer* const bc,
TX_SIZE tx_size) {
- VP9_COMMON *cm = &cpi->common;
- int c, r, b, t;
+ int r, b, p, n;
+ VP9_COMMON *const cm = &cpi->common;
int update[2] = {0, 0};
int savings = 0;
- int tokens, nodes;
- const vp9_tree_index *nzc_tree;
- vp9_prob *new_nzc_probs;
- vp9_prob *old_nzc_probs;
- unsigned int *nzc_counts;
- unsigned int (*nzc_branch_ct)[2];
- vp9_prob upd;
-
- if (!get_nzc_used(tx_size)) return;
+ vp9_zpc_probs newprobs;
+ vp9_zpc_probs *zpc_probs;
+ vp9_zpc_count *zpc_counts;
+ vp9_prob upd = ZPC_UPDATE_PROB;
+
+ if (!get_zpc_used(tx_size)) return;
if (tx_size == TX_32X32) {
- tokens = NZC32X32_TOKENS;
- nzc_tree = vp9_nzc32x32_tree;
- old_nzc_probs = cm->fc.nzc_probs_32x32[0][0][0];
- new_nzc_probs = cpi->frame_nzc_probs_32x32[0][0][0];
- nzc_counts = cm->fc.nzc_counts_32x32[0][0][0];
- nzc_branch_ct = cpi->frame_nzc_branch_ct_32x32[0][0][0];
- upd = NZC_UPDATE_PROB_32X32;
+ zpc_probs = &cm->fc.zpc_probs_32x32;
+ zpc_counts = &cm->fc.zpc_counts_32x32;
} else if (tx_size == TX_16X16) {
- tokens = NZC16X16_TOKENS;
- nzc_tree = vp9_nzc16x16_tree;
- old_nzc_probs = cm->fc.nzc_probs_16x16[0][0][0];
- new_nzc_probs = cpi->frame_nzc_probs_16x16[0][0][0];
- nzc_counts = cm->fc.nzc_counts_16x16[0][0][0];
- nzc_branch_ct = cpi->frame_nzc_branch_ct_16x16[0][0][0];
- upd = NZC_UPDATE_PROB_16X16;
+ zpc_probs = &cm->fc.zpc_probs_16x16;
+ zpc_counts = &cm->fc.zpc_counts_16x16;
} else if (tx_size == TX_8X8) {
- tokens = NZC8X8_TOKENS;
- nzc_tree = vp9_nzc8x8_tree;
- old_nzc_probs = cm->fc.nzc_probs_8x8[0][0][0];
- new_nzc_probs = cpi->frame_nzc_probs_8x8[0][0][0];
- nzc_counts = cm->fc.nzc_counts_8x8[0][0][0];
- nzc_branch_ct = cpi->frame_nzc_branch_ct_8x8[0][0][0];
- upd = NZC_UPDATE_PROB_8X8;
+ zpc_probs = &cm->fc.zpc_probs_8x8;
+ zpc_counts = &cm->fc.zpc_counts_8x8;
} else {
- nzc_tree = vp9_nzc4x4_tree;
- tokens = NZC4X4_TOKENS;
- old_nzc_probs = cm->fc.nzc_probs_4x4[0][0][0];
- new_nzc_probs = cpi->frame_nzc_probs_4x4[0][0][0];
- nzc_counts = cm->fc.nzc_counts_4x4[0][0][0];
- nzc_branch_ct = cpi->frame_nzc_branch_ct_4x4[0][0][0];
- upd = NZC_UPDATE_PROB_4X4;
- }
- nodes = tokens - 1;
- // Get the new probabilities and the branch counts
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (r = 0; r < REF_TYPES; ++r) {
- for (b = 0; b < BLOCK_TYPES; ++b) {
- int offset = c * REF_TYPES * BLOCK_TYPES + r * BLOCK_TYPES + b;
- int offset_nodes = offset * nodes;
- int offset_tokens = offset * tokens;
- vp9_tree_probs_from_distribution(nzc_tree,
- new_nzc_probs + offset_nodes,
- nzc_branch_ct + offset_nodes,
- nzc_counts + offset_tokens, 0);
+ zpc_probs = &cm->fc.zpc_probs_4x4;
+ zpc_counts = &cm->fc.zpc_counts_4x4;
+ }
+ for (r = 0; r < REF_TYPES; ++r) {
+ for (b = 0; b < ZPC_BANDS; ++b) {
+ for (p = 0; p < ZPC_PTOKS; ++p) {
+ for (n = 0; n < ZPC_NODES; ++n) {
+ newprobs[r][b][p][n] = get_binary_prob((*zpc_counts)[r][b][p][n][0],
+ (*zpc_counts)[r][b][p][n][1]);
+ }
}
}
}
-
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (r = 0; r < REF_TYPES; ++r) {
- for (b = 0; b < BLOCK_TYPES; ++b) {
- int offset = c * REF_TYPES * BLOCK_TYPES + r * BLOCK_TYPES + b;
- int offset_nodes = offset * nodes;
- for (t = 0; t < nodes; ++t) {
- vp9_prob newp = new_nzc_probs[offset_nodes + t];
- vp9_prob oldp = old_nzc_probs[offset_nodes + t];
+ for (r = 0; r < REF_TYPES; ++r) {
+ for (b = 0; b < ZPC_BANDS; ++b) {
+ for (p = 0; p < ZPC_PTOKS; ++p) {
+ for (n = 0; n < ZPC_NODES; ++n) {
+ vp9_prob newp = newprobs[r][b][p][n];
+ vp9_prob oldp = (*zpc_probs)[r][b][p][n];
int s, u = 0;
+#if USE_ZPC_EXTRA == 0
+ if (n == 1) continue;
+#endif
#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(nzc_branch_ct[offset_nodes],
- oldp, &newp, upd);
- if (s > 0 && newp != oldp)
- u = 1;
- if (u)
- savings += s - (int)(vp9_cost_zero(upd));
- else
- savings -= (int)(vp9_cost_zero(upd));
+ s = prob_diff_update_savings_search((*zpc_counts)[r][b][p][n],
+ oldp, &newp, upd);
+ if (s > 0 && newp != oldp)
+ u = 1;
+ if (u)
+ savings += s - (int)(vp9_cost_zero(upd));
+ else
+ savings -= (int)(vp9_cost_zero(upd));
#else
- s = prob_update_savings(nzc_branch_ct[offset_nodes],
+ s = prob_update_savings((*zpc_counts)[r][b][p][n],
oldp, newp, upd);
if (s > 0)
u = 1;
@@ -1975,98 +1442,26 @@ static void update_nzc_probs_common(VP9_COMP* cpi,
}
if (update[1] == 0 || savings < 0) {
vp9_write_bit(bc, 0);
- } else {
- vp9_write_bit(bc, 1);
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (r = 0; r < REF_TYPES; ++r) {
- for (b = 0; b < BLOCK_TYPES; ++b) {
- int offset = c * REF_TYPES * BLOCK_TYPES + r * BLOCK_TYPES + b;
- int offset_nodes = offset * nodes;
- for (t = 0; t < nodes; ++t) {
- vp9_prob newp = new_nzc_probs[offset_nodes + t];
- vp9_prob *oldp = &old_nzc_probs[offset_nodes + t];
- int s, u = 0;
-#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(nzc_branch_ct[offset_nodes],
- *oldp, &newp, upd);
- if (s > 0 && newp != *oldp)
- u = 1;
-#else
- s = prob_update_savings(nzc_branch_ct[offset_nodes],
- *oldp, newp, upd);
- if (s > 0)
- u = 1;
-#endif
- vp9_write(bc, u, upd);
- if (u) {
- /* send/use new probability */
- write_prob_diff_update(bc, newp, *oldp);
- *oldp = newp;
- }
- }
- }
- }
- }
- }
-}
-
-static void update_nzc_pcat_probs(VP9_COMP *cpi, vp9_writer* const bc) {
- VP9_COMMON *cm = &cpi->common;
- int c, t, b;
- int update[2] = {0, 0};
- int savings = 0;
- vp9_prob upd = NZC_UPDATE_PROB_PCAT;
- if (!(get_nzc_used(TX_4X4) || get_nzc_used(TX_8X8) ||
- get_nzc_used(TX_16X16) || get_nzc_used(TX_32X32)))
return;
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (t = 0; t < NZC_TOKENS_EXTRA; ++t) {
- int bits = vp9_extranzcbits[t + NZC_TOKENS_NOEXTRA];
- for (b = 0; b < bits; ++b) {
- vp9_prob newp = get_binary_prob(cm->fc.nzc_pcat_counts[c][t][b][0],
- cm->fc.nzc_pcat_counts[c][t][b][1]);
- vp9_prob oldp = cm->fc.nzc_pcat_probs[c][t][b];
- int s, u = 0;
-#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(cm->fc.nzc_pcat_counts[c][t][b],
- oldp, &newp, upd);
- if (s > 0 && newp != oldp)
- u = 1;
- if (u)
- savings += s - (int)(vp9_cost_zero(upd));
- else
- savings -= (int)(vp9_cost_zero(upd));
-#else
- s = prob_update_savings(cm->fc.nzc_pcat_counts[c][t][b],
- oldp, newp, upd);
- if (s > 0)
- u = 1;
- if (u)
- savings += s;
-#endif
- update[u]++;
- }
- }
}
- if (update[1] == 0 || savings < 0) {
- vp9_write_bit(bc, 0);
- } else {
- vp9_write_bit(bc, 1);
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (t = 0; t < NZC_TOKENS_EXTRA; ++t) {
- int bits = vp9_extranzcbits[t + NZC_TOKENS_NOEXTRA];
- for (b = 0; b < bits; ++b) {
- vp9_prob newp = get_binary_prob(cm->fc.nzc_pcat_counts[c][t][b][0],
- cm->fc.nzc_pcat_counts[c][t][b][1]);
- vp9_prob *oldp = &cm->fc.nzc_pcat_probs[c][t][b];
+ vp9_write_bit(bc, 1);
+ for (r = 0; r < REF_TYPES; ++r) {
+ for (b = 0; b < ZPC_BANDS; ++b) {
+ for (p = 0; p < ZPC_PTOKS; ++p) {
+ for (n = 0; n < ZPC_NODES; ++n) {
+ vp9_prob newp = newprobs[r][b][p][n];
+ vp9_prob *oldp = &(*zpc_probs)[r][b][p][n];
int s, u = 0;
+#if USE_ZPC_EXTRA == 0
+ if (n == 1) continue;
+#endif
#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(cm->fc.nzc_pcat_counts[c][t][b],
+ s = prob_diff_update_savings_search((*zpc_counts)[r][b][p][n],
*oldp, &newp, upd);
if (s > 0 && newp != *oldp)
u = 1;
#else
- s = prob_update_savings(cm->fc.nzc_pcat_counts[c][t][b],
+ s = prob_update_savings((*zpc_counts)[r][b][p][n],
*oldp, newp, upd);
if (s > 0)
u = 1;
@@ -2083,24 +1478,21 @@ static void update_nzc_pcat_probs(VP9_COMP *cpi, vp9_writer* const bc) {
}
}
-static void update_nzc_probs(VP9_COMP* cpi,
+static void update_zpc_probs(VP9_COMP* cpi,
vp9_writer* const bc) {
- update_nzc_probs_common(cpi, bc, TX_4X4);
+ update_zpc_probs_common(cpi, bc, TX_4X4);
if (cpi->common.txfm_mode != ONLY_4X4)
- update_nzc_probs_common(cpi, bc, TX_8X8);
+ update_zpc_probs_common(cpi, bc, TX_8X8);
if (cpi->common.txfm_mode > ALLOW_8X8)
- update_nzc_probs_common(cpi, bc, TX_16X16);
+ update_zpc_probs_common(cpi, bc, TX_16X16);
if (cpi->common.txfm_mode > ALLOW_16X16)
- update_nzc_probs_common(cpi, bc, TX_32X32);
-#ifdef NZC_PCAT_UPDATE
- update_nzc_pcat_probs(cpi, bc);
-#endif
-#ifdef NZC_STATS
+ update_zpc_probs_common(cpi, bc, TX_32X32);
+#ifdef ZPC_STATS
if (!cpi->dummy_packing)
- update_nzcstats(&cpi->common);
+ update_zpcstats(&cpi->common);
#endif
}
-#endif // CONFIG_CODE_NONZEROCOUNT
+#endif // CONFIG_CODE_ZEROGROUP
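
update_zpc_probs_common() above keeps the two-pass shape of the NZC writer it replaces: a dry run that totals the per-node savings, then a single gate bit, then one flag per node coded at probability upd. A condensed sketch of that pattern (hypothetical helper; savings_per_node stands in for the results of the per-node savings search):

// Condensed sketch of the two-pass update pattern above (hypothetical
// helper: savings_per_node stands in for the per-node savings search).
static void write_prob_updates_sketch(vp9_writer *bc, vp9_prob upd,
                                      vp9_prob *oldp, const vp9_prob *newp,
                                      const int *savings_per_node, int n) {
  int i, savings = 0, any_update = 0;

  for (i = 0; i < n; i++) {            // pass 1: dry run, tally the cost
    if (savings_per_node[i] > 0) {
      any_update = 1;
      savings += savings_per_node[i] - (int)vp9_cost_zero(upd);
    } else {
      savings -= (int)vp9_cost_zero(upd);
    }
  }

  if (!any_update || savings < 0) {
    vp9_write_bit(bc, 0);              // one gate bit: keep all old probs
    return;
  }

  vp9_write_bit(bc, 1);                // pass 2: per-node update flags
  for (i = 0; i < n; i++) {
    const int u = savings_per_node[i] > 0;
    vp9_write(bc, u, upd);
    if (u) {
      write_prob_diff_update(bc, newp[i], oldp[i]);
      oldp[i] = newp[i];               // keep encoder and decoder in sync
    }
  }
}
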
static void update_coef_probs_common(vp9_writer* const bc,
VP9_COMP *cpi,
@@ -2121,11 +1513,7 @@ static void update_coef_probs_common(vp9_writer* const bc,
#endif
// vp9_prob bestupd = find_coef_update_prob(cpi);
-#if CONFIG_CODE_NONZEROCOUNT
- const int tstart = get_nzc_used(tx_size);
-#else
const int tstart = 0;
-#endif
  /* dry run to see if there is any update at all needed */
savings = 0;
for (i = 0; i < BLOCK_TYPES; ++i) {
@@ -2415,141 +1803,8 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
// error resilient mode
vp9_write_bit(&header_bc, pc->error_resilient_mode);
- // Signal whether or not Segmentation is enabled
- vp9_write_bit(&header_bc, (xd->segmentation_enabled) ? 1 : 0);
-
- // Indicate which features are enabled
- if (xd->segmentation_enabled) {
- // Indicate whether or not the segmentation map is being updated.
- vp9_write_bit(&header_bc, (xd->update_mb_segmentation_map) ? 1 : 0);
-
- // If it is, then indicate the method that will be used.
- if (xd->update_mb_segmentation_map) {
- // Select the coding strategy (temporal or spatial)
- vp9_choose_segmap_coding_method(cpi);
- // Send the tree probabilities used to decode unpredicted
- // macro-block segments
- for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) {
- const int prob = xd->mb_segment_tree_probs[i];
- if (prob != 255) {
- vp9_write_bit(&header_bc, 1);
- vp9_write_prob(&header_bc, prob);
- } else {
- vp9_write_bit(&header_bc, 0);
- }
- }
-
- // Write out the chosen coding method.
- vp9_write_bit(&header_bc, pc->temporal_update);
- if (pc->temporal_update) {
- for (i = 0; i < PREDICTION_PROBS; i++) {
- const int prob = pc->segment_pred_probs[i];
- if (prob != 255) {
- vp9_write_bit(&header_bc, 1);
- vp9_write_prob(&header_bc, prob);
- } else {
- vp9_write_bit(&header_bc, 0);
- }
- }
- }
- }
-
- vp9_write_bit(&header_bc, (xd->update_mb_segmentation_data) ? 1 : 0);
-
- // segment_reference_frames(cpi);
-
- if (xd->update_mb_segmentation_data) {
- vp9_write_bit(&header_bc, (xd->mb_segment_abs_delta) ? 1 : 0);
-
- // For each segments id...
- for (i = 0; i < MAX_MB_SEGMENTS; i++) {
- // For each segmentation codable feature...
- for (j = 0; j < SEG_LVL_MAX; j++) {
- const int8_t data = vp9_get_segdata(xd, i, j);
- const int data_max = vp9_seg_feature_data_max(j);
-
- // If the feature is enabled...
- if (vp9_segfeature_active(xd, i, j)) {
- vp9_write_bit(&header_bc, 1);
-
- // Is the segment data signed..
- if (vp9_is_segfeature_signed(j)) {
- // Encode the relevant feature data
- if (data < 0) {
- vp9_encode_unsigned_max(&header_bc, -data, data_max);
- vp9_write_bit(&header_bc, 1);
- } else {
- vp9_encode_unsigned_max(&header_bc, data, data_max);
- vp9_write_bit(&header_bc, 0);
- }
- } else {
- // Unsigned data element so no sign bit needed
- vp9_encode_unsigned_max(&header_bc, data, data_max);
- }
- } else {
- vp9_write_bit(&header_bc, 0);
- }
- }
- }
- }
- }
-
- // Encode the common prediction model status flag probability updates for
- // the reference frame
- update_refpred_stats(cpi);
- if (pc->frame_type != KEY_FRAME) {
- for (i = 0; i < PREDICTION_PROBS; i++) {
- if (cpi->ref_pred_probs_update[i]) {
- vp9_write_bit(&header_bc, 1);
- vp9_write_prob(&header_bc, pc->ref_pred_probs[i]);
- } else {
- vp9_write_bit(&header_bc, 0);
- }
- }
- }
-
+  // Lossless mode: note this bit must be coded before the loop filter fields
vp9_write_bit(&header_bc, cpi->mb.e_mbd.lossless);
- if (cpi->mb.e_mbd.lossless) {
- pc->txfm_mode = ONLY_4X4;
- } else {
- if (pc->txfm_mode == TX_MODE_SELECT) {
- pc->prob_tx[0] = get_prob(cpi->txfm_count_32x32p[TX_4X4] +
- cpi->txfm_count_16x16p[TX_4X4] +
- cpi->txfm_count_8x8p[TX_4X4],
- cpi->txfm_count_32x32p[TX_4X4] +
- cpi->txfm_count_32x32p[TX_8X8] +
- cpi->txfm_count_32x32p[TX_16X16] +
- cpi->txfm_count_32x32p[TX_32X32] +
- cpi->txfm_count_16x16p[TX_4X4] +
- cpi->txfm_count_16x16p[TX_8X8] +
- cpi->txfm_count_16x16p[TX_16X16] +
- cpi->txfm_count_8x8p[TX_4X4] +
- cpi->txfm_count_8x8p[TX_8X8]);
- pc->prob_tx[1] = get_prob(cpi->txfm_count_32x32p[TX_8X8] +
- cpi->txfm_count_16x16p[TX_8X8],
- cpi->txfm_count_32x32p[TX_8X8] +
- cpi->txfm_count_32x32p[TX_16X16] +
- cpi->txfm_count_32x32p[TX_32X32] +
- cpi->txfm_count_16x16p[TX_8X8] +
- cpi->txfm_count_16x16p[TX_16X16]);
- pc->prob_tx[2] = get_prob(cpi->txfm_count_32x32p[TX_16X16],
- cpi->txfm_count_32x32p[TX_16X16] +
- cpi->txfm_count_32x32p[TX_32X32]);
- } else {
- pc->prob_tx[0] = 128;
- pc->prob_tx[1] = 128;
- pc->prob_tx[2] = 128;
- }
- vp9_write_literal(&header_bc, pc->txfm_mode <= 3 ? pc->txfm_mode : 3, 2);
- if (pc->txfm_mode > ALLOW_16X16) {
- vp9_write_bit(&header_bc, pc->txfm_mode == TX_MODE_SELECT);
- }
- if (pc->txfm_mode == TX_MODE_SELECT) {
- vp9_write_prob(&header_bc, pc->prob_tx[0]);
- vp9_write_prob(&header_bc, pc->prob_tx[1]);
- vp9_write_prob(&header_bc, pc->prob_tx[2]);
- }
- }
// Encode the loop filter level and type
vp9_write_bit(&header_bc, pc->filter_type);
@@ -2727,6 +1982,141 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
active_section = 7;
#endif
+ // Signal whether or not Segmentation is enabled
+ vp9_write_bit(&header_bc, (xd->segmentation_enabled) ? 1 : 0);
+
+ // Indicate which features are enabled
+ if (xd->segmentation_enabled) {
+ // Indicate whether or not the segmentation map is being updated.
+ vp9_write_bit(&header_bc, (xd->update_mb_segmentation_map) ? 1 : 0);
+
+ // If it is, then indicate the method that will be used.
+ if (xd->update_mb_segmentation_map) {
+ // Select the coding strategy (temporal or spatial)
+ vp9_choose_segmap_coding_method(cpi);
+ // Send the tree probabilities used to decode unpredicted
+ // macro-block segments
+ for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) {
+ const int prob = xd->mb_segment_tree_probs[i];
+ if (prob != 255) {
+ vp9_write_bit(&header_bc, 1);
+ vp9_write_prob(&header_bc, prob);
+ } else {
+ vp9_write_bit(&header_bc, 0);
+ }
+ }
+
+ // Write out the chosen coding method.
+ vp9_write_bit(&header_bc, (pc->temporal_update) ? 1 : 0);
+ if (pc->temporal_update) {
+ for (i = 0; i < PREDICTION_PROBS; i++) {
+ const int prob = pc->segment_pred_probs[i];
+ if (prob != 255) {
+ vp9_write_bit(&header_bc, 1);
+ vp9_write_prob(&header_bc, prob);
+ } else {
+ vp9_write_bit(&header_bc, 0);
+ }
+ }
+ }
+ }
+
+ vp9_write_bit(&header_bc, (xd->update_mb_segmentation_data) ? 1 : 0);
+
+ // segment_reference_frames(cpi);
+
+ if (xd->update_mb_segmentation_data) {
+ vp9_write_bit(&header_bc, (xd->mb_segment_abs_delta) ? 1 : 0);
+
+      // For each segment id...
+ for (i = 0; i < MAX_MB_SEGMENTS; i++) {
+ // For each segmentation codable feature...
+ for (j = 0; j < SEG_LVL_MAX; j++) {
+ const int8_t data = vp9_get_segdata(xd, i, j);
+ const int data_max = vp9_seg_feature_data_max(j);
+
+ // If the feature is enabled...
+ if (vp9_segfeature_active(xd, i, j)) {
+ vp9_write_bit(&header_bc, 1);
+
+          // Is the segment data signed?
+ if (vp9_is_segfeature_signed(j)) {
+ // Encode the relevant feature data
+ if (data < 0) {
+ vp9_encode_unsigned_max(&header_bc, -data, data_max);
+ vp9_write_bit(&header_bc, 1);
+ } else {
+ vp9_encode_unsigned_max(&header_bc, data, data_max);
+ vp9_write_bit(&header_bc, 0);
+ }
+ } else {
+ // Unsigned data element so no sign bit needed
+ vp9_encode_unsigned_max(&header_bc, data, data_max);
+ }
+ } else {
+ vp9_write_bit(&header_bc, 0);
+ }
+ }
+ }
+ }
+ }
+
+ // Encode the common prediction model status flag probability updates for
+ // the reference frame
+ update_refpred_stats(cpi);
+ if (pc->frame_type != KEY_FRAME) {
+ for (i = 0; i < PREDICTION_PROBS; i++) {
+ if (cpi->ref_pred_probs_update[i]) {
+ vp9_write_bit(&header_bc, 1);
+ vp9_write_prob(&header_bc, pc->ref_pred_probs[i]);
+ } else {
+ vp9_write_bit(&header_bc, 0);
+ }
+ }
+ }
+
+ if (cpi->mb.e_mbd.lossless) {
+ pc->txfm_mode = ONLY_4X4;
+ } else {
+ if (pc->txfm_mode == TX_MODE_SELECT) {
+ pc->prob_tx[0] = get_prob(cpi->txfm_count_32x32p[TX_4X4] +
+ cpi->txfm_count_16x16p[TX_4X4] +
+ cpi->txfm_count_8x8p[TX_4X4],
+ cpi->txfm_count_32x32p[TX_4X4] +
+ cpi->txfm_count_32x32p[TX_8X8] +
+ cpi->txfm_count_32x32p[TX_16X16] +
+ cpi->txfm_count_32x32p[TX_32X32] +
+ cpi->txfm_count_16x16p[TX_4X4] +
+ cpi->txfm_count_16x16p[TX_8X8] +
+ cpi->txfm_count_16x16p[TX_16X16] +
+ cpi->txfm_count_8x8p[TX_4X4] +
+ cpi->txfm_count_8x8p[TX_8X8]);
+ pc->prob_tx[1] = get_prob(cpi->txfm_count_32x32p[TX_8X8] +
+ cpi->txfm_count_16x16p[TX_8X8],
+ cpi->txfm_count_32x32p[TX_8X8] +
+ cpi->txfm_count_32x32p[TX_16X16] +
+ cpi->txfm_count_32x32p[TX_32X32] +
+ cpi->txfm_count_16x16p[TX_8X8] +
+ cpi->txfm_count_16x16p[TX_16X16]);
+ pc->prob_tx[2] = get_prob(cpi->txfm_count_32x32p[TX_16X16],
+ cpi->txfm_count_32x32p[TX_16X16] +
+ cpi->txfm_count_32x32p[TX_32X32]);
+ } else {
+ pc->prob_tx[0] = 128;
+ pc->prob_tx[1] = 128;
+ pc->prob_tx[2] = 128;
+ }
+ vp9_write_literal(&header_bc, pc->txfm_mode <= 3 ? pc->txfm_mode : 3, 2);
+ if (pc->txfm_mode > ALLOW_16X16) {
+ vp9_write_bit(&header_bc, pc->txfm_mode == TX_MODE_SELECT);
+ }
+ if (pc->txfm_mode == TX_MODE_SELECT) {
+ vp9_write_prob(&header_bc, pc->prob_tx[0]);
+ vp9_write_prob(&header_bc, pc->prob_tx[1]);
+ vp9_write_prob(&header_bc, pc->prob_tx[2]);
+ }
+ }
+
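The three prob_tx values above form a binary tree over transform sizes: prob_tx[0] is P(TX_4X4) over all selections, prob_tx[1] is P(TX_8X8) given the size is at least 8x8, and prob_tx[2] is P(TX_16X16) given the size is at least 16x16 among 32x32-capable blocks. A worked example with made-up counts, assuming get_prob() rounds as in the earlier sketch:

// Worked example of the tx-size tree probabilities (made-up counts):
//   TX_4X4 = 60, TX_8X8 = 20, TX_16X16 = 15, TX_32X32 = 5     (total 100)
//   prob_tx[0] = get_prob(60, 100) = (60*256 + 50) / 100 = 154  // P(4x4)
//   prob_tx[1] = get_prob(20, 40)  = (20*256 + 20) / 40  = 128  // P(8x8 | >=8x8)
//   prob_tx[2] = get_prob(15, 20)  = (15*256 + 10) / 20  = 192  // P(16x16 | >=16x16)
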
  // If appropriate, update the inter mode probability context and code the
// changes in the bitstream.
if (pc->frame_type != KEY_FRAME) {
@@ -2757,37 +2147,6 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
}
}
-#if CONFIG_NEW_MVREF
- if ((pc->frame_type != KEY_FRAME)) {
- int new_mvref_probs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES-1];
- int i, j;
-
- update_mv_ref_probs(cpi, new_mvref_probs);
-
- for (i = 0; i < MAX_REF_FRAMES; ++i) {
- // Skip the dummy entry for intra ref frame.
- if (i == INTRA_FRAME) {
- continue;
- }
-
- // Encode any mandated updates to probabilities
- for (j = 0; j < MAX_MV_REF_CANDIDATES - 1; ++j) {
- if (new_mvref_probs[i][j] != xd->mb_mv_ref_probs[i][j]) {
- vp9_write(&header_bc, 1, VP9_MVREF_UPDATE_PROB);
- vp9_write_prob(&header_bc, new_mvref_probs[i][j]);
-
- // Only update the persistent copy if this is the "real pack"
- if (!cpi->dummy_packing) {
- xd->mb_mv_ref_probs[i][j] = new_mvref_probs[i][j];
- }
- } else {
- vp9_write(&header_bc, 0, VP9_MVREF_UPDATE_PROB);
- }
- }
- }
- }
-#endif
-
vp9_clear_system_state(); // __asm emms;
vp9_copy(cpi->common.fc.pre_coef_probs_4x4,
@@ -2798,26 +2157,15 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
cpi->common.fc.coef_probs_16x16);
vp9_copy(cpi->common.fc.pre_coef_probs_32x32,
cpi->common.fc.coef_probs_32x32);
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_copy(cpi->common.fc.pre_nzc_probs_4x4,
- cpi->common.fc.nzc_probs_4x4);
- vp9_copy(cpi->common.fc.pre_nzc_probs_8x8,
- cpi->common.fc.nzc_probs_8x8);
- vp9_copy(cpi->common.fc.pre_nzc_probs_16x16,
- cpi->common.fc.nzc_probs_16x16);
- vp9_copy(cpi->common.fc.pre_nzc_probs_32x32,
- cpi->common.fc.nzc_probs_32x32);
- vp9_copy(cpi->common.fc.pre_nzc_pcat_probs,
- cpi->common.fc.nzc_pcat_probs);
- // NOTE that if the counts are reset, we also need to uncomment
- // the count updates in the write_nzc function
- /*
- vp9_zero(cpi->common.fc.nzc_counts_4x4);
- vp9_zero(cpi->common.fc.nzc_counts_8x8);
- vp9_zero(cpi->common.fc.nzc_counts_16x16);
- vp9_zero(cpi->common.fc.nzc_counts_32x32);
- vp9_zero(cpi->common.fc.nzc_pcat_counts);
- */
+#if CONFIG_CODE_ZEROGROUP
+ vp9_copy(cpi->common.fc.pre_zpc_probs_4x4,
+ cpi->common.fc.zpc_probs_4x4);
+ vp9_copy(cpi->common.fc.pre_zpc_probs_8x8,
+ cpi->common.fc.zpc_probs_8x8);
+ vp9_copy(cpi->common.fc.pre_zpc_probs_16x16,
+ cpi->common.fc.zpc_probs_16x16);
+ vp9_copy(cpi->common.fc.pre_zpc_probs_32x32,
+ cpi->common.fc.zpc_probs_32x32);
#endif
vp9_copy(cpi->common.fc.pre_sb_ymode_prob, cpi->common.fc.sb_ymode_prob);
vp9_copy(cpi->common.fc.pre_ymode_prob, cpi->common.fc.ymode_prob);
@@ -2836,8 +2184,8 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
vp9_zero(cpi->common.fc.mv_ref_ct);
update_coef_probs(cpi, &header_bc);
-#if CONFIG_CODE_NONZEROCOUNT
- update_nzc_probs(cpi, &header_bc);
+#if CONFIG_CODE_ZEROGROUP
+ update_zpc_probs(cpi, &header_bc);
#endif
#ifdef ENTROPY_STATS
diff --git a/vp9/encoder/vp9_block.h b/vp9/encoder/vp9_block.h
index aaaaa84be..965797da1 100644
--- a/vp9/encoder/vp9_block.h
+++ b/vp9/encoder/vp9_block.h
@@ -157,12 +157,6 @@ struct macroblock {
unsigned char *active_ptr;
vp9_coeff_count token_costs[TX_SIZE_MAX_SB][BLOCK_TYPES];
-#if CONFIG_CODE_NONZEROCOUNT
- unsigned int nzc_costs_4x4[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][17];
- unsigned int nzc_costs_8x8[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][65];
- unsigned int nzc_costs_16x16[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][257];
- unsigned int nzc_costs_32x32[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][1025];
-#endif
int optimize;
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 5f29c2770..1f20d5e1c 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -278,7 +278,7 @@ static void build_activity_map(VP9_COMP *cpi) {
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
#if ALT_ACT_MEASURE
- xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
+ xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
xd->left_available = (mb_col != 0);
recon_yoffset += 16;
#endif
@@ -305,8 +305,8 @@ static void build_activity_map(VP9_COMP *cpi) {
#if ALT_ACT_MEASURE
// extend the recon for intra prediction
- vp9_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
- xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
+ vp9_extend_mb_row(new_yv12, xd->plane[0].dst.buf + 16,
+ xd->plane[1].dst.buf + 8, xd->plane[2].dst.buf + 8);
#endif
}
@@ -345,87 +345,6 @@ void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
adjust_act_zbin(cpi, x);
}
-#if CONFIG_NEW_MVREF
-static int vp9_cost_mv_ref_id(vp9_prob * ref_id_probs, int mv_ref_id) {
- int cost;
-
- // Encode the index for the MV reference.
- switch (mv_ref_id) {
- case 0:
- cost = vp9_cost_zero(ref_id_probs[0]);
- break;
- case 1:
- cost = vp9_cost_one(ref_id_probs[0]);
- cost += vp9_cost_zero(ref_id_probs[1]);
- break;
- case 2:
- cost = vp9_cost_one(ref_id_probs[0]);
- cost += vp9_cost_one(ref_id_probs[1]);
- cost += vp9_cost_zero(ref_id_probs[2]);
- break;
- case 3:
- cost = vp9_cost_one(ref_id_probs[0]);
- cost += vp9_cost_one(ref_id_probs[1]);
- cost += vp9_cost_one(ref_id_probs[2]);
- break;
-
- // TRAP.. This should not happen
- default:
- assert(0);
- break;
- }
- return cost;
-}
-
-// Estimate the cost of each coding the vector using each reference candidate
-static unsigned int pick_best_mv_ref(MACROBLOCK *x,
- MV_REFERENCE_FRAME ref_frame,
- int_mv target_mv,
- int_mv * mv_ref_list,
- int_mv * best_ref) {
- int i;
- int best_index = 0;
- int cost, cost2;
- int zero_seen = (mv_ref_list[0].as_int) ? 0 : 1;
- MACROBLOCKD *xd = &x->e_mbd;
- int max_mv = MV_MAX;
-
- cost = vp9_cost_mv_ref_id(xd->mb_mv_ref_probs[ref_frame], 0) +
- vp9_mv_bit_cost(&target_mv, &mv_ref_list[0], x->nmvjointcost,
- x->mvcost, 96, xd->allow_high_precision_mv);
-
- for (i = 1; i < MAX_MV_REF_CANDIDATES; ++i) {
- // If we see a 0,0 reference vector for a second time we have reached
- // the end of the list of valid candidate vectors.
- if (!mv_ref_list[i].as_int) {
- if (zero_seen)
- break;
- else
- zero_seen = 1;
- }
-
- // Check for cases where the reference choice would give rise to an
- // uncodable/out of range residual for row or col.
- if ((abs(target_mv.as_mv.row - mv_ref_list[i].as_mv.row) > max_mv) ||
- (abs(target_mv.as_mv.col - mv_ref_list[i].as_mv.col) > max_mv)) {
- continue;
- }
-
- cost2 = vp9_cost_mv_ref_id(xd->mb_mv_ref_probs[ref_frame], i) +
- vp9_mv_bit_cost(&target_mv, &mv_ref_list[i], x->nmvjointcost,
- x->mvcost, 96, xd->allow_high_precision_mv);
-
- if (cost2 < cost) {
- cost = cost2;
- best_index = i;
- }
- }
- best_ref->as_int = mv_ref_list[best_index].as_int;
-
- return best_index;
-}
-#endif
-
static void update_state(VP9_COMP *cpi,
PICK_MODE_CONTEXT *ctx,
BLOCK_SIZE_TYPE bsize,
@@ -561,31 +480,11 @@ static void update_state(VP9_COMP *cpi,
if (mbmi->mode == SPLITMV || mbmi->mode == NEWMV) {
int_mv best_mv, best_second_mv;
MV_REFERENCE_FRAME rf = mbmi->ref_frame;
-#if CONFIG_NEW_MVREF
- unsigned int best_index;
- MV_REFERENCE_FRAME sec_ref_frame = mbmi->second_ref_frame;
-#endif
best_mv.as_int = ctx->best_ref_mv.as_int;
best_second_mv.as_int = ctx->second_best_ref_mv.as_int;
if (mbmi->mode == NEWMV) {
best_mv.as_int = mbmi->ref_mvs[rf][0].as_int;
best_second_mv.as_int = mbmi->ref_mvs[mbmi->second_ref_frame][0].as_int;
-#if CONFIG_NEW_MVREF
- best_index = pick_best_mv_ref(x, rf, mbmi->mv[0],
- mbmi->ref_mvs[rf], &best_mv);
- mbmi->best_index = best_index;
- ++cpi->mb_mv_ref_count[rf][best_index];
-
- if (mbmi->second_ref_frame > 0) {
- unsigned int best_index;
- best_index =
- pick_best_mv_ref(x, sec_ref_frame, mbmi->mv[1],
- mbmi->ref_mvs[sec_ref_frame],
- &best_second_mv);
- mbmi->best_second_index = best_index;
- ++cpi->mb_mv_ref_count[sec_ref_frame][best_index];
- }
-#endif
}
mbmi->best_mv.as_int = best_mv.as_int;
mbmi->best_second_mv.as_int = best_second_mv.as_int;
@@ -652,9 +551,6 @@ static void set_offsets(VP9_COMP *cpi,
xd->above_context = cm->above_context + mb_col;
xd->left_context = cm->left_context + (mb_row & 3);
- // GF active flags data structure
- x->gf_active_ptr = (signed char *)&cpi->gf_active_flags[idx_map];
-
// Activity map pointer
x->mb_activity_ptr = &cpi->mb_activity_map[idx_map];
x->active_ptr = cpi->active_map + idx_map;
@@ -666,9 +562,7 @@ static void set_offsets(VP9_COMP *cpi,
xd->prev_mode_info_context = cm->prev_mi + idx_str;
// Set up destination pointers
- setup_pred_block(&xd->dst,
- &cm->yv12_fb[dst_fb_idx],
- mb_row, mb_col, NULL, NULL);
+ setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mb_row, mb_col);
/* Set up limit values for MV components to prevent them from
* extending beyond the UMV borders assuming 16x16 block size */
@@ -844,9 +738,6 @@ static void update_stats(VP9_COMP *cpi, int mb_row, int mb_col) {
if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame == LAST_FRAME))
cpi->inter_zz_count++;
}
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_update_nzc_counts(&cpi->common, xd, mb_row, mb_col);
-#endif
}
static void encode_sb(VP9_COMP *cpi,
@@ -870,7 +761,7 @@ static void encode_sb(VP9_COMP *cpi,
update_stats(cpi, mb_row, mb_col);
cpi->partition_count[partition_plane(bsize)][PARTITION_NONE]++;
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
#if CONFIG_SBSEGMENT
@@ -888,7 +779,7 @@ static void encode_sb(VP9_COMP *cpi,
if (output_enabled) {
update_stats(cpi, mb_row, mb_col + i);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
}
@@ -906,7 +797,7 @@ static void encode_sb(VP9_COMP *cpi,
if (output_enabled) {
update_stats(cpi, mb_row + i, mb_col);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
}
@@ -937,7 +828,7 @@ static void encode_sb(VP9_COMP *cpi,
if (output_enabled) {
update_stats(cpi, mb_row + y_idx, mb_col + x_idx);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
}
@@ -970,7 +861,7 @@ static void encode_sb64(VP9_COMP *cpi,
1, mb_row, mb_col, bsize);
update_stats(cpi, mb_row, mb_col);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
cpi->partition_count[partition_plane(bsize)][PARTITION_NONE]++;
#if CONFIG_SBSEGMENT
@@ -985,7 +876,7 @@ static void encode_sb64(VP9_COMP *cpi,
1, mb_row, mb_col + i * 2, BLOCK_SIZE_SB32X64);
update_stats(cpi, mb_row, mb_col + i * 2);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
} else if (is_sb[0] == BLOCK_SIZE_SB64X32) {
@@ -999,7 +890,7 @@ static void encode_sb64(VP9_COMP *cpi,
1, mb_row + i * 2, mb_col, BLOCK_SIZE_SB64X32);
update_stats(cpi, mb_row + i * 2, mb_col);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
#endif
@@ -1344,8 +1235,11 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) {
// Copy data over into macro block data structures.
x->src = *cpi->Source;
- xd->pre = cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]];
- xd->dst = cm->yv12_fb[cm->new_fb_idx];
+
+ // TODO(jkoleszar): are these initializations required?
+ setup_pre_planes(xd, &cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]], NULL,
+ 0, 0, NULL, NULL);
+ setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], 0, 0);
// set up frame for intra coded blocks
vp9_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
@@ -1444,15 +1338,11 @@ static void encode_frame_internal(VP9_COMP *cpi) {
vp9_zero(cpi->coef_counts_16x16);
vp9_zero(cpi->coef_counts_32x32);
vp9_zero(cm->fc.eob_branch_counts);
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_zero(cm->fc.nzc_counts_4x4);
- vp9_zero(cm->fc.nzc_counts_8x8);
- vp9_zero(cm->fc.nzc_counts_16x16);
- vp9_zero(cm->fc.nzc_counts_32x32);
- vp9_zero(cm->fc.nzc_pcat_counts);
-#endif
-#if CONFIG_NEW_MVREF
- vp9_zero(cpi->mb_mv_ref_count);
+#if CONFIG_CODE_ZEROGROUP
+ vp9_zero(cm->fc.zpc_counts_4x4);
+ vp9_zero(cm->fc.zpc_counts_8x8);
+ vp9_zero(cm->fc.zpc_counts_16x16);
+ vp9_zero(cm->fc.zpc_counts_32x32);
#endif
cpi->mb.e_mbd.lossless = (cm->base_qindex == 0 &&
@@ -1506,6 +1396,8 @@ static void encode_frame_internal(VP9_COMP *cpi) {
encode_sb_row(cpi, mb_row, &tp, &totalrate);
}
cpi->tok_count[tile_col] = (unsigned int)(tp - tp_old);
+ assert(tp - cpi->tok <=
+ get_token_alloc(cm->mb_rows, cm->mb_cols));
}
}
}
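
The new assert bounds the tokens actually produced against the size of the preallocated pool; get_token_alloc() presumably reserves the worst case of one TOKENEXTRA per coefficient plus a little per-macroblock slack for the end-of-superblock markers. A plausible sketch, not the exact allocator:

// Plausible shape of the bound checked above (illustrative only): one
// token per coefficient in the worst case (24 4x4 blocks of 16 coeffs
// per MB) plus slack for the EOSB markers.
static int get_token_alloc_sketch(int mb_rows, int mb_cols) {
  return mb_rows * mb_cols * (24 * 16 + 4);
}
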
@@ -1982,137 +1874,6 @@ static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
#endif
}
-#if CONFIG_CODE_NONZEROCOUNT
-static void gather_nzcs_mb16(VP9_COMMON *const cm,
- MACROBLOCKD *xd) {
- int i;
- vpx_memset(xd->mode_info_context->mbmi.nzcs, 0,
- 384 * sizeof(xd->mode_info_context->mbmi.nzcs[0]));
- switch (xd->mode_info_context->mbmi.txfm_size) {
- case TX_4X4:
- for (i = 0; i < 24; ++i) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- case TX_8X8:
- for (i = 0; i < 16; i += 4) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
- xd->mode_info_context->mbmi.mode == SPLITMV) {
- for (i = 16; i < 24; ++i) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- } else {
- for (i = 16; i < 24; i += 4) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- }
- break;
-
- case TX_16X16:
- xd->mode_info_context->mbmi.nzcs[0] = xd->nzcs[0];
- for (i = 16; i < 24; i += 4) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- default:
- break;
- }
-}
-
-static void gather_nzcs_sb32(VP9_COMMON *const cm,
- MACROBLOCKD *xd) {
- MODE_INFO *m = xd->mode_info_context;
- int mis = cm->mode_info_stride;
- int i, j;
-
- vpx_memset(m->mbmi.nzcs, 0,
- 384 * sizeof(xd->mode_info_context->mbmi.nzcs[0]));
- switch (xd->mode_info_context->mbmi.txfm_size) {
- case TX_4X4:
- for (i = 0; i < 96; ++i) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- case TX_8X8:
- for (i = 0; i < 96; i += 4) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- case TX_16X16:
- for (i = 0; i < 96; i += 16) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- case TX_32X32:
- xd->mode_info_context->mbmi.nzcs[0] = xd->nzcs[0];
- for (i = 64; i < 96; i += 16) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- default:
- break;
- }
- for (i = 0; i < 2; ++i)
- for (j = 0; j < 2; ++j) {
- if (i == 0 && j == 0) continue;
- vpx_memcpy((m + j + mis * i)->mbmi.nzcs, m->mbmi.nzcs,
- 384 * sizeof(m->mbmi.nzcs[0]));
- }
-}
-
-static void gather_nzcs_sb64(VP9_COMMON *const cm,
- MACROBLOCKD *xd) {
- MODE_INFO *m = xd->mode_info_context;
- int mis = cm->mode_info_stride;
- int i, j;
-
- vpx_memset(xd->mode_info_context->mbmi.nzcs, 0,
- 384 * sizeof(xd->mode_info_context->mbmi.nzcs[0]));
- switch (xd->mode_info_context->mbmi.txfm_size) {
- case TX_4X4:
- for (i = 0; i < 384; ++i) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- case TX_8X8:
- for (i = 0; i < 384; i += 4) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- case TX_16X16:
- for (i = 0; i < 384; i += 16) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- case TX_32X32:
- for (i = 0; i < 384; i += 64) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- default:
- break;
- }
- for (i = 0; i < 4; ++i)
- for (j = 0; j < 4; ++j) {
- if (i == 0 && j == 0) continue;
- vpx_memcpy((m + j + mis * i)->mbmi.nzcs, m->mbmi.nzcs,
- 384 * sizeof(m->mbmi.nzcs[0]));
- }
-}
-#endif
-
static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
int output_enabled,
int mb_row, int mb_col) {
@@ -2188,7 +1949,7 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
if (output_enabled)
sum_intra_stats(cpi, x);
} else {
- int ref_fb_idx;
+ int ref_fb_idx, second_ref_fb_idx;
#ifdef ENC_DEBUG
if (enc_debug)
printf("Mode %d skip %d tx_size %d ref %d ref2 %d mv %d %d interp %d\n",
@@ -2207,27 +1968,21 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
else
ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];
- setup_pred_block(&xd->pre,
- &cpi->common.yv12_fb[ref_fb_idx],
- mb_row, mb_col,
- &xd->scale_factor[0], &xd->scale_factor_uv[0]);
-
if (mbmi->second_ref_frame > 0) {
- int second_ref_fb_idx;
-
if (mbmi->second_ref_frame == LAST_FRAME)
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->lst_fb_idx];
else if (mbmi->second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->gld_fb_idx];
else
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];
-
- setup_pred_block(&xd->second_pre,
- &cpi->common.yv12_fb[second_ref_fb_idx],
- mb_row, mb_col,
- &xd->scale_factor[1], &xd->scale_factor_uv[1]);
}
+ setup_pre_planes(xd,
+ &cpi->common.yv12_fb[ref_fb_idx],
+ mbmi->second_ref_frame > 0 ? &cpi->common.yv12_fb[second_ref_fb_idx]
+ : NULL,
+ mb_row, mb_col, xd->scale_factor, xd->scale_factor_uv);
+
if (!x->skip) {
vp9_encode_inter16x16(cm, x, mb_row, mb_col);
} else {
@@ -2235,11 +1990,11 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
#if CONFIG_COMP_INTERINTRA_PRED
if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
vp9_build_interintra_16x16_predictors_mb(xd,
- xd->dst.y_buffer,
- xd->dst.u_buffer,
- xd->dst.v_buffer,
- xd->dst.y_stride,
- xd->dst.uv_stride);
+ xd->plane[0].dst.buf,
+ xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf,
+ xd->plane[0].dst.stride,
+ xd->plane[1].dst.stride);
}
#endif
}
@@ -2277,30 +2032,27 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
printf("final y\n");
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j++)
- printf("%3d ", xd->dst.y_buffer[i * xd->dst.y_stride + j]);
+ printf("%3d ", xd->plane[0].dst.buf[i * xd->plane[0].dst.stride + j]);
printf("\n");
}
printf("\n");
printf("final u\n");
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++)
- printf("%3d ", xd->dst.u_buffer[i * xd->dst.uv_stride + j]);
+ printf("%3d ", xd->plane[1].dst.buf[i * xd->plane[1].dst.stride + j]);
printf("\n");
}
printf("\n");
printf("final v\n");
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++)
- printf("%3d ", xd->dst.v_buffer[i * xd->dst.uv_stride + j]);
+ printf("%3d ", xd->plane[2].dst.buf[i * xd->plane[1].dst.stride + j]);
printf("\n");
}
fflush(stdout);
}
#endif
-#if CONFIG_CODE_NONZEROCOUNT
- gather_nzcs_mb16(cm, xd);
-#endif
vp9_tokenize_mb(cpi, xd, t, !output_enabled);
} else {
@@ -2342,8 +2094,6 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
}
}
-void __attribute__((noinline)) hi(void) { }
-
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
int output_enabled, int mb_row, int mb_col,
BLOCK_SIZE_TYPE bsize) {
@@ -2351,13 +2101,13 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const uint8_t *src = x->src.y_buffer;
- uint8_t *dst = xd->dst.y_buffer;
+ uint8_t *dst = xd->plane[0].dst.buf;
const uint8_t *usrc = x->src.u_buffer;
- uint8_t *udst = xd->dst.u_buffer;
+ uint8_t *udst = xd->plane[1].dst.buf;
const uint8_t *vsrc = x->src.v_buffer;
- uint8_t *vdst = xd->dst.v_buffer;
- int src_y_stride = x->src.y_stride, dst_y_stride = xd->dst.y_stride;
- int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;
+ uint8_t *vdst = xd->plane[2].dst.buf;
+ int src_y_stride = x->src.y_stride, dst_y_stride = xd->plane[0].dst.stride;
+ int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->plane[1].dst.stride;
int n;
MODE_INFO *mi = x->e_mbd.mode_info_context;
unsigned int segment_id = mi->mbmi.segment_id;
@@ -2407,7 +2157,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
if (output_enabled)
sum_intra_stats(cpi, x);
} else {
- int ref_fb_idx;
+ int ref_fb_idx, second_ref_fb_idx;
assert(cm->frame_type != KEY_FRAME);
@@ -2418,27 +2168,21 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
else
ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];
- setup_pred_block(&xd->pre,
- &cpi->common.yv12_fb[ref_fb_idx],
- mb_row, mb_col,
- &xd->scale_factor[0], &xd->scale_factor_uv[0]);
-
if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
- int second_ref_fb_idx;
-
if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->lst_fb_idx];
else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->gld_fb_idx];
else
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];
-
- setup_pred_block(&xd->second_pre,
- &cpi->common.yv12_fb[second_ref_fb_idx],
- mb_row, mb_col,
- &xd->scale_factor[1], &xd->scale_factor_uv[1]);
}
+ setup_pre_planes(xd,
+ &cpi->common.yv12_fb[ref_fb_idx],
+ xd->mode_info_context->mbmi.second_ref_frame > 0
+ ? &cpi->common.yv12_fb[second_ref_fb_idx] : NULL,
+ mb_row, mb_col, xd->scale_factor, xd->scale_factor_uv);
+
vp9_build_inter_predictors_sb(xd, mb_row, mb_col, bsize);
}
@@ -2521,15 +2265,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
break;
default: assert(0);
}
- vp9_recon_sby_s_c(xd, dst, bsize);
- vp9_recon_sbuv_s_c(&x->e_mbd, udst, vdst, bsize);
-#if CONFIG_CODE_NONZEROCOUNT
- if (bsize == BLOCK_SIZE_SB32X32) {
- gather_nzcs_sb32(cm, &x->e_mbd);
- } else {
- gather_nzcs_sb64(cm, &x->e_mbd);
- }
-#endif
+ vp9_recon_sb_c(xd, bsize);
vp9_tokenize_sb(cpi, &x->e_mbd, t, !output_enabled, bsize);
} else {
// FIXME(rbultje): not tile-aware (mi - 1)
diff --git a/vp9/encoder/vp9_encodeintra.c b/vp9/encoder/vp9_encodeintra.c
index bccd22bf9..5c6559bdb 100644
--- a/vp9/encoder/vp9_encodeintra.c
+++ b/vp9/encoder/vp9_encodeintra.c
@@ -88,7 +88,7 @@ void vp9_encode_intra16x16mby(VP9_COMMON *const cm, MACROBLOCK *x) {
vp9_build_intra_predictors_sby_s(xd, BLOCK_SIZE_MB16X16);
vp9_subtract_sby_s_c(x->src_diff,
x->src.y_buffer, x->src.y_stride,
- xd->dst.y_buffer, xd->dst.y_stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
BLOCK_SIZE_MB16X16);
switch (tx_size) {
@@ -115,7 +115,7 @@ void vp9_encode_intra16x16mby(VP9_COMMON *const cm, MACROBLOCK *x) {
break;
}
- vp9_recon_mby(xd);
+ vp9_recon_sby(xd, BLOCK_SIZE_MB16X16);
}
void vp9_encode_intra16x16mbuv(VP9_COMMON *const cm, MACROBLOCK *x) {
@@ -125,7 +125,8 @@ void vp9_encode_intra16x16mbuv(VP9_COMMON *const cm, MACROBLOCK *x) {
vp9_build_intra_predictors_sbuv_s(xd, BLOCK_SIZE_MB16X16);
vp9_subtract_sbuv_s_c(x->src_diff,
x->src.u_buffer, x->src.v_buffer, x->src.uv_stride,
- xd->dst.u_buffer, xd->dst.v_buffer, xd->dst.uv_stride,
+ xd->plane[1].dst.buf, xd->plane[2].dst.buf,
+ xd->plane[1].dst.stride,
BLOCK_SIZE_MB16X16);
switch (tx_size) {
@@ -145,7 +146,7 @@ void vp9_encode_intra16x16mbuv(VP9_COMMON *const cm, MACROBLOCK *x) {
break;
}
- vp9_recon_intra_mbuv(xd);
+ vp9_recon_sbuv(xd, BLOCK_SIZE_MB16X16);
}
void vp9_encode_intra8x8(MACROBLOCK *x, int ib) {
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index dbbde31d2..c841c2823 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -107,11 +107,12 @@ void vp9_subtract_sbuv_s_c(int16_t *diff, const uint8_t *usrc,
static void subtract_mb(MACROBLOCK *x) {
MACROBLOCKD *xd = &x->e_mbd;
vp9_subtract_sby_s_c(x->src_diff, x->src.y_buffer, x->src.y_stride,
- xd->dst.y_buffer, xd->dst.y_stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
BLOCK_SIZE_MB16X16);
vp9_subtract_sbuv_s_c(x->src_diff, x->src.u_buffer, x->src.v_buffer,
x->src.uv_stride,
- xd->dst.u_buffer, xd->dst.v_buffer, xd->dst.uv_stride,
+ xd->plane[1].dst.buf, xd->plane[2].dst.buf,
+ xd->plane[1].dst.stride,
BLOCK_SIZE_MB16X16);
}
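
vp9_subtract_sby_s_c() and its chroma counterpart fill x->src_diff with the per-pixel source-minus-prediction residual that the transforms consume; a minimal scalar sketch of one plane (signature assumed from the calls above):

// Minimal scalar sketch of a one-plane subtract (signature assumed):
// diff[r][c] = src[r][c] - pred[r][c], with pred read from the plane's
// reconstructed dst buffer.
static void subtract_plane_sketch(int16_t *diff, const uint8_t *src,
                                  int src_stride, const uint8_t *pred,
                                  int pred_stride, int rows, int cols) {
  int r, c;
  for (r = 0; r < rows; r++) {
    for (c = 0; c < cols; c++)
      diff[c] = (int16_t)src[c] - (int16_t)pred[c];
    diff += cols;
    src += src_stride;
    pred += pred_stride;
  }
}
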
@@ -294,10 +295,10 @@ static int trellis_get_coeff_context(const int *scan,
int idx, int token,
uint8_t *token_cache,
int pad, int l) {
- int bak = token_cache[idx], pt;
- token_cache[idx] = token;
+ int bak = token_cache[scan[idx]], pt;
+ token_cache[scan[idx]] = token;
pt = vp9_get_coef_context(scan, nb, pad, token_cache, idx + 1, l);
- token_cache[idx] = bak;
+ token_cache[scan[idx]] = bak;
return pt;
}
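
The change above fixes the cache indexing: vp9_get_coef_context() looks neighbours up by raster (coefficient) position, while idx counts positions in scan order, so the cache entry has to live at token_cache[scan[idx]]. A toy illustration:

// Toy illustration of why the index matters: the 4x4 zigzag scan begins
//   scan = { 0, 1, 4, 8, 5, 2, ... }
// so the coefficient visited at scan position idx == 3 sits at raster
// position scan[3] == 8. Neighbour contexts are fetched by raster
// position, so its token must be cached at token_cache[8], not
// token_cache[3].
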
@@ -325,24 +326,6 @@ static void optimize_b(VP9_COMMON *const cm,
int const *scan, *nb;
const int mul = 1 + (tx_size == TX_32X32);
uint8_t token_cache[1024];
-#if CONFIG_CODE_NONZEROCOUNT
- // TODO(debargha): the dynamic programming approach used in this function
- // is not compatible with the true rate cost when nzcs are used. Note
- // the total rate is the sum of the nzc rate and the indicvidual token
- // rates. The latter part can be optimized in this function, but because
- // the nzc rate is a function of all the other tokens without a Markov
- // relationship this rate cannot be considered correctly.
- // The current implementation uses a suboptimal approach to account for
- // the nzc rates somewhat, but in reality the optimization approach needs
- // to change substantially.
- const int nzc_used = get_nzc_used(tx_size);
- uint16_t nzc = xd->nzcs[ib];
- uint16_t nzc0, nzc1;
- uint16_t final_nzc = 0, final_nzc_exp;
- int nzc_context = vp9_get_nzc_context(cm, xd, ib);
- unsigned int *nzc_cost;
- nzc0 = nzc1 = nzc;
-#endif
assert((!type && !pb_idx.plane) || (type && pb_idx.plane));
dqcoeff_ptr = BLOCK_OFFSET(xd->plane[pb_idx.plane].dqcoeff, pb_idx.block, 16);
@@ -352,9 +335,6 @@ static void optimize_b(VP9_COMMON *const cm,
case TX_4X4: {
const TX_TYPE tx_type = get_tx_type_4x4(xd, ib);
default_eob = 16;
-#if CONFIG_CODE_NONZEROCOUNT
- nzc_cost = mb->nzc_costs_4x4[nzc_context][ref][type];
-#endif
if (tx_type == DCT_ADST) {
scan = vp9_col_scan_4x4;
} else if (tx_type == ADST_DCT) {
@@ -377,9 +357,6 @@ static void optimize_b(VP9_COMMON *const cm,
scan = vp9_default_zig_zag1d_8x8;
}
default_eob = 64;
-#if CONFIG_CODE_NONZEROCOUNT
- nzc_cost = mb->nzc_costs_8x8[nzc_context][ref][type];
-#endif
break;
}
case TX_16X16: {
@@ -395,17 +372,11 @@ static void optimize_b(VP9_COMMON *const cm,
scan = vp9_default_zig_zag1d_16x16;
}
default_eob = 256;
-#if CONFIG_CODE_NONZEROCOUNT
- nzc_cost = mb->nzc_costs_16x16[nzc_context][ref][type];
-#endif
break;
}
case TX_32X32:
scan = vp9_default_zig_zag1d_32x32;
default_eob = 1024;
-#if CONFIG_CODE_NONZEROCOUNT
- nzc_cost = mb->nzc_costs_32x32[nzc_context][ref][type];
-#endif
break;
}
assert(eob <= default_eob);
@@ -417,11 +388,7 @@ static void optimize_b(VP9_COMMON *const cm,
rddiv = mb->rddiv;
memset(best_index, 0, sizeof(best_index));
/* Initialize the sentinel node of the trellis. */
-#if CONFIG_CODE_NONZEROCOUNT
- tokens[eob][0].rate = nzc_used ? nzc_cost[nzc] : 0;
-#else
tokens[eob][0].rate = 0;
-#endif
tokens[eob][0].error = 0;
tokens[eob][0].next = default_eob;
tokens[eob][0].token = DCT_EOB_TOKEN;
@@ -429,14 +396,11 @@ static void optimize_b(VP9_COMMON *const cm,
*(tokens[eob] + 1) = *(tokens[eob] + 0);
next = eob;
for (i = 0; i < eob; i++)
- token_cache[i] = vp9_dct_value_tokens_ptr[qcoeff_ptr[scan[i]]].Token;
+ token_cache[scan[i]] = vp9_dct_value_tokens_ptr[qcoeff_ptr[scan[i]]].token;
nb = vp9_get_coef_neighbors_handle(scan, &pad);
for (i = eob; i-- > i0;) {
int base_bits, d2, dx;
-#if CONFIG_CODE_NONZEROCOUNT
- int new_nzc0, new_nzc1;
-#endif
rc = scan[i];
x = qcoeff_ptr[rc];
@@ -448,7 +412,7 @@ static void optimize_b(VP9_COMMON *const cm,
/* Evaluate the first possibility for this state. */
rate0 = tokens[next][0].rate;
rate1 = tokens[next][1].rate;
- t0 = (vp9_dct_value_tokens_ptr + x)->Token;
+ t0 = (vp9_dct_value_tokens_ptr + x)->token;
/* Consider both possible successor states. */
if (next < default_eob) {
band = get_coef_band(scan, tx_size, i + 1);
@@ -471,9 +435,6 @@ static void optimize_b(VP9_COMMON *const cm,
tokens[i][0].token = t0;
tokens[i][0].qc = x;
best_index[i][0] = best;
-#if CONFIG_CODE_NONZEROCOUNT
- new_nzc0 = (best ? nzc1 : nzc0);
-#endif
/* Evaluate the second possibility for this state. */
rate0 = tokens[next][0].rate;
@@ -500,16 +461,8 @@ static void optimize_b(VP9_COMMON *const cm,
DCT_EOB_TOKEN : ZERO_TOKEN;
t1 = tokens[next][1].token == DCT_EOB_TOKEN ?
DCT_EOB_TOKEN : ZERO_TOKEN;
-#if CONFIG_CODE_NONZEROCOUNT
- // Account for rate drop because of the nzc change.
- // TODO(debargha): Find a better solution
- if (nzc_used) {
- rate0 -= nzc_cost[nzc0] - nzc_cost[nzc0 - 1];
- rate1 -= nzc_cost[nzc1] - nzc_cost[nzc1 - 1];
- }
-#endif
} else {
- t0 = t1 = (vp9_dct_value_tokens_ptr + x)->Token;
+ t0 = t1 = (vp9_dct_value_tokens_ptr + x)->token;
}
if (next < default_eob) {
band = get_coef_band(scan, tx_size, i + 1);
@@ -542,11 +495,6 @@ static void optimize_b(VP9_COMMON *const cm,
tokens[i][1].token = best ? t1 : t0;
tokens[i][1].qc = x;
best_index[i][1] = best;
-#if CONFIG_CODE_NONZEROCOUNT
- new_nzc1 = (best ? nzc1 : nzc0) - (!x);
- nzc0 = new_nzc0;
- nzc1 = new_nzc1;
-#endif
/* Finally, make this the new head of the trellis. */
next = i;
}
@@ -585,17 +533,13 @@ static void optimize_b(VP9_COMMON *const cm,
rate1 += mb->token_costs[tx_size][type][ref][band][pt][t1];
UPDATE_RD_COST();
best = rd_cost1 < rd_cost0;
-#if CONFIG_CODE_NONZEROCOUNT
- final_nzc_exp = (best ? nzc1 : nzc0);
-#endif
final_eob = i0 - 1;
+ vpx_memset(qcoeff_ptr, 0, sizeof(*qcoeff_ptr) * (16 << (tx_size * 2)));
+ vpx_memset(dqcoeff_ptr, 0, sizeof(*dqcoeff_ptr) * (16 << (tx_size * 2)));
for (i = next; i < eob; i = next) {
x = tokens[i][best].qc;
if (x) {
final_eob = i;
-#if CONFIG_CODE_NONZEROCOUNT
- ++final_nzc;
-#endif
}
rc = scan[i];
qcoeff_ptr[rc] = x;
@@ -608,10 +552,6 @@ static void optimize_b(VP9_COMMON *const cm,
xd->plane[pb_idx.plane].eobs[pb_idx.block] = final_eob;
*a = *l = (final_eob > 0);
-#if CONFIG_CODE_NONZEROCOUNT
- assert(final_nzc == final_nzc_exp);
- xd->nzcs[ib] = final_nzc;
-#endif
}
void vp9_optimize_sby_32x32(VP9_COMMON *const cm, MACROBLOCK *x,
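[editor's note] The two vpx_memset calls added before optimize_b's write-back loop make the pattern explicit: clear the whole coefficient block, then scatter back only the tokens the trellis kept, so anything at or past the new eob is guaranteed zero rather than stale. In sketch form (write_back and its arguments are illustrative, not library API):

#include <stdint.h>
#include <string.h>

/* block_size is 16 << (tx_size * 2) in the code above. */
static void write_back(int16_t *qcoeff, const int *scan,
                       const int16_t *kept, int n_kept, int block_size) {
  int i;
  memset(qcoeff, 0, sizeof(*qcoeff) * block_size);
  for (i = 0; i < n_kept; i++)
    qcoeff[scan[i]] = kept[i];   /* scatter survivors only */
}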
@@ -903,25 +843,24 @@ void vp9_encode_inter16x16(VP9_COMMON *const cm, MACROBLOCK *x,
int mb_row, int mb_col) {
MACROBLOCKD *const xd = &x->e_mbd;
- vp9_build_inter_predictors_mb(xd, mb_row, mb_col);
+ vp9_build_inter_predictors_sb(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
subtract_mb(x);
vp9_fidct_mb(cm, x);
- vp9_recon_mb(xd);
+ vp9_recon_sb(xd, BLOCK_SIZE_MB16X16);
}
/* this function is used by first pass only */
void vp9_encode_inter16x16y(MACROBLOCK *x, int mb_row, int mb_col) {
MACROBLOCKD *xd = &x->e_mbd;
- vp9_build_inter_predictors_sby(xd, xd->dst.y_buffer, xd->dst.y_stride,
- mb_row, mb_col, BLOCK_SIZE_MB16X16);
+ vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
vp9_subtract_sby_s_c(x->src_diff, x->src.y_buffer, x->src.y_stride,
- xd->dst.y_buffer, xd->dst.y_stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
BLOCK_SIZE_MB16X16);
vp9_transform_sby_4x4(x, BLOCK_SIZE_MB16X16);
vp9_quantize_sby_4x4(x, BLOCK_SIZE_MB16X16);
vp9_inverse_transform_sby_4x4(xd, BLOCK_SIZE_MB16X16);
- vp9_recon_mby(xd);
+ vp9_recon_sby(xd, BLOCK_SIZE_MB16X16);
}
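[editor's note] vp9_encode_inter16x16y above shows the whole inter path in order: predict, subtract, forward transform, quantize, inverse transform, reconstruct. The subtract stage computes source minus prediction into a residual buffer using exactly the buffer/stride pairs this change threads through; a simplified stand-in (subtract_plane is hypothetical, not vp9_subtract_sby_s_c's real signature):

#include <stdint.h>

static void subtract_plane(int16_t *diff,
                           const uint8_t *src, int src_stride,
                           const uint8_t *pred, int pred_stride,
                           int rows, int cols) {
  int r, c;
  for (r = 0; r < rows; r++)
    for (c = 0; c < cols; c++)
      diff[r * cols + c] = (int16_t)src[r * src_stride + c] -
                           (int16_t)pred[r * pred_stride + c];
}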
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index f2e004608..b18dce4be 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -32,6 +32,8 @@
#include "vp9/common/vp9_entropymv.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "./vpx_scale_rtcd.h"
+// TODO(jkoleszar): for setup_dst_planes
+#include "vp9/common/vp9_reconinter.h"
#define OUTPUT_FPF 0
@@ -356,7 +358,7 @@ static void zz_motion_search(VP9_COMP *cpi, MACROBLOCK *x, YV12_BUFFER_CONFIG *r
int ref_stride = d->pre_stride;
// Set up pointers for this macro block recon buffer
- xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
+ xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset;
ref_ptr = (uint8_t *)(*(d->base_pre) + d->pre);
@@ -400,7 +402,7 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
v_fn_ptr.vf = vp9_mse16x16;
// Set up pointers for this macro block recon buffer
- xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
+ xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset;
// Initial step/diamond search centred on best mv
tmp_mv.as_int = 0;
@@ -483,8 +485,8 @@ void vp9_first_pass(VP9_COMP *cpi) {
vp9_clear_system_state(); // __asm emms;
x->src = * cpi->Source;
- xd->pre = *lst_yv12;
- xd->dst = *new_yv12;
+ setup_pre_planes(xd, lst_yv12, NULL, 0, 0, NULL, NULL);
+ setup_dst_planes(xd, new_yv12, 0, 0);
x->partition_info = x->pi;
@@ -533,9 +535,9 @@ void vp9_first_pass(VP9_COMP *cpi) {
int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
set_mb_col(cm, xd, mb_col, 1 << mb_height_log2(BLOCK_SIZE_MB16X16));
- xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
- xd->dst.u_buffer = new_yv12->u_buffer + recon_uvoffset;
- xd->dst.v_buffer = new_yv12->v_buffer + recon_uvoffset;
+ xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
+ xd->plane[1].dst.buf = new_yv12->u_buffer + recon_uvoffset;
+ xd->plane[2].dst.buf = new_yv12->v_buffer + recon_uvoffset;
xd->left_available = (mb_col != 0);
// do intra 16x16 prediction
@@ -599,9 +601,9 @@ void vp9_first_pass(VP9_COMP *cpi) {
}
// Reset to last frame as reference buffer
- xd->pre.y_buffer = lst_yv12->y_buffer + recon_yoffset;
- xd->pre.u_buffer = lst_yv12->u_buffer + recon_uvoffset;
- xd->pre.v_buffer = lst_yv12->v_buffer + recon_uvoffset;
+ xd->plane[0].pre[0].buf = lst_yv12->y_buffer + recon_yoffset;
+ xd->plane[1].pre[0].buf = lst_yv12->u_buffer + recon_uvoffset;
+ xd->plane[2].pre[0].buf = lst_yv12->v_buffer + recon_uvoffset;
// In accumulating a score for the older reference frame
// take the best of the motion predicted score and
@@ -700,8 +702,8 @@ void vp9_first_pass(VP9_COMP *cpi) {
x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
// extend the recon for intra prediction
- vp9_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
- xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
+ vp9_extend_mb_row(new_yv12, xd->plane[0].dst.buf + 16,
+ xd->plane[1].dst.buf + 8, xd->plane[2].dst.buf + 8);
vp9_clear_system_state(); // __asm emms;
}
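[editor's note] setup_pre_planes/setup_dst_planes replace the old whole-struct copies (xd->pre = *lst_yv12; xd->dst = *new_yv12). What such a helper must do is wire each plane descriptor to the right offset of a frame buffer; a guess at the shape, assuming 16x16 luma / 8x8 chroma macroblock geometry (names and signature here are illustrative, not the real setup_dst_planes):

#include <stdint.h>

struct buf_2d { uint8_t *buf; int stride; };

struct yv12_sketch {
  uint8_t *y_buffer, *u_buffer, *v_buffer;
  int y_stride, uv_stride;
};

static void setup_dst_sketch(struct buf_2d dst[3],
                             const struct yv12_sketch *fb,
                             int mb_row, int mb_col) {
  dst[0].buf = fb->y_buffer + 16 * mb_row * fb->y_stride + 16 * mb_col;
  dst[0].stride = fb->y_stride;
  dst[1].buf = fb->u_buffer + 8 * mb_row * fb->uv_stride + 8 * mb_col;
  dst[1].stride = fb->uv_stride;
  dst[2].buf = fb->v_buffer + 8 * mb_row * fb->uv_stride + 8 * mb_col;
  dst[2].stride = fb->uv_stride;
}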
diff --git a/vp9/encoder/vp9_mbgraph.c b/vp9/encoder/vp9_mbgraph.c
index e9da395bc..924d9fdd1 100644
--- a/vp9/encoder/vp9_mbgraph.c
+++ b/vp9/encoder/vp9_mbgraph.c
@@ -71,10 +71,10 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi,
}
vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv);
- vp9_build_inter_predictors_sby(xd, xd->dst.y_buffer, xd->dst.y_stride,
- mb_row, mb_col, BLOCK_SIZE_MB16X16);
+ vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
best_err = vp9_sad16x16(x->src.y_buffer, x->src.y_stride,
- xd->dst.y_buffer, xd->dst.y_stride, INT_MAX);
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
+ INT_MAX);
/* restore UMV window */
x->mv_col_min = tmp_col_min;
@@ -110,15 +110,16 @@ static int do_16x16_motion_search
b->src_stride = x->src.y_stride;
b->src = x->src.y_stride * (n & 12) + (n & 3) * 4;
- d->base_pre = &xd->pre.y_buffer;
- d->pre_stride = xd->pre.y_stride;
- d->pre = xd->pre.y_stride * (n & 12) + (n & 3) * 4;
+ d->base_pre = &xd->plane[0].pre[0].buf;
+ d->pre_stride = xd->plane[0].pre[0].stride;
+ d->pre = xd->plane[0].pre[0].stride * (n & 12) + (n & 3) * 4;
}
// Try zero MV first
// FIXME should really use something like near/nearest MV and/or MV prediction
err = vp9_sad16x16(x->src.y_buffer, x->src.y_stride,
- xd->pre.y_buffer, xd->pre.y_stride, INT_MAX);
+ xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
+ INT_MAX);
dst_mv->as_int = 0;
// Test last reference frame using the previous best mv as the
@@ -162,7 +163,8 @@ static int do_16x16_zerozero_search
// Try zero MV first
// FIXME should really use something like near/nearest MV and/or MV prediction
err = vp9_sad16x16(x->src.y_buffer, x->src.y_stride,
- xd->pre.y_buffer, xd->pre.y_stride, INT_MAX);
+ xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
+ INT_MAX);
dst_mv->as_int = 0;
@@ -189,13 +191,13 @@ static int find_best_16x16_intra
xd->mode_info_context->mbmi.mode = mode;
vp9_build_intra_predictors(x->src.y_buffer, x->src.y_stride,
- xd->dst.y_buffer, xd->dst.y_stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
xd->mode_info_context->mbmi.mode,
bw, bh,
xd->up_available, xd->left_available,
xd->right_available);
err = vp9_sad16x16(x->src.y_buffer, x->src.y_stride,
- xd->dst.y_buffer, xd->dst.y_stride, best_err);
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride, best_err);
// find best
if (err < best_err) {
@@ -234,8 +236,8 @@ static void update_mbgraph_mb_stats
x->src.y_buffer = buf->y_buffer + mb_y_offset;
x->src.y_stride = buf->y_stride;
- xd->dst.y_buffer = cm->yv12_fb[cm->new_fb_idx].y_buffer + mb_y_offset;
- xd->dst.y_stride = cm->yv12_fb[cm->new_fb_idx].y_stride;
+ xd->plane[0].dst.buf = cm->yv12_fb[cm->new_fb_idx].y_buffer + mb_y_offset;
+ xd->plane[0].dst.stride = cm->yv12_fb[cm->new_fb_idx].y_stride;
// do intra 16x16 prediction
intra_error = find_best_16x16_intra(cpi, buf, mb_y_offset,
@@ -247,8 +249,8 @@ static void update_mbgraph_mb_stats
// Golden frame MV search, if it exists and is different than last frame
if (golden_ref) {
int g_motion_error;
- xd->pre.y_buffer = golden_ref->y_buffer + mb_y_offset;
- xd->pre.y_stride = golden_ref->y_stride;
+ xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset;
+ xd->plane[0].pre[0].stride = golden_ref->y_stride;
g_motion_error = do_16x16_motion_search(cpi, prev_golden_ref_mv,
&stats->ref[GOLDEN_FRAME].m.mv,
buf, mb_y_offset,
@@ -263,8 +265,8 @@ static void update_mbgraph_mb_stats
// Alt-ref frame MV search, if it exists and is different than last/golden frame
if (alt_ref) {
int a_motion_error;
- xd->pre.y_buffer = alt_ref->y_buffer + mb_y_offset;
- xd->pre.y_stride = alt_ref->y_stride;
+ xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset;
+ xd->plane[0].pre[0].stride = alt_ref->y_stride;
a_motion_error = do_16x16_zerozero_search(cpi,
&stats->ref[ALTREF_FRAME].m.mv,
buf, mb_y_offset,
@@ -303,9 +305,9 @@ static void update_mbgraph_frame_stats
x->mv_row_max = (cm->mb_rows - 1) * 16 + VP9BORDERINPIXELS
- 16 - VP9_INTERP_EXTEND;
xd->up_available = 0;
- xd->dst.y_stride = buf->y_stride;
- xd->pre.y_stride = buf->y_stride;
- xd->dst.uv_stride = buf->uv_stride;
+ xd->plane[0].dst.stride = buf->y_stride;
+ xd->plane[0].pre[0].stride = buf->y_stride;
+ xd->plane[1].dst.stride = buf->uv_stride;
xd->mode_info_context = &mi_local;
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
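[editor's note] The zero-MV probes in the hunks above score the co-located reference block with vp9_sad16x16 before running a real motion search. A reference (unoptimized) SAD of that shape, for orientation only; the library's version also takes a best-so-far cutoff (the INT_MAX argument above) that this sketch omits:

#include <stdint.h>
#include <stdlib.h>

static unsigned int sad16x16(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride) {
  unsigned int sad = 0;
  int r, c;
  for (r = 0; r < 16; r++)
    for (c = 0; c < 16; c++)
      sad += (unsigned int)abs((int)src[r * src_stride + c] -
                               (int)ref[r * ref_stride + c]);
  return sad;
}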
diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index 85ac5231d..b29487013 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -114,10 +114,10 @@ extern void init_nmvstats();
extern void print_nmvstats();
#endif
-#if CONFIG_CODE_NONZEROCOUNT
-#ifdef NZC_STATS
-extern void init_nzcstats();
-extern void print_nzcstats();
+#if CONFIG_CODE_ZEROGROUP
+#ifdef ZPC_STATS
+extern void init_zpcstats();
+extern void print_zpcstats();
#endif
#endif
@@ -332,10 +332,6 @@ static void dealloc_compressor_data(VP9_COMP *cpi) {
vpx_free(cpi->tok);
cpi->tok = 0;
- // Structure used to monitor GF usage
- vpx_free(cpi->gf_active_flags);
- cpi->gf_active_flags = 0;
-
// Activity mask based per mb zbin adjustments
vpx_free(cpi->mb_activity_map);
cpi->mb_activity_map = 0;
@@ -899,7 +895,7 @@ void vp9_alloc_compressor_data(VP9_COMP *cpi) {
vpx_free(cpi->tok);
{
- unsigned int tokens = cm->mb_rows * cm->mb_cols * (24 * 16 + 1);
+ unsigned int tokens = get_token_alloc(cm->mb_rows, cm->mb_cols);
CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
}
@@ -909,13 +905,6 @@ void vp9_alloc_compressor_data(VP9_COMP *cpi) {
cpi->gf_bad_count = 0;
cpi->gf_update_recommended = 0;
-
- // Structures used to monitor GF usage
- vpx_free(cpi->gf_active_flags);
- CHECK_MEM_ERROR(cpi->gf_active_flags,
- vpx_calloc(1, cm->mb_rows * cm->mb_cols));
- cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
-
vpx_free(cpi->mb_activity_map);
CHECK_MEM_ERROR(cpi->mb_activity_map,
vpx_calloc(sizeof(unsigned int),
@@ -1444,9 +1433,9 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
#ifdef NMV_STATS
init_nmvstats();
#endif
-#if CONFIG_CODE_NONZEROCOUNT
-#ifdef NZC_STATS
- init_nzcstats();
+#if CONFIG_CODE_ZEROGROUP
+#ifdef ZPC_STATS
+ init_zpcstats();
#endif
#endif
@@ -1654,12 +1643,11 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
cpi->common.error.setjmp = 0;
vp9_zero(cpi->y_uv_mode_count)
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_zero(cm->fc.nzc_counts_4x4);
- vp9_zero(cm->fc.nzc_counts_8x8);
- vp9_zero(cm->fc.nzc_counts_16x16);
- vp9_zero(cm->fc.nzc_counts_32x32);
- vp9_zero(cm->fc.nzc_pcat_counts);
+#if CONFIG_CODE_ZEROGROUP
+ vp9_zero(cm->fc.zpc_counts_4x4);
+ vp9_zero(cm->fc.zpc_counts_8x8);
+ vp9_zero(cm->fc.zpc_counts_16x16);
+ vp9_zero(cm->fc.zpc_counts_32x32);
#endif
return (VP9_PTR) cpi;
@@ -1688,10 +1676,10 @@ void vp9_remove_compressor(VP9_PTR *ptr) {
if (cpi->pass != 1)
print_nmvstats();
#endif
-#if CONFIG_CODE_NONZEROCOUNT
-#ifdef NZC_STATS
+#if CONFIG_CODE_ZEROGROUP
+#ifdef ZPC_STATS
if (cpi->pass != 1)
- print_nzcstats();
+ print_zpcstats();
#endif
#endif
@@ -2231,12 +2219,6 @@ static void scale_and_extend_frame(YV12_BUFFER_CONFIG *src_fb,
static void update_alt_ref_frame_stats(VP9_COMP *cpi) {
- VP9_COMMON *cm = &cpi->common;
-
- // Update data structure that monitors level of reference to last GF
- vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
- cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
-
// this frame refreshes means next frames don't unless specified by user
cpi->common.frames_since_golden = 0;
@@ -2248,18 +2230,10 @@ static void update_alt_ref_frame_stats(VP9_COMP *cpi) {
// Set the alternate reference frame active flag
cpi->source_alt_ref_active = 1;
-
-
}
static void update_golden_frame_stats(VP9_COMP *cpi) {
- VP9_COMMON *cm = &cpi->common;
-
// Update the Golden frame usage counts.
if (cpi->refresh_golden_frame) {
- // Update data structure that monitors level of reference to last GF
- vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
- cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
-
// this frame refreshes means next frames don't unless specified by user
cpi->refresh_golden_frame = 0;
cpi->common.frames_since_golden = 0;
@@ -3293,28 +3267,9 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
}
}
- // Update the GF usage maps.
- // This is done after completing the compression of a frame when all modes
- // etc. are finalized but before loop filter
- vp9_update_gf_useage_maps(cpi, cm, &cpi->mb);
-
if (cm->frame_type == KEY_FRAME)
cpi->refresh_last_frame = 1;
-#if 0
- {
- FILE *f = fopen("gfactive.stt", "a");
- fprintf(f, "%8d %8d %8d %8d %8d\n",
- cm->current_video_frame,
- (100 * cpi->gf_active_count)
- / (cpi->common.mb_rows * cpi->common.mb_cols),
- cpi->this_iiratio,
- cpi->next_iiratio,
- cpi->refresh_golden_frame);
- fclose(f);
- }
-#endif
-
cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
#if WRITE_RECON_BUFFER
@@ -3345,8 +3300,8 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
if (!cpi->common.error_resilient_mode &&
!cpi->common.frame_parallel_decoding_mode) {
vp9_adapt_coef_probs(&cpi->common);
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_adapt_nzc_probs(&cpi->common);
+#if CONFIG_CODE_ZEROGROUP
+ vp9_adapt_zpc_probs(&cpi->common);
#endif
}
if (cpi->common.frame_type != KEY_FRAME) {
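[editor's note] The hard-coded worst case mb_rows * mb_cols * (24 * 16 + 1) moves behind get_token_alloc(). The removed formula reads as below; whether the helper keeps exactly this constant is not shown in this hunk, so treat the sketch as the pre-change budget:

static unsigned int token_budget(int mb_rows, int mb_cols) {
  /* 24 4x4 blocks per 16x16 macroblock (16 luma + 4 + 4 chroma),
   * up to 16 coefficient tokens each, plus one terminator. */
  return (unsigned int)(mb_rows * mb_cols) * (24 * 16 + 1);
}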
diff --git a/vp9/encoder/vp9_onyx_int.h b/vp9/encoder/vp9_onyx_int.h
index 197ede20d..e6a2a3183 100644
--- a/vp9/encoder/vp9_onyx_int.h
+++ b/vp9/encoder/vp9_onyx_int.h
@@ -114,17 +114,11 @@ typedef struct {
int mv_ref_ct[INTER_MODE_CONTEXTS][4][2];
int vp9_mode_contexts[INTER_MODE_CONTEXTS][4];
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_prob nzc_probs_4x4
- [MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][NZC4X4_NODES];
- vp9_prob nzc_probs_8x8
- [MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][NZC8X8_NODES];
- vp9_prob nzc_probs_16x16
- [MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][NZC16X16_NODES];
- vp9_prob nzc_probs_32x32
- [MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][NZC32X32_NODES];
- vp9_prob nzc_pcat_probs[MAX_NZC_CONTEXTS]
- [NZC_TOKENS_EXTRA][NZC_BITS_EXTRA];
+#if CONFIG_CODE_ZEROGROUP
+ vp9_zpc_probs zpc_probs_4x4;
+ vp9_zpc_probs zpc_probs_8x8;
+ vp9_zpc_probs zpc_probs_16x16;
+ vp9_zpc_probs zpc_probs_32x32;
#endif
} CODING_CONTEXT;
@@ -487,25 +481,6 @@ typedef struct VP9_COMP {
vp9_coeff_probs frame_coef_probs_32x32[BLOCK_TYPES];
vp9_coeff_stats frame_branch_ct_32x32[BLOCK_TYPES];
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_prob frame_nzc_probs_4x4
- [MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][NZC4X4_NODES];
- unsigned int frame_nzc_branch_ct_4x4
- [MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][NZC4X4_NODES][2];
- vp9_prob frame_nzc_probs_8x8
- [MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][NZC8X8_NODES];
- unsigned int frame_nzc_branch_ct_8x8
- [MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][NZC8X8_NODES][2];
- vp9_prob frame_nzc_probs_16x16
- [MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][NZC16X16_NODES];
- unsigned int frame_nzc_branch_ct_16x16
- [MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][NZC16X16_NODES][2];
- vp9_prob frame_nzc_probs_32x32
- [MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][NZC32X32_NODES];
- unsigned int frame_nzc_branch_ct_32x32
- [MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES][NZC32X32_NODES][2];
-#endif
-
int gfu_boost;
int last_boost;
int kf_boost;
@@ -658,12 +633,6 @@ typedef struct VP9_COMP {
unsigned int activity_avg;
unsigned int *mb_activity_map;
int *mb_norm_activity_map;
-
- // Record of which MBs still refer to last golden frame either
- // directly or through 0,0
- unsigned char *gf_active_flags;
- int gf_active_count;
-
int output_partition;
// Store last frame's MV info for next frame MV prediction
@@ -682,10 +651,6 @@ typedef struct VP9_COMP {
[VP9_SWITCHABLE_FILTERS];
unsigned int best_switchable_interp_count[VP9_SWITCHABLE_FILTERS];
-#if CONFIG_NEW_MVREF
- unsigned int mb_mv_ref_count[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
-#endif
-
int initial_width;
int initial_height;
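[editor's note] Four hand-written five-dimensional nzc arrays collapse into one vp9_zpc_probs typedef per transform size, which is what lets vp9_copy in the save/restore paths below treat each table as a unit. A sketch of the idiom, with placeholder dimensions inferred from the [ref][band][ptok][node] indexing used elsewhere in this change:

typedef unsigned char vp9_prob_sketch;

/* Indices mirror later usage: [ref][zpc band][zpc ptok][node].
 * The sizes here are placeholders, not the codec's real ones. */
typedef vp9_prob_sketch zpc_probs_sketch[2][4][3][1];

struct coding_context_sketch {
  zpc_probs_sketch zpc_probs_4x4, zpc_probs_8x8,
                   zpc_probs_16x16, zpc_probs_32x32;
};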
diff --git a/vp9/encoder/vp9_quantize.c b/vp9/encoder/vp9_quantize.c
index 80d984965..18f0f4fa0 100644
--- a/vp9/encoder/vp9_quantize.c
+++ b/vp9/encoder/vp9_quantize.c
@@ -45,9 +45,6 @@ void vp9_ht_quantize_b_4x4(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type) {
int16_t *dequant_ptr = d->dequant;
int zbin_oq_value = b->zbin_extra;
const int *pt_scan;
-#if CONFIG_CODE_NONZEROCOUNT
- int nzc = 0;
-#endif
switch (tx_type) {
case ADST_DCT:
@@ -87,9 +84,6 @@ void vp9_ht_quantize_b_4x4(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type) {
if (y) {
eob = i; // last nonzero coeffs
-#if CONFIG_CODE_NONZEROCOUNT
- ++nzc; // number of nonzero coeffs
-#endif
zbin_boost_ptr = b->zrun_zbin_boost; // reset zero runlength
}
}
@@ -97,9 +91,6 @@ void vp9_ht_quantize_b_4x4(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type) {
}
xd->plane[0].eobs[b_idx] = eob + 1;
-#if CONFIG_CODE_NONZEROCOUNT
- xd->nzcs[b_idx] = nzc;
-#endif
}
void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx, int y_blocks) {
@@ -123,9 +114,6 @@ void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx, int y_blocks) {
uint8_t *quant_shift_ptr = b->quant_shift;
int16_t *dequant_ptr = d->dequant;
int zbin_oq_value = b->zbin_extra;
-#if CONFIG_CODE_NONZEROCOUNT
- int nzc = 0;
-#endif
if (c_idx == 0) assert(pb_idx.plane == 0);
if (c_idx == 16) assert(pb_idx.plane == 1);
@@ -157,9 +145,6 @@ void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx, int y_blocks) {
if (y) {
eob = i; // last nonzero coeffs
-#if CONFIG_CODE_NONZEROCOUNT
- ++nzc; // number of nonzero coeffs
-#endif
zbin_boost_ptr = b->zrun_zbin_boost; // reset zero runlength
}
}
@@ -167,9 +152,6 @@ void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx, int y_blocks) {
}
xd->plane[pb_idx.plane].eobs[pb_idx.block] = eob + 1;
-#if CONFIG_CODE_NONZEROCOUNT
- xd->nzcs[b_idx] = nzc;
-#endif
}
void vp9_regular_quantize_b_8x8(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
@@ -216,9 +198,6 @@ void vp9_regular_quantize_b_8x8(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
uint8_t *quant_shift_ptr = b->quant_shift;
int16_t *dequant_ptr = d->dequant;
int zbin_oq_value = b->zbin_extra;
-#if CONFIG_CODE_NONZEROCOUNT
- int nzc = 0;
-#endif
eob = -1;
@@ -242,9 +221,6 @@ void vp9_regular_quantize_b_8x8(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
if (y) {
eob = 0; // last nonzero coeffs
-#if CONFIG_CODE_NONZEROCOUNT
- ++nzc; // number of nonzero coeffs
-#endif
zero_run = 0;
}
}
@@ -271,22 +247,13 @@ void vp9_regular_quantize_b_8x8(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
if (y) {
eob = i; // last nonzero coeffs
-#if CONFIG_CODE_NONZEROCOUNT
- ++nzc; // number of nonzero coeffs
-#endif
zero_run = 0;
}
}
}
xd->plane[pb_idx.plane].eobs[pb_idx.block] = eob + 1;
-#if CONFIG_CODE_NONZEROCOUNT
- xd->nzcs[b_idx] = nzc;
-#endif
} else {
xd->plane[pb_idx.plane].eobs[pb_idx.block] = 0;
-#if CONFIG_CODE_NONZEROCOUNT
- xd->nzcs[b_idx] = 0;
-#endif
}
}
@@ -297,18 +264,12 @@ static void quantize(int16_t *zbin_boost_orig_ptr,
int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr,
int16_t *dequant_ptr, int zbin_oq_value,
uint16_t *eob_ptr,
-#if CONFIG_CODE_NONZEROCOUNT
- uint16_t *nzc_ptr,
-#endif
const int *scan, int mul) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
int zero_run = 0;
int16_t *zbin_boost_ptr = zbin_boost_orig_ptr;
-#if CONFIG_CODE_NONZEROCOUNT
- int nzc = 0;
-#endif
vpx_memset(qcoeff_ptr, 0, n_coeffs*sizeof(int16_t));
vpx_memset(dqcoeff_ptr, 0, n_coeffs*sizeof(int16_t));
@@ -337,18 +298,12 @@ static void quantize(int16_t *zbin_boost_orig_ptr,
if (y) {
eob = i; // last nonzero coeffs
zero_run = 0;
-#if CONFIG_CODE_NONZEROCOUNT
- ++nzc; // number of nonzero coeffs
-#endif
}
}
}
}
*eob_ptr = eob + 1;
-#if CONFIG_CODE_NONZEROCOUNT
- *nzc_ptr = nzc;
-#endif
}
void vp9_regular_quantize_b_16x16(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
@@ -384,9 +339,6 @@ void vp9_regular_quantize_b_16x16(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
d->dequant,
b->zbin_extra,
&xd->plane[pb_idx.plane].eobs[pb_idx.block],
-#if CONFIG_CODE_NONZEROCOUNT
- &xd->nzcs[b_idx],
-#endif
pt_scan, 1);
}
@@ -410,9 +362,6 @@ void vp9_regular_quantize_b_32x32(MACROBLOCK *mb, int b_idx, int y_blocks) {
d->dequant,
b->zbin_extra,
&xd->plane[pb_idx.plane].eobs[pb_idx.block],
-#if CONFIG_CODE_NONZEROCOUNT
- &xd->nzcs[b_idx],
-#endif
vp9_default_zig_zag1d_32x32, 2);
}
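[editor's note] All the quantizers in this file share the same core, which the nzc bookkeeping was stripped from: fold out the sign, compare the magnitude against a zero-bin that widens with the current run of zeros, and track the last nonzero position as eob. A deliberately simplified sketch of that core (one quant factor, no quant_shift, the zrun_zbin_boost table modeled as a plain increment, qcoeff assumed pre-zeroed):

#include <stdint.h>

static int quantize_sketch(const int16_t *coeff, int16_t *qcoeff,
                           const int *scan, int n, int zbin, int quant) {
  int i, eob = -1, zero_run = 0;
  for (i = 0; i < n; i++) {
    const int rc = scan[i];
    const int z = coeff[rc];
    const int sz = z >> 31;        /* 0 for positive, -1 for negative */
    const int x = (z ^ sz) - sz;   /* |z| */
    if (x >= zbin + zero_run) {    /* widened dead zone */
      const int y = (int)(((int64_t)x * quant) >> 16);
      qcoeff[rc] = (int16_t)((y ^ sz) - sz);  /* restore sign */
      if (y) {
        eob = i;                   /* last nonzero so far */
        zero_run = 0;
      }
    } else {
      zero_run++;
    }
  }
  return eob + 1;                  /* stored as the block's eob */
}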
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index 96d857fe7..7a419fb61 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -172,12 +172,11 @@ void vp9_save_coding_context(VP9_COMP *cpi) {
#if CONFIG_COMP_INTERINTRA_PRED
cc->interintra_prob = cm->fc.interintra_prob;
#endif
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_copy(cc->nzc_probs_4x4, cm->fc.nzc_probs_4x4);
- vp9_copy(cc->nzc_probs_8x8, cm->fc.nzc_probs_8x8);
- vp9_copy(cc->nzc_probs_16x16, cm->fc.nzc_probs_16x16);
- vp9_copy(cc->nzc_probs_32x32, cm->fc.nzc_probs_32x32);
- vp9_copy(cc->nzc_pcat_probs, cm->fc.nzc_pcat_probs);
+#if CONFIG_CODE_ZEROGROUP
+ vp9_copy(cc->zpc_probs_4x4, cm->fc.zpc_probs_4x4);
+ vp9_copy(cc->zpc_probs_8x8, cm->fc.zpc_probs_8x8);
+ vp9_copy(cc->zpc_probs_16x16, cm->fc.zpc_probs_16x16);
+ vp9_copy(cc->zpc_probs_32x32, cm->fc.zpc_probs_32x32);
#endif
}
@@ -235,12 +234,11 @@ void vp9_restore_coding_context(VP9_COMP *cpi) {
#if CONFIG_COMP_INTERINTRA_PRED
cm->fc.interintra_prob = cc->interintra_prob;
#endif
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_copy(cm->fc.nzc_probs_4x4, cc->nzc_probs_4x4);
- vp9_copy(cm->fc.nzc_probs_8x8, cc->nzc_probs_8x8);
- vp9_copy(cm->fc.nzc_probs_16x16, cc->nzc_probs_16x16);
- vp9_copy(cm->fc.nzc_probs_32x32, cc->nzc_probs_32x32);
- vp9_copy(cm->fc.nzc_pcat_probs, cc->nzc_pcat_probs);
+#if CONFIG_CODE_ZEROGROUP
+ vp9_copy(cm->fc.zpc_probs_4x4, cc->zpc_probs_4x4);
+ vp9_copy(cm->fc.zpc_probs_8x8, cc->zpc_probs_8x8);
+ vp9_copy(cm->fc.zpc_probs_16x16, cc->zpc_probs_16x16);
+ vp9_copy(cm->fc.zpc_probs_32x32, cc->zpc_probs_32x32);
#endif
}
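[editor's note] vp9_save_coding_context and vp9_restore_coding_context must stay field-for-field mirrors of each other, which is why the zpc tables are swapped in on both sides at once. The vp9_copy they rely on is, to the best of my knowledge, a sizeof-checked array copy; a stand-in of that idiom (assumed form, not quoted from the tree):

#include <assert.h>
#include <string.h>

/* Whole-array copy guarded by a size check, so mismatched
 * save/restore table shapes fail loudly at run time. */
#define copy_array(dest, src) do {        \
    assert(sizeof(dest) == sizeof(src));  \
    memcpy(dest, src, sizeof(src));       \
  } while (0)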
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 4716d7cdd..7009e32aa 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -162,60 +162,6 @@ static void fill_token_costs(vp9_coeff_count *c,
}
}
-#if CONFIG_CODE_NONZEROCOUNT
-static void fill_nzc_costs(VP9_COMP *cpi, TX_SIZE tx_size) {
- int nzc_context, r, b, nzc, values;
- int cost[16];
- values = (16 << (2 * tx_size)) + 1;
-
- for (nzc_context = 0; nzc_context < MAX_NZC_CONTEXTS; ++nzc_context) {
- for (r = 0; r < REF_TYPES; ++r) {
- for (b = 0; b < BLOCK_TYPES; ++b) {
- unsigned int *nzc_costs;
- if (tx_size == TX_4X4) {
- vp9_cost_tokens(cost,
- cpi->common.fc.nzc_probs_4x4[nzc_context][r][b],
- vp9_nzc4x4_tree);
- nzc_costs = cpi->mb.nzc_costs_4x4[nzc_context][r][b];
- } else if (tx_size == TX_8X8) {
- vp9_cost_tokens(cost,
- cpi->common.fc.nzc_probs_8x8[nzc_context][r][b],
- vp9_nzc8x8_tree);
- nzc_costs = cpi->mb.nzc_costs_8x8[nzc_context][r][b];
- } else if (tx_size == TX_16X16) {
- vp9_cost_tokens(cost,
- cpi->common.fc.nzc_probs_16x16[nzc_context][r][b],
- vp9_nzc16x16_tree);
- nzc_costs = cpi->mb.nzc_costs_16x16[nzc_context][r][b];
- } else {
- vp9_cost_tokens(cost,
- cpi->common.fc.nzc_probs_32x32[nzc_context][r][b],
- vp9_nzc32x32_tree);
- nzc_costs = cpi->mb.nzc_costs_32x32[nzc_context][r][b];
- }
-
- for (nzc = 0; nzc < values; ++nzc) {
- int e, c, totalcost = 0;
- c = codenzc(nzc);
- totalcost = cost[c];
- if ((e = vp9_extranzcbits[c])) {
- int x = nzc - vp9_basenzcvalue[c];
- while (e--) {
- totalcost += vp9_cost_bit(
- cpi->common.fc.nzc_pcat_probs[nzc_context]
- [c - NZC_TOKENS_NOEXTRA][e],
- ((x >> e) & 1));
- }
- }
- nzc_costs[nzc] = totalcost;
- }
- }
- }
- }
-}
-#endif
-
-
static int rd_iifactor[32] = { 4, 4, 3, 2, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -313,12 +259,6 @@ void vp9_initialize_rd_consts(VP9_COMP *cpi, int qindex) {
cpi->common.fc.coef_probs_16x16, TX_16X16);
fill_token_costs(cpi->mb.token_costs[TX_32X32],
cpi->common.fc.coef_probs_32x32, TX_32X32);
-#if CONFIG_CODE_NONZEROCOUNT
- fill_nzc_costs(cpi, TX_4X4);
- fill_nzc_costs(cpi, TX_8X8);
- fill_nzc_costs(cpi, TX_16X16);
- fill_nzc_costs(cpi, TX_32X32);
-#endif
for (i = 0; i < 2; i++)
vp9_cost_tokens(cpi->mb.partition_cost[i],
@@ -374,11 +314,16 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
sizeof(ENTROPY_CONTEXT_PLANES)/sizeof(ENTROPY_CONTEXT);
ENTROPY_CONTEXT *const l1 = l +
sizeof(ENTROPY_CONTEXT_PLANES)/sizeof(ENTROPY_CONTEXT);
+ TX_TYPE tx_type = DCT_DCT;
-#if CONFIG_CODE_NONZEROCOUNT
- const int nzc_used = get_nzc_used(tx_size);
- int nzc_context = vp9_get_nzc_context(cm, xd, ib);
- unsigned int *nzc_cost;
+#if CONFIG_CODE_ZEROGROUP
+ int last_nz_pos[3] = {-1, -1, -1}; // Encoder only
+ int is_eoo_list[3] = {0, 0, 0};
+ int is_eoo_negative[3] = {0, 0, 0};
+ int is_last_zero[3] = {0, 0, 0};
+ int o, rc, skip_coef_val;
+ vp9_zpc_probs *zpc_probs;
+ uint8_t token_cache_full[1024];
#endif
const int segment_id = xd->mode_info_context->mbmi.segment_id;
vp9_prob (*coef_probs)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
@@ -386,6 +331,10 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
int seg_eob, default_eob;
uint8_t token_cache[1024];
+#if CONFIG_CODE_ZEROGROUP
+ vpx_memset(token_cache, UNKNOWN_TOKEN, sizeof(token_cache));
+#endif
+
// Check for consistency of tx_size with mode info
assert((!type && !pb_idx.plane) || (type && pb_idx.plane));
if (type == PLANE_TYPE_Y_WITH_DC) {
@@ -397,13 +346,10 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
switch (tx_size) {
case TX_4X4: {
- const TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
- get_tx_type_4x4(xd, ib) : DCT_DCT;
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_4x4(xd, ib) : DCT_DCT;
a_ec = *a;
l_ec = *l;
-#if CONFIG_CODE_NONZEROCOUNT
- nzc_cost = mb->nzc_costs_4x4[nzc_context][ref][type];
-#endif
coef_probs = cm->fc.coef_probs_4x4;
seg_eob = 16;
if (tx_type == ADST_DCT) {
@@ -413,14 +359,17 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
} else {
scan = vp9_default_zig_zag1d_4x4;
}
+#if CONFIG_CODE_ZEROGROUP
+ zpc_probs = &cm->fc.zpc_probs_4x4;
+#endif
break;
}
case TX_8X8: {
const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
const int sz = 3 + mb_width_log2(sb_type);
const int x = ib & ((1 << sz) - 1), y = ib - x;
- const TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
- get_tx_type_8x8(xd, y + (x >> 1)) : DCT_DCT;
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_8x8(xd, y + (x >> 1)) : DCT_DCT;
a_ec = (a[0] + a[1]) != 0;
l_ec = (l[0] + l[1]) != 0;
if (tx_type == ADST_DCT) {
@@ -430,19 +379,19 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
} else {
scan = vp9_default_zig_zag1d_8x8;
}
-#if CONFIG_CODE_NONZEROCOUNT
- nzc_cost = mb->nzc_costs_8x8[nzc_context][ref][type];
-#endif
coef_probs = cm->fc.coef_probs_8x8;
seg_eob = 64;
+#if CONFIG_CODE_ZEROGROUP
+ zpc_probs = &cm->fc.zpc_probs_8x8;
+#endif
break;
}
case TX_16X16: {
const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
const int sz = 4 + mb_width_log2(sb_type);
const int x = ib & ((1 << sz) - 1), y = ib - x;
- const TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
- get_tx_type_16x16(xd, y + (x >> 2)) : DCT_DCT;
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_16x16(xd, y + (x >> 2)) : DCT_DCT;
if (tx_type == ADST_DCT) {
scan = vp9_row_scan_16x16;
} else if (tx_type == DCT_ADST) {
@@ -450,9 +399,6 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
} else {
scan = vp9_default_zig_zag1d_16x16;
}
-#if CONFIG_CODE_NONZEROCOUNT
- nzc_cost = mb->nzc_costs_16x16[nzc_context][ref][type];
-#endif
coef_probs = cm->fc.coef_probs_16x16;
seg_eob = 256;
if (type == PLANE_TYPE_UV) {
@@ -462,13 +408,13 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
a_ec = (a[0] + a[1] + a[2] + a[3]) != 0;
l_ec = (l[0] + l[1] + l[2] + l[3]) != 0;
}
+#if CONFIG_CODE_ZEROGROUP
+ zpc_probs = &cm->fc.zpc_probs_16x16;
+#endif
break;
}
case TX_32X32:
scan = vp9_default_zig_zag1d_32x32;
-#if CONFIG_CODE_NONZEROCOUNT
- nzc_cost = mb->nzc_costs_32x32[nzc_context][ref][type];
-#endif
coef_probs = cm->fc.coef_probs_32x32;
seg_eob = 1024;
if (type == PLANE_TYPE_UV) {
@@ -487,6 +433,9 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
l_ec = (l[0] + l[1] + l[2] + l[3] +
l1[0] + l1[1] + l1[2] + l1[3]) != 0;
}
+#if CONFIG_CODE_ZEROGROUP
+ zpc_probs = &cm->fc.zpc_probs_32x32;
+#endif
break;
default:
abort();
@@ -498,50 +447,113 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
nb = vp9_get_coef_neighbors_handle(scan, &pad);
default_eob = seg_eob;
-#if CONFIG_CODE_NONZEROCOUNT
- if (!nzc_used)
-#endif
- if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))
- seg_eob = 0;
+ if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))
+ seg_eob = 0;
/* sanity check to ensure that we do not have spurious non-zero q values */
if (eob < seg_eob)
assert(qcoeff_ptr[scan[eob]] == 0);
- {
-#if CONFIG_CODE_NONZEROCOUNT
- int nzc = 0;
+#if CONFIG_CODE_ZEROGROUP
+ vpx_memset(token_cache_full, ZERO_TOKEN, sizeof(token_cache_full));
+ for (c = 0; c < eob; ++c) {
+ rc = scan[c];
+ token_cache_full[rc] = vp9_dct_value_tokens_ptr[qcoeff_ptr[rc]].token;
+ o = vp9_get_orientation(rc, tx_size);
+ if (qcoeff_ptr[rc] != 0)
+ last_nz_pos[o] = c;
+ }
#endif
- for (; c < eob; c++) {
+ {
+ for (c = 0; c < eob; c++) {
int v = qcoeff_ptr[scan[c]];
- int t = vp9_dct_value_tokens_ptr[v].Token;
-#if CONFIG_CODE_NONZEROCOUNT
- nzc += (v != 0);
+ int t = vp9_dct_value_tokens_ptr[v].token;
+ int band = get_coef_band(scan, tx_size, c);
+ if (c)
+ pt = vp9_get_coef_context(scan, nb, pad, token_cache, c, default_eob);
+#if CONFIG_CODE_ZEROGROUP
+ rc = scan[c];
+ o = vp9_get_orientation(rc, tx_size);
+ skip_coef_val = (token_cache[rc] == ZERO_TOKEN || is_eoo_list[o]);
+ if (!skip_coef_val) {
+ cost += token_costs[band][pt][t] + vp9_dct_value_cost_ptr[v];
+ } else {
+ assert(v == 0);
+ }
+#else
+ cost += token_costs[band][pt][t] + vp9_dct_value_cost_ptr[v];
#endif
- token_cache[c] = t;
- cost += token_costs[get_coef_band(scan, tx_size, c)][pt][t];
- cost += vp9_dct_value_cost_ptr[v];
-#if !CONFIG_CODE_NONZEROCOUNT
- if (!c || token_cache[c - 1])
- cost += vp9_cost_bit(coef_probs[type][ref]
- [get_coef_band(scan, tx_size, c)]
- [pt][0], 1);
+ if (!c || token_cache[scan[c - 1]])
+ cost += vp9_cost_bit(coef_probs[type][ref][band][pt][0], 1);
+ token_cache[scan[c]] = t;
+#if CONFIG_CODE_ZEROGROUP
+ if (t == ZERO_TOKEN && !skip_coef_val) {
+ int eoo = 0, use_eoo;
+#if USE_ZPC_EOORIENT == 1
+ use_eoo = vp9_use_eoo(c, seg_eob, scan, tx_size,
+ is_last_zero, is_eoo_list);
+#else
+ use_eoo = 0;
#endif
- pt = vp9_get_coef_context(scan, nb, pad, token_cache, c + 1, default_eob);
- }
-#if CONFIG_CODE_NONZEROCOUNT
- if (nzc_used)
- cost += nzc_cost[nzc];
- else
+ if (use_eoo) {
+ eoo = vp9_is_eoo(c, eob, scan, tx_size, qcoeff_ptr, last_nz_pos);
+ if (eoo && is_eoo_negative[o]) eoo = 0;
+ if (eoo) {
+ int c_;
+ int savings = 0;
+ int zsaved = 0;
+ savings = vp9_cost_bit((*zpc_probs)[ref]
+ [coef_to_zpc_band(band)]
+ [coef_to_zpc_ptok(pt)][0], 1) -
+ vp9_cost_bit((*zpc_probs)[ref]
+ [coef_to_zpc_band(band)]
+ [coef_to_zpc_ptok(pt)][0], 0);
+ for (c_ = c + 1; c_ < eob; ++c_) {
+ if (o == vp9_get_orientation(scan[c_], tx_size)) {
+ int pt_ = vp9_get_coef_context(scan, nb, pad,
+ token_cache_full, c_,
+ default_eob);
+ int band_ = get_coef_band(scan, tx_size, c_);
+ assert(token_cache_full[scan[c_]] == ZERO_TOKEN);
+ if (!c_ || token_cache_full[scan[c_ - 1]])
+ savings += vp9_cost_bit(
+ coef_probs[type][ref][band_][pt_][0], 1);
+ savings += vp9_cost_bit(
+ coef_probs[type][ref][band_][pt_][1], 0);
+ zsaved++;
+ }
+ }
+ if (savings < 0) {
+ // if (zsaved < ZPC_ZEROSSAVED_EOO) {
+ eoo = 0;
+ is_eoo_negative[o] = 1;
+ }
+ }
+ }
+ if (use_eoo) {
+ cost += vp9_cost_bit((*zpc_probs)[ref]
+ [coef_to_zpc_band(band)]
+ [coef_to_zpc_ptok(pt)][0], !eoo);
+ if (eoo) {
+ assert(is_eoo_list[o] == 0);
+ is_eoo_list[o] = 1;
+ }
+ }
+ }
+ is_last_zero[o] = (t == ZERO_TOKEN);
#endif
- if (c < seg_eob)
- cost += mb->token_costs[tx_size][type][ref]
- [get_coef_band(scan, tx_size, c)]
- [pt][DCT_EOB_TOKEN];
+ }
+ if (c < seg_eob) {
+ if (c)
+ pt = vp9_get_coef_context(scan, nb, pad, token_cache, c, default_eob);
+ cost += mb->token_costs[tx_size][type][ref]
+ [get_coef_band(scan, tx_size, c)]
+ [pt][DCT_EOB_TOKEN];
+ }
}
- // is eob first coefficient;
- pt = (c > 0);
+ // is eob the first coefficient?
+ pt = (c > 0);
*a = *l = pt;
if (tx_size >= TX_8X8) {
a[1] = l[1] = pt;
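[editor's note] The reworked cost_coeffs loop above derives the context for coefficient c from tokens already written into the cache, costs the token, and only then records it at its raster position; the end-of-block token is costed the same way when the block ends before seg_eob. A toy skeleton of that shape, with a three-token alphabet and a trivial context standing in for vp9_get_coef_context and the band/probability tables (n <= 64 in this toy):

#include <string.h>

/* Toy tokens: 0 = zero, 1 = nonzero, 2 = end-of-block. */
enum { T_ZERO = 0, T_NONZERO = 1, T_EOB = 2 };

static int cost_block(const unsigned char *tok_by_scan, const int *scan,
                      int eob, int n, const int cost_tbl[3][3]) {
  unsigned char cache[64];
  int c, pt = 0, cost = 0;
  memset(cache, T_ZERO, sizeof(cache));
  for (c = 0; c < eob; c++) {
    if (c)
      pt = cache[scan[c - 1]];        /* context from cached tokens */
    cost += cost_tbl[pt][tok_by_scan[c]];
    cache[scan[c]] = tok_by_scan[c];  /* record at raster position */
  }
  if (eob < n) {                      /* block ends early: cost EOB */
    if (eob)
      pt = cache[scan[eob - 1]];
    cost += cost_tbl[pt][T_EOB];
  }
  return cost;
}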
@@ -835,8 +847,8 @@ static void super_block_yrd(VP9_COMP *cpi,
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
int r[TX_SIZE_MAX_SB][2], d[TX_SIZE_MAX_SB], s[TX_SIZE_MAX_SB];
- uint8_t *src = x->src.y_buffer, *dst = xd->dst.y_buffer;
- int src_y_stride = x->src.y_stride, dst_y_stride = xd->dst.y_stride;
+ uint8_t *src = x->src.y_buffer, *dst = xd->plane[0].dst.buf;
+ int src_y_stride = x->src.y_stride, dst_y_stride = xd->plane[0].dst.stride;
vp9_subtract_sby_s_c(x->src_diff, src, src_y_stride, dst, dst_y_stride, bs);
@@ -1519,9 +1531,9 @@ static void super_block_uvrd(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
- uint8_t *usrc = x->src.u_buffer, *udst = xd->dst.u_buffer;
- uint8_t *vsrc = x->src.v_buffer, *vdst = xd->dst.v_buffer;
- int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;
+ uint8_t *usrc = x->src.u_buffer, *udst = xd->plane[1].dst.buf;
+ uint8_t *vsrc = x->src.v_buffer, *vdst = xd->plane[2].dst.buf;
+ int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->plane[1].dst.stride;
vp9_subtract_sbuv_s_c(x->src_diff, usrc, vsrc, src_uv_stride,
udst, vdst, dst_uv_stride, bsize);
@@ -2474,7 +2486,7 @@ static void mv_pred(VP9_COMP *cpi, MACROBLOCK *x,
int row_offset, col_offset;
// Get the sad for each candidate reference mv
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < MAX_MV_REF_CANDIDATES; i++) {
this_mv.as_int = mbmi->ref_mvs[ref_frame][i].as_int;
// The list is at an end if we see 0 for a second time.
@@ -2690,7 +2702,7 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
YV12_BUFFER_CONFIG *yv12 = &cm->yv12_fb[cpi->common.ref_frame_map[idx]];
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
- int use_prev_in_find_mv_refs, use_prev_in_find_best_ref;
+ int use_prev_in_find_mv_refs;
// set up scaling factors
scale[frame_type] = cpi->common.active_ref_scale[frame_type - 1];
@@ -2715,15 +2727,7 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
cpi->common.ref_frame_sign_bias);
// Candidate refinement carried out at encoder and decoder
- use_prev_in_find_best_ref =
- scale[frame_type].x_num == scale[frame_type].x_den &&
- scale[frame_type].y_num == scale[frame_type].y_den &&
- !cm->error_resilient_mode &&
- !cm->frame_parallel_decoding_mode;
vp9_find_best_ref_mvs(xd,
- use_prev_in_find_best_ref ?
- yv12_mb[frame_type].y_buffer : NULL,
- yv12->y_stride,
mbmi->ref_mvs[frame_type],
&frame_nearest_mv[frame_type],
&frame_near_mv[frame_type]);
@@ -2820,7 +2824,6 @@ static enum BlockSize y_bsizet_to_block_size(BLOCK_SIZE_TYPE bs) {
static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize,
- int *saddone, int near_sadidx[],
int mdcounts[4], int64_t txfm_cache[],
int *rate2, int *distortion, int *skippable,
int *compmode_cost,
@@ -2882,7 +2885,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
x->nmvjointcost, x->mvcost, 96,
x->e_mbd.allow_high_precision_mv);
} else {
- YV12_BUFFER_CONFIG backup_yv12 = xd->pre;
+ struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0}};
int bestsme = INT_MAX;
int further_steps, step_param = cpi->sf.first_step;
int sadpb = x->sadperbit16;
@@ -2895,13 +2898,16 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int tmp_row_max = x->mv_row_max;
if (scaled_ref_frame) {
+ int i;
+
// Swap out the reference frame for a version that's been scaled to
// match the resolution of the current frame, allowing the existing
// motion search code to be used without additional modifications.
- xd->pre = *scaled_ref_frame;
- xd->pre.y_buffer += mb_row * 16 * xd->pre.y_stride + mb_col * 16;
- xd->pre.u_buffer += mb_row * 8 * xd->pre.uv_stride + mb_col * 8;
- xd->pre.v_buffer += mb_row * 8 * xd->pre.uv_stride + mb_col * 8;
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ backup_yv12[i] = xd->plane[i].pre[0];
+
+ setup_pre_planes(xd, scaled_ref_frame, NULL, mb_row, mb_col,
+ NULL, NULL);
}
vp9_clamp_mv_min_max(x, &ref_mv[0]);
@@ -2951,7 +2957,10 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
// restore the predictor, if required
if (scaled_ref_frame) {
- xd->pre = backup_yv12;
+ int i;
+
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ xd->plane[i].pre[0] = backup_yv12[i];
}
}
break;
@@ -3035,7 +3044,8 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int tmp_dist_y, tmp_dist_u, tmp_dist_v;
vp9_build_inter_predictors_sb(xd, mb_row, mb_col, bsize);
var = cpi->fn_ptr[block_size].vf(*(b->base_src), b->src_stride,
- xd->dst.y_buffer, xd->dst.y_stride,
+ xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride,
&sse);
// Note our transform coeffs are 8 times an orthogonal transform.
// Hence quantizer step is also 8 times. To get effective quantizer
@@ -3044,13 +3054,15 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
xd->block[0].dequant[1] >> 3,
&tmp_rate_y, &tmp_dist_y);
var = cpi->fn_ptr[uv_block_size].vf(x->src.u_buffer, x->src.uv_stride,
- xd->dst.u_buffer, xd->dst.uv_stride,
+ xd->plane[1].dst.buf,
+ xd->plane[1].dst.stride,
&sse);
model_rd_from_var_lapndz(var, 8 * bw * 8 * bh,
xd->block[16].dequant[1] >> 3,
&tmp_rate_u, &tmp_dist_u);
var = cpi->fn_ptr[uv_block_size].vf(x->src.v_buffer, x->src.uv_stride,
- xd->dst.v_buffer, xd->dst.uv_stride,
+ xd->plane[2].dst.buf,
+ xd->plane[1].dst.stride,
&sse);
model_rd_from_var_lapndz(var, 8 * bw * 8 * bh,
xd->block[20].dequant[1] >> 3,
@@ -3079,15 +3091,15 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int i;
for (i = 0; i < 16 * bh; ++i)
vpx_memcpy(tmp_ybuf + i * 16 * bw,
- xd->dst.y_buffer + i * xd->dst.y_stride,
+ xd->plane[0].dst.buf + i * xd->plane[0].dst.stride,
sizeof(unsigned char) * 16 * bw);
for (i = 0; i < 8 * bh; ++i)
vpx_memcpy(tmp_ubuf + i * 8 * bw,
- xd->dst.u_buffer + i * xd->dst.uv_stride,
+ xd->plane[1].dst.buf + i * xd->plane[1].dst.stride,
sizeof(unsigned char) * 8 * bw);
for (i = 0; i < 8 * bh; ++i)
vpx_memcpy(tmp_vbuf + i * 8 * bw,
- xd->dst.v_buffer + i * xd->dst.uv_stride,
+ xd->plane[2].dst.buf + i * xd->plane[1].dst.stride,
sizeof(unsigned char) * 8 * bw);
pred_exists = 1;
}
@@ -3120,18 +3132,21 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int tmp_dist_y, tmp_dist_u, tmp_dist_v;
vp9_build_inter_predictors_sb(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
var = vp9_variance16x16(*(b->base_src), b->src_stride,
- xd->dst.y_buffer, xd->dst.y_stride, &sse);
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
+ &sse);
// Note our transform coeffs are 8 times an orthogonal transform.
// Hence quantizer step is also 8 times. To get effective quantizer
// we need to divide by 8 before sending to modeling function.
model_rd_from_var_lapndz(var, 16 * 16, xd->block[0].dequant[1] >> 3,
&tmp_rate_y, &tmp_dist_y);
var = vp9_variance8x8(x->src.u_buffer, x->src.uv_stride,
- xd->dst.u_buffer, xd->dst.uv_stride, &sse);
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
+ &sse);
model_rd_from_var_lapndz(var, 8 * 8, xd->block[16].dequant[1] >> 3,
&tmp_rate_u, &tmp_dist_u);
var = vp9_variance8x8(x->src.v_buffer, x->src.uv_stride,
- xd->dst.v_buffer, xd->dst.uv_stride, &sse);
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
+ &sse);
model_rd_from_var_lapndz(var, 8 * 8, xd->block[20].dequant[1] >> 3,
&tmp_rate_v, &tmp_dist_v);
rd = RDCOST(x->rdmult, x->rddiv,
@@ -3158,15 +3173,15 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int i;
for (i = 0; i < 16 * bh; ++i)
vpx_memcpy(tmp_ybuf + i * 16 * bw,
- xd->dst.y_buffer + i * xd->dst.y_stride,
+ xd->plane[0].dst.buf + i * xd->plane[0].dst.stride,
sizeof(unsigned char) * 16 * bw);
for (i = 0; i < 8 * bh; ++i)
vpx_memcpy(tmp_ubuf + i * 8 * bw,
- xd->dst.u_buffer + i * xd->dst.uv_stride,
+ xd->plane[1].dst.buf + i * xd->plane[1].dst.stride,
sizeof(unsigned char) * 8 * bw);
for (i = 0; i < 8 * bh; ++i)
vpx_memcpy(tmp_vbuf + i * 8 * bw,
- xd->dst.v_buffer + i * xd->dst.uv_stride,
+ xd->plane[2].dst.buf + i * xd->plane[1].dst.stride,
sizeof(unsigned char) * 8 * bw);
pred_exists = 1;
}
@@ -3185,13 +3200,13 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (pred_exists) {
// FIXME(rbultje): mb code still predicts into xd->predictor
for (i = 0; i < bh * 16; ++i)
- vpx_memcpy(xd->dst.y_buffer + i * xd->dst.y_stride,
+ vpx_memcpy(xd->plane[0].dst.buf + i * xd->plane[0].dst.stride,
tmp_ybuf + i * bw * 16, sizeof(unsigned char) * bw * 16);
for (i = 0; i < bh * 8; ++i)
- vpx_memcpy(xd->dst.u_buffer + i * xd->dst.uv_stride,
+ vpx_memcpy(xd->plane[1].dst.buf + i * xd->plane[1].dst.stride,
tmp_ubuf + i * bw * 8, sizeof(unsigned char) * bw * 8);
for (i = 0; i < bh * 8; ++i)
- vpx_memcpy(xd->dst.v_buffer + i * xd->dst.uv_stride,
+ vpx_memcpy(xd->plane[2].dst.buf + i * xd->plane[1].dst.stride,
tmp_vbuf + i * bw * 8, sizeof(unsigned char) * bw * 8);
} else {
// Handles the special case when a filter that is not in the
@@ -3217,11 +3232,13 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (bsize != BLOCK_SIZE_MB16X16) {
var = cpi->fn_ptr[block_size].vf(*(b->base_src), b->src_stride,
- xd->dst.y_buffer, xd->dst.y_stride,
+ xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride,
&sse);
} else {
var = vp9_variance16x16(*(b->base_src), b->src_stride,
- xd->dst.y_buffer, xd->dst.y_stride, &sse);
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
+ &sse);
}
if ((int)sse < threshold) {
@@ -3237,18 +3254,20 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
unsigned int sse2u, sse2v;
// FIXME(rbultje): mb predictors predict into xd->predictor
var = cpi->fn_ptr[uv_block_size].vf(x->src.u_buffer, x->src.uv_stride,
- xd->dst.u_buffer,
- xd->dst.uv_stride, &sse2u);
+ xd->plane[1].dst.buf,
+ xd->plane[1].dst.stride, &sse2u);
var = cpi->fn_ptr[uv_block_size].vf(x->src.v_buffer, x->src.uv_stride,
- xd->dst.v_buffer,
- xd->dst.uv_stride, &sse2v);
+ xd->plane[2].dst.buf,
+ xd->plane[1].dst.stride, &sse2v);
sse2 = sse2u + sse2v;
} else {
unsigned int sse2u, sse2v;
var = vp9_variance8x8(x->src.u_buffer, x->src.uv_stride,
- xd->dst.u_buffer, xd->dst.uv_stride, &sse2u);
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
+ &sse2u);
var = vp9_variance8x8(x->src.v_buffer, x->src.uv_stride,
- xd->dst.v_buffer, xd->dst.uv_stride, &sse2v);
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
+ &sse2v);
sse2 = sse2u + sse2v;
}
@@ -3345,9 +3364,6 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int distortion_uv = INT_MAX;
int64_t best_yrd = INT64_MAX;
- int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
- int saddone = 0;
-
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
int frame_mdcounts[4][4];
YV12_BUFFER_CONFIG yv12_mb[4];
@@ -3529,7 +3545,6 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int ref = mbmi->ref_frame;
int fb;
- xd->pre = yv12_mb[ref];
best_ref_mv = mbmi->ref_mvs[ref][0];
vpx_memcpy(mdcounts, frame_mdcounts[ref], sizeof(mdcounts));
@@ -3548,10 +3563,15 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (mbmi->second_ref_frame > 0) {
int ref = mbmi->second_ref_frame;
- xd->second_pre = yv12_mb[ref];
second_best_ref_mv = mbmi->ref_mvs[ref][0];
}
+ // TODO(jkoleszar): scaling/translation is currently handled during
+ // creation of yv12_mb.
+ setup_pre_planes(xd, &yv12_mb[mbmi->ref_frame],
+ mbmi->second_ref_frame > 0 ? &yv12_mb[mbmi->second_ref_frame] : NULL,
+ 0, 0, NULL, NULL);
+
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
if (cpi->zbin_mode_boost_enabled) {
@@ -3765,13 +3785,14 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (tmp_rd < best_yrd) {
int uv_skippable;
- vp9_build_inter4x4_predictors_mbuv(&x->e_mbd, mb_row, mb_col);
+ vp9_build_inter_predictors_sbuv(&x->e_mbd, mb_row, mb_col,
+ BLOCK_SIZE_MB16X16);
vp9_subtract_sbuv_s_c(x->src_diff,
x->src.u_buffer,
x->src.v_buffer, x->src.uv_stride,
- xd->dst.u_buffer,
- xd->dst.v_buffer, xd->dst.uv_stride,
+ xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
BLOCK_SIZE_MB16X16);
super_block_uvrd_4x4(cm, x, &rate_uv, &distortion_uv,
@@ -3808,7 +3829,7 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
}
#endif
this_rd = handle_inter_mode(cpi, x, BLOCK_SIZE_MB16X16,
- &saddone, near_sadidx, mdcounts, txfm_cache,
+ mdcounts, txfm_cache,
&rate2, &distortion2, &skippable,
&compmode_cost,
#if CONFIG_COMP_INTERINTRA_PRED
@@ -4341,7 +4362,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
MB_PREDICTION_MODE this_mode;
MB_PREDICTION_MODE best_mode = DC_PRED;
- MV_REFERENCE_FRAME ref_frame;
+ MV_REFERENCE_FRAME ref_frame, second_ref;
unsigned char segment_id = xd->mode_info_context->mbmi.segment_id;
int comp_pred, i;
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
@@ -4354,8 +4375,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
cpi->gld_fb_idx,
cpi->alt_fb_idx};
int mdcounts[4];
- int near_sadidx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
- int saddone = 0;
int64_t best_rd = INT64_MAX;
int64_t best_txfm_rd[NB_TXFM_MODES];
int64_t best_txfm_diff[NB_TXFM_MODES];
@@ -4520,8 +4539,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
// continue;
if (comp_pred) {
- int second_ref;
-
if (ref_frame == ALTREF_FRAME) {
second_ref = LAST_FRAME;
} else {
@@ -4533,7 +4550,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
set_scale_factors(xd, mbmi->ref_frame, mbmi->second_ref_frame,
scale_factor);
- xd->second_pre = yv12_mb[second_ref];
mode_excluded =
mode_excluded ?
mode_excluded : cm->comp_pred_mode == SINGLE_PREDICTION_ONLY;
@@ -4551,7 +4567,9 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
}
}
- xd->pre = yv12_mb[ref_frame];
+ setup_pre_planes(xd, &yv12_mb[ref_frame],
+ comp_pred ? &yv12_mb[second_ref] : NULL, 0, 0, NULL, NULL);
+
vpx_memcpy(mdcounts, frame_mdcounts[ref_frame], sizeof(mdcounts));
// If the segment reference frame feature is enabled....
@@ -4624,7 +4642,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
}
#endif
this_rd = handle_inter_mode(cpi, x, bsize,
- &saddone, near_sadidx, mdcounts, txfm_cache,
+ mdcounts, txfm_cache,
&rate2, &distortion2, &skippable,
&compmode_cost,
#if CONFIG_COMP_INTERINTRA_PRED
diff --git a/vp9/encoder/vp9_segmentation.c b/vp9/encoder/vp9_segmentation.c
index aac42f738..6336969fb 100644
--- a/vp9/encoder/vp9_segmentation.c
+++ b/vp9/encoder/vp9_segmentation.c
@@ -15,50 +15,6 @@
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_tile_common.h"
-void vp9_update_gf_useage_maps(VP9_COMP *cpi, VP9_COMMON *cm, MACROBLOCK *x) {
- int mb_row, mb_col;
-
- MODE_INFO *this_mb_mode_info = cm->mi;
-
- x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
-
- if ((cm->frame_type == KEY_FRAME) || (cpi->refresh_golden_frame)) {
- // Reset Gf useage monitors
- vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
- cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
- } else {
- // for each macroblock row in image
- for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
- // for each macroblock col in image
- for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
-
- // If using golden then set GF active flag if not already set.
- // If using last frame 0,0 mode then leave flag as it is
- // else if using non 0,0 motion or intra modes then clear
- // flag if it is currently set
- if ((this_mb_mode_info->mbmi.ref_frame == GOLDEN_FRAME) ||
- (this_mb_mode_info->mbmi.ref_frame == ALTREF_FRAME)) {
- if (*(x->gf_active_ptr) == 0) {
- *(x->gf_active_ptr) = 1;
- cpi->gf_active_count++;
- }
- } else if ((this_mb_mode_info->mbmi.mode != ZEROMV) &&
- *(x->gf_active_ptr)) {
- *(x->gf_active_ptr) = 0;
- cpi->gf_active_count--;
- }
-
- x->gf_active_ptr++; // Step onto next entry
- this_mb_mode_info++; // skip to next mb
-
- }
-
- // this is to account for the border
- this_mb_mode_info++;
- }
- }
-}
-
void vp9_enable_segmentation(VP9_PTR ptr) {
VP9_COMP *cpi = (VP9_COMP *)(ptr);
@@ -215,7 +171,6 @@ static void count_segs(VP9_COMP *cpi,
int bw, int bh, int mb_row, int mb_col) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- const int segmap_index = mb_row * cm->mb_cols + mb_col;
const int segment_id = mi->mbmi.segment_id;
xd->mode_info_context = mi;
@@ -228,7 +183,8 @@ static void count_segs(VP9_COMP *cpi,
// Temporal prediction not allowed on key frames
if (cm->frame_type != KEY_FRAME) {
// Test to see if the segment id matches the predicted value.
- const int pred_seg_id = vp9_get_pred_mb_segid(cm, xd, segmap_index);
+ const int pred_seg_id = vp9_get_pred_mb_segid(cm, mi->mbmi.sb_type,
+ mb_row, mb_col);
const int seg_predicted = (segment_id == pred_seg_id);
// Get the segment id prediction context
diff --git a/vp9/encoder/vp9_segmentation.h b/vp9/encoder/vp9_segmentation.h
index 1c90c2f2d..2183771c4 100644
--- a/vp9/encoder/vp9_segmentation.h
+++ b/vp9/encoder/vp9_segmentation.h
@@ -15,8 +15,6 @@
#include "vp9/common/vp9_blockd.h"
#include "vp9/encoder/vp9_onyx_int.h"
-void vp9_update_gf_useage_maps(VP9_COMP *cpi, VP9_COMMON *cm, MACROBLOCK *x);
-
void vp9_enable_segmentation(VP9_PTR ptr);
void vp9_disable_segmentation(VP9_PTR ptr);
diff --git a/vp9/encoder/vp9_temporal_filter.c b/vp9/encoder/vp9_temporal_filter.c
index 6149518ca..be3875ad6 100644
--- a/vp9/encoder/vp9_temporal_filter.c
+++ b/vp9/encoder/vp9_temporal_filter.c
@@ -221,9 +221,9 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
DECLARE_ALIGNED_ARRAY(16, uint8_t, predictor, 16 * 16 + 8 * 8 + 8 * 8);
// Save input state
- uint8_t *y_buffer = mbd->pre.y_buffer;
- uint8_t *u_buffer = mbd->pre.u_buffer;
- uint8_t *v_buffer = mbd->pre.v_buffer;
+ uint8_t *y_buffer = mbd->plane[0].pre[0].buf;
+ uint8_t *u_buffer = mbd->plane[1].pre[0].buf;
+ uint8_t *v_buffer = mbd->plane[2].pre[0].buf;
for (mb_row = 0; mb_row < mb_rows; mb_row++) {
#if ALT_REF_MC_ENABLED
@@ -368,9 +368,9 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
}
// Restore input state
- mbd->pre.y_buffer = y_buffer;
- mbd->pre.u_buffer = u_buffer;
- mbd->pre.v_buffer = v_buffer;
+ mbd->plane[0].pre[0].buf = y_buffer;
+ mbd->plane[1].pre[0].buf = u_buffer;
+ mbd->plane[2].pre[0].buf = v_buffer;
}
void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) {
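[editor's note] The temporal filter repoints each plane's pre[0].buf as it walks macroblocks, so the surrounding code stashes the three pointers up front and restores them at the end, exactly as the old y/u/v trio did. The pattern in sketch form (plane_sketch and filter_frame are illustrative):

#include <stdint.h>

struct buf_2d { uint8_t *buf; int stride; };
struct plane_sketch { struct buf_2d pre[2]; };

static void filter_frame(struct plane_sketch planes[3]) {
  uint8_t *saved[3];
  int p;
  for (p = 0; p < 3; p++)
    saved[p] = planes[p].pre[0].buf;   /* save input state */
  /* ... per-macroblock loop repoints planes[p].pre[0].buf here ... */
  for (p = 0; p < 3; p++)
    planes[p].pre[0].buf = saved[p];   /* restore input state */
}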
diff --git a/vp9/encoder/vp9_tokenize.c b/vp9/encoder/vp9_tokenize.c
index b68ef5d23..f6a720651 100644
--- a/vp9/encoder/vp9_tokenize.c
+++ b/vp9/encoder/vp9_tokenize.c
@@ -36,21 +36,6 @@ extern vp9_coeff_stats tree_update_hist_16x16[BLOCK_TYPES];
extern vp9_coeff_stats tree_update_hist_32x32[BLOCK_TYPES];
#endif /* ENTROPY_STATS */
-#if CONFIG_CODE_NONZEROCOUNT
-#ifdef NZC_STATS
-unsigned int nzc_counts_4x4[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC4X4_TOKENS];
-unsigned int nzc_counts_8x8[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC8X8_TOKENS];
-unsigned int nzc_counts_16x16[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC16X16_TOKENS];
-unsigned int nzc_counts_32x32[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC32X32_TOKENS];
-unsigned int nzc_pcat_counts[MAX_NZC_CONTEXTS][NZC_TOKENS_EXTRA]
- [NZC_BITS_EXTRA][2];
-#endif
-#endif
-
static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2];
const TOKENVALUE *vp9_dct_value_tokens_ptr;
static int dct_value_cost[DCT_MAX_VALUE * 2];
@@ -59,7 +44,7 @@ const int *vp9_dct_value_cost_ptr;
static void fill_value_tokens() {
TOKENVALUE *const t = dct_value_tokens + DCT_MAX_VALUE;
- vp9_extra_bit_struct *const e = vp9_extra_bits;
+ vp9_extra_bit *const e = vp9_extra_bits;
int i = -DCT_MAX_VALUE;
int sign = 1;
@@ -77,25 +62,25 @@ static void fill_value_tokens() {
while (++j < 11 && e[j].base_val <= a) {}
- t[i].Token = --j;
+ t[i].token = --j;
eb |= (a - e[j].base_val) << 1;
} else
- t[i].Token = a;
+ t[i].token = a;
- t[i].Extra = eb;
+ t[i].extra = eb;
}
// initialize the cost for extra bits for all possible coefficient value.
{
int cost = 0;
- vp9_extra_bit_struct *p = vp9_extra_bits + t[i].Token;
+ vp9_extra_bit *p = vp9_extra_bits + t[i].token;
if (p->base_val) {
- const int extra = t[i].Extra;
- const int Length = p->Len;
+ const int extra = t[i].extra;
+ const int length = p->len;
- if (Length)
- cost += treed_cost(p->tree, p->prob, extra >> 1, Length);
+ if (length)
+ cost += treed_cost(p->tree, p->prob, extra >> 1, length);
cost += vp9_cost_bit(vp9_prob_half, extra & 1); /* sign */
dct_value_cost[i + DCT_MAX_VALUE] = cost;
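Note: the cost cached here is the rate of a token's extra bits: the residual magnitude is walked down the token's probability tree one bit at a time, and the sign is charged at probability one half. A floating-point sketch of the same computation (the real code works in fixed-point units via vp9_cost_bit and treed_cost):

    #include <math.h>

    /* Cost in bits of coding `bit` when prob/256 is the chance of a 0. */
    static double cost_bit(int prob, int bit) {
      const double p0 = prob / 256.0;
      return -log2(bit ? 1.0 - p0 : p0);
    }

    /* Sketch of the extra-bit rate: `extra` packs the residual magnitude
     * in its high bits and the sign in bit 0, as built above.  tree and
     * probs describe the token's binary tree (stand-in types). */
    static double extra_bit_cost(const int *tree, const unsigned char *probs,
                                 int extra, int length) {
      double cost = 0.0;
      int bits = extra >> 1; /* strip the sign */
      int i = 0;
      while (length) {
        const int bit = (bits >> --length) & 1;
        cost += cost_bit(probs[i >> 1], bit);
        i = tree[i + bit];
      }
      return cost + cost_bit(128, extra & 1); /* sign at p = 1/2 */
    }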
@@ -121,7 +106,7 @@ static void tokenize_b(VP9_COMP *cpi,
int dry_run) {
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
int pt; /* near block/prev token context index */
- int c = 0;
+ int c = 0, rc = 0;
TOKENEXTRA *t = *tp; /* store tokens starting here */
const struct plane_block_idx pb_idx = plane_block_idx(y_blocks, ib);
const int eob = xd->plane[pb_idx.plane].eobs[pb_idx.block];
@@ -132,15 +117,23 @@ static void tokenize_b(VP9_COMP *cpi,
const BLOCK_SIZE_TYPE sb_type = mbmi->sb_type;
const int *scan, *nb;
vp9_coeff_count *counts;
- vp9_coeff_probs *probs;
+ vp9_coeff_probs *coef_probs;
const int ref = mbmi->ref_frame != INTRA_FRAME;
ENTROPY_CONTEXT *a, *l, *a1, *l1, *a2, *l2, *a3, *l3, a_ec, l_ec;
uint8_t token_cache[1024];
-#if CONFIG_CODE_NONZEROCOUNT
- const int nzc_used = get_nzc_used(tx_size);
- int zerosleft = 0, nzc = 0;
- if (eob == 0)
- assert(xd->nzcs[ib] == 0);
+ TX_TYPE tx_type = DCT_DCT;
+#if CONFIG_CODE_ZEROGROUP
+ int last_nz_pos[3] = {-1, -1, -1}; // Encoder only
+ int is_eoo_list[3] = {0, 0, 0};
+ int is_last_zero[3] = {0, 0, 0};
+ int is_eoo_negative[3] = {0, 0, 0};
+ int o;
+ vp9_zpc_probs *zpc_probs;
+ vp9_zpc_count *zpc_count;
+ uint8_t token_cache_full[1024];
+#endif
+#if CONFIG_CODE_ZEROGROUP
+ vpx_memset(token_cache, UNKNOWN_TOKEN, sizeof(token_cache));
#endif
assert((!type && !pb_idx.plane) || (type && pb_idx.plane));
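Note: the per-orientation arrays declared above (last_nz_pos, is_eoo_list, and friends) are indexed by an orientation id in {0, 1, 2}: zero-group coding partitions a transform block's positions into three directional classes so runs of zeros can be terminated per class. The exact partition lives in vp9_get_orientation; a toy version, purely illustrative, splits on a position relative to the main diagonal:

    /* Zero-group coding classifies each raster position rc of a transform
     * block into one of three directional classes.  This hypothetical
     * split is not the real vp9_get_orientation(). */
    typedef enum { ORIENT_DIAG = 0, ORIENT_HORZ = 1, ORIENT_VERT = 2 } ORIENT;

    static ORIENT toy_get_orientation(int rc, int stride) {
      const int row = rc / stride, col = rc % stride;
      if (col > row) return ORIENT_HORZ; /* above the main diagonal */
      if (row > col) return ORIENT_VERT; /* below the main diagonal */
      return ORIENT_DIAG;                /* on the diagonal */
    }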
@@ -206,8 +199,8 @@ static void tokenize_b(VP9_COMP *cpi,
switch (tx_size) {
default:
case TX_4X4: {
- const TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
- get_tx_type_4x4(xd, ib) : DCT_DCT;
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_4x4(xd, ib) : DCT_DCT;
a_ec = *a;
l_ec = *l;
seg_eob = 16;
@@ -220,14 +213,18 @@ static void tokenize_b(VP9_COMP *cpi,
}
}
counts = cpi->coef_counts_4x4;
- probs = cpi->common.fc.coef_probs_4x4;
+ coef_probs = cpi->common.fc.coef_probs_4x4;
+#if CONFIG_CODE_ZEROGROUP
+ zpc_count = &cpi->common.fc.zpc_counts_4x4;
+ zpc_probs = &cpi->common.fc.zpc_probs_4x4;
+#endif
break;
}
case TX_8X8: {
const int sz = 3 + mb_width_log2(sb_type);
const int x = ib & ((1 << sz) - 1), y = ib - x;
- const TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
- get_tx_type_8x8(xd, y + (x >> 1)) : DCT_DCT;
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_8x8(xd, y + (x >> 1)) : DCT_DCT;
a_ec = (a[0] + a[1]) != 0;
l_ec = (l[0] + l[1]) != 0;
seg_eob = 64;
@@ -240,14 +237,18 @@ static void tokenize_b(VP9_COMP *cpi,
}
}
counts = cpi->coef_counts_8x8;
- probs = cpi->common.fc.coef_probs_8x8;
+ coef_probs = cpi->common.fc.coef_probs_8x8;
+#if CONFIG_CODE_ZEROGROUP
+ zpc_count = &cpi->common.fc.zpc_counts_8x8;
+ zpc_probs = &cpi->common.fc.zpc_probs_8x8;
+#endif
break;
}
case TX_16X16: {
const int sz = 4 + mb_width_log2(sb_type);
const int x = ib & ((1 << sz) - 1), y = ib - x;
- const TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
- get_tx_type_16x16(xd, y + (x >> 2)) : DCT_DCT;
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_16x16(xd, y + (x >> 2)) : DCT_DCT;
if (type != PLANE_TYPE_UV) {
a_ec = (a[0] + a[1] + a[2] + a[3]) != 0;
l_ec = (l[0] + l[1] + l[2] + l[3]) != 0;
@@ -265,7 +266,11 @@ static void tokenize_b(VP9_COMP *cpi,
}
}
counts = cpi->coef_counts_16x16;
- probs = cpi->common.fc.coef_probs_16x16;
+ coef_probs = cpi->common.fc.coef_probs_16x16;
+#if CONFIG_CODE_ZEROGROUP
+ zpc_count = &cpi->common.fc.zpc_counts_16x16;
+ zpc_probs = &cpi->common.fc.zpc_probs_16x16;
+#endif
break;
}
case TX_32X32:
@@ -283,7 +288,11 @@ static void tokenize_b(VP9_COMP *cpi,
seg_eob = 1024;
scan = vp9_default_zig_zag1d_32x32;
counts = cpi->coef_counts_32x32;
- probs = cpi->common.fc.coef_probs_32x32;
+ coef_probs = cpi->common.fc.coef_probs_32x32;
+#if CONFIG_CODE_ZEROGROUP
+ zpc_count = &cpi->common.fc.zpc_counts_32x32;
+ zpc_probs = &cpi->common.fc.zpc_probs_32x32;
+#endif
break;
}
@@ -294,56 +303,129 @@ static void tokenize_b(VP9_COMP *cpi,
if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))
seg_eob = 0;
+#if CONFIG_CODE_ZEROGROUP
+ vpx_memset(token_cache_full, ZERO_TOKEN, sizeof(token_cache_full));
+ for (c = 0; c < eob; ++c) {
+ rc = scan[c];
+ token_cache_full[rc] = vp9_dct_value_tokens_ptr[qcoeff_ptr[rc]].token;
+ o = vp9_get_orientation(rc, tx_size);
+ if (qcoeff_ptr[rc] != 0) {
+ last_nz_pos[o] = c;
+ }
+ }
+#endif
+ c = 0;
do {
const int band = get_coef_band(scan, tx_size, c);
int token;
int v = 0;
-#if CONFIG_CODE_NONZEROCOUNT
- if (nzc_used)
- zerosleft = seg_eob - xd->nzcs[ib] - c + nzc;
-#endif
+ rc = scan[c];
+ if (c)
+ pt = vp9_get_coef_context(scan, nb, pad, token_cache, c, default_eob);
if (c < eob) {
- const int rc = scan[c];
v = qcoeff_ptr[rc];
assert(-DCT_MAX_VALUE <= v && v < DCT_MAX_VALUE);
- t->Extra = vp9_dct_value_tokens_ptr[v].Extra;
- token = vp9_dct_value_tokens_ptr[v].Token;
+ t->extra = vp9_dct_value_tokens_ptr[v].extra;
+ token = vp9_dct_value_tokens_ptr[v].token;
} else {
-#if CONFIG_CODE_NONZEROCOUNT
- if (nzc_used)
- break;
- else
-#endif
- token = DCT_EOB_TOKEN;
+ token = DCT_EOB_TOKEN;
}
- t->Token = token;
- t->context_tree = probs[type][ref][band][pt];
-#if CONFIG_CODE_NONZEROCOUNT
- // Skip zero node if there are no zeros left
- if (nzc_used)
- t->skip_eob_node = 1 + (zerosleft == 0);
- else
+ t->token = token;
+ t->context_tree = coef_probs[type][ref][band][pt];
+ t->skip_eob_node = (c > 0) && (token_cache[scan[c - 1]] == 0);
+ assert(vp9_coef_encodings[t->token].len - t->skip_eob_node > 0);
+#if CONFIG_CODE_ZEROGROUP
+ o = vp9_get_orientation(rc, tx_size);
+ t->skip_coef_val = (token_cache[rc] == ZERO_TOKEN || is_eoo_list[o]);
+ if (t->skip_coef_val) {
+ assert(v == 0);
+ }
+ // No need to transmit any token
+ if (t->skip_eob_node && t->skip_coef_val) {
+ assert(token == ZERO_TOKEN);
+ is_last_zero[o] = 1;
+ token_cache[scan[c]] = ZERO_TOKEN;
+ continue;
+ }
#endif
- t->skip_eob_node = (c > 0) && (token_cache[c - 1] == 0);
- assert(vp9_coef_encodings[t->Token].len - t->skip_eob_node > 0);
if (!dry_run) {
++counts[type][ref][band][pt][token];
if (!t->skip_eob_node)
++cpi->common.fc.eob_branch_counts[tx_size][type][ref][band][pt];
}
-#if CONFIG_CODE_NONZEROCOUNT
- nzc += (v != 0);
+ token_cache[scan[c]] = token;
+#if CONFIG_CODE_ZEROGROUP
+ if (token == ZERO_TOKEN && !t->skip_coef_val) {
+ int eoo = 0, use_eoo;
+#if USE_ZPC_EOORIENT == 1
+ use_eoo = vp9_use_eoo(c, seg_eob, scan, tx_size,
+ is_last_zero, is_eoo_list);
+#else
+ use_eoo = 0;
+#endif
+ if (use_eoo) {
+ eoo = vp9_is_eoo(c, eob, scan, tx_size, qcoeff_ptr, last_nz_pos);
+ if (eoo && is_eoo_negative[o]) eoo = 0;
+ if (eoo) {
+ int c_;
+ int savings = 0;
+ int zsaved = 0;
+ savings =
+ vp9_cost_bit((*zpc_probs)[ref]
+ [coef_to_zpc_band(band)]
+ [coef_to_zpc_ptok(pt)][0], 1) -
+ vp9_cost_bit((*zpc_probs)[ref]
+ [coef_to_zpc_band(band)]
+ [coef_to_zpc_ptok(pt)][0], 0);
+ for (c_ = c + 1; c_ < eob; ++c_) {
+ if (o == vp9_get_orientation(scan[c_], tx_size)) {
+ int pt_ = vp9_get_coef_context(scan, nb, pad, token_cache_full,
+ c_, default_eob);
+ int band_ = get_coef_band(scan, tx_size, c_);
+ assert(token_cache_full[scan[c_]] == ZERO_TOKEN);
+ if (!c_ || token_cache_full[scan[c_ - 1]])
+ savings +=
+ vp9_cost_bit(coef_probs[type][ref][band_][pt_][0], 1);
+ savings += vp9_cost_bit(coef_probs[type][ref][band_][pt_][1], 0);
+ zsaved++;
+ }
+ }
+ /*
+ if (!dry_run)
+ if (savings > 0)
+ printf("savings %d zsaved %d (%d, %d)\n",
+ savings, zsaved, tx_size, band);
+ */
+ if (savings < 0) {
+ eoo = 0;
+ is_eoo_negative[o] = 1;
+ }
+ }
+ }
+ if (use_eoo) {
+ t++;
+ t->skip_eob_node = t->skip_coef_val = 0;
+ // transmit the eoo symbol
+ t->token = !eoo ? ZPC_ISOLATED : ZPC_EOORIENT;
+ t->context_tree = &((*zpc_probs)[ref]
+ [coef_to_zpc_band(band)]
+ [coef_to_zpc_ptok(pt)][0]);
+ if (!dry_run)
+ (*zpc_count)[ref]
+ [coef_to_zpc_band(band)]
+ [coef_to_zpc_ptok(pt)][0][!eoo]++;
+ if (eoo) {
+ assert(is_eoo_list[o] == 0);
+ is_eoo_list[o] = 1;
+ }
+ }
+ }
+ is_last_zero[o] = (token == ZERO_TOKEN);
#endif
- token_cache[c] = token;
-
- pt = vp9_get_coef_context(scan, nb, pad, token_cache, c + 1, default_eob);
++t;
} while (c < eob && ++c < seg_eob);
-#if CONFIG_CODE_NONZEROCOUNT
- assert(nzc == xd->nzcs[ib]);
-#endif
*tp = t;
a_ec = l_ec = (c > 0); /* 0 <-> all coeff data is zero */
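Note: the savings accounting in the loop above decides whether an end-of-orientation symbol is worth sending. The symbol may cost more than flagging an isolated zero (EOO is the 0 branch of the zpc probability), but it suppresses every later zero token in the same orientation. A self-contained sketch of that trade-off, with the suppressed zero-token rates supplied as hypothetical inputs:

    #include <math.h>

    /* Cost in bits of coding `bit` when prob/256 is the chance of a 0. */
    static double cost_bit(int prob, int bit) {
      const double p0 = prob / 256.0;
      return -log2(bit ? 1.0 - p0 : p0);
    }

    /* Sketch mirroring the savings test above: start with the bits saved
     * (or lost) on the EOO/isolated symbol itself, add the rate of each
     * later zero token in the orientation that EOO would suppress, and
     * send EOO only when the net is non-negative. */
    static int eoo_saves_bits(int zpc_prob, const double *zero_costs, int n) {
      double savings = cost_bit(zpc_prob, 1) - cost_bit(zpc_prob, 0);
      int i;
      for (i = 0; i < n; i++)
        savings += zero_costs[i];
      return savings >= 0.0;
    }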
@@ -736,9 +818,6 @@ static void stuff_b(VP9_COMP *cpi,
TOKENEXTRA *t = *tp;
const int ref = mbmi->ref_frame != INTRA_FRAME;
ENTROPY_CONTEXT *a, *l, *a1, *l1, *a2, *l2, *a3, *l3, a_ec, l_ec;
-#if CONFIG_CODE_NONZEROCOUNT
- const int nzc_used = get_nzc_used(tx_size);
-#endif
if (sb_type == BLOCK_SIZE_SB64X64) {
a = (ENTROPY_CONTEXT *)xd->above_context +
@@ -804,23 +883,20 @@ static void stuff_b(VP9_COMP *cpi,
break;
}
-#if CONFIG_CODE_NONZEROCOUNT
- if (!nzc_used) {
+ pt = combine_entropy_contexts(a_ec, l_ec);
+ band = 0;
+ t->token = DCT_EOB_TOKEN;
+ t->context_tree = probs[type][ref][band][pt];
+ t->skip_eob_node = 0;
+#if CONFIG_CODE_ZEROGROUP
+ t->skip_coef_val = 0;
#endif
- pt = combine_entropy_contexts(a_ec, l_ec);
- band = 0;
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = probs[type][ref][band][pt];
- t->skip_eob_node = 0;
- ++t;
- *tp = t;
- if (!dry_run) {
- ++counts[type][ref][band][pt][DCT_EOB_TOKEN];
- }
-#if CONFIG_CODE_NONZEROCOUNT
+ ++t;
+ *tp = t;
+ if (!dry_run) {
+ ++counts[type][ref][band][pt][DCT_EOB_TOKEN];
}
-#endif
- *a = *l = 0;
+ *a = *l = 0;
if (tx_size == TX_8X8) {
a[1] = 0;
l[1] = 0;
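Note: the rewritten tail of stuff_b hinges on combine_entropy_contexts, which folds the above and left nonzero flags into the 0..2 context used to pick the EOB probabilities. Assuming the usual definition from vp9_entropy.h, it reduces to:

    typedef signed char ENTROPY_CONTEXT;

    /* Above/left nonzero flags fold into a three-valued coef context. */
    static int combine_entropy_contexts(ENTROPY_CONTEXT a, ENTROPY_CONTEXT l) {
      return (a != 0) + (l != 0);
    }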
diff --git a/vp9/encoder/vp9_tokenize.h b/vp9/encoder/vp9_tokenize.h
index 2dcbd3002..da1c817a2 100644
--- a/vp9/encoder/vp9_tokenize.h
+++ b/vp9/encoder/vp9_tokenize.h
@@ -17,15 +17,18 @@
void vp9_tokenize_initialize();
typedef struct {
- int16_t Token;
- int16_t Extra;
+ int16_t token;
+ int16_t extra;
} TOKENVALUE;
typedef struct {
const vp9_prob *context_tree;
- int16_t Extra;
- uint8_t Token;
+ int16_t extra;
+ uint8_t token;
uint8_t skip_eob_node;
+#if CONFIG_CODE_ZEROGROUP
+ uint8_t skip_coef_val;
+#endif
} TOKENEXTRA;
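Note: with the Token/Extra fields renamed to lower case, a consumer walks the token run and, when skip_eob_node is set, starts the tree walk past the EOB branch. A sketch of such a pack loop (write_token is a hypothetical stand-in for the real bool-coder path in vp9_bitstream.c):

    #include <stdint.h>

    typedef uint8_t vp9_prob;

    /* Stand-in mirroring the struct above (skip_coef_val omitted). */
    typedef struct {
      const vp9_prob *context_tree;
      int16_t extra;
      uint8_t token;
      uint8_t skip_eob_node;
    } TOKEN;

    /* Sketch: hand each token to the bool coder.  When skip_eob_node is
     * set the tree walk starts past the EOB branch, one fewer bit. */
    static void pack_tokens(const TOKEN *p, const TOKEN *stop,
                            void (*write_token)(int token,
                                                const vp9_prob *probs,
                                                int skip_eob_node,
                                                int16_t extra)) {
      for (; p < stop; ++p)
        write_token(p->token, p->context_tree, p->skip_eob_node, p->extra);
    }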
typedef int64_t vp9_coeff_accum[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]