Diffstat (limited to 'vp9')
-rw-r--r--  vp9/common/vp9_alloccommon.c  4
-rw-r--r--  vp9/common/vp9_blockd.h  42
-rw-r--r--  vp9/common/vp9_common.h  5
-rw-r--r--  vp9/common/vp9_debugmodes.c  147
-rw-r--r--  vp9/common/vp9_default_coef_probs.h  687
-rw-r--r--  vp9/common/vp9_entropy.c  53
-rw-r--r--  vp9/common/vp9_entropy.h  6
-rw-r--r--  vp9/common/vp9_entropymode.c  172
-rw-r--r--  vp9/common/vp9_entropymode.h  28
-rw-r--r--  vp9/common/vp9_findnearmv.c  10
-rw-r--r--  vp9/common/vp9_findnearmv.h  16
-rw-r--r--  vp9/common/vp9_header.h  39
-rw-r--r--  vp9/common/vp9_idct.c  42
-rw-r--r--  vp9/common/vp9_idct.h  1
-rw-r--r--  vp9/common/vp9_loopfilter.c  2
-rw-r--r--  vp9/common/vp9_modecont.c  16
-rw-r--r--  vp9/common/vp9_modecont.h  4
-rw-r--r--  vp9/common/vp9_modecontext.c  6
-rw-r--r--  vp9/common/vp9_mvref_common.c  3
-rw-r--r--  vp9/common/vp9_onyxc_int.h  46
-rw-r--r--  vp9/common/vp9_postproc.c  2
-rw-r--r--  vp9/common/vp9_reconinter.c  8
-rw-r--r--  vp9/common/vp9_reconintra.c  2
-rw-r--r--  vp9/common/vp9_rtcd_defs.sh  5
-rw-r--r--  vp9/common/vp9_seg_common.c  79
-rw-r--r--  vp9/common/vp9_seg_common.h  5
-rw-r--r--  vp9/decoder/vp9_decodemv.c  411
-rw-r--r--  vp9/decoder/vp9_decodframe.c  233
-rw-r--r--  vp9/decoder/vp9_detokenize.c  28
-rw-r--r--  vp9/decoder/vp9_onyxd_if.c  3
-rw-r--r--  vp9/decoder/vp9_read_bit_buffer.h  50
-rw-r--r--  vp9/encoder/vp9_bitstream.c  475
-rw-r--r--  vp9/encoder/vp9_block.h  9
-rw-r--r--  vp9/encoder/vp9_boolhuff.c  1
-rw-r--r--  vp9/encoder/vp9_dct.c  30
-rw-r--r--  vp9/encoder/vp9_encodeframe.c  424
-rw-r--r--  vp9/encoder/vp9_encodeintra.c  66
-rw-r--r--  vp9/encoder/vp9_encodeintra.h  2
-rw-r--r--  vp9/encoder/vp9_encodemb.c  53
-rw-r--r--  vp9/encoder/vp9_firstpass.c  2
-rw-r--r--  vp9/encoder/vp9_modecosts.c  24
-rw-r--r--  vp9/encoder/vp9_onyx_if.c  174
-rw-r--r--  vp9/encoder/vp9_onyx_int.h  39
-rw-r--r--  vp9/encoder/vp9_ratectrl.c  8
-rw-r--r--  vp9/encoder/vp9_rdopt.c  922
-rw-r--r--  vp9/encoder/vp9_tokenize.c  15
-rw-r--r--  vp9/encoder/vp9_variance_c.c  5
-rw-r--r--  vp9/encoder/vp9_write_bit_buffer.h  42
-rw-r--r--  vp9/vp9_common.mk  2
-rw-r--r--  vp9/vp9_cx_iface.c  114
-rw-r--r--  vp9/vp9_dx_iface.c  20
-rw-r--r--  vp9/vp9cx.mk  1
-rw-r--r--  vp9/vp9dx.mk  1
53 files changed, 2592 insertions, 1992 deletions
diff --git a/vp9/common/vp9_alloccommon.c b/vp9/common/vp9_alloccommon.c
index 73f26f6b8..ef14e1ab6 100644
--- a/vp9/common/vp9_alloccommon.c
+++ b/vp9/common/vp9_alloccommon.c
@@ -202,8 +202,6 @@ void vp9_create_common(VP9_COMMON *oci) {
vp9_init_mbmode_probs(oci);
- vp9_default_bmode_probs(oci->fc.bmode_prob);
-
oci->txfm_mode = ONLY_4X4;
oci->comp_pred_mode = HYBRID_PREDICTION;
oci->no_lpf = 0;
@@ -212,8 +210,6 @@ void vp9_create_common(VP9_COMMON *oci) {
// Initialize reference frame sign bias structure to defaults
vpx_memset(oci->ref_frame_sign_bias, 0, sizeof(oci->ref_frame_sign_bias));
-
- oci->kf_ymode_probs_update = 0;
}
void vp9_remove_common(VP9_COMMON *oci) {
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index b622aa3ec..ea330acc4 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -37,7 +37,6 @@
/* Segment Feature Masks */
#define SEGMENT_DELTADATA 0
#define SEGMENT_ABSDATA 1
-#define MAX_MV_REFS 9
#define MAX_MV_REF_CANDIDATES 2
typedef enum {
@@ -78,21 +77,17 @@ typedef enum {
D27_PRED, // Directional 27 deg = round(arctan(1/2) * 180/pi)
D63_PRED, // Directional 63 deg = round(arctan(2/1) * 180/pi)
TM_PRED, // True-motion
- I4X4_PRED, // Each 4x4 subblock has its own mode
NEARESTMV,
NEARMV,
ZEROMV,
NEWMV,
- SPLITMV,
MB_MODE_COUNT
} MB_PREDICTION_MODE;
static INLINE int is_inter_mode(MB_PREDICTION_MODE mode) {
- return mode >= NEARESTMV && mode <= SPLITMV;
+ return mode >= NEARESTMV && mode <= NEWMV;
}
-#define INTRA_MODE_COUNT (TM_PRED + 1)
-
// Segment level features.
typedef enum {
SEG_LVL_ALT_Q = 0, // Use alternate Quantizer ....
@@ -107,8 +102,7 @@ typedef enum {
TX_4X4 = 0, // 4x4 dct transform
TX_8X8 = 1, // 8x8 dct transform
TX_16X16 = 2, // 16x16 dct transform
- TX_SIZE_MAX_MB = 3, // Number of different transforms available
- TX_32X32 = TX_SIZE_MAX_MB, // 32x32 dct transform
+ TX_32X32 = 3, // 32x32 dct transform
TX_SIZE_MAX_SB, // Number of transforms available to SBs
} TX_SIZE;
@@ -119,16 +113,12 @@ typedef enum {
ADST_ADST = 3 // ADST in both directions
} TX_TYPE;
-#define VP9_YMODES (I4X4_PRED + 1)
-#define VP9_UV_MODES (TM_PRED + 1)
-#define VP9_I32X32_MODES (TM_PRED + 1)
+#define VP9_INTRA_MODES (TM_PRED + 1)
-#define VP9_MVREFS (1 + SPLITMV - NEARESTMV)
+#define VP9_MVREFS (1 + NEWMV - NEARESTMV)
#define WHT_UPSCALE_FACTOR 2
-#define VP9_BINTRAMODES INTRA_MODE_COUNT
-
/* For keyframes, intra block modes are predicted by the (already decoded)
modes for the Y blocks to the left and above us; for interframes, there
is a single probability table. */
@@ -316,10 +306,6 @@ typedef struct macroblockd {
/* 0 (do not update) 1 (update) the macroblock segmentation map. */
unsigned char update_mb_segmentation_map;
-#if CONFIG_IMPLICIT_SEGMENTATION
- unsigned char allow_implicit_segment_update;
-#endif
-
/* 0 (do not update) 1 (update) the macroblock segmentation feature data. */
unsigned char update_mb_segmentation_data;
@@ -538,14 +524,16 @@ static TX_TYPE txfm_map(MB_PREDICTION_MODE bmode) {
}
static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, int ib) {
- TX_TYPE tx_type = DCT_DCT;
- if (xd->lossless)
+ TX_TYPE tx_type;
+ MODE_INFO *mi = xd->mode_info_context;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+ if (xd->lossless || mbmi->ref_frame != INTRA_FRAME)
return DCT_DCT;
- if (xd->mode_info_context->mbmi.mode == I4X4_PRED) {
- tx_type = txfm_map(
- xd->mode_info_context->bmi[ib].as_mode.first);
- } else if (xd->mode_info_context->mbmi.mode <= TM_PRED) {
- tx_type = txfm_map(xd->mode_info_context->mbmi.mode);
+ if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
+ tx_type = txfm_map(mi->bmi[ib].as_mode.first);
+ } else {
+ assert(mbmi->mode <= TM_PRED);
+ tx_type = txfm_map(mbmi->mode);
}
return tx_type;
}
@@ -689,7 +677,6 @@ static INLINE void foreach_predicted_block_in_plane(
const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, int plane,
foreach_predicted_block_visitor visit, void *arg) {
int i, x, y;
- const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
// block sizes in number of 4x4 blocks log 2 ("*_b")
// 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
@@ -700,7 +687,8 @@ static INLINE void foreach_predicted_block_in_plane(
// size of the predictor to use.
int pred_w, pred_h;
- if (mode == SPLITMV) {
+ if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+ assert(bsize == BLOCK_SIZE_SB8X8);
pred_w = 0;
pred_h = 0;
} else {
diff --git a/vp9/common/vp9_common.h b/vp9/common/vp9_common.h
index b6252d93e..5c97f9863 100644
--- a/vp9/common/vp9_common.h
+++ b/vp9/common/vp9_common.h
@@ -60,4 +60,9 @@ static INLINE int multiple16(int value) {
return (value + 15) & ~15;
}
+#define SYNC_CODE_0 0x49
+#define SYNC_CODE_1 0x83
+#define SYNC_CODE_2 0x42
+
+
#endif // VP9_COMMON_VP9_COMMON_H_
diff --git a/vp9/common/vp9_debugmodes.c b/vp9/common/vp9_debugmodes.c
deleted file mode 100644
index 827e4bf84..000000000
--- a/vp9/common/vp9_debugmodes.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <stdio.h>
-#include "vp9/common/vp9_onyxc_int.h"
-#include "vp9/common/vp9_blockd.h"
-#include "vp9/common/vp9_tile_common.h"
-typedef struct {
- char *debug_array;
- int w;
- int h;
-} DEBUG_MODE_STRUCT;
-
-static void draw_rect(int r, int c, int w, int h, DEBUG_MODE_STRUCT *da) {
- int i;
- da->debug_array[r / 2 * da->w + c] = '+';
- for (i = r / 2 + 1; i < r / 2 + h / 2; i++) {
- da->debug_array[i * da->w + c] = '|';
- }
- for (i = c + 1; i < c + w; i++) {
- da->debug_array[r / 2 * da->w + i] = '-';
- }
-}
-static void debug_partitioning(VP9_COMMON * cm, MODE_INFO *m, int mi_row,
- int mi_col, BLOCK_SIZE_TYPE bsize,
- DEBUG_MODE_STRUCT *da) {
- const int mis = cm->mode_info_stride;
- int bwl, bhl;
- int bw, bh;
- int bsl = mi_width_log2(bsize), bs = (1 << bsl) / 2;
- int n;
- PARTITION_TYPE partition;
- BLOCK_SIZE_TYPE subsize;
-
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
-
- bwl = mi_width_log2(m->mbmi.sb_type);
- bhl = mi_height_log2(m->mbmi.sb_type);
- bw = 1 << bwl;
- bh = 1 << bhl;
-
- // parse the partition type
- if ((bwl == bsl) && (bhl == bsl))
- partition = PARTITION_NONE;
- else if ((bwl == bsl) && (bhl < bsl))
- partition = PARTITION_HORZ;
- else if ((bwl < bsl) && (bhl == bsl))
- partition = PARTITION_VERT;
- else if ((bwl < bsl) && (bhl < bsl))
- partition = PARTITION_SPLIT;
- else
- assert(0);
-
-#if CONFIG_AB4X4
- if (bsize == BLOCK_SIZE_SB8X8 && m->mbmi.sb_type < BLOCK_SIZE_SB8X8)
- partition = PARTITION_SPLIT;
- if (bsize < BLOCK_SIZE_SB8X8)
- return;
-#endif
-
-#if CONFIG_AB4X4
- if (bsize >= BLOCK_SIZE_SB8X8) {
-#else
- if (bsize > BLOCK_SIZE_SB8X8) {
-#endif
- }
-
- subsize = get_subsize(bsize, partition);
- switch (partition) {
- case PARTITION_NONE:
- draw_rect(mi_row * 8, mi_col * 8, bw * 8, bh * 8, da);
- break;
- case PARTITION_HORZ:
- draw_rect(mi_row * 8, mi_col * 8, bw * 8, bh * 8, da);
- if ((mi_row + bh) < cm->mi_rows)
- draw_rect(8 * bs + mi_row * 8, mi_col * 8, bw * 8, bh * 8, da);
- break;
- case PARTITION_VERT:
- draw_rect(mi_row * 8, mi_col * 8, bw * 8, bh * 8, da);
- if ((mi_col + bw) < cm->mi_cols)
- draw_rect(mi_row * 8, 8 * bs + mi_col * 8, bw * 8, bh * 8, da);
- break;
- case PARTITION_SPLIT:
- for (n = 0; n < 4; n++) {
- int j = n >> 1, i = n & 0x01;
- debug_partitioning(cm, m + j * bs * mis + i * bs, mi_row + j * bs,
- mi_col + i * bs, subsize, da);
- }
- break;
- default:
- assert(0);
- }
-}
-static void debug_partitionings(VP9_COMMON *c, DEBUG_MODE_STRUCT *da) {
- const int mis = c->mode_info_stride;
- MODE_INFO *m, *m_ptr = c->mi;
- int mi_row, mi_col;
-
- m_ptr += c->cur_tile_mi_col_start + c->cur_tile_mi_row_start * mis;
-
- for (mi_row = c->cur_tile_mi_row_start; mi_row < c->cur_tile_mi_row_end;
- mi_row += 8, m_ptr += 8 * mis) {
- m = m_ptr;
- for (mi_col = c->cur_tile_mi_col_start; mi_col < c->cur_tile_mi_col_end;
- mi_col += 8, m += 8) {
- debug_partitioning(c, m, mi_row, mi_col, BLOCK_SIZE_SB64X64, da);
- }
- }
-}
-void vp9_debug_tile_partitionings(VP9_COMMON *pc) {
- int tile_row, tile_col;
- DEBUG_MODE_STRUCT da;
-
- da.w = pc->width;
- da.h = pc->height / 2;
- da.debug_array = vpx_malloc(da.h * da.w);
- vpx_memset(da.debug_array, ' ', da.h * da.w);
- for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
- vp9_get_tile_row_offsets(pc, tile_row);
- for (tile_col = 0; tile_col < pc->tile_columns; tile_col++) {
- vp9_get_tile_col_offsets(pc, tile_col);
-
- debug_partitionings(pc, &da);
- }
- }
- {
- FILE *f = fopen("partitionings.txt", "a");
- int i, j;
- fprintf(f, "\n\n\nFrame: %d \n", pc->current_video_frame);
- for (i = 0; i < da.h; i++) {
- for (j = 0; j < da.w; j++) {
- fprintf(f, "%c", da.debug_array[i * da.w + j]);
- }
- fprintf(f, "\n");
- }
- fclose(f);
- }
- vpx_free(da.debug_array);
-}
diff --git a/vp9/common/vp9_default_coef_probs.h b/vp9/common/vp9_default_coef_probs.h
index 3e8211954..15f9e97dd 100644
--- a/vp9/common/vp9_default_coef_probs.h
+++ b/vp9/common/vp9_default_coef_probs.h
@@ -11,6 +11,692 @@
/*Generated file, included by vp9_entropy.c*/
+#if CONFIG_BALANCED_COEFTREE
+static const vp9_coeff_probs_model default_coef_probs_4x4[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 6, 213, 178 },
+ { 26, 113, 132 },
+ { 34, 17, 68 }
+ }, { /* Coeff Band 1 */
+ { 66, 96, 178 },
+ { 63, 96, 174 },
+ { 67, 54, 154 },
+ { 62, 28, 126 },
+ { 48, 9, 84 },
+ { 20, 1, 32 }
+ }, { /* Coeff Band 2 */
+ { 64, 144, 206 },
+ { 70, 99, 191 },
+ { 69, 36, 152 },
+ { 55, 9, 106 },
+ { 35, 1, 60 },
+ { 14, 1, 22 }
+ }, { /* Coeff Band 3 */
+ { 82, 154, 222 },
+ { 83, 112, 205 },
+ { 81, 31, 164 },
+ { 62, 7, 118 },
+ { 42, 1, 74 },
+ { 18, 1, 30 }
+ }, { /* Coeff Band 4 */
+ { 52, 179, 233 },
+ { 64, 132, 214 },
+ { 73, 36, 170 },
+ { 59, 8, 116 },
+ { 38, 1, 65 },
+ { 15, 1, 26 }
+ }, { /* Coeff Band 5 */
+ { 29, 175, 238 },
+ { 26, 169, 223 },
+ { 41, 80, 182 },
+ { 39, 32, 127 },
+ { 26, 10, 69 },
+ { 11, 2, 28 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 21, 226, 234 },
+ { 52, 182, 212 },
+ { 80, 112, 177 }
+ }, { /* Coeff Band 1 */
+ { 111, 164, 243 },
+ { 88, 152, 231 },
+ { 90, 43, 186 },
+ { 70, 12, 132 },
+ { 44, 2, 76 },
+ { 19, 1, 33 }
+ }, { /* Coeff Band 2 */
+ { 96, 185, 246 },
+ { 99, 127, 231 },
+ { 88, 21, 177 },
+ { 64, 5, 122 },
+ { 38, 1, 69 },
+ { 18, 1, 30 }
+ }, { /* Coeff Band 3 */
+ { 84, 206, 249 },
+ { 94, 147, 237 },
+ { 95, 33, 187 },
+ { 71, 8, 131 },
+ { 47, 1, 83 },
+ { 26, 1, 44 }
+ }, { /* Coeff Band 4 */
+ { 38, 221, 252 },
+ { 58, 177, 241 },
+ { 78, 46, 188 },
+ { 59, 9, 122 },
+ { 34, 1, 66 },
+ { 18, 1, 34 }
+ }, { /* Coeff Band 5 */
+ { 21, 216, 253 },
+ { 21, 206, 244 },
+ { 42, 93, 200 },
+ { 43, 41, 146 },
+ { 36, 13, 93 },
+ { 31, 1, 55 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 7, 213, 219 },
+ { 23, 139, 182 },
+ { 38, 60, 125 }
+ }, { /* Coeff Band 1 */
+ { 69, 156, 220 },
+ { 52, 178, 213 },
+ { 69, 111, 190 },
+ { 69, 58, 155 },
+ { 58, 21, 104 },
+ { 39, 7, 60 }
+ }, { /* Coeff Band 2 */
+ { 68, 189, 228 },
+ { 70, 158, 221 },
+ { 83, 64, 189 },
+ { 73, 18, 141 },
+ { 48, 4, 88 },
+ { 23, 1, 41 }
+ }, { /* Coeff Band 3 */
+ { 99, 194, 236 },
+ { 91, 138, 224 },
+ { 91, 53, 189 },
+ { 74, 20, 142 },
+ { 48, 6, 90 },
+ { 22, 1, 41 }
+ }, { /* Coeff Band 4 */
+ { 52, 203, 244 },
+ { 60, 168, 231 },
+ { 75, 62, 189 },
+ { 61, 18, 132 },
+ { 38, 4, 72 },
+ { 17, 1, 39 }
+ }, { /* Coeff Band 5 */
+ { 33, 192, 247 },
+ { 31, 185, 234 },
+ { 46, 85, 185 },
+ { 39, 35, 132 },
+ { 28, 15, 80 },
+ { 13, 5, 38 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 5, 247, 246 },
+ { 28, 209, 228 },
+ { 65, 137, 203 }
+ }, { /* Coeff Band 1 */
+ { 69, 208, 250 },
+ { 54, 207, 242 },
+ { 81, 92, 204 },
+ { 70, 54, 153 },
+ { 58, 40, 108 },
+ { 58, 35, 71 }
+ }, { /* Coeff Band 2 */
+ { 65, 215, 250 },
+ { 72, 185, 239 },
+ { 92, 50, 197 },
+ { 75, 14, 147 },
+ { 49, 2, 99 },
+ { 26, 1, 53 }
+ }, { /* Coeff Band 3 */
+ { 70, 220, 251 },
+ { 76, 186, 241 },
+ { 90, 65, 198 },
+ { 75, 26, 151 },
+ { 58, 12, 112 },
+ { 34, 6, 49 }
+ }, { /* Coeff Band 4 */
+ { 34, 224, 253 },
+ { 44, 204, 245 },
+ { 69, 85, 204 },
+ { 64, 31, 150 },
+ { 44, 2, 78 },
+ { 1, 1, 128 }
+ }, { /* Coeff Band 5 */
+ { 25, 216, 253 },
+ { 21, 215, 248 },
+ { 47, 108, 214 },
+ { 47, 48, 160 },
+ { 26, 20, 90 },
+ { 64, 171, 128 }
+ }
+ }
+ }
+};
+static const vp9_coeff_probs_model default_coef_probs_8x8[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 9, 203, 199 },
+ { 26, 92, 128 },
+ { 28, 11, 55 }
+ }, { /* Coeff Band 1 */
+ { 99, 54, 160 },
+ { 78, 99, 155 },
+ { 80, 44, 138 },
+ { 71, 17, 115 },
+ { 51, 5, 80 },
+ { 27, 1, 40 }
+ }, { /* Coeff Band 2 */
+ { 135, 81, 190 },
+ { 113, 61, 182 },
+ { 93, 16, 153 },
+ { 70, 4, 115 },
+ { 41, 1, 68 },
+ { 16, 1, 27 }
+ }, { /* Coeff Band 3 */
+ { 155, 103, 214 },
+ { 129, 48, 199 },
+ { 95, 10, 159 },
+ { 63, 1, 110 },
+ { 32, 1, 58 },
+ { 12, 1, 21 }
+ }, { /* Coeff Band 4 */
+ { 163, 149, 231 },
+ { 137, 69, 213 },
+ { 95, 11, 164 },
+ { 62, 3, 108 },
+ { 32, 1, 57 },
+ { 13, 1, 22 }
+ }, { /* Coeff Band 5 */
+ { 136, 189, 239 },
+ { 123, 102, 223 },
+ { 97, 19, 170 },
+ { 66, 4, 111 },
+ { 38, 1, 60 },
+ { 18, 1, 26 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 24, 226, 244 },
+ { 54, 178, 211 },
+ { 80, 74, 152 }
+ }, { /* Coeff Band 1 */
+ { 145, 153, 236 },
+ { 101, 163, 223 },
+ { 108, 50, 187 },
+ { 90, 22, 145 },
+ { 66, 8, 97 },
+ { 42, 4, 50 }
+ }, { /* Coeff Band 2 */
+ { 150, 159, 238 },
+ { 128, 90, 218 },
+ { 94, 9, 163 },
+ { 64, 3, 110 },
+ { 34, 1, 61 },
+ { 13, 1, 24 }
+ }, { /* Coeff Band 3 */
+ { 151, 162, 242 },
+ { 135, 80, 222 },
+ { 93, 9, 166 },
+ { 61, 3, 111 },
+ { 31, 1, 59 },
+ { 12, 1, 22 }
+ }, { /* Coeff Band 4 */
+ { 161, 170, 245 },
+ { 140, 84, 228 },
+ { 99, 8, 174 },
+ { 64, 1, 116 },
+ { 34, 1, 63 },
+ { 14, 1, 26 }
+ }, { /* Coeff Band 5 */
+ { 138, 197, 246 },
+ { 127, 109, 233 },
+ { 100, 16, 179 },
+ { 66, 3, 119 },
+ { 37, 1, 66 },
+ { 16, 1, 30 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 6, 216, 212 },
+ { 25, 134, 171 },
+ { 43, 48, 118 }
+ }, { /* Coeff Band 1 */
+ { 93, 112, 209 },
+ { 66, 159, 206 },
+ { 82, 78, 184 },
+ { 75, 28, 148 },
+ { 46, 4, 82 },
+ { 18, 1, 28 }
+ }, { /* Coeff Band 2 */
+ { 108, 148, 220 },
+ { 90, 130, 216 },
+ { 92, 40, 186 },
+ { 73, 10, 135 },
+ { 46, 1, 79 },
+ { 20, 1, 35 }
+ }, { /* Coeff Band 3 */
+ { 125, 173, 232 },
+ { 109, 117, 223 },
+ { 97, 31, 183 },
+ { 71, 7, 127 },
+ { 44, 1, 76 },
+ { 21, 1, 36 }
+ }, { /* Coeff Band 4 */
+ { 133, 195, 236 },
+ { 112, 121, 224 },
+ { 97, 23, 178 },
+ { 69, 3, 122 },
+ { 42, 1, 72 },
+ { 19, 1, 34 }
+ }, { /* Coeff Band 5 */
+ { 132, 180, 238 },
+ { 119, 102, 225 },
+ { 101, 18, 179 },
+ { 71, 3, 124 },
+ { 42, 1, 70 },
+ { 17, 1, 28 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 5, 242, 250 },
+ { 26, 198, 226 },
+ { 58, 98, 168 }
+ }, { /* Coeff Band 1 */
+ { 82, 201, 246 },
+ { 50, 219, 237 },
+ { 94, 107, 205 },
+ { 89, 61, 167 },
+ { 77, 31, 131 },
+ { 57, 14, 91 }
+ }, { /* Coeff Band 2 */
+ { 99, 202, 247 },
+ { 96, 165, 234 },
+ { 100, 31, 190 },
+ { 72, 8, 131 },
+ { 41, 1, 72 },
+ { 14, 1, 24 }
+ }, { /* Coeff Band 3 */
+ { 108, 204, 248 },
+ { 107, 156, 235 },
+ { 103, 27, 186 },
+ { 71, 4, 124 },
+ { 39, 1, 66 },
+ { 14, 1, 19 }
+ }, { /* Coeff Band 4 */
+ { 120, 211, 248 },
+ { 118, 149, 234 },
+ { 107, 19, 182 },
+ { 72, 3, 126 },
+ { 40, 1, 69 },
+ { 16, 1, 24 }
+ }, { /* Coeff Band 5 */
+ { 127, 199, 245 },
+ { 122, 125, 232 },
+ { 112, 20, 186 },
+ { 82, 3, 136 },
+ { 55, 1, 88 },
+ { 10, 1, 38 }
+ }
+ }
+ }
+};
+static const vp9_coeff_probs_model default_coef_probs_16x16[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 25, 9, 101 },
+ { 25, 2, 67 },
+ { 15, 1, 28 }
+ }, { /* Coeff Band 1 */
+ { 67, 30, 118 },
+ { 61, 56, 116 },
+ { 60, 31, 105 },
+ { 52, 11, 85 },
+ { 34, 2, 54 },
+ { 14, 1, 22 }
+ }, { /* Coeff Band 2 */
+ { 107, 58, 149 },
+ { 92, 53, 147 },
+ { 78, 14, 123 },
+ { 56, 3, 87 },
+ { 35, 1, 56 },
+ { 17, 1, 27 }
+ }, { /* Coeff Band 3 */
+ { 142, 61, 171 },
+ { 111, 30, 162 },
+ { 80, 4, 128 },
+ { 53, 1, 87 },
+ { 31, 1, 52 },
+ { 14, 1, 24 }
+ }, { /* Coeff Band 4 */
+ { 171, 73, 200 },
+ { 129, 28, 184 },
+ { 86, 3, 140 },
+ { 54, 1, 90 },
+ { 28, 1, 49 },
+ { 12, 1, 21 }
+ }, { /* Coeff Band 5 */
+ { 193, 129, 227 },
+ { 148, 28, 200 },
+ { 90, 2, 144 },
+ { 53, 1, 90 },
+ { 28, 1, 50 },
+ { 13, 1, 22 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 60, 7, 234 },
+ { 64, 4, 184 },
+ { 56, 1, 104 }
+ }, { /* Coeff Band 1 */
+ { 150, 111, 210 },
+ { 87, 185, 202 },
+ { 101, 81, 177 },
+ { 90, 34, 142 },
+ { 67, 11, 95 },
+ { 38, 2, 51 }
+ }, { /* Coeff Band 2 */
+ { 153, 139, 218 },
+ { 120, 72, 195 },
+ { 90, 11, 147 },
+ { 63, 3, 101 },
+ { 39, 1, 61 },
+ { 20, 1, 33 }
+ }, { /* Coeff Band 3 */
+ { 171, 132, 223 },
+ { 131, 56, 200 },
+ { 92, 6, 147 },
+ { 58, 1, 95 },
+ { 32, 1, 52 },
+ { 14, 1, 23 }
+ }, { /* Coeff Band 4 */
+ { 183, 137, 227 },
+ { 139, 48, 204 },
+ { 91, 3, 148 },
+ { 55, 1, 91 },
+ { 28, 1, 47 },
+ { 13, 1, 21 }
+ }, { /* Coeff Band 5 */
+ { 198, 149, 234 },
+ { 153, 32, 208 },
+ { 95, 2, 148 },
+ { 55, 1, 90 },
+ { 30, 1, 51 },
+ { 16, 1, 25 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 7, 209, 217 },
+ { 31, 106, 151 },
+ { 40, 21, 86 }
+ }, { /* Coeff Band 1 */
+ { 101, 71, 184 },
+ { 74, 131, 177 },
+ { 88, 50, 158 },
+ { 78, 16, 129 },
+ { 51, 2, 82 },
+ { 18, 1, 29 }
+ }, { /* Coeff Band 2 */
+ { 116, 115, 199 },
+ { 102, 88, 191 },
+ { 94, 22, 160 },
+ { 74, 6, 122 },
+ { 47, 1, 77 },
+ { 18, 1, 30 }
+ }, { /* Coeff Band 3 */
+ { 157, 124, 210 },
+ { 130, 53, 201 },
+ { 102, 10, 165 },
+ { 73, 1, 120 },
+ { 42, 1, 69 },
+ { 16, 1, 27 }
+ }, { /* Coeff Band 4 */
+ { 174, 147, 225 },
+ { 134, 67, 212 },
+ { 100, 10, 168 },
+ { 66, 1, 111 },
+ { 36, 1, 60 },
+ { 16, 1, 27 }
+ }, { /* Coeff Band 5 */
+ { 185, 165, 232 },
+ { 147, 56, 214 },
+ { 105, 5, 165 },
+ { 66, 1, 108 },
+ { 35, 1, 59 },
+ { 16, 1, 27 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 3, 232, 245 },
+ { 18, 162, 210 },
+ { 38, 64, 131 }
+ }, { /* Coeff Band 1 */
+ { 84, 187, 239 },
+ { 35, 231, 231 },
+ { 82, 150, 209 },
+ { 87, 97, 181 },
+ { 81, 64, 151 },
+ { 67, 60, 119 }
+ }, { /* Coeff Band 2 */
+ { 107, 185, 239 },
+ { 100, 149, 224 },
+ { 107, 34, 185 },
+ { 83, 12, 141 },
+ { 49, 4, 92 },
+ { 21, 1, 40 }
+ }, { /* Coeff Band 3 */
+ { 125, 184, 243 },
+ { 121, 127, 228 },
+ { 113, 25, 185 },
+ { 82, 6, 134 },
+ { 48, 1, 82 },
+ { 26, 1, 38 }
+ }, { /* Coeff Band 4 */
+ { 143, 185, 245 },
+ { 133, 115, 231 },
+ { 114, 14, 184 },
+ { 77, 3, 126 },
+ { 43, 1, 68 },
+ { 34, 1, 40 }
+ }, { /* Coeff Band 5 */
+ { 170, 194, 241 },
+ { 151, 80, 226 },
+ { 118, 9, 180 },
+ { 81, 1, 130 },
+ { 51, 1, 78 },
+ { 18, 1, 49 }
+ }
+ }
+ }
+};
+static const vp9_coeff_probs_model default_coef_probs_32x32[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 29, 42, 137 },
+ { 26, 3, 60 },
+ { 13, 1, 23 }
+ }, { /* Coeff Band 1 */
+ { 69, 36, 122 },
+ { 63, 57, 123 },
+ { 60, 33, 112 },
+ { 52, 11, 90 },
+ { 32, 2, 52 },
+ { 10, 1, 15 }
+ }, { /* Coeff Band 2 */
+ { 107, 55, 143 },
+ { 86, 69, 143 },
+ { 74, 24, 116 },
+ { 52, 5, 78 },
+ { 29, 1, 44 },
+ { 12, 1, 18 }
+ }, { /* Coeff Band 3 */
+ { 137, 71, 160 },
+ { 107, 34, 152 },
+ { 73, 6, 114 },
+ { 44, 1, 69 },
+ { 25, 1, 40 },
+ { 12, 1, 18 }
+ }, { /* Coeff Band 4 */
+ { 165, 70, 174 },
+ { 118, 24, 159 },
+ { 74, 3, 117 },
+ { 45, 1, 73 },
+ { 26, 1, 43 },
+ { 12, 1, 19 }
+ }, { /* Coeff Band 5 */
+ { 220, 93, 223 },
+ { 153, 10, 187 },
+ { 86, 2, 131 },
+ { 49, 1, 79 },
+ { 26, 1, 43 },
+ { 12, 1, 20 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 30, 58, 227 },
+ { 35, 10, 172 },
+ { 24, 23, 112 }
+ }, { /* Coeff Band 1 */
+ { 117, 145, 219 },
+ { 51, 221, 216 },
+ { 75, 169, 196 },
+ { 88, 96, 165 },
+ { 77, 43, 117 },
+ { 53, 18, 60 }
+ }, { /* Coeff Band 2 */
+ { 128, 176, 225 },
+ { 108, 114, 202 },
+ { 92, 19, 152 },
+ { 65, 4, 103 },
+ { 38, 1, 61 },
+ { 19, 1, 30 }
+ }, { /* Coeff Band 3 */
+ { 146, 184, 228 },
+ { 122, 95, 205 },
+ { 92, 11, 149 },
+ { 62, 1, 98 },
+ { 35, 1, 57 },
+ { 17, 1, 26 }
+ }, { /* Coeff Band 4 */
+ { 165, 192, 230 },
+ { 132, 81, 206 },
+ { 93, 6, 147 },
+ { 58, 1, 94 },
+ { 32, 1, 52 },
+ { 15, 1, 24 }
+ }, { /* Coeff Band 5 */
+ { 204, 223, 234 },
+ { 156, 49, 204 },
+ { 97, 3, 145 },
+ { 59, 1, 92 },
+ { 33, 1, 52 },
+ { 15, 1, 24 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 7, 184, 200 },
+ { 25, 67, 113 },
+ { 30, 9, 59 }
+ }, { /* Coeff Band 1 */
+ { 92, 42, 158 },
+ { 65, 121, 159 },
+ { 77, 56, 146 },
+ { 70, 22, 120 },
+ { 47, 4, 76 },
+ { 18, 1, 26 }
+ }, { /* Coeff Band 2 */
+ { 113, 81, 177 },
+ { 96, 75, 167 },
+ { 84, 24, 136 },
+ { 63, 8, 100 },
+ { 37, 1, 58 },
+ { 13, 1, 19 }
+ }, { /* Coeff Band 3 */
+ { 147, 85, 194 },
+ { 119, 36, 178 },
+ { 88, 8, 139 },
+ { 59, 1, 93 },
+ { 31, 1, 49 },
+ { 10, 1, 18 }
+ }, { /* Coeff Band 4 */
+ { 169, 108, 210 },
+ { 131, 41, 191 },
+ { 92, 5, 144 },
+ { 56, 1, 88 },
+ { 29, 1, 47 },
+ { 14, 1, 22 }
+ }, { /* Coeff Band 5 */
+ { 210, 106, 223 },
+ { 148, 14, 192 },
+ { 89, 2, 138 },
+ { 52, 1, 84 },
+ { 29, 1, 47 },
+ { 14, 1, 23 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 3, 207, 245 },
+ { 12, 102, 213 },
+ { 18, 33, 144 }
+ }, { /* Coeff Band 1 */
+ { 85, 205, 245 },
+ { 18, 249, 242 },
+ { 59, 221, 229 },
+ { 91, 166, 213 },
+ { 88, 117, 183 },
+ { 70, 95, 149 }
+ }, { /* Coeff Band 2 */
+ { 114, 193, 241 },
+ { 104, 155, 221 },
+ { 100, 33, 181 },
+ { 78, 10, 132 },
+ { 43, 2, 75 },
+ { 15, 1, 48 }
+ }, { /* Coeff Band 3 */
+ { 118, 198, 244 },
+ { 117, 142, 224 },
+ { 111, 25, 179 },
+ { 83, 4, 134 },
+ { 57, 1, 84 },
+ { 1, 1, 1 }
+ }, { /* Coeff Band 4 */
+ { 144, 201, 248 },
+ { 136, 130, 234 },
+ { 124, 12, 188 },
+ { 83, 1, 130 },
+ { 61, 1, 66 },
+ { 64, 171, 128 }
+ }, { /* Coeff Band 5 */
+ { 174, 227, 250 },
+ { 165, 118, 242 },
+ { 132, 21, 197 },
+ { 84, 3, 134 },
+ { 70, 1, 69 },
+ { 1, 1, 1 }
+ }
+ }
+ }
+};
+#else
static const vp9_coeff_probs_model default_coef_probs_4x4[BLOCK_TYPES] = {
{ /* block Type 0 */
{ /* Intra */
@@ -695,3 +1381,4 @@ static const vp9_coeff_probs_model default_coef_probs_32x32[BLOCK_TYPES] = {
}
}
};
+#endif
diff --git a/vp9/common/vp9_entropy.c b/vp9/common/vp9_entropy.c
index 1b7da6cd5..5a1e91a9d 100644
--- a/vp9/common/vp9_entropy.c
+++ b/vp9/common/vp9_entropy.c
@@ -201,11 +201,16 @@ DECLARE_ALIGNED(16, const int, vp9_default_scan_32x32[1024]) = {
const vp9_tree_index vp9_coef_tree[ 22] = /* corresponding _CONTEXT_NODEs */
{
- -DCT_EOB_TOKEN, 2, /* 0 = EOB */
- -ZERO_TOKEN, 4, /* 1 = ZERO */
- -ONE_TOKEN, 6, /* 2 = ONE */
- 8, 12, /* 3 = LOW_VAL */
- -TWO_TOKEN, 10, /* 4 = TWO */
+#if CONFIG_BALANCED_COEFTREE
+ -ZERO_TOKEN, 2, /* 0 = ZERO */
+ -DCT_EOB_TOKEN, 4, /* 1 = EOB */
+#else
+ -DCT_EOB_TOKEN, 2, /* 0 = EOB */
+ -ZERO_TOKEN, 4, /* 1 = ZERO */
+#endif
+ -ONE_TOKEN, 6, /* 2 = ONE */
+ 8, 12, /* 3 = LOW_VAL */
+ -TWO_TOKEN, 10, /* 4 = TWO */
-THREE_TOKEN, -FOUR_TOKEN, /* 5 = THREE */
14, 16, /* 6 = HIGH_LOW */
-DCT_VAL_CATEGORY1, -DCT_VAL_CATEGORY2, /* 7 = CAT_ONE */
@@ -229,9 +234,14 @@ static const vp9_prob Pcat6[] = {
};
const vp9_tree_index vp9_coefmodel_tree[6] = {
+#if CONFIG_BALANCED_COEFTREE
+ -ZERO_TOKEN, 2,
+ -DCT_EOB_MODEL_TOKEN, 4,
+#else
-DCT_EOB_MODEL_TOKEN, 2, /* 0 = EOB */
-ZERO_TOKEN, 4, /* 1 = ZERO */
- -ONE_TOKEN, -TWO_TOKEN, /* 2 = ONE */
+#endif
+ -ONE_TOKEN, -TWO_TOKEN,
};
// Model obtained from a 2-sided zero-centerd distribuition derived
@@ -468,7 +478,7 @@ int vp9_get_coef_context(const int *scan, const int *neighbors,
} else {
ctx = token_cache[scan[neighbors[MAX_NEIGHBORS * c + 0]]];
}
- return vp9_pt_energy_class[ctx];
+ return ctx;
}
};
@@ -642,6 +652,17 @@ void vp9_coef_tree_initialize() {
#define COEF_COUNT_SAT_AFTER_KEY 24
#define COEF_MAX_UPDATE_FACTOR_AFTER_KEY 128
+void vp9_full_to_model_count(unsigned int *model_count,
+ unsigned int *full_count) {
+ int n;
+ model_count[ZERO_TOKEN] = full_count[ZERO_TOKEN];
+ model_count[ONE_TOKEN] = full_count[ONE_TOKEN];
+ model_count[TWO_TOKEN] = full_count[TWO_TOKEN];
+ for (n = THREE_TOKEN; n < DCT_EOB_TOKEN; ++n)
+ model_count[TWO_TOKEN] += full_count[n];
+ model_count[DCT_EOB_MODEL_TOKEN] = full_count[DCT_EOB_TOKEN];
+}
+
void vp9_full_to_model_counts(
vp9_coeff_count_model *model_count, vp9_coeff_count *full_count) {
int i, j, k, l;
@@ -649,19 +670,10 @@ void vp9_full_to_model_counts(
for (j = 0; j < REF_TYPES; ++j)
for (k = 0; k < COEF_BANDS; ++k)
for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
- int n;
if (l >= 3 && k == 0)
continue;
- model_count[i][j][k][l][ZERO_TOKEN] =
- full_count[i][j][k][l][ZERO_TOKEN];
- model_count[i][j][k][l][ONE_TOKEN] =
- full_count[i][j][k][l][ONE_TOKEN];
- model_count[i][j][k][l][TWO_TOKEN] =
- full_count[i][j][k][l][TWO_TOKEN];
- for (n = THREE_TOKEN; n < DCT_EOB_TOKEN; ++n)
- model_count[i][j][k][l][TWO_TOKEN] += full_count[i][j][k][l][n];
- model_count[i][j][k][l][DCT_EOB_MODEL_TOKEN] =
- full_count[i][j][k][l][DCT_EOB_TOKEN];
+ vp9_full_to_model_count(model_count[i][j][k][l],
+ full_count[i][j][k][l]);
}
}
@@ -688,8 +700,13 @@ static void adapt_coef_probs(
vp9_coefmodel_tree,
coef_probs, branch_ct,
coef_counts[i][j][k][l], 0);
+#if CONFIG_BALANCED_COEFTREE
+ branch_ct[1][1] = eob_branch_count[i][j][k][l] - branch_ct[1][0];
+ coef_probs[1] = get_binary_prob(branch_ct[1][0], branch_ct[1][1]);
+#else
branch_ct[0][1] = eob_branch_count[i][j][k][l] - branch_ct[0][0];
coef_probs[0] = get_binary_prob(branch_ct[0][0], branch_ct[0][1]);
+#endif
for (t = 0; t < entropy_nodes_adapt; ++t) {
count = branch_ct[t][0] + branch_ct[t][1];
count = count > count_sat ? count_sat : count;
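The vp9_full_to_model_count() helper added above folds a full per-context token histogram into the four symbols carried by vp9_coefmodel_tree (zero, one, two-or-more, end-of-block). A minimal standalone sketch of that collapse, not part of the patch, with the token ordering and counts assumed purely for illustration:

#include <stdio.h>

/* Token order assumed to mirror vp9_entropy.h: ZERO, ONE, TWO, THREE, FOUR,
   the six DCT value categories, then EOB. */
enum {
  ZERO_TOKEN, ONE_TOKEN, TWO_TOKEN, THREE_TOKEN, FOUR_TOKEN,
  DCT_VAL_CATEGORY1, DCT_VAL_CATEGORY2, DCT_VAL_CATEGORY3,
  DCT_VAL_CATEGORY4, DCT_VAL_CATEGORY5, DCT_VAL_CATEGORY6,
  DCT_EOB_TOKEN, MAX_ENTROPY_TOKENS
};
enum { M_ZERO, M_ONE, M_TWO_PLUS, M_EOB, MODEL_TOKENS };

static void full_to_model_count(unsigned int *model, const unsigned int *full) {
  int t;
  model[M_ZERO] = full[ZERO_TOKEN];
  model[M_ONE] = full[ONE_TOKEN];
  model[M_TWO_PLUS] = full[TWO_TOKEN];
  for (t = THREE_TOKEN; t < DCT_EOB_TOKEN; ++t)
    model[M_TWO_PLUS] += full[t];  /* all magnitudes >= 2 share one model node */
  model[M_EOB] = full[DCT_EOB_TOKEN];
}

int main(void) {
  const unsigned int full[MAX_ENTROPY_TOKENS] =
      { 50, 20, 8, 4, 2, 1, 1, 0, 0, 0, 0, 30 };  /* hypothetical counts */
  unsigned int model[MODEL_TOKENS];
  full_to_model_count(model, full);
  printf("zero=%u one=%u two_plus=%u eob=%u\n",
         model[M_ZERO], model[M_ONE], model[M_TWO_PLUS], model[M_EOB]);
  return 0;
}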
diff --git a/vp9/common/vp9_entropy.h b/vp9/common/vp9_entropy.h
index 5d57f149e..27e3bec5a 100644
--- a/vp9/common/vp9_entropy.h
+++ b/vp9/common/vp9_entropy.h
@@ -85,8 +85,7 @@ extern vp9_extra_bit vp9_extra_bits[12]; /* indexed by token value */
/*# define DC_TOKEN_CONTEXTS 3*/ /* 00, 0!0, !0!0 */
#define PREV_COEF_CONTEXTS 6
-// Disables backward coef probs adaption
-// #define DISABLE_COEF_ADAPT
+// #define ENTROPY_STATS
typedef unsigned int vp9_coeff_count[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
[MAX_ENTROPY_TOKENS];
@@ -162,6 +161,7 @@ const int *vp9_get_coef_neighbors_handle(const int *scan, int *pad);
#define UNCONSTRAINED_NODES 3
#define MODEL_NODES (ENTROPY_NODES - UNCONSTRAINED_NODES)
+
#define PIVOT_NODE 2 // which node is pivot
typedef vp9_prob vp9_coeff_probs_model[REF_TYPES][COEF_BANDS]
@@ -174,6 +174,8 @@ typedef unsigned int vp9_coeff_count_model[REF_TYPES][COEF_BANDS]
typedef unsigned int vp9_coeff_stats_model[REF_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS]
[UNCONSTRAINED_NODES][2];
+extern void vp9_full_to_model_count(unsigned int *model_count,
+ unsigned int *full_count);
extern void vp9_full_to_model_counts(
vp9_coeff_count_model *model_count, vp9_coeff_count *full_count);
diff --git a/vp9/common/vp9_entropymode.c b/vp9/common/vp9_entropymode.c
index 622f1dcf4..0274b4d05 100644
--- a/vp9/common/vp9_entropymode.c
+++ b/vp9/common/vp9_entropymode.c
@@ -15,24 +15,12 @@
#include "vp9/common/vp9_alloccommon.h"
#include "vpx_mem/vpx_mem.h"
-static const unsigned int kf_y_mode_cts[8][VP9_YMODES] = {
- /* DC V H D45 135 117 153 D27 D63 TM i4X4 */
- {12, 6, 5, 5, 5, 5, 5, 5, 5, 2, 200},
- {25, 13, 13, 7, 7, 7, 7, 7, 7, 6, 160},
- {31, 17, 18, 8, 8, 8, 8, 8, 8, 9, 139},
- {40, 22, 23, 8, 8, 8, 8, 8, 8, 12, 116},
- {53, 26, 28, 8, 8, 8, 8, 8, 8, 13, 94},
- {68, 33, 35, 8, 8, 8, 8, 8, 8, 17, 68},
- {78, 38, 38, 8, 8, 8, 8, 8, 8, 19, 52},
- {89, 42, 42, 8, 8, 8, 8, 8, 8, 21, 34},
+static const unsigned int y_mode_cts[VP9_INTRA_MODES] = {
+ /* DC V H D45 D135 D117 D153 D27 D63 TM */
+ 98, 19, 15, 14, 14, 14, 14, 12, 12, 13,
};
-static const unsigned int y_mode_cts [VP9_YMODES] = {
- /* DC V H D45 135 117 153 D27 D63 TM i4X4 */
- 98, 19, 15, 14, 14, 14, 14, 12, 12, 13, 0
-};
-
-static const unsigned int uv_mode_cts [VP9_YMODES] [VP9_UV_MODES] = {
+static const unsigned int uv_mode_cts[VP9_INTRA_MODES][VP9_INTRA_MODES] = {
/* DC V H D45 135 117 153 D27 D63 TM */
{ 200, 15, 15, 10, 10, 10, 10, 10, 10, 6}, /* DC */
{ 130, 75, 10, 10, 10, 10, 10, 10, 10, 6}, /* V */
@@ -44,10 +32,9 @@ static const unsigned int uv_mode_cts [VP9_YMODES] [VP9_UV_MODES] = {
{ 150, 15, 10, 10, 10, 10, 10, 75, 10, 6}, /* D27 */
{ 150, 15, 10, 10, 10, 10, 10, 10, 75, 6}, /* D63 */
{ 160, 30, 30, 10, 10, 10, 10, 10, 10, 16}, /* TM */
- { 150, 35, 41, 10, 10, 10, 10, 10, 10, 10}, /* i4X4 */
};
-static const unsigned int kf_uv_mode_cts [VP9_YMODES] [VP9_UV_MODES] = {
+static const unsigned int kf_uv_mode_cts[VP9_INTRA_MODES][VP9_INTRA_MODES] = {
// DC V H D45 135 117 153 D27 D63 TM
{ 160, 24, 24, 20, 20, 20, 20, 20, 20, 8}, /* DC */
{ 102, 64, 30, 20, 20, 20, 20, 20, 20, 10}, /* V */
@@ -59,12 +46,6 @@ static const unsigned int kf_uv_mode_cts [VP9_YMODES] [VP9_UV_MODES] = {
{ 102, 33, 20, 20, 20, 20, 20, 64, 20, 14}, /* D27 */
{ 102, 33, 20, 20, 20, 20, 20, 20, 64, 14}, /* D63 */
{ 132, 36, 30, 20, 20, 20, 20, 20, 20, 18}, /* TM */
- { 122, 41, 35, 20, 20, 20, 20, 20, 20, 18}, /* I4X4 */
-};
-
-static const unsigned int bmode_cts[VP9_BINTRAMODES] = {
- /* DC V H D45 D135 D117 D153 D27 D63 TM */
- 43891, 10036, 3920, 3363, 2546, 5119, 2471, 1723, 3221, 17694
};
const vp9_prob vp9_partition_probs[NUM_PARTITION_CONTEXTS]
@@ -89,7 +70,7 @@ const vp9_prob vp9_partition_probs[NUM_PARTITION_CONTEXTS]
};
/* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
-const vp9_tree_index vp9_bmode_tree[VP9_BINTRAMODES * 2 - 2] = {
+const vp9_tree_index vp9_intra_mode_tree[VP9_INTRA_MODES * 2 - 2] = {
-DC_PRED, 2, /* 0 = DC_NODE */
-TM_PRED, 4, /* 1 = TM_NODE */
-V_PRED, 6, /* 2 = V_NODE */
@@ -101,53 +82,6 @@ const vp9_tree_index vp9_bmode_tree[VP9_BINTRAMODES * 2 - 2] = {
-D153_PRED, -D27_PRED /* 8 = D153_NODE */
};
-/* Again, these trees use the same probability indices as their
- explicitly-programmed predecessors. */
-const vp9_tree_index vp9_ymode_tree[VP9_YMODES * 2 - 2] = {
- 2, 14,
- -DC_PRED, 4,
- 6, 8,
- -D45_PRED, -D135_PRED,
- 10, 12,
- -D117_PRED, -D153_PRED,
- -D27_PRED, -D63_PRED,
- 16, 18,
- -V_PRED, -H_PRED,
- -TM_PRED, -I4X4_PRED
-};
-
-const vp9_tree_index vp9_kf_ymode_tree[VP9_YMODES * 2 - 2] = {
- 2, 14,
- -DC_PRED, 4,
- 6, 8,
- -D45_PRED, -D135_PRED,
- 10, 12,
- -D117_PRED, -D153_PRED,
- -D27_PRED, -D63_PRED,
- 16, 18,
- -V_PRED, -H_PRED,
- -TM_PRED, -I4X4_PRED
-};
-
-const vp9_tree_index vp9_uv_mode_tree[VP9_UV_MODES * 2 - 2] = {
- 2, 14,
- -DC_PRED, 4,
- 6, 8,
- -D45_PRED, -D135_PRED,
- 10, 12,
- -D117_PRED, -D153_PRED,
- -D27_PRED, -D63_PRED,
- -V_PRED, 16,
- -H_PRED, -TM_PRED
-};
-
-const vp9_tree_index vp9_mv_ref_tree[8] = {
- -ZEROMV, 2,
- -NEARESTMV, 4,
- -NEARMV, 6,
- -NEWMV, -SPLITMV
-};
-
const vp9_tree_index vp9_sb_mv_ref_tree[6] = {
-ZEROMV, 2,
-NEARESTMV, 4,
@@ -160,39 +94,23 @@ const vp9_tree_index vp9_partition_tree[6] = {
-PARTITION_VERT, -PARTITION_SPLIT
};
-struct vp9_token vp9_bmode_encodings[VP9_BINTRAMODES];
-struct vp9_token vp9_kf_bmode_encodings[VP9_BINTRAMODES];
-struct vp9_token vp9_ymode_encodings[VP9_YMODES];
-struct vp9_token vp9_sb_ymode_encodings[VP9_I32X32_MODES];
-struct vp9_token vp9_sb_kf_ymode_encodings[VP9_I32X32_MODES];
-struct vp9_token vp9_kf_ymode_encodings[VP9_YMODES];
-struct vp9_token vp9_uv_mode_encodings[VP9_UV_MODES];
+struct vp9_token vp9_intra_mode_encodings[VP9_INTRA_MODES];
-struct vp9_token vp9_mv_ref_encoding_array[VP9_MVREFS];
struct vp9_token vp9_sb_mv_ref_encoding_array[VP9_MVREFS];
struct vp9_token vp9_partition_encodings[PARTITION_TYPES];
void vp9_init_mbmode_probs(VP9_COMMON *x) {
- unsigned int bct[VP9_YMODES][2]; // num Ymodes > num UV modes
+ unsigned int bct[VP9_INTRA_MODES][2]; // num Ymodes > num UV modes
int i;
- vp9_tree_probs_from_distribution(vp9_ymode_tree, x->fc.ymode_prob,
+ vp9_tree_probs_from_distribution(vp9_intra_mode_tree, x->fc.y_mode_prob,
bct, y_mode_cts, 0);
- vp9_tree_probs_from_distribution(vp9_sb_ymode_tree, x->fc.sb_ymode_prob,
- bct, y_mode_cts, 0);
- for (i = 0; i < 8; i++) {
- vp9_tree_probs_from_distribution(vp9_kf_ymode_tree, x->kf_ymode_prob[i],
- bct, kf_y_mode_cts[i], 0);
- vp9_tree_probs_from_distribution(vp9_sb_kf_ymode_tree,
- x->sb_kf_ymode_prob[i], bct,
- kf_y_mode_cts[i], 0);
- }
- for (i = 0; i < VP9_YMODES; i++) {
- vp9_tree_probs_from_distribution(vp9_uv_mode_tree, x->kf_uv_mode_prob[i],
+ for (i = 0; i < VP9_INTRA_MODES; i++) {
+ vp9_tree_probs_from_distribution(vp9_intra_mode_tree, x->kf_uv_mode_prob[i],
bct, kf_uv_mode_cts[i], 0);
- vp9_tree_probs_from_distribution(vp9_uv_mode_tree, x->fc.uv_mode_prob[i],
+ vp9_tree_probs_from_distribution(vp9_intra_mode_tree, x->fc.uv_mode_prob[i],
bct, uv_mode_cts[i], 0);
}
@@ -207,19 +125,6 @@ void vp9_init_mbmode_probs(VP9_COMMON *x) {
x->ref_pred_probs[2] = DEFAULT_PRED_PROB_2;
}
-
-static void intra_bmode_probs_from_distribution(
- vp9_prob p[VP9_BINTRAMODES - 1],
- unsigned int branch_ct[VP9_BINTRAMODES - 1][2],
- const unsigned int events[VP9_BINTRAMODES]) {
- vp9_tree_probs_from_distribution(vp9_bmode_tree, p, branch_ct, events, 0);
-}
-
-void vp9_default_bmode_probs(vp9_prob p[VP9_BINTRAMODES - 1]) {
- unsigned int branch_ct[VP9_BINTRAMODES - 1][2];
- intra_bmode_probs_from_distribution(p, branch_ct, bmode_cts);
-}
-
#if VP9_SWITCHABLE_FILTERS == 3
const vp9_tree_index vp9_switchable_interp_tree[VP9_SWITCHABLE_FILTERS*2-2] = {
-0, 2,
@@ -254,19 +159,11 @@ const int vp9_switchable_interp_map[SWITCHABLE+1] = {-1, 0, 1, -1, -1};
const int vp9_is_interpolating_filter[SWITCHABLE + 1] = {0, 1, 1, 1, -1};
void vp9_entropy_mode_init() {
- vp9_tokens_from_tree(vp9_kf_bmode_encodings, vp9_bmode_tree);
- vp9_tokens_from_tree(vp9_bmode_encodings, vp9_bmode_tree);
- vp9_tokens_from_tree(vp9_ymode_encodings, vp9_ymode_tree);
- vp9_tokens_from_tree(vp9_kf_ymode_encodings, vp9_kf_ymode_tree);
- vp9_tokens_from_tree(vp9_sb_ymode_encodings, vp9_sb_ymode_tree);
- vp9_tokens_from_tree(vp9_sb_kf_ymode_encodings, vp9_sb_kf_ymode_tree);
- vp9_tokens_from_tree(vp9_uv_mode_encodings, vp9_uv_mode_tree);
+ vp9_tokens_from_tree(vp9_intra_mode_encodings, vp9_intra_mode_tree);
vp9_tokens_from_tree(vp9_switchable_interp_encodings,
vp9_switchable_interp_tree);
vp9_tokens_from_tree(vp9_partition_encodings, vp9_partition_tree);
- vp9_tokens_from_tree_offset(vp9_mv_ref_encoding_array,
- vp9_mv_ref_tree, NEARESTMV);
vp9_tokens_from_tree_offset(vp9_sb_mv_ref_encoding_array,
vp9_sb_mv_ref_tree, NEARESTMV);
}
@@ -281,7 +178,7 @@ void vp9_init_mode_contexts(VP9_COMMON *pc) {
void vp9_accum_mv_refs(VP9_COMMON *pc,
MB_PREDICTION_MODE m,
const int context) {
- unsigned int (*mv_ref_ct)[4][2] = pc->fc.mv_ref_ct;
+ unsigned int (*mv_ref_ct)[VP9_MVREFS - 1][2] = pc->fc.mv_ref_ct;
if (m == ZEROMV) {
++mv_ref_ct[context][0][0];
@@ -295,11 +192,6 @@ void vp9_accum_mv_refs(VP9_COMMON *pc,
++mv_ref_ct[context][2][0];
} else {
++mv_ref_ct[context][2][1];
- if (m == NEWMV) {
- ++mv_ref_ct[context][3][0];
- } else {
- ++mv_ref_ct[context][3][1];
- }
}
}
}
@@ -309,11 +201,11 @@ void vp9_accum_mv_refs(VP9_COMMON *pc,
#define MVREF_MAX_UPDATE_FACTOR 128
void vp9_adapt_mode_context(VP9_COMMON *pc) {
int i, j;
- unsigned int (*mv_ref_ct)[4][2] = pc->fc.mv_ref_ct;
- int (*mode_context)[4] = pc->fc.vp9_mode_contexts;
+ unsigned int (*mv_ref_ct)[VP9_MVREFS - 1][2] = pc->fc.mv_ref_ct;
+ int (*mode_context)[VP9_MVREFS - 1] = pc->fc.vp9_mode_contexts;
for (j = 0; j < INTER_MODE_CONTEXTS; j++) {
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < VP9_MVREFS - 1; i++) {
int count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1], factor;
count = count > MVREF_COUNT_SAT ? MVREF_COUNT_SAT : count;
@@ -355,15 +247,15 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) {
int t;
printf("static const unsigned int\nymode_counts"
- "[VP9_YMODES] = {\n");
- for (t = 0; t < VP9_YMODES; ++t)
+ "[VP9_INTRA_MODES] = {\n");
+ for (t = 0; t < VP9_INTRA_MODES; ++t)
printf("%d, ", fc->ymode_counts[t]);
printf("};\n");
printf("static const unsigned int\nuv_mode_counts"
- "[VP9_YMODES] [VP9_UV_MODES] = {\n");
- for (i = 0; i < VP9_YMODES; ++i) {
+ "[VP9_INTRA_MODES] [VP9_INTRA_MODES] = {\n");
+ for (i = 0; i < VP9_INTRA_MODES; ++i) {
printf(" {");
- for (t = 0; t < VP9_UV_MODES; ++t)
+ for (t = 0; t < VP9_INTRA_MODES; ++t)
printf("%d, ", fc->uv_mode_counts[i][t]);
printf("},\n");
}
@@ -385,22 +277,15 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) {
printf("};\n");
#endif
- update_mode_probs(VP9_YMODES, vp9_ymode_tree,
- fc->ymode_counts, fc->pre_ymode_prob,
- fc->ymode_prob, 0);
- update_mode_probs(VP9_I32X32_MODES, vp9_sb_ymode_tree,
- fc->sb_ymode_counts, fc->pre_sb_ymode_prob,
- fc->sb_ymode_prob, 0);
+ update_mode_probs(VP9_INTRA_MODES, vp9_intra_mode_tree,
+ fc->y_mode_counts, fc->pre_y_mode_prob,
+ fc->y_mode_prob, 0);
- for (i = 0; i < VP9_YMODES; ++i)
- update_mode_probs(VP9_UV_MODES, vp9_uv_mode_tree,
+ for (i = 0; i < VP9_INTRA_MODES; ++i)
+ update_mode_probs(VP9_INTRA_MODES, vp9_intra_mode_tree,
fc->uv_mode_counts[i], fc->pre_uv_mode_prob[i],
fc->uv_mode_prob[i], 0);
- update_mode_probs(VP9_BINTRAMODES, vp9_bmode_tree,
- fc->bmode_counts, fc->pre_bmode_prob,
- fc->bmode_prob, 0);
-
for (i = 0; i < NUM_PARTITION_CONTEXTS; i++)
update_mode_probs(PARTITION_TYPES, vp9_partition_tree,
fc->partition_counts[i], fc->pre_partition_prob[i],
@@ -438,8 +323,7 @@ void vp9_setup_past_independence(VP9_COMMON *cm, MACROBLOCKD *xd) {
vp9_default_coef_probs(cm);
vp9_init_mbmode_probs(cm);
- vp9_default_bmode_probs(cm->fc.bmode_prob);
- vpx_memcpy(cm->kf_bmode_prob, vp9_kf_default_bmode_probs,
+ vpx_memcpy(cm->kf_y_mode_prob, vp9_kf_default_bmode_probs,
sizeof(vp9_kf_default_bmode_probs));
vp9_init_mv_probs(cm);
diff --git a/vp9/common/vp9_entropymode.h b/vp9/common/vp9_entropymode.h
index 8fbc6f20e..b7315603e 100644
--- a/vp9/common/vp9_entropymode.h
+++ b/vp9/common/vp9_entropymode.h
@@ -19,31 +19,17 @@
extern int vp9_mv_cont(const int_mv *l, const int_mv *a);
-extern const vp9_prob vp9_kf_default_bmode_probs[VP9_BINTRAMODES]
- [VP9_BINTRAMODES]
- [VP9_BINTRAMODES -1 ];
-
-extern const vp9_tree_index vp9_bmode_tree[];
-extern const vp9_tree_index vp9_ymode_tree[];
-extern const vp9_tree_index vp9_kf_ymode_tree[];
-extern const vp9_tree_index vp9_uv_mode_tree[];
-#define vp9_sb_ymode_tree vp9_uv_mode_tree
-#define vp9_sb_kf_ymode_tree vp9_uv_mode_tree
-extern const vp9_tree_index vp9_mv_ref_tree[];
+extern const vp9_prob vp9_kf_default_bmode_probs[VP9_INTRA_MODES]
+ [VP9_INTRA_MODES]
+ [VP9_INTRA_MODES -1 ];
+
+extern const vp9_tree_index vp9_intra_mode_tree[];
extern const vp9_tree_index vp9_sb_mv_ref_tree[];
-extern const vp9_tree_index vp9_sub_mv_ref_tree[];
-extern struct vp9_token vp9_bmode_encodings[VP9_BINTRAMODES];
-extern struct vp9_token vp9_kf_bmode_encodings[VP9_BINTRAMODES];
-extern struct vp9_token vp9_ymode_encodings[VP9_YMODES];
-extern struct vp9_token vp9_sb_ymode_encodings[VP9_I32X32_MODES];
-extern struct vp9_token vp9_sb_kf_ymode_encodings[VP9_I32X32_MODES];
-extern struct vp9_token vp9_kf_ymode_encodings[VP9_YMODES];
-extern struct vp9_token vp9_uv_mode_encodings[VP9_UV_MODES];
+extern struct vp9_token vp9_intra_mode_encodings[VP9_INTRA_MODES];
/* Inter mode values do not start at zero */
-extern struct vp9_token vp9_mv_ref_encoding_array[VP9_MVREFS];
extern struct vp9_token vp9_sb_mv_ref_encoding_array[VP9_MVREFS];
// probability models for partition information
@@ -67,8 +53,6 @@ extern void vp9_accum_mv_refs(struct VP9Common *pc,
MB_PREDICTION_MODE m,
const int context);
-void vp9_default_bmode_probs(vp9_prob dest[VP9_BINTRAMODES - 1]);
-
void vp9_adapt_mode_probs(struct VP9Common *);
#define VP9_SWITCHABLE_FILTERS 3 /* number of switchable filters */
diff --git a/vp9/common/vp9_findnearmv.c b/vp9/common/vp9_findnearmv.c
index 435dfdca6..9df6ce141 100644
--- a/vp9/common/vp9_findnearmv.c
+++ b/vp9/common/vp9_findnearmv.c
@@ -15,13 +15,6 @@
#include "vp9/common/vp9_sadmxn.h"
#include "vp9/common/vp9_subpelvar.h"
-const uint8_t vp9_mbsplit_offset[4][16] = {
- { 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- { 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- { 0, 2, 8, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
-};
-
static void lower_mv_precision(int_mv *mv, int usehp) {
if (!usehp || !vp9_use_nmv_hp(&mv->as_mv)) {
if (mv->as_mv.row & 1)
@@ -31,11 +24,10 @@ static void lower_mv_precision(int_mv *mv, int usehp) {
}
}
-vp9_prob *vp9_mv_ref_probs(VP9_COMMON *pc, vp9_prob p[4], int context) {
+vp9_prob *vp9_mv_ref_probs(VP9_COMMON *pc, vp9_prob *p, int context) {
p[0] = pc->fc.vp9_mode_contexts[context][0];
p[1] = pc->fc.vp9_mode_contexts[context][1];
p[2] = pc->fc.vp9_mode_contexts[context][2];
- p[3] = pc->fc.vp9_mode_contexts[context][3];
return p;
}
diff --git a/vp9/common/vp9_findnearmv.h b/vp9/common/vp9_findnearmv.h
index bc5afb12d..456d6c852 100644
--- a/vp9/common/vp9_findnearmv.h
+++ b/vp9/common/vp9_findnearmv.h
@@ -87,12 +87,12 @@ static MB_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, int b) {
/* On L edge, get from MB to left of us */
--cur_mb;
- if (cur_mb->mbmi.mode <= TM_PRED) {
- return cur_mb->mbmi.mode;
- } else if (cur_mb->mbmi.mode == I4X4_PRED) {
+ if (cur_mb->mbmi.ref_frame != INTRA_FRAME) {
+ return DC_PRED;
+ } else if (cur_mb->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
return ((cur_mb->bmi + 1 + b)->as_mode.first);
} else {
- return DC_PRED;
+ return cur_mb->mbmi.mode;
}
}
assert(b == 1 || b == 3);
@@ -105,12 +105,12 @@ static MB_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mb,
/* On top edge, get from MB above us */
cur_mb -= mi_stride;
- if (cur_mb->mbmi.mode <= TM_PRED) {
- return cur_mb->mbmi.mode;
- } else if (cur_mb->mbmi.mode == I4X4_PRED) {
+ if (cur_mb->mbmi.ref_frame != INTRA_FRAME) {
+ return DC_PRED;
+ } else if (cur_mb->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
return ((cur_mb->bmi + 2 + b)->as_mode.first);
} else {
- return DC_PRED;
+ return cur_mb->mbmi.mode;
}
}
diff --git a/vp9/common/vp9_header.h b/vp9/common/vp9_header.h
deleted file mode 100644
index 96b04e7d7..000000000
--- a/vp9/common/vp9_header.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VP9_COMMON_VP9_HEADER_H_
-#define VP9_COMMON_VP9_HEADER_H_
-
-/* 24 bits total */
-typedef struct {
- unsigned int type: 1;
- unsigned int version: 3;
- unsigned int show_frame: 1;
-
- /* Allow 2^20 bytes = 8 megabits for first partition */
-
- unsigned int first_partition_length_in_bytes: 19;
-
-#ifdef PACKET_TESTING
- unsigned int frame_number;
- unsigned int update_gold: 1;
- unsigned int uses_gold: 1;
- unsigned int update_last: 1;
- unsigned int uses_last: 1;
-#endif
-} VP9_HEADER;
-
-#ifdef PACKET_TESTING
-#define VP9_HEADER_SIZE 8
-#else
-#define VP9_HEADER_SIZE 3
-#endif
-
-#endif // VP9_COMMON_VP9_HEADER_H_
diff --git a/vp9/common/vp9_idct.c b/vp9/common/vp9_idct.c
index 026ba913d..dcc7f0330 100644
--- a/vp9/common/vp9_idct.c
+++ b/vp9/common/vp9_idct.c
@@ -32,20 +32,17 @@ void vp9_short_iwalsh4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
c1 = ip[1] >> WHT_UPSCALE_FACTOR;
d1 = ip[2] >> WHT_UPSCALE_FACTOR;
b1 = ip[3] >> WHT_UPSCALE_FACTOR;
-
- c1 = a1 - c1;
- b1 += d1;
- e1 = (c1 - b1) >> 1;
- a1 -= e1;
- d1 += e1;
- b1 = a1 - b1;
- c1 -= d1;
-
+ a1 += c1;
+ d1 -= b1;
+ e1 = (a1 - d1) >> 1;
+ b1 = e1 - b1;
+ c1 = e1 - c1;
+ a1 -= b1;
+ d1 += c1;
op[0] = a1;
op[1] = b1;
op[2] = c1;
op[3] = d1;
-
ip += 4;
op += 4;
}
@@ -56,15 +53,13 @@ void vp9_short_iwalsh4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
c1 = ip[4 * 1];
d1 = ip[4 * 2];
b1 = ip[4 * 3];
-
- c1 = a1 - c1;
- b1 += d1;
- e1 = (c1 - b1) >> 1;
- a1 -= e1;
- d1 += e1;
- b1 = a1 - b1;
- c1 -= d1;
-
+ a1 += c1;
+ d1 -= b1;
+ e1 = (a1 - d1) >> 1;
+ b1 = e1 - b1;
+ c1 = e1 - c1;
+ a1 -= b1;
+ d1 += c1;
dest[dest_stride * 0] = clip_pixel(dest[dest_stride * 0] + a1);
dest[dest_stride * 1] = clip_pixel(dest[dest_stride * 1] + b1);
dest[dest_stride * 2] = clip_pixel(dest[dest_stride * 2] + c1);
@@ -84,16 +79,17 @@ void vp9_short_iwalsh4x4_1_add_c(int16_t *in, uint8_t *dest, int dest_stride) {
a1 = ip[0] >> WHT_UPSCALE_FACTOR;
e1 = a1 >> 1;
- op[0] = op[1] = op[2] = a1 - e1;
- op[3] = e1;
+ a1 -= e1;
+ op[0] = a1;
+ op[1] = op[2] = op[3] = e1;
ip = tmp;
for (i = 0; i < 4; i++) {
e1 = ip[0] >> 1;
a1 = ip[0] - e1;
dest[dest_stride * 0] = clip_pixel(dest[dest_stride * 0] + a1);
- dest[dest_stride * 1] = clip_pixel(dest[dest_stride * 1] + a1);
- dest[dest_stride * 2] = clip_pixel(dest[dest_stride * 2] + a1);
+ dest[dest_stride * 1] = clip_pixel(dest[dest_stride * 1] + e1);
+ dest[dest_stride * 2] = clip_pixel(dest[dest_stride * 2] + e1);
dest[dest_stride * 3] = clip_pixel(dest[dest_stride * 3] + e1);
ip++;
dest++;
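In the rewritten DC-only path above, each 4-point pass now places a1 (the value minus its half) in the first output and e1 (the half) in the remaining three. A small standalone sketch, not part of the patch, reproducing the residual vp9_short_iwalsh4x4_1_add_c would add for a hypothetical lone DC coefficient (assuming WHT_UPSCALE_FACTOR is 2, as defined in vp9_blockd.h):

#include <stdio.h>

#define WHT_UPSCALE_FACTOR 2  /* matches the definition in vp9_blockd.h */

int main(void) {
  const int input_dc = 44;  /* hypothetical lone DC coefficient */
  int tmp[4], add[4][4], a1, e1, r, c;

  /* Row pass on the DC row, as in the rewritten function. */
  a1 = input_dc >> WHT_UPSCALE_FACTOR;
  e1 = a1 >> 1;
  a1 -= e1;
  tmp[0] = a1;
  tmp[1] = tmp[2] = tmp[3] = e1;

  /* Column pass: row 0 receives a1 of each column, rows 1..3 receive e1. */
  for (c = 0; c < 4; c++) {
    e1 = tmp[c] >> 1;
    a1 = tmp[c] - e1;
    add[0][c] = a1;
    for (r = 1; r < 4; r++)
      add[r][c] = e1;
  }

  for (r = 0; r < 4; r++)
    printf("%d %d %d %d\n", add[r][0], add[r][1], add[r][2], add[r][3]);
  return 0;
}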
diff --git a/vp9/common/vp9_idct.h b/vp9/common/vp9_idct.h
index 176bf5da4..af35432c4 100644
--- a/vp9/common/vp9_idct.h
+++ b/vp9/common/vp9_idct.h
@@ -17,6 +17,7 @@
#include "vpx/vpx_integer.h"
#include "vp9/common/vp9_common.h"
+
// Constants and Macros used by all idct/dct functions
#define DCT_CONST_BITS 14
#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))
diff --git a/vp9/common/vp9_loopfilter.c b/vp9/common/vp9_loopfilter.c
index ab1499485..eb7d01c89 100644
--- a/vp9/common/vp9_loopfilter.c
+++ b/vp9/common/vp9_loopfilter.c
@@ -26,12 +26,10 @@ static void lf_init_lut(loop_filter_info_n *lfi) {
lfi->mode_lf_lut[V_PRED] = 1;
lfi->mode_lf_lut[H_PRED] = 1;
lfi->mode_lf_lut[TM_PRED] = 1;
- lfi->mode_lf_lut[I4X4_PRED] = 0;
lfi->mode_lf_lut[ZEROMV] = 1;
lfi->mode_lf_lut[NEARESTMV] = 2;
lfi->mode_lf_lut[NEARMV] = 2;
lfi->mode_lf_lut[NEWMV] = 2;
- lfi->mode_lf_lut[SPLITMV] = 3;
}
void vp9_loop_filter_update_sharpness(loop_filter_info_n *lfi,
diff --git a/vp9/common/vp9_modecont.c b/vp9/common/vp9_modecont.c
index 86a8fb850..973bb068c 100644
--- a/vp9/common/vp9_modecont.c
+++ b/vp9/common/vp9_modecont.c
@@ -11,12 +11,12 @@
#include "vp9/common/vp9_entropy.h"
-const int vp9_default_mode_contexts[INTER_MODE_CONTEXTS][4] = {
- {2, 173, 34, 229}, // 0 = both zero mv
- {7, 145, 85, 225}, // 1 = one zero mv + one a predicted mv
- {7, 166, 63, 231}, // 2 = two predicted mvs
- {7, 94, 66, 219}, // 3 = one predicted/zero and one new mv
- {8, 64, 46, 213}, // 4 = two new mvs
- {17, 81, 31, 231}, // 5 = one intra neighbour + x
- {25, 29, 30, 246}, // 6 = two intra neighbours
+const int vp9_default_mode_contexts[INTER_MODE_CONTEXTS][VP9_MVREFS - 1] = {
+ {2, 173, 34}, // 0 = both zero mv
+ {7, 145, 85}, // 1 = one zero mv + one a predicted mv
+ {7, 166, 63}, // 2 = two predicted mvs
+ {7, 94, 66}, // 3 = one predicted/zero and one new mv
+ {8, 64, 46}, // 4 = two new mvs
+ {17, 81, 31}, // 5 = one intra neighbour + x
+ {25, 29, 30}, // 6 = two intra neighbours
};
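With SPLITMV removed, each inter mode context above carries only three probabilities, one per internal node of vp9_sb_mv_ref_tree. A standalone sketch, not part of the patch, of the prior a context row implies for the four remaining inter modes (assumptions: a vp9_prob is the probability, out of 256, of taking the left branch, and the tree's final node splits NEARMV against NEWMV):

#include <stdio.h>

int main(void) {
  const int ctx[3] = { 2, 173, 34 };  /* first context row from the table above */
  const double p0 = ctx[0] / 256.0;   /* node 0: ZEROMV vs. the rest     */
  const double p1 = ctx[1] / 256.0;   /* node 1: NEARESTMV vs. the rest  */
  const double p2 = ctx[2] / 256.0;   /* node 2: NEARMV vs. NEWMV        */
  printf("ZEROMV    %.3f\n", p0);
  printf("NEARESTMV %.3f\n", (1 - p0) * p1);
  printf("NEARMV    %.3f\n", (1 - p0) * (1 - p1) * p2);
  printf("NEWMV     %.3f\n", (1 - p0) * (1 - p1) * (1 - p2));
  return 0;
}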
diff --git a/vp9/common/vp9_modecont.h b/vp9/common/vp9_modecont.h
index 24f1a6cb3..a6c489325 100644
--- a/vp9/common/vp9_modecont.h
+++ b/vp9/common/vp9_modecont.h
@@ -11,6 +11,8 @@
#ifndef VP9_COMMON_VP9_MODECONT_H_
#define VP9_COMMON_VP9_MODECONT_H_
-extern const int vp9_default_mode_contexts[INTER_MODE_CONTEXTS][4];
+#include "vp9/common/vp9_entropy.h"
+
+extern const int vp9_default_mode_contexts[INTER_MODE_CONTEXTS][VP9_MVREFS - 1];
#endif // VP9_COMMON_VP9_MODECONT_H_
diff --git a/vp9/common/vp9_modecontext.c b/vp9/common/vp9_modecontext.c
index 5f084eadc..39d53f57b 100644
--- a/vp9/common/vp9_modecontext.c
+++ b/vp9/common/vp9_modecontext.c
@@ -11,9 +11,9 @@
#include "vp9/common/vp9_entropymode.h"
-const vp9_prob vp9_kf_default_bmode_probs[VP9_BINTRAMODES]
- [VP9_BINTRAMODES]
- [VP9_BINTRAMODES-1] = {
+const vp9_prob vp9_kf_default_bmode_probs[VP9_INTRA_MODES]
+ [VP9_INTRA_MODES]
+ [VP9_INTRA_MODES-1] = {
{ // Above 0
{ 231, 9, 124, 138, 96, 200, 76, 42, 88, }, // left 0
{ 152, 11, 187, 112, 170, 139, 130, 91, 113, }, // left 1
diff --git a/vp9/common/vp9_mvref_common.c b/vp9/common/vp9_mvref_common.c
index 68937807a..2a0e0e134 100644
--- a/vp9/common/vp9_mvref_common.c
+++ b/vp9/common/vp9_mvref_common.c
@@ -220,7 +220,8 @@ void vp9_find_mv_refs_idx(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
add_candidate_mv(mv_ref_list, candidate_scores,
&refmv_count, c_refmv, 16);
}
- split_count += (candidate_mi->mbmi.mode == SPLITMV);
+ split_count += (candidate_mi->mbmi.sb_type < BLOCK_SIZE_SB8X8 &&
+ candidate_mi->mbmi.ref_frame != INTRA_FRAME);
// Count number of neihgbours coded intra and zeromv
intra_count += (candidate_mi->mbmi.mode < NEARESTMV);
diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h
index 07d82b37f..db7e1e871 100644
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -24,10 +24,6 @@
#include "vp9/common/vp9_postproc.h"
#endif
-/*#ifdef PACKET_TESTING*/
-#include "vp9/common/vp9_header.h"
-/*#endif*/
-
/* Create/destroy static data structures. */
void vp9_initialize_common(void);
@@ -51,23 +47,19 @@ void vp9_initialize_common(void);
#define MAX_LAG_BUFFERS 25
typedef struct frame_contexts {
- vp9_prob bmode_prob[VP9_BINTRAMODES - 1];
- vp9_prob ymode_prob[VP9_YMODES - 1]; /* interframe intra mode probs */
- vp9_prob sb_ymode_prob[VP9_I32X32_MODES - 1];
- vp9_prob uv_mode_prob[VP9_YMODES][VP9_UV_MODES - 1];
+ vp9_prob y_mode_prob[VP9_INTRA_MODES - 1]; /* interframe intra mode probs */
+ vp9_prob uv_mode_prob[VP9_INTRA_MODES][VP9_INTRA_MODES - 1];
vp9_prob partition_prob[NUM_PARTITION_CONTEXTS][PARTITION_TYPES - 1];
nmv_context nmvc;
nmv_context pre_nmvc;
- vp9_prob pre_bmode_prob[VP9_BINTRAMODES - 1];
- vp9_prob pre_ymode_prob[VP9_YMODES - 1]; /* interframe intra mode probs */
- vp9_prob pre_sb_ymode_prob[VP9_I32X32_MODES - 1];
- vp9_prob pre_uv_mode_prob[VP9_YMODES][VP9_UV_MODES - 1];
+ /* interframe intra mode probs */
+ vp9_prob pre_y_mode_prob[VP9_INTRA_MODES - 1];
+ vp9_prob pre_uv_mode_prob[VP9_INTRA_MODES][VP9_INTRA_MODES - 1];
vp9_prob pre_partition_prob[NUM_PARTITION_CONTEXTS][PARTITION_TYPES - 1];
- unsigned int bmode_counts[VP9_BINTRAMODES];
- unsigned int ymode_counts[VP9_YMODES]; /* interframe intra mode probs */
- unsigned int sb_ymode_counts[VP9_I32X32_MODES];
- unsigned int uv_mode_counts[VP9_YMODES][VP9_UV_MODES];
+ /* interframe intra mode probs */
+ unsigned int y_mode_counts[VP9_INTRA_MODES];
+ unsigned int uv_mode_counts[VP9_INTRA_MODES][VP9_INTRA_MODES];
unsigned int partition_counts[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
vp9_coeff_probs_model coef_probs_4x4[BLOCK_TYPES];
@@ -89,8 +81,8 @@ typedef struct frame_contexts {
vp9_prob switchable_interp_prob[VP9_SWITCHABLE_FILTERS + 1]
[VP9_SWITCHABLE_FILTERS - 1];
- int vp9_mode_contexts[INTER_MODE_CONTEXTS][4];
- unsigned int mv_ref_ct[INTER_MODE_CONTEXTS][4][2];
+ int vp9_mode_contexts[INTER_MODE_CONTEXTS][VP9_MVREFS - 1];
+ unsigned int mv_ref_ct[INTER_MODE_CONTEXTS][VP9_MVREFS - 1][2];
} FRAME_CONTEXT;
typedef enum {
@@ -158,6 +150,12 @@ typedef struct VP9Common {
int show_frame;
int last_show_frame;
+ // Flag signaling that the frame is encoded using only INTRA modes.
+ int intra_only;
+
+ // Flag signaling that the frame context should be reset to default values.
+ int reset_frame_context;
+
int frame_flags;
// MBs, mb_rows/cols is in 16-pixel units; mi_rows/cols is in
// MODE_INFO (8-pixel) units.
@@ -221,14 +219,10 @@ typedef struct VP9Common {
/* keyframe block modes are predicted by their above, left neighbors */
- vp9_prob kf_bmode_prob[VP9_BINTRAMODES]
- [VP9_BINTRAMODES]
- [VP9_BINTRAMODES - 1];
- vp9_prob kf_ymode_prob[8][VP9_YMODES - 1]; /* keyframe "" */
- vp9_prob sb_kf_ymode_prob[8][VP9_I32X32_MODES - 1];
- int kf_ymode_probs_index;
- int kf_ymode_probs_update;
- vp9_prob kf_uv_mode_prob[VP9_YMODES] [VP9_UV_MODES - 1];
+ vp9_prob kf_y_mode_prob[VP9_INTRA_MODES]
+ [VP9_INTRA_MODES]
+ [VP9_INTRA_MODES - 1];
+ vp9_prob kf_uv_mode_prob[VP9_INTRA_MODES] [VP9_INTRA_MODES - 1];
vp9_prob prob_intra_coded;
vp9_prob prob_last_coded;
diff --git a/vp9/common/vp9_postproc.c b/vp9/common/vp9_postproc.c
index 3aae669bf..4282ddd1c 100644
--- a/vp9/common/vp9_postproc.c
+++ b/vp9/common/vp9_postproc.c
@@ -53,7 +53,7 @@ static const unsigned char MB_PREDICTION_MODE_colors[MB_MODE_COUNT][3] = {
{ RGB_TO_YUV(0xCC33FF) }, /* Magenta */
};
-static const unsigned char B_PREDICTION_MODE_colors[INTRA_MODE_COUNT][3] = {
+static const unsigned char B_PREDICTION_MODE_colors[VP9_INTRA_MODES][3] = {
{ RGB_TO_YUV(0x6633ff) }, /* Purple */
{ RGB_TO_YUV(0xcc33ff) }, /* Magenta */
{ RGB_TO_YUV(0xff33cc) }, /* Pink */
diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c
index cd0ba8445..2bc9c10a2 100644
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -392,8 +392,10 @@ static void build_inter_predictors(int plane, int block,
assert(x < bw);
assert(y < bh);
- assert(xd->mode_info_context->mbmi.mode == SPLITMV || 4 << pred_w == bw);
- assert(xd->mode_info_context->mbmi.mode == SPLITMV || 4 << pred_h == bh);
+ assert(xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 ||
+ 4 << pred_w == bw);
+ assert(xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 ||
+ 4 << pred_h == bh);
for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
// source
@@ -412,7 +414,7 @@ static void build_inter_predictors(int plane, int block,
MV split_chroma_mv;
int_mv clamped_mv;
- if (xd->mode_info_context->mbmi.mode == SPLITMV) {
+ if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
if (plane == 0) {
mv = &xd->mode_info_context->bmi[block].as_mv[which_mv].as_mv;
} else {
diff --git a/vp9/common/vp9_reconintra.c b/vp9/common/vp9_reconintra.c
index dd60a76c7..b1f327b43 100644
--- a/vp9/common/vp9_reconintra.c
+++ b/vp9/common/vp9_reconintra.c
@@ -402,7 +402,7 @@ void vp9_predict_intra_block(MACROBLOCKD *xd,
(block_idx >> bwl) || xd->up_available;
const int have_left =
(block_idx & wmask) || xd->left_available;
- const int have_right = ((block_idx & wmask) != wmask);
+ int have_right = ((block_idx & wmask) != wmask);
const int txfm_block_size = 4 << tx_size;
assert(bwl >= 0);
diff --git a/vp9/common/vp9_rtcd_defs.sh b/vp9/common/vp9_rtcd_defs.sh
index bb491695d..7b834d7a0 100644
--- a/vp9/common/vp9_rtcd_defs.sh
+++ b/vp9/common/vp9_rtcd_defs.sh
@@ -208,7 +208,6 @@ specialize vp9_short_iht16x16_add
prototype void vp9_idct4_1d "int16_t *input, int16_t *output"
specialize vp9_idct4_1d sse2
-
# dct and add
prototype void vp9_dc_only_idct_add "int input_dc, uint8_t *pred_ptr, uint8_t *dst_ptr, int pitch, int stride"
@@ -265,6 +264,10 @@ specialize vp9_variance8x16 mmx sse2
prototype unsigned int vp9_variance8x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance8x8 mmx sse2
+prototype void vp9_get_sse_sum_8x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum"
+specialize vp9_get_sse_sum_8x8 sse2
+vp9_get_sse_sum_8x8_sse2=vp9_get8x8var_sse2
+
prototype unsigned int vp9_variance8x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance8x4 sse2
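
Note: the new vp9_get_sse_sum_8x8 hook above returns both the sum of differences and the sum of squared differences for an 8x8 block; the SSE2 build simply maps it onto the existing vp9_get8x8var_sse2 kernel. A plain-C sketch of the intended semantics (the function name here is illustrative, not part of the change):

#include <stdint.h>

static void get_sse_sum_8x8_sketch(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   unsigned int *sse, int *sum) {
  int r, c;
  *sse = 0;
  *sum = 0;
  for (r = 0; r < 8; ++r) {
    for (c = 0; c < 8; ++c) {
      const int diff = src[c] - ref[c];  /* signed pixel difference */
      *sum += diff;                      /* running sum (for variance) */
      *sse += diff * diff;               /* running sum of squares */
    }
    src += src_stride;
    ref += ref_stride;
  }
}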
diff --git a/vp9/common/vp9_seg_common.c b/vp9/common/vp9_seg_common.c
index e5511a123..67dfeaed6 100644
--- a/vp9/common/vp9_seg_common.c
+++ b/vp9/common/vp9_seg_common.c
@@ -86,85 +86,6 @@ int vp9_check_segref(const MACROBLOCKD *xd, int segment_id,
}
-#if CONFIG_IMPLICIT_SEGMENTATION
-// This function defines an implicit segmentation for the next frame based
-// on predcition and transform decisions in the current frame.
-// For test purposes at the moment it uses ref frame and prediction size
-void vp9_implicit_segment_map_update(VP9_COMMON * cm) {
- int row, col;
- MODE_INFO *mi, *mi_ptr = cm->mi;
- unsigned char * map_ptr = cm->last_frame_seg_map;
-
- for (row = 0; row < cm->mb_rows; row++) {
- mi = mi_ptr;
-
- for (col = 0; col < cm->mb_cols; ++col, ++mi) {
- // Inter prediction
- if (mi->mbmi.ref_frame != INTRA_FRAME) {
- // Zero motion and prediction block size >= 16
- if ((mi->mbmi.sb_type >= BLOCK_SIZE_MB16X16) &&
- (mi->mbmi.mv[0].as_int == 0))
- map_ptr[col] = 1;
- else if (mi->mbmi.sb_type >= BLOCK_SIZE_SB32X32)
- map_ptr[col] = 2;
- else if (mi->mbmi.sb_type >= BLOCK_SIZE_MB16X16)
- map_ptr[col] = 3;
- else
- map_ptr[col] = 6;
-
- // Intra prediction
- } else {
- if (mi->mbmi.sb_type >= BLOCK_SIZE_SB32X32)
- map_ptr[col] = 4;
- else if (mi->mbmi.sb_type >= BLOCK_SIZE_MB16X16)
- map_ptr[col] = 5;
- else
- map_ptr[col] = 7;
- }
- }
- mi_ptr += cm->mode_info_stride;
- map_ptr += cm->mb_cols;
- }
-}
-
-// This function defines an implicit segmentation for the next frame based
-// on predcition and transform decisions in the current frame.
-// For test purposes at the moment only TX size is used.
-void vp9_implicit_segment_map_update_tx(VP9_COMMON * cm) {
- int row, col;
- MODE_INFO *mi, *mi_ptr = cm->mi;
- unsigned char * map_ptr = cm->last_frame_seg_map;
-
- for (row = 0; row < cm->mb_rows; row++) {
- mi = mi_ptr;
- for (col = 0; col < cm->mb_cols; ++col, ++mi) {
- // Intra modes
- if (mi->mbmi.ref_frame == INTRA_FRAME) {
- if (mi->mbmi.txfm_size == TX_4X4)
- map_ptr[col] = 7;
- else if (mi->mbmi.txfm_size <= TX_16X16)
- map_ptr[col] = 5;
- else
- map_ptr[col] = 4;
- } else {
- // Inter Modes
- if (mi->mbmi.txfm_size == TX_4X4)
- map_ptr[col] = 6;
- else if (mi->mbmi.txfm_size == TX_8X8)
- map_ptr[col] = 3;
- else if (mi->mbmi.txfm_size == TX_16X16)
- map_ptr[col] = 2;
- else
- map_ptr[col] = 1;
- }
- }
- mi_ptr += cm->mode_info_stride;
- map_ptr += cm->mb_cols;
- }
-}
-#endif
-
-
const vp9_tree_index vp9_segment_tree[14] = {
2, 4, 6, 8, 10, 12,
0, -1, -2, -3, -4, -5, -6, -7
diff --git a/vp9/common/vp9_seg_common.h b/vp9/common/vp9_seg_common.h
index 53d22a385..c424a57f4 100644
--- a/vp9/common/vp9_seg_common.h
+++ b/vp9/common/vp9_seg_common.h
@@ -55,11 +55,6 @@ int vp9_check_segref(const MACROBLOCKD *xd,
int segment_id,
MV_REFERENCE_FRAME ref_frame);
-#if CONFIG_IMPLICIT_SEGMENTATION
-void vp9_implicit_segment_map_update(VP9_COMMON * cm);
-#endif
-
-
extern const vp9_tree_index vp9_segment_tree[14];
#endif // VP9_COMMON_VP9_SEG_COMMON_H_
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index 2ae807f04..efd852b7a 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -35,35 +35,11 @@ int dec_mvcount = 0;
extern int dec_debug;
#endif
-static MB_PREDICTION_MODE read_bmode(vp9_reader *r, const vp9_prob *p) {
- MB_PREDICTION_MODE m = treed_read(r, vp9_bmode_tree, p);
+static MB_PREDICTION_MODE read_intra_mode(vp9_reader *r, const vp9_prob *p) {
+ MB_PREDICTION_MODE m = treed_read(r, vp9_intra_mode_tree, p);
return m;
}
-static MB_PREDICTION_MODE read_kf_bmode(vp9_reader *r, const vp9_prob *p) {
- return (MB_PREDICTION_MODE)treed_read(r, vp9_bmode_tree, p);
-}
-
-static MB_PREDICTION_MODE read_ymode(vp9_reader *r, const vp9_prob *p) {
- return (MB_PREDICTION_MODE)treed_read(r, vp9_ymode_tree, p);
-}
-
-static MB_PREDICTION_MODE read_sb_ymode(vp9_reader *r, const vp9_prob *p) {
- return (MB_PREDICTION_MODE)treed_read(r, vp9_sb_ymode_tree, p);
-}
-
-static MB_PREDICTION_MODE read_kf_sb_ymode(vp9_reader *r, const vp9_prob *p) {
- return (MB_PREDICTION_MODE)treed_read(r, vp9_uv_mode_tree, p);
-}
-
-static MB_PREDICTION_MODE read_kf_mb_ymode(vp9_reader *r, const vp9_prob *p) {
- return (MB_PREDICTION_MODE)treed_read(r, vp9_kf_ymode_tree, p);
-}
-
-static MB_PREDICTION_MODE read_uv_mode(vp9_reader *r, const vp9_prob *p) {
- return (MB_PREDICTION_MODE)treed_read(r, vp9_uv_mode_tree, p);
-}
-
static int read_mb_segid(vp9_reader *r, MACROBLOCKD *xd) {
return treed_read(r, vp9_segment_tree, xd->mb_segment_tree_probs);
}
@@ -118,19 +94,32 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
if (!m->mbmi.mb_skip_coeff)
m->mbmi.mb_skip_coeff = vp9_read(r, vp9_get_pred_prob(cm, xd, PRED_MBSKIP));
+ if (cm->txfm_mode == TX_MODE_SELECT &&
+ m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
+ const int allow_16x16 = m->mbmi.sb_type >= BLOCK_SIZE_MB16X16;
+ const int allow_32x32 = m->mbmi.sb_type >= BLOCK_SIZE_SB32X32;
+ m->mbmi.txfm_size = select_txfm_size(cm, r, allow_16x16, allow_32x32);
+ } else if (cm->txfm_mode >= ALLOW_32X32 &&
+ m->mbmi.sb_type >= BLOCK_SIZE_SB32X32) {
+ m->mbmi.txfm_size = TX_32X32;
+ } else if (cm->txfm_mode >= ALLOW_16X16 &&
+ m->mbmi.sb_type >= BLOCK_SIZE_MB16X16) {
+ m->mbmi.txfm_size = TX_16X16;
+ } else if (cm->txfm_mode >= ALLOW_8X8 &&
+ m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
+ m->mbmi.txfm_size = TX_8X8;
+ } else {
+ m->mbmi.txfm_size = TX_4X4;
+ }
+
// luma mode
+ m->mbmi.ref_frame = INTRA_FRAME;
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis);
const MB_PREDICTION_MODE L = xd->left_available ?
left_block_mode(m, 0) : DC_PRED;
- m->mbmi.mode = read_kf_bmode(r, cm->kf_bmode_prob[A][L]);
+ m->mbmi.mode = read_intra_mode(r, cm->kf_y_mode_prob[A][L]);
} else {
- m->mbmi.mode = I4X4_PRED;
- }
-
- m->mbmi.ref_frame = INTRA_FRAME;
-
- if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
int idx, idy;
int bw = 1 << b_width_log2(m->mbmi.sb_type);
int bh = 1 << b_height_log2(m->mbmi.sb_type);
@@ -143,35 +132,17 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
left_block_mode(m, ib) : DC_PRED;
m->bmi[ib].as_mode.first =
- read_kf_bmode(r, cm->kf_bmode_prob[A][L]);
+ read_intra_mode(r, cm->kf_y_mode_prob[A][L]);
for (k = 1; k < bh; ++k)
m->bmi[ib + k * 2].as_mode.first = m->bmi[ib].as_mode.first;
for (k = 1; k < bw; ++k)
m->bmi[ib + k].as_mode.first = m->bmi[ib].as_mode.first;
}
}
+ m->mbmi.mode = m->bmi[3].as_mode.first;
}
- m->mbmi.uv_mode = read_uv_mode(r, cm->kf_uv_mode_prob[m->mbmi.mode]);
-
- if (cm->txfm_mode == TX_MODE_SELECT &&
- m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
- const int allow_16x16 = m->mbmi.sb_type >= BLOCK_SIZE_MB16X16;
- const int allow_32x32 = m->mbmi.sb_type >= BLOCK_SIZE_SB32X32;
- m->mbmi.txfm_size = select_txfm_size(cm, r, allow_16x16, allow_32x32);
- } else if (cm->txfm_mode >= ALLOW_32X32 &&
- m->mbmi.sb_type >= BLOCK_SIZE_SB32X32) {
- m->mbmi.txfm_size = TX_32X32;
- } else if (cm->txfm_mode >= ALLOW_16X16 &&
- m->mbmi.sb_type >= BLOCK_SIZE_MB16X16 &&
- m->mbmi.mode <= TM_PRED) {
- m->mbmi.txfm_size = TX_16X16;
- } else if (cm->txfm_mode >= ALLOW_8X8 &&
- m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
- m->mbmi.txfm_size = TX_8X8;
- } else {
- m->mbmi.txfm_size = TX_4X4;
- }
+ m->mbmi.uv_mode = read_intra_mode(r, cm->kf_uv_mode_prob[m->mbmi.mode]);
}
static int read_mv_component(vp9_reader *r,
@@ -363,10 +334,6 @@ static MB_PREDICTION_MODE read_sb_mv_ref(vp9_reader *r, const vp9_prob *p) {
return (MB_PREDICTION_MODE) treed_read(r, vp9_sb_mv_ref_tree, p);
}
-static MB_PREDICTION_MODE read_mv_ref(vp9_reader *r, const vp9_prob *p) {
- return (MB_PREDICTION_MODE) treed_read(r, vp9_mv_ref_tree, p);
-}
-
#ifdef VPX_MODE_COUNT
unsigned int vp9_mv_cont_count[5][4] = {
{ 0, 0, 0, 0 },
@@ -395,10 +362,7 @@ static INLINE COMPPREDMODE_TYPE read_comp_pred_mode(vp9_reader *r) {
static void mb_mode_mv_init(VP9D_COMP *pbi, vp9_reader *r) {
VP9_COMMON *const cm = &pbi->common;
- if (cm->frame_type == KEY_FRAME) {
- if (!cm->kf_ymode_probs_update)
- cm->kf_ymode_probs_index = vp9_read_literal(r, 3);
- } else {
+ if (cm->frame_type != KEY_FRAME) {
nmv_context *const nmvc = &pbi->common.fc.nmvc;
MACROBLOCKD *const xd = &pbi->mb;
int i, j;
@@ -420,15 +384,10 @@ static void mb_mode_mv_init(VP9D_COMP *pbi, vp9_reader *r) {
for (i = 0; i < COMP_PRED_CONTEXTS; i++)
cm->prob_comppred[i] = vp9_read_prob(r);
- // VP9_YMODES
+ // VP9_INTRA_MODES
if (vp9_read_bit(r))
- for (i = 0; i < VP9_YMODES - 1; ++i)
- cm->fc.ymode_prob[i] = vp9_read_prob(r);
-
- // VP9_I32X32_MODES
- if (vp9_read_bit(r))
- for (i = 0; i < VP9_I32X32_MODES - 1; ++i)
- cm->fc.sb_ymode_prob[i] = vp9_read_prob(r);
+ for (i = 0; i < VP9_INTRA_MODES - 1; ++i)
+ cm->fc.y_mode_prob[i] = vp9_read_prob(r);
for (j = 0; j < NUM_PARTITION_CONTEXTS; ++j)
if (vp9_read_bit(r))
@@ -564,6 +523,24 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
// Read the reference frame
mbmi->ref_frame = read_ref_frame(pbi, r, mbmi->segment_id);
+ if (cm->txfm_mode == TX_MODE_SELECT &&
+ (mbmi->mb_skip_coeff == 0 || mbmi->ref_frame == INTRA_FRAME) &&
+ bsize >= BLOCK_SIZE_SB8X8) {
+ const int allow_16x16 = bsize >= BLOCK_SIZE_MB16X16;
+ const int allow_32x32 = bsize >= BLOCK_SIZE_SB32X32;
+ mbmi->txfm_size = select_txfm_size(cm, r, allow_16x16, allow_32x32);
+ } else if (bsize >= BLOCK_SIZE_SB32X32 &&
+ cm->txfm_mode >= ALLOW_32X32) {
+ mbmi->txfm_size = TX_32X32;
+ } else if (cm->txfm_mode >= ALLOW_16X16 &&
+ bsize >= BLOCK_SIZE_MB16X16) {
+ mbmi->txfm_size = TX_16X16;
+ } else if (cm->txfm_mode >= ALLOW_8X8 && (bsize >= BLOCK_SIZE_SB8X8)) {
+ mbmi->txfm_size = TX_8X8;
+ } else {
+ mbmi->txfm_size = TX_4X4;
+ }
+
// If reference frame is an Inter frame
if (mbmi->ref_frame) {
int_mv nearest, nearby, best_mv;
@@ -585,15 +562,12 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
// If the segment level skip mode enabled
if (vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_SKIP)) {
mbmi->mode = ZEROMV;
- } else {
- if (bsize >= BLOCK_SIZE_SB8X8)
- mbmi->mode = read_sb_mv_ref(r, mv_ref_p);
- else
- mbmi->mode = SPLITMV;
+ } else if (bsize >= BLOCK_SIZE_SB8X8) {
+ mbmi->mode = read_sb_mv_ref(r, mv_ref_p);
vp9_accum_mv_refs(cm, mbmi->mode, mbmi->mb_mode_context[ref_frame]);
}
- if (mbmi->mode != ZEROMV) {
+ if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) {
vp9_find_best_ref_mvs(xd,
mbmi->ref_mvs[ref_frame],
&nearest, &nearby);
@@ -633,7 +607,7 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
second_ref_frame, mbmi->ref_mvs[second_ref_frame],
cm->ref_frame_sign_bias);
- if (mbmi->mode != ZEROMV) {
+ if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) {
vp9_find_best_ref_mvs(xd,
mbmi->ref_mvs[second_ref_frame],
&nearest_second,
@@ -641,192 +615,170 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
best_mv_second.as_int = mbmi->ref_mvs[second_ref_frame][0].as_int;
}
}
-
}
mbmi->uv_mode = DC_PRED;
- switch (mbmi->mode) {
- case SPLITMV:
- mbmi->need_to_clamp_mvs = 0;
- for (idy = 0; idy < 2; idy += bh) {
- for (idx = 0; idx < 2; idx += bw) {
- int_mv blockmv, secondmv;
- int blockmode;
- int i;
- j = idy * 2 + idx;
-
- blockmode = read_sb_mv_ref(r, mv_ref_p);
- vp9_accum_mv_refs(cm, blockmode, mbmi->mb_mode_context[ref_frame]);
- if (blockmode == NEARESTMV || blockmode == NEARMV) {
- MV_REFERENCE_FRAME rf2 = mbmi->second_ref_frame;
- vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest, &nearby, j, 0);
- if (rf2 > 0) {
- vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest_second,
- &nearby_second, j, 1);
- }
+ if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
+ mbmi->need_to_clamp_mvs = 0;
+ for (idy = 0; idy < 2; idy += bh) {
+ for (idx = 0; idx < 2; idx += bw) {
+ int_mv blockmv, secondmv;
+ int blockmode;
+ int i;
+ j = idy * 2 + idx;
+
+ blockmode = read_sb_mv_ref(r, mv_ref_p);
+ vp9_accum_mv_refs(cm, blockmode, mbmi->mb_mode_context[ref_frame]);
+ if (blockmode == NEARESTMV || blockmode == NEARMV) {
+ MV_REFERENCE_FRAME rf2 = mbmi->second_ref_frame;
+ vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest, &nearby, j, 0);
+ if (rf2 > 0) {
+ vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest_second,
+ &nearby_second, j, 1);
}
+ }
- switch (blockmode) {
- case NEWMV:
- decode_mv(r, &blockmv.as_mv, &best_mv.as_mv, nmvc,
- &cm->fc.NMVcount, xd->allow_high_precision_mv);
-
- if (mbmi->second_ref_frame > 0)
- decode_mv(r, &secondmv.as_mv, &best_mv_second.as_mv, nmvc,
- &cm->fc.NMVcount, xd->allow_high_precision_mv);
-
- #ifdef VPX_MODE_COUNT
- vp9_mv_cont_count[mv_contz][3]++;
- #endif
- break;
- case NEARESTMV:
- blockmv.as_int = nearest.as_int;
- if (mbmi->second_ref_frame > 0)
- secondmv.as_int = nearest_second.as_int;
- #ifdef VPX_MODE_COUNT
- vp9_mv_cont_count[mv_contz][0]++;
- #endif
- break;
- case NEARMV:
- blockmv.as_int = nearby.as_int;
- if (mbmi->second_ref_frame > 0)
- secondmv.as_int = nearby_second.as_int;
- #ifdef VPX_MODE_COUNT
- vp9_mv_cont_count[mv_contz][1]++;
- #endif
- break;
- case ZEROMV:
- blockmv.as_int = 0;
- if (mbmi->second_ref_frame > 0)
- secondmv.as_int = 0;
- #ifdef VPX_MODE_COUNT
- vp9_mv_cont_count[mv_contz][2]++;
- #endif
- break;
- default:
- break;
- }
- mi->bmi[j].as_mv[0].as_int = blockmv.as_int;
- if (mbmi->second_ref_frame > 0)
- mi->bmi[j].as_mv[1].as_int = secondmv.as_int;
-
- for (i = 1; i < bh; ++i)
- vpx_memcpy(&mi->bmi[j + i * 2], &mi->bmi[j], sizeof(mi->bmi[j]));
- for (i = 1; i < bw; ++i)
- vpx_memcpy(&mi->bmi[j + i], &mi->bmi[j], sizeof(mi->bmi[j]));
+ switch (blockmode) {
+ case NEWMV:
+ decode_mv(r, &blockmv.as_mv, &best_mv.as_mv, nmvc,
+ &cm->fc.NMVcount, xd->allow_high_precision_mv);
+
+ if (mbmi->second_ref_frame > 0)
+ decode_mv(r, &secondmv.as_mv, &best_mv_second.as_mv, nmvc,
+ &cm->fc.NMVcount, xd->allow_high_precision_mv);
+
+#ifdef VPX_MODE_COUNT
+ vp9_mv_cont_count[mv_contz][3]++;
+#endif
+ break;
+ case NEARESTMV:
+ blockmv.as_int = nearest.as_int;
+ if (mbmi->second_ref_frame > 0)
+ secondmv.as_int = nearest_second.as_int;
+#ifdef VPX_MODE_COUNT
+ vp9_mv_cont_count[mv_contz][0]++;
+#endif
+ break;
+ case NEARMV:
+ blockmv.as_int = nearby.as_int;
+ if (mbmi->second_ref_frame > 0)
+ secondmv.as_int = nearby_second.as_int;
+#ifdef VPX_MODE_COUNT
+ vp9_mv_cont_count[mv_contz][1]++;
+#endif
+ break;
+ case ZEROMV:
+ blockmv.as_int = 0;
+ if (mbmi->second_ref_frame > 0)
+ secondmv.as_int = 0;
+#ifdef VPX_MODE_COUNT
+ vp9_mv_cont_count[mv_contz][2]++;
+#endif
+ break;
+ default:
+ break;
}
+ mi->bmi[j].as_mv[0].as_int = blockmv.as_int;
+ if (mbmi->second_ref_frame > 0)
+ mi->bmi[j].as_mv[1].as_int = secondmv.as_int;
+
+ for (i = 1; i < bh; ++i)
+ vpx_memcpy(&mi->bmi[j + i * 2], &mi->bmi[j], sizeof(mi->bmi[j]));
+ for (i = 1; i < bw; ++i)
+ vpx_memcpy(&mi->bmi[j + i], &mi->bmi[j], sizeof(mi->bmi[j]));
+ mi->mbmi.mode = blockmode;
}
+ }
- mv0->as_int = mi->bmi[3].as_mv[0].as_int;
- mv1->as_int = mi->bmi[3].as_mv[1].as_int;
- break; /* done with SPLITMV */
-
- case NEARMV:
- // Clip "next_nearest" so that it does not extend to far out of image
- assign_and_clamp_mv(mv0, &nearby, mb_to_left_edge,
- mb_to_right_edge,
- mb_to_top_edge,
- mb_to_bottom_edge);
- if (mbmi->second_ref_frame > 0)
- assign_and_clamp_mv(mv1, &nearby_second, mb_to_left_edge,
- mb_to_right_edge,
- mb_to_top_edge,
- mb_to_bottom_edge);
- break;
-
- case NEARESTMV:
- // Clip "next_nearest" so that it does not extend to far out of image
- assign_and_clamp_mv(mv0, &nearest, mb_to_left_edge,
- mb_to_right_edge,
- mb_to_top_edge,
- mb_to_bottom_edge);
- if (mbmi->second_ref_frame > 0)
- assign_and_clamp_mv(mv1, &nearest_second, mb_to_left_edge,
+ mv0->as_int = mi->bmi[3].as_mv[0].as_int;
+ mv1->as_int = mi->bmi[3].as_mv[1].as_int;
+ } else {
+ switch (mbmi->mode) {
+ case NEARMV:
+        // Clip "next_nearest" so that it does not extend too far out of image
+ assign_and_clamp_mv(mv0, &nearby, mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ if (mbmi->second_ref_frame > 0)
+ assign_and_clamp_mv(mv1, &nearby_second, mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ break;
+
+ case NEARESTMV:
+        // Clip "next_nearest" so that it does not extend too far out of image
+ assign_and_clamp_mv(mv0, &nearest, mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ if (mbmi->second_ref_frame > 0)
+ assign_and_clamp_mv(mv1, &nearest_second, mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ break;
+
+ case ZEROMV:
+ mv0->as_int = 0;
+ if (mbmi->second_ref_frame > 0)
+ mv1->as_int = 0;
+ break;
+
+ case NEWMV:
+ decode_mv(r, &mv0->as_mv, &best_mv.as_mv, nmvc, &cm->fc.NMVcount,
+ xd->allow_high_precision_mv);
+ mbmi->need_to_clamp_mvs = check_mv_bounds(mv0,
+ mb_to_left_edge,
mb_to_right_edge,
mb_to_top_edge,
mb_to_bottom_edge);
- break;
-
- case ZEROMV:
- mv0->as_int = 0;
- if (mbmi->second_ref_frame > 0)
- mv1->as_int = 0;
- break;
-
- case NEWMV:
- decode_mv(r, &mv0->as_mv, &best_mv.as_mv, nmvc, &cm->fc.NMVcount,
- xd->allow_high_precision_mv);
- mbmi->need_to_clamp_mvs = check_mv_bounds(mv0,
- mb_to_left_edge,
- mb_to_right_edge,
- mb_to_top_edge,
- mb_to_bottom_edge);
-
- if (mbmi->second_ref_frame > 0) {
- decode_mv(r, &mv1->as_mv, &best_mv_second.as_mv, nmvc,
- &cm->fc.NMVcount, xd->allow_high_precision_mv);
- mbmi->need_to_clamp_secondmv = check_mv_bounds(mv1,
- mb_to_left_edge,
- mb_to_right_edge,
- mb_to_top_edge,
- mb_to_bottom_edge);
- }
- break;
- default:
-;
+
+ if (mbmi->second_ref_frame > 0) {
+ decode_mv(r, &mv1->as_mv, &best_mv_second.as_mv, nmvc,
+ &cm->fc.NMVcount, xd->allow_high_precision_mv);
+ mbmi->need_to_clamp_secondmv = check_mv_bounds(mv1,
+ mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ }
+ break;
+ default:
#if CONFIG_DEBUG
- assert(0);
+ assert(0);
#endif
+ break;
+ }
}
} else {
// required for left and above block mv
mv0->as_int = 0;
if (bsize >= BLOCK_SIZE_SB8X8) {
- mbmi->mode = read_sb_ymode(r, cm->fc.sb_ymode_prob);
- cm->fc.sb_ymode_counts[mbmi->mode]++;
+ mbmi->mode = read_intra_mode(r, cm->fc.y_mode_prob);
+ cm->fc.y_mode_counts[mbmi->mode]++;
} else {
- mbmi->mode = I4X4_PRED;
- }
-
- // If MB mode is I4X4_PRED read the block modes
- if (bsize < BLOCK_SIZE_SB8X8) {
int idx, idy;
for (idy = 0; idy < 2; idy += bh) {
for (idx = 0; idx < 2; idx += bw) {
int ib = idy * 2 + idx, k;
- int m = read_sb_ymode(r, cm->fc.sb_ymode_prob);
+ int m = read_intra_mode(r, cm->fc.y_mode_prob);
mi->bmi[ib].as_mode.first = m;
- cm->fc.sb_ymode_counts[m]++;
+ cm->fc.y_mode_counts[m]++;
for (k = 1; k < bh; ++k)
mi->bmi[ib + k * 2].as_mode.first = m;
for (k = 1; k < bw; ++k)
mi->bmi[ib + k].as_mode.first = m;
}
}
+ mbmi->mode = mi->bmi[3].as_mode.first;
}
- mbmi->uv_mode = read_uv_mode(r, cm->fc.uv_mode_prob[mbmi->mode]);
+ mbmi->uv_mode = read_intra_mode(r, cm->fc.uv_mode_prob[mbmi->mode]);
cm->fc.uv_mode_counts[mbmi->mode][mbmi->uv_mode]++;
}
-
- if (cm->txfm_mode == TX_MODE_SELECT &&
- (mbmi->mb_skip_coeff == 0 || mbmi->ref_frame == INTRA_FRAME) &&
- bsize >= BLOCK_SIZE_SB8X8) {
- const int allow_16x16 = bsize >= BLOCK_SIZE_MB16X16;
- const int allow_32x32 = bsize >= BLOCK_SIZE_SB32X32;
- mbmi->txfm_size = select_txfm_size(cm, r, allow_16x16, allow_32x32);
- } else if (bsize >= BLOCK_SIZE_SB32X32 &&
- cm->txfm_mode >= ALLOW_32X32) {
- mbmi->txfm_size = TX_32X32;
- } else if (cm->txfm_mode >= ALLOW_16X16 &&
- bsize >= BLOCK_SIZE_MB16X16) {
- mbmi->txfm_size = TX_16X16;
- } else if (cm->txfm_mode >= ALLOW_8X8 && (bsize >= BLOCK_SIZE_SB8X8)) {
- mbmi->txfm_size = TX_8X8;
- } else {
- mbmi->txfm_size = TX_4X4;
- }
}
void vp9_decode_mode_mvs_init(VP9D_COMP* const pbi, vp9_reader *r) {
@@ -854,9 +806,6 @@ void vp9_decode_mb_mode_mv(VP9D_COMP* const pbi,
kfread_modes(pbi, mi, mi_row, mi_col, r);
} else {
read_mb_modes_mv(pbi, mi, &mi->mbmi, mi_row, mi_col, r);
- set_scale_factors(xd,
- mi->mbmi.ref_frame - 1, mi->mbmi.second_ref_frame - 1,
- cm->active_ref_scale);
}
if (1) {
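
Note: kfread_modes() and read_mb_modes_mv() above now apply the same transform-size logic ahead of mode parsing. A compact restatement of the fixed-size cascade (an illustrative sketch; TX_SIZE and the ALLOW_* values come from the codec headers, while the helper name and flags are placeholders for this example):

static TX_SIZE fixed_txfm_size_sketch(int txfm_mode, int ge_8x8,
                                      int ge_16x16, int ge_32x32) {
  if (txfm_mode >= ALLOW_32X32 && ge_32x32) return TX_32X32;
  if (txfm_mode >= ALLOW_16X16 && ge_16x16) return TX_16X16;
  if (txfm_mode >= ALLOW_8X8 && ge_8x8)     return TX_8X8;
  return TX_4X4;
}

When txfm_mode == TX_MODE_SELECT and the block is at least 8x8 (and, for inter blocks, not coded as skipped), the size is instead read from the bitstream through select_txfm_size().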
diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c
index 5f6224893..1a712ab14 100644
--- a/vp9/decoder/vp9_decodframe.c
+++ b/vp9/decoder/vp9_decodframe.c
@@ -10,29 +10,30 @@
#include <assert.h>
-#include "vp9/decoder/vp9_onyxd_int.h"
+#include "./vp9_rtcd.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_scale/vpx_scale.h"
+
+#include "vp9/common/vp9_extend.h"
+#include "vp9/common/vp9_modecont.h"
#include "vp9/common/vp9_common.h"
-#include "vp9/common/vp9_header.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_entropy.h"
-#include "vp9/decoder/vp9_decodframe.h"
-#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/common/vp9_invtrans.h"
#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_quant_common.h"
-#include "vpx_scale/vpx_scale.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_tile_common.h"
-#include "vp9/decoder/vp9_decodemv.h"
-#include "vp9/common/vp9_extend.h"
-#include "vp9/common/vp9_modecont.h"
-#include "vpx_mem/vpx_mem.h"
#include "vp9/decoder/vp9_dboolhuff.h"
+#include "vp9/decoder/vp9_decodframe.h"
+#include "vp9/decoder/vp9_detokenize.h"
+#include "vp9/decoder/vp9_decodemv.h"
+#include "vp9/decoder/vp9_onyxd_int.h"
+#include "vp9/decoder/vp9_read_bit_buffer.h"
-#include "vp9/common/vp9_seg_common.h"
-#include "vp9/common/vp9_tile_common.h"
-#include "./vp9_rtcd.h"
// #define DEC_DEBUG
#ifdef DEC_DEBUG
@@ -258,10 +259,12 @@ static void decode_block_intra(int plane, int block, BLOCK_SIZE_TYPE bsize,
mode = plane == 0? xd->mode_info_context->mbmi.mode:
xd->mode_info_context->mbmi.uv_mode;
- if (bsize <= BLOCK_SIZE_SB8X8 && mode == I4X4_PRED && plane == 0)
+ if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 && plane == 0) {
+ assert(bsize == BLOCK_SIZE_SB8X8);
b_mode = xd->mode_info_context->bmi[raster_block].as_mode.first;
- else
+ } else {
b_mode = mode;
+ }
plane_b_size = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
vp9_predict_intra_block(xd, tx_ib, plane_b_size, tx_size,
@@ -629,9 +632,6 @@ static void setup_segmentation(VP9_COMMON *pc, MACROBLOCKD *xd, vp9_reader *r) {
xd->update_mb_segmentation_map = 0;
xd->update_mb_segmentation_data = 0;
-#if CONFIG_IMPLICIT_SEGMENTATION
- xd->allow_implicit_segment_update = 0;
-#endif
xd->segmentation_enabled = vp9_read_bit(r);
if (!xd->segmentation_enabled)
@@ -639,9 +639,6 @@ static void setup_segmentation(VP9_COMMON *pc, MACROBLOCKD *xd, vp9_reader *r) {
// Segmentation map update
xd->update_mb_segmentation_map = vp9_read_bit(r);
-#if CONFIG_IMPLICIT_SEGMENTATION
- xd->allow_implicit_segment_update = vp9_read_bit(r);
-#endif
if (xd->update_mb_segmentation_map) {
for (i = 0; i < MB_SEG_TREE_PROBS; i++)
xd->mb_segment_tree_probs[i] = vp9_read_bit(r) ? vp9_read_prob(r)
@@ -746,32 +743,24 @@ static INTERPOLATIONFILTERTYPE read_mcomp_filter_type(vp9_reader *r) {
: vp9_read_literal(r, 2);
}
-static const uint8_t *read_frame_size(VP9_COMMON *const pc, const uint8_t *data,
- const uint8_t *data_end,
- int *width, int *height) {
- if (data + 4 < data_end) {
- const int w = read_le16(data);
- const int h = read_le16(data + 2);
- if (w <= 0)
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Invalid frame width");
-
- if (h <= 0)
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Invalid frame height");
- *width = w;
- *height = h;
- data += 4;
- } else {
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Failed to read frame size");
- }
- return data;
+static void read_frame_size(VP9_COMMON *cm,
+ struct vp9_read_bit_buffer *rb,
+ int *width, int *height) {
+ const int w = vp9_rb_read_literal(rb, 16);
+ const int h = vp9_rb_read_literal(rb, 16);
+ if (w <= 0)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid frame width");
+
+ if (h <= 0)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid frame height");
+ *width = w;
+ *height = h;
}
-static const uint8_t *setup_frame_size(VP9D_COMP *pbi, int scaling_active,
- const uint8_t *data,
- const uint8_t *data_end) {
+static void setup_frame_size(VP9D_COMP *pbi, int scaling_active,
+ struct vp9_read_bit_buffer *rb) {
// If error concealment is enabled we should only parse the new size
// if we have enough data. Otherwise we will end up with the wrong size.
VP9_COMMON *const pc = &pbi->common;
@@ -781,9 +770,9 @@ static const uint8_t *setup_frame_size(VP9D_COMP *pbi, int scaling_active,
int height = pc->height;
if (scaling_active)
- data = read_frame_size(pc, data, data_end, &display_width, &display_height);
+ read_frame_size(pc, rb, &display_width, &display_height);
- data = read_frame_size(pc, data, data_end, &width, &height);
+ read_frame_size(pc, rb, &width, &height);
if (pc->width != width || pc->height != height) {
if (!pbi->initial_width || !pbi->initial_height) {
@@ -809,8 +798,6 @@ static const uint8_t *setup_frame_size(VP9D_COMP *pbi, int scaling_active,
vp9_update_frame_size(pc);
}
-
- return data;
}
static void update_frame_context(FRAME_CONTEXT *fc) {
@@ -818,10 +805,8 @@ static void update_frame_context(FRAME_CONTEXT *fc) {
vp9_copy(fc->pre_coef_probs_8x8, fc->coef_probs_8x8);
vp9_copy(fc->pre_coef_probs_16x16, fc->coef_probs_16x16);
vp9_copy(fc->pre_coef_probs_32x32, fc->coef_probs_32x32);
- vp9_copy(fc->pre_ymode_prob, fc->ymode_prob);
- vp9_copy(fc->pre_sb_ymode_prob, fc->sb_ymode_prob);
+ vp9_copy(fc->pre_y_mode_prob, fc->y_mode_prob);
vp9_copy(fc->pre_uv_mode_prob, fc->uv_mode_prob);
- vp9_copy(fc->pre_bmode_prob, fc->bmode_prob);
vp9_copy(fc->pre_partition_prob, fc->partition_prob);
fc->pre_nmvc = fc->nmvc;
@@ -830,10 +815,8 @@ static void update_frame_context(FRAME_CONTEXT *fc) {
vp9_zero(fc->coef_counts_16x16);
vp9_zero(fc->coef_counts_32x32);
vp9_zero(fc->eob_branch_counts);
- vp9_zero(fc->ymode_counts);
- vp9_zero(fc->sb_ymode_counts);
+ vp9_zero(fc->y_mode_counts);
vp9_zero(fc->uv_mode_counts);
- vp9_zero(fc->bmode_counts);
vp9_zero(fc->NMVcount);
vp9_zero(fc->mv_ref_ct);
vp9_zero(fc->partition_counts);
@@ -940,59 +923,87 @@ static void decode_tiles(VP9D_COMP *pbi,
}
}
-int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
- vp9_reader header_bc, residual_bc;
- VP9_COMMON *const pc = &pbi->common;
- MACROBLOCKD *const xd = &pbi->mb;
- const uint8_t *data = pbi->source;
- const uint8_t *data_end = data + pbi->source_sz;
- size_t first_partition_size = 0;
- YV12_BUFFER_CONFIG *new_fb = &pc->yv12_fb[pc->new_fb_idx];
- int i;
- xd->corrupted = 0; // start with no corruption of current frame
- new_fb->corrupted = 0;
+static void error_handler(void *data, int bit_offset) {
+ VP9_COMMON *const cm = (VP9_COMMON *)data;
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
+}
- if (data_end - data < 3) {
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
- } else {
- int scaling_active;
- pc->last_frame_type = pc->frame_type;
- pc->frame_type = (FRAME_TYPE)(data[0] & 1);
- pc->version = (data[0] >> 1) & 7;
- pc->show_frame = (data[0] >> 4) & 1;
- scaling_active = (data[0] >> 5) & 1;
- pc->subsampling_x = (data[0] >> 6) & 1;
- pc->subsampling_y = (data[0] >> 7) & 1;
- first_partition_size = read_le16(data + 1);
-
- if (!read_is_valid(data, first_partition_size, data_end))
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Truncated packet or corrupt partition 0 length");
+size_t read_uncompressed_header(VP9D_COMP *pbi,
+ struct vp9_read_bit_buffer *rb) {
+ VP9_COMMON *const cm = &pbi->common;
- data += 3;
+ int scaling_active;
+ cm->last_frame_type = cm->frame_type;
+ cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb);
+ cm->version = vp9_rb_read_literal(rb, 3);
+ cm->show_frame = vp9_rb_read_bit(rb);
+ scaling_active = vp9_rb_read_bit(rb);
+ cm->subsampling_x = vp9_rb_read_bit(rb);
+ cm->subsampling_y = vp9_rb_read_bit(rb);
+
+ if (cm->frame_type == KEY_FRAME) {
+ if (vp9_rb_read_literal(rb, 8) != SYNC_CODE_0 ||
+ vp9_rb_read_literal(rb, 8) != SYNC_CODE_1 ||
+ vp9_rb_read_literal(rb, 8) != SYNC_CODE_2) {
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid frame sync code");
+ }
+ }
- vp9_setup_version(pc);
+ setup_frame_size(pbi, scaling_active, rb);
- if (pc->frame_type == KEY_FRAME) {
- // When error concealment is enabled we should only check the sync
- // code if we have enough bits available
- if (data + 3 < data_end) {
- if (data[0] != 0x49 || data[1] != 0x83 || data[2] != 0x42)
- vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM,
- "Invalid frame sync code");
- }
- data += 3;
- }
+ if (!cm->show_frame) {
+ cm->intra_only = vp9_rb_read_bit(rb);
+ } else {
+ cm->intra_only = 0;
+ }
- data = setup_frame_size(pbi, scaling_active, data, data_end);
+ cm->frame_context_idx = vp9_rb_read_literal(rb, NUM_FRAME_CONTEXTS_LG2);
+ cm->clr_type = (YUV_TYPE)vp9_rb_read_bit(rb);
+
+ cm->error_resilient_mode = vp9_rb_read_bit(rb);
+ if (!cm->error_resilient_mode) {
+ cm->reset_frame_context = vp9_rb_read_bit(rb);
+ cm->refresh_frame_context = vp9_rb_read_bit(rb);
+ cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb);
+ } else {
+ cm->reset_frame_context = 0;
+ cm->refresh_frame_context = 0;
+ cm->frame_parallel_decoding_mode = 1;
}
- if ((!pbi->decoded_key_frame && pc->frame_type != KEY_FRAME) ||
+ return vp9_rb_read_literal(rb, 16);
+}
+
+int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
+ int i;
+ vp9_reader header_bc, residual_bc;
+ VP9_COMMON *const pc = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ YV12_BUFFER_CONFIG *new_fb = &pc->yv12_fb[pc->new_fb_idx];
+ const uint8_t *data = pbi->source;
+ const uint8_t *data_end = pbi->source + pbi->source_sz;
+
+ struct vp9_read_bit_buffer rb = { data, data_end, 0,
+ pc, error_handler };
+ const size_t first_partition_size = read_uncompressed_header(pbi, &rb);
+ const int keyframe = pc->frame_type == KEY_FRAME;
+
+ data += vp9_rb_bytes_read(&rb);
+ xd->corrupted = 0;
+ new_fb->corrupted = 0;
+
+ if ((!pbi->decoded_key_frame && !keyframe) ||
pc->width == 0 || pc->height == 0) {
return -1;
}
+ vp9_setup_version(pc);
+ if (!read_is_valid(data, first_partition_size, data_end))
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt partition 0 length");
+
init_frame(pbi);
// Reset the frame pointers to the current frame size
@@ -1004,9 +1015,6 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder 0");
- pc->clr_type = (YUV_TYPE)vp9_read_bit(&header_bc);
- pc->error_resilient_mode = vp9_read_bit(&header_bc);
-
setup_loopfilter(pc, xd, &header_bc);
setup_quantization(pbi, &header_bc);
@@ -1028,7 +1036,7 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
// Determine if the golden frame or ARF buffer should be updated and how.
// For all non key frames the GF and ARF refresh flags and sign bias
// flags must be set explicitly.
- if (pc->frame_type == KEY_FRAME) {
+ if (keyframe) {
for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
pc->active_ref_idx[i] = pc->new_fb_idx;
} else {
@@ -1053,15 +1061,6 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
vp9_setup_interp_filters(xd, pc->mcomp_filter_type, pc);
}
- if (!pc->error_resilient_mode) {
- pc->refresh_frame_context = vp9_read_bit(&header_bc);
- pc->frame_parallel_decoding_mode = vp9_read_bit(&header_bc);
- } else {
- pc->refresh_frame_context = 0;
- pc->frame_parallel_decoding_mode = 1;
- }
-
- pc->frame_context_idx = vp9_read_literal(&header_bc, NUM_FRAME_CONTEXTS_LG2);
pc->fc = pc->frame_contexts[pc->frame_context_idx];
setup_segmentation(pc, xd, &header_bc);
@@ -1071,16 +1070,13 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
setup_txfm_mode(pc, xd->lossless, &header_bc);
// Read inter mode probability context updates
- if (pc->frame_type != KEY_FRAME) {
+ if (!keyframe) {
int i, j;
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
- for (j = 0; j < 4; ++j)
+ for (j = 0; j < VP9_MVREFS - 1; ++j)
if (vp9_read(&header_bc, 252))
pc->fc.vp9_mode_contexts[i][j] = vp9_read_prob(&header_bc);
}
- // Is this needed ?
- if (pc->frame_type == KEY_FRAME)
- vp9_default_coef_probs(pc);
update_frame_context(&pc->fc);
@@ -1114,7 +1110,7 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
new_fb->corrupted = vp9_reader_has_error(&header_bc) | xd->corrupted;
if (!pbi->decoded_key_frame) {
- if (pc->frame_type == KEY_FRAME && !new_fb->corrupted)
+ if (keyframe && !new_fb->corrupted)
pbi->decoded_key_frame = 1;
else
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
@@ -1125,20 +1121,13 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
if (!pc->error_resilient_mode && !pc->frame_parallel_decoding_mode) {
vp9_adapt_coef_probs(pc);
- if (pc->frame_type != KEY_FRAME) {
+ if (!keyframe) {
vp9_adapt_mode_probs(pc);
vp9_adapt_mode_context(pc);
vp9_adapt_nmv_probs(pc, xd->allow_high_precision_mv);
}
}
-#if CONFIG_IMPLICIT_SEGMENTATION
- // If signalled at the frame level apply implicit updates to the segment map.
- if (!pc->error_resilient_mode && xd->allow_implicit_segment_update) {
- vp9_implicit_segment_map_update(pc);
- }
-#endif
-
if (pc->refresh_frame_context)
pc->frame_contexts[pc->frame_context_idx] = pc->fc;
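
Note: with read_uncompressed_header() above and write_uncompressed_header() in vp9_bitstream.c, the raw-bit frame header has a fixed layout. Restated as a comment for reference (field widths in bits; a summary of the read/write sides shown in this change, not normative text):

/*
 * frame_type(1)  version(3)  show_frame(1)  scaling_active(1)
 * subsampling_x(1)  subsampling_y(1)
 * [key frame]              sync code 0x49 0x83 0x42 (3 x 8)
 * [scaling_active]         display_width(16)  display_height(16)
 * width(16)  height(16)
 * [!show_frame]            intra_only(1)
 * frame_context_idx(NUM_FRAME_CONTEXTS_LG2)  clr_type(1)
 * error_resilient_mode(1)
 * [!error_resilient_mode]  reset_frame_context(1)  refresh_frame_context(1)
 *                          frame_parallel_decoding_mode(1)
 * first_partition_size(16)
 */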
diff --git a/vp9/decoder/vp9_detokenize.c b/vp9/decoder/vp9_detokenize.c
index 02900c08c..6f6c88a27 100644
--- a/vp9/decoder/vp9_detokenize.c
+++ b/vp9/decoder/vp9_detokenize.c
@@ -17,8 +17,13 @@
#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/common/vp9_seg_common.h"
+#if CONFIG_BALANCED_COEFTREE
+#define ZERO_CONTEXT_NODE 0
+#define EOB_CONTEXT_NODE 1
+#else
#define EOB_CONTEXT_NODE 0
#define ZERO_CONTEXT_NODE 1
+#endif
#define ONE_CONTEXT_NODE 2
#define LOW_VAL_CONTEXT_NODE 3
#define TWO_CONTEXT_NODE 4
@@ -58,13 +63,15 @@ static const vp9_prob cat6_prob[15] = {
254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0
};
+DECLARE_ALIGNED(16, extern const uint8_t,
+ vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
#define INCREMENT_COUNT(token) \
do { \
coef_counts[type][ref][band][pt] \
[token >= TWO_TOKEN ? \
(token == DCT_EOB_TOKEN ? DCT_EOB_MODEL_TOKEN : TWO_TOKEN) : \
token]++; \
- token_cache[scan[c]] = token; \
+ token_cache[scan[c]] = vp9_pt_energy_class[token]; \
} while (0)
#define WRITE_COEF_CONTINUE(val, token) \
@@ -109,6 +116,9 @@ static int decode_coefs(VP9D_COMP *dx, const MACROBLOCKD *xd,
const int *scan, *nb;
uint8_t token_cache[1024];
const uint8_t * band_translate;
+#if CONFIG_BALANCED_COEFTREE
+ int skip_eob_node = 0;
+#endif
switch (txfm_size) {
default:
@@ -180,11 +190,13 @@ static int decode_coefs(VP9D_COMP *dx, const MACROBLOCKD *xd,
c, default_eob);
band = get_coef_band(band_translate, c);
prob = coef_probs[band][pt];
+#if !CONFIG_BALANCED_COEFTREE
fc->eob_branch_counts[txfm_size][type][ref][band][pt]++;
if (!vp9_read(r, prob[EOB_CONTEXT_NODE]))
break;
SKIP_START:
+#endif
if (c >= seg_eob)
break;
if (c)
@@ -196,8 +208,22 @@ SKIP_START:
if (!vp9_read(r, prob[ZERO_CONTEXT_NODE])) {
INCREMENT_COUNT(ZERO_TOKEN);
++c;
+#if CONFIG_BALANCED_COEFTREE
+ skip_eob_node = 1;
+ continue;
+#else
goto SKIP_START;
+#endif
}
+#if CONFIG_BALANCED_COEFTREE
+ if (!skip_eob_node) {
+ fc->eob_branch_counts[txfm_size][type][ref][band][pt]++;
+ if (!vp9_read(r, prob[EOB_CONTEXT_NODE]))
+ break;
+ }
+ skip_eob_node = 0;
+#endif
+
// ONE_CONTEXT_NODE_0_
if (!vp9_read(r, prob[ONE_CONTEXT_NODE])) {
WRITE_COEF_CONTINUE(1, ONE_TOKEN);
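
Note: the CONFIG_BALANCED_COEFTREE path above swaps the ZERO and EOB context nodes and defers the end-of-block test: after a coded zero the EOB branch is implied and skipped for the next coefficient. Control-flow sketch (placeholder names such as read_bit(), not the decoder's code):

int skip_eob_node = 0;
while (c < seg_eob) {
  if (!read_bit(prob[ZERO_CONTEXT_NODE])) {  /* coefficient is zero */
    ++c;
    skip_eob_node = 1;  /* an explicit zero rules out EOB at the next position */
    continue;
  }
  if (!skip_eob_node && !read_bit(prob[EOB_CONTEXT_NODE]))
    break;              /* end of block */
  skip_eob_node = 0;
  /* ... decode the non-zero token value and its extra bits ... */
  ++c;
}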
diff --git a/vp9/decoder/vp9_onyxd_if.c b/vp9/decoder/vp9_onyxd_if.c
index 24f9ca3f9..3cef88bcd 100644
--- a/vp9/decoder/vp9_onyxd_if.c
+++ b/vp9/decoder/vp9_onyxd_if.c
@@ -387,9 +387,6 @@ int vp9_receive_compressed_data(VP9D_PTR ptr,
cm->current_video_frame++;
}
- /*vp9_print_modes_and_motion_vectors(cm->mi, cm->mb_rows,cm->mb_cols,
- cm->current_video_frame);*/
-
pbi->ready_for_new_data = 0;
pbi->last_time_stamp = time_stamp;
pbi->source_sz = 0;
diff --git a/vp9/decoder/vp9_read_bit_buffer.h b/vp9/decoder/vp9_read_bit_buffer.h
new file mode 100644
index 000000000..fa2dbee8d
--- /dev/null
+++ b/vp9/decoder/vp9_read_bit_buffer.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_READ_BIT_BUFFER_
+#define VP9_READ_BIT_BUFFER_
+
+typedef void (*vp9_rb_error_handler)(void *data, int bit_offset);
+
+struct vp9_read_bit_buffer {
+ const uint8_t *bit_buffer;
+ const uint8_t *bit_buffer_end;
+ size_t bit_offset;
+
+ void *error_handler_data;
+ vp9_rb_error_handler error_handler;
+};
+
+static size_t vp9_rb_bytes_read(struct vp9_read_bit_buffer *rb) {
+ return rb->bit_offset / CHAR_BIT + (rb->bit_offset % CHAR_BIT > 0);
+}
+
+static int vp9_rb_read_bit(struct vp9_read_bit_buffer *rb) {
+ const int off = rb->bit_offset;
+ const int p = off / CHAR_BIT;
+ const int q = CHAR_BIT - 1 - off % CHAR_BIT;
+ if (rb->bit_buffer + p >= rb->bit_buffer_end) {
+ rb->error_handler(rb->error_handler_data, rb->bit_offset);
+ return 0;
+ } else {
+ const int bit = (rb->bit_buffer[p] & (1 << q)) >> q;
+ rb->bit_offset = off + 1;
+ return bit;
+ }
+}
+
+static int vp9_rb_read_literal(struct vp9_read_bit_buffer *rb, int bits) {
+ int value = 0, bit;
+ for (bit = bits - 1; bit >= 0; bit--)
+ value |= vp9_rb_read_bit(rb) << bit;
+ return value;
+}
+
+#endif // VP9_READ_BIT_BUFFER_
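
Note: the reader above consumes bits MSB-first and reports truncation through the caller-supplied handler rather than returning an error code. A minimal stand-alone usage sketch (assumes <limits.h>/<stdint.h> and the header above are included; not part of the decoder):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static void on_truncation(void *data, int bit_offset) {
  (void)data;
  fprintf(stderr, "bitstream truncated at bit %d\n", bit_offset);
}

int main(void) {
  const uint8_t buf[2] = { 0xA5, 0x3C };           /* 10100101 00111100 */
  struct vp9_read_bit_buffer rb = { buf, buf + sizeof(buf), 0,
                                    NULL, on_truncation };
  const int bit = vp9_rb_read_bit(&rb);            /* MSB of 0xA5 -> 1 */
  const int lit = vp9_rb_read_literal(&rb, 7);     /* next 7 bits -> 0x25 */
  printf("bit=%d literal=0x%02x bytes=%d\n",
         bit, lit, (int)vp9_rb_bytes_read(&rb));   /* bytes=1 */
  return 0;
}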
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index 26f22b4be..a95d7eb46 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -12,35 +12,37 @@
#include <stdio.h>
#include <limits.h>
-#include "vp9/common/vp9_header.h"
-#include "vp9/encoder/vp9_encodemv.h"
+#include "vpx/vpx_encoder.h"
+#include "vpx_mem/vpx_mem.h"
+
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_tile_common.h"
-#include "vp9/encoder/vp9_mcomp.h"
-#include "vp9/common/vp9_systemdependent.h"
-#include "vp9/common/vp9_pragmas.h"
-#include "vpx/vpx_encoder.h"
-#include "vpx_mem/vpx_mem.h"
-#include "vp9/encoder/vp9_bitstream.h"
-#include "vp9/encoder/vp9_segmentation.h"
-
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_treecoder.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/common/vp9_pragmas.h"
+
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vp9/encoder/vp9_encodemv.h"
+#include "vp9/encoder/vp9_bitstream.h"
+#include "vp9/encoder/vp9_segmentation.h"
+#include "vp9/encoder/vp9_write_bit_buffer.h"
+
#if defined(SECTIONBITS_OUTPUT)
unsigned __int64 Sectionbits[500];
#endif
#ifdef ENTROPY_STATS
-int intra_mode_stats[VP9_KF_BINTRAMODES]
- [VP9_KF_BINTRAMODES]
- [VP9_KF_BINTRAMODES];
+int intra_mode_stats[VP9_INTRA_MODES]
+ [VP9_INTRA_MODES]
+ [VP9_INTRA_MODES];
vp9_coeff_stats tree_update_hist_4x4[BLOCK_TYPES];
vp9_coeff_stats tree_update_hist_8x8[BLOCK_TYPES];
vp9_coeff_stats tree_update_hist_16x16[BLOCK_TYPES];
@@ -243,15 +245,12 @@ static void update_mbintra_mode_probs(VP9_COMP* const cpi,
vp9_writer* const bc) {
VP9_COMMON *const cm = &cpi->common;
- vp9_prob pnew[VP9_YMODES - 1];
- unsigned int bct[VP9_YMODES - 1][2];
-
- update_mode(bc, VP9_YMODES, vp9_ymode_encodings, vp9_ymode_tree, pnew,
- cm->fc.ymode_prob, bct, (unsigned int *)cpi->ymode_count);
+ vp9_prob pnew[VP9_INTRA_MODES - 1];
+ unsigned int bct[VP9_INTRA_MODES - 1][2];
- update_mode(bc, VP9_I32X32_MODES, vp9_sb_ymode_encodings,
- vp9_sb_ymode_tree, pnew, cm->fc.sb_ymode_prob, bct,
- (unsigned int *)cpi->sb_ymode_count);
+ update_mode(bc, VP9_INTRA_MODES, vp9_intra_mode_encodings,
+ vp9_intra_mode_tree, pnew,
+ cm->fc.y_mode_prob, bct, (unsigned int *)cpi->y_mode_count);
}
void vp9_update_skip_probs(VP9_COMP *cpi) {
@@ -322,15 +321,15 @@ static void update_refpred_stats(VP9_COMP *cpi) {
// The branch counts table is re-populated during the actual pack stage and in
// the decoder to facilitate backwards update of the context.
static void update_inter_mode_probs(VP9_COMMON *cm,
- int mode_context[INTER_MODE_CONTEXTS][4]) {
+ int mode_context[INTER_MODE_CONTEXTS][VP9_MVREFS - 1]) {
int i, j;
- unsigned int (*mv_ref_ct)[4][2] = cm->fc.mv_ref_ct;
+ unsigned int (*mv_ref_ct)[VP9_MVREFS - 1][2] = cm->fc.mv_ref_ct;
vpx_memcpy(mode_context, cm->fc.vp9_mode_contexts,
sizeof(cm->fc.vp9_mode_contexts));
for (i = 0; i < INTER_MODE_CONTEXTS; i++) {
- for (j = 0; j < 4; j++) {
+ for (j = 0; j < VP9_MVREFS - 1; j++) {
int new_prob, old_cost, new_cost;
// Work out cost of coding branches with the old and optimal probability
@@ -348,28 +347,8 @@ static void update_inter_mode_probs(VP9_COMMON *cm,
}
}
-static void write_ymode(vp9_writer *bc, int m, const vp9_prob *p) {
- write_token(bc, vp9_ymode_tree, p, vp9_ymode_encodings + m);
-}
-
-static void kfwrite_ymode(vp9_writer *bc, int m, const vp9_prob *p) {
- write_token(bc, vp9_kf_ymode_tree, p, vp9_kf_ymode_encodings + m);
-}
-
-static void write_sb_ymode(vp9_writer *bc, int m, const vp9_prob *p) {
- write_token(bc, vp9_sb_ymode_tree, p, vp9_sb_ymode_encodings + m);
-}
-
-static void sb_kfwrite_ymode(vp9_writer *bc, int m, const vp9_prob *p) {
- write_token(bc, vp9_uv_mode_tree, p, vp9_sb_kf_ymode_encodings + m);
-}
-
-static void write_uv_mode(vp9_writer *bc, int m, const vp9_prob *p) {
- write_token(bc, vp9_uv_mode_tree, p, vp9_uv_mode_encodings + m);
-}
-
-static void write_kf_bmode(vp9_writer *bc, int m, const vp9_prob *p) {
- write_token(bc, vp9_bmode_tree, p, vp9_kf_bmode_encodings + m);
+static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) {
+ write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m);
}
static int prob_update_savings(const unsigned int *ct,
@@ -471,7 +450,6 @@ static void pack_mb_tokens(vp9_writer* const bc,
const vp9_prob *pp;
int v = a->value;
int n = a->len;
- int ncount = n;
vp9_prob probs[ENTROPY_NODES];
if (t == EOSB_TOKEN) {
@@ -487,18 +465,25 @@ static void pack_mb_tokens(vp9_writer* const bc,
assert(pp != 0);
/* skip one or two nodes */
+#if !CONFIG_BALANCED_COEFTREE
if (p->skip_eob_node) {
n -= p->skip_eob_node;
i = 2 * p->skip_eob_node;
- ncount -= p->skip_eob_node;
}
+#endif
do {
const int bb = (v >> --n) & 1;
+#if CONFIG_BALANCED_COEFTREE
+ if (i == 2 && p->skip_eob_node) {
+ i += 2;
+ assert(bb == 1);
+ continue;
+ }
+#endif
vp9_write(bc, bb, pp[i >> 1]);
i = vp9_coef_tree[i + bb];
- ncount--;
- } while (n && ncount);
+ } while (n);
if (b->base_val) {
const int e = p->extra, l = b->len;
@@ -524,19 +509,10 @@ static void pack_mb_tokens(vp9_writer* const bc,
*tp = p;
}
-static void write_mv_ref(vp9_writer *bc, MB_PREDICTION_MODE m,
- const vp9_prob *p) {
-#if CONFIG_DEBUG
- assert(NEARESTMV <= m && m <= SPLITMV);
-#endif
- write_token(bc, vp9_mv_ref_tree, p,
- vp9_mv_ref_encoding_array - NEARESTMV + m);
-}
-
static void write_sb_mv_ref(vp9_writer *bc, MB_PREDICTION_MODE m,
const vp9_prob *p) {
#if CONFIG_DEBUG
- assert(NEARESTMV <= m && m < SPLITMV);
+ assert(NEARESTMV <= m && m <= NEWMV);
#endif
write_token(bc, vp9_sb_mv_ref_tree, p,
vp9_sb_mv_ref_encoding_array - NEARESTMV + m);
@@ -699,25 +675,37 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
// Encode the reference frame.
encode_ref_frame(bc, pc, xd, segment_id, rf);
+ if (mi->sb_type >= BLOCK_SIZE_SB8X8 && pc->txfm_mode == TX_MODE_SELECT &&
+ !(rf != INTRA_FRAME &&
+ (skip_coeff || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)))) {
+ TX_SIZE sz = mi->txfm_size;
+ // FIXME(rbultje) code ternary symbol once all experiments are merged
+ vp9_write(bc, sz != TX_4X4, pc->prob_tx[0]);
+ if (mi->sb_type >= BLOCK_SIZE_MB16X16 && sz != TX_4X4) {
+ vp9_write(bc, sz != TX_8X8, pc->prob_tx[1]);
+ if (mi->sb_type >= BLOCK_SIZE_SB32X32 && sz != TX_8X8)
+ vp9_write(bc, sz != TX_16X16, pc->prob_tx[2]);
+ }
+ }
+
if (rf == INTRA_FRAME) {
#ifdef ENTROPY_STATS
active_section = 6;
#endif
- if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8)
- write_sb_ymode(bc, mode, pc->fc.sb_ymode_prob);
-
- if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+ if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
+ write_intra_mode(bc, mode, pc->fc.y_mode_prob);
+ } else {
int idx, idy;
int bw = 1 << b_width_log2(mi->sb_type);
int bh = 1 << b_height_log2(mi->sb_type);
for (idy = 0; idy < 2; idy += bh)
for (idx = 0; idx < 2; idx += bw)
- write_sb_ymode(bc, m->bmi[idy * 2 + idx].as_mode.first,
- pc->fc.sb_ymode_prob);
+ write_intra_mode(bc, m->bmi[idy * 2 + idx].as_mode.first,
+ pc->fc.y_mode_prob);
}
- write_uv_mode(bc, mi->uv_mode,
- pc->fc.uv_mode_prob[mode]);
+ write_intra_mode(bc, mi->uv_mode,
+ pc->fc.uv_mode_prob[mode]);
} else {
vp9_prob mv_ref_p[VP9_MVREFS - 1];
@@ -729,21 +717,20 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
// If segment skip is not enabled code the mode.
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
- if (mi->sb_type >= BLOCK_SIZE_SB8X8)
+ if (mi->sb_type >= BLOCK_SIZE_SB8X8) {
write_sb_mv_ref(bc, mode, mv_ref_p);
- vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
+ vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
+ }
}
- if (is_inter_mode(mode)) {
- if (cpi->common.mcomp_filter_type == SWITCHABLE) {
- write_token(bc, vp9_switchable_interp_tree,
- vp9_get_pred_probs(&cpi->common, xd,
- PRED_SWITCHABLE_INTERP),
- vp9_switchable_interp_encodings +
- vp9_switchable_interp_map[mi->interp_filter]);
- } else {
- assert(mi->interp_filter == cpi->common.mcomp_filter_type);
- }
+ if (cpi->common.mcomp_filter_type == SWITCHABLE) {
+ write_token(bc, vp9_switchable_interp_tree,
+ vp9_get_pred_probs(&cpi->common, xd,
+ PRED_SWITCHABLE_INTERP),
+ vp9_switchable_interp_encodings +
+ vp9_switchable_interp_map[mi->interp_filter]);
+ } else {
+ assert(mi->interp_filter == cpi->common.mcomp_filter_type);
}
// does the feature use compound prediction or not
@@ -753,70 +740,51 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
vp9_get_pred_prob(pc, xd, PRED_COMP));
}
- switch (mode) { /* new, split require MVs */
- case NEWMV:
+ if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+ int j;
+ MB_PREDICTION_MODE blockmode;
+ int_mv blockmv;
+ int bwl = b_width_log2(mi->sb_type), bw = 1 << bwl;
+ int bhl = b_height_log2(mi->sb_type), bh = 1 << bhl;
+ int idx, idy;
+ for (idy = 0; idy < 2; idy += bh) {
+ for (idx = 0; idx < 2; idx += bw) {
+ j = idy * 2 + idx;
+ blockmode = cpi->mb.partition_info->bmi[j].mode;
+ blockmv = cpi->mb.partition_info->bmi[j].mv;
+ write_sb_mv_ref(bc, blockmode, mv_ref_p);
+ vp9_accum_mv_refs(&cpi->common, blockmode, mi->mb_mode_context[rf]);
+ if (blockmode == NEWMV) {
#ifdef ENTROPY_STATS
- active_section = 5;
+ active_section = 11;
#endif
- vp9_encode_mv(bc,
- &mi->mv[0].as_mv, &mi->best_mv.as_mv,
- nmvc, xd->allow_high_precision_mv);
+ vp9_encode_mv(bc, &blockmv.as_mv, &mi->best_mv.as_mv,
+ nmvc, xd->allow_high_precision_mv);
- if (mi->second_ref_frame > 0)
- vp9_encode_mv(bc,
- &mi->mv[1].as_mv, &mi->best_second_mv.as_mv,
- nmvc, xd->allow_high_precision_mv);
- break;
- case SPLITMV: {
- int j;
- MB_PREDICTION_MODE blockmode;
- int_mv blockmv;
- int bwl = b_width_log2(mi->sb_type), bw = 1 << bwl;
- int bhl = b_height_log2(mi->sb_type), bh = 1 << bhl;
- int idx, idy;
- for (idy = 0; idy < 2; idy += bh) {
- for (idx = 0; idx < 2; idx += bw) {
- j = idy * 2 + idx;
- blockmode = cpi->mb.partition_info->bmi[j].mode;
- blockmv = cpi->mb.partition_info->bmi[j].mv;
- write_sb_mv_ref(bc, blockmode, mv_ref_p);
- vp9_accum_mv_refs(&cpi->common, blockmode, mi->mb_mode_context[rf]);
- if (blockmode == NEWMV) {
-#ifdef ENTROPY_STATS
- active_section = 11;
-#endif
- vp9_encode_mv(bc, &blockmv.as_mv, &mi->best_mv.as_mv,
+ if (mi->second_ref_frame > 0)
+ vp9_encode_mv(bc,
+ &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
+ &mi->best_second_mv.as_mv,
nmvc, xd->allow_high_precision_mv);
-
- if (mi->second_ref_frame > 0)
- vp9_encode_mv(bc,
- &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
- &mi->best_second_mv.as_mv,
- nmvc, xd->allow_high_precision_mv);
- }
}
}
+ }
#ifdef MODE_STATS
- ++count_mb_seg[mi->partitioning];
+ ++count_mb_seg[mi->partitioning];
#endif
- break;
- }
- default:
- break;
- }
- }
+ } else if (mode == NEWMV) {
+#ifdef ENTROPY_STATS
+ active_section = 5;
+#endif
+ vp9_encode_mv(bc,
+ &mi->mv[0].as_mv, &mi->best_mv.as_mv,
+ nmvc, xd->allow_high_precision_mv);
- if (mi->sb_type >= BLOCK_SIZE_SB8X8 && pc->txfm_mode == TX_MODE_SELECT &&
- !(rf != INTRA_FRAME &&
- (skip_coeff || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)))) {
- TX_SIZE sz = mi->txfm_size;
- // FIXME(rbultje) code ternary symbol once all experiments are merged
- vp9_write(bc, sz != TX_4X4, pc->prob_tx[0]);
- if (mi->sb_type >= BLOCK_SIZE_MB16X16 && sz != TX_4X4) {
- vp9_write(bc, sz != TX_8X8, pc->prob_tx[1]);
- if (mi->sb_type >= BLOCK_SIZE_SB32X32 && sz != TX_8X8)
- vp9_write(bc, sz != TX_16X16, pc->prob_tx[2]);
+ if (mi->second_ref_frame > 0)
+ vp9_encode_mv(bc,
+ &mi->mv[1].as_mv, &mi->best_second_mv.as_mv,
+ nmvc, xd->allow_high_precision_mv);
}
}
}
@@ -841,14 +809,23 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
vp9_write(bc, skip_coeff, vp9_get_pred_prob(c, xd, PRED_MBSKIP));
}
+ if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8 && c->txfm_mode == TX_MODE_SELECT) {
+ TX_SIZE sz = m->mbmi.txfm_size;
+ // FIXME(rbultje) code ternary symbol once all experiments are merged
+ vp9_write(bc, sz != TX_4X4, c->prob_tx[0]);
+ if (m->mbmi.sb_type >= BLOCK_SIZE_MB16X16 && sz != TX_4X4) {
+ vp9_write(bc, sz != TX_8X8, c->prob_tx[1]);
+ if (m->mbmi.sb_type >= BLOCK_SIZE_SB32X32 && sz != TX_8X8)
+ vp9_write(bc, sz != TX_16X16, c->prob_tx[2]);
+ }
+ }
+
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis);
const MB_PREDICTION_MODE L = xd->left_available ?
left_block_mode(m, 0) : DC_PRED;
- write_kf_bmode(bc, ym, c->kf_bmode_prob[A][L]);
- }
-
- if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+ write_intra_mode(bc, ym, c->kf_y_mode_prob[A][L]);
+ } else {
int idx, idy;
int bw = 1 << b_width_log2(m->mbmi.sb_type);
int bh = 1 << b_height_log2(m->mbmi.sb_type);
@@ -858,24 +835,16 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
const MB_PREDICTION_MODE A = above_block_mode(m, i, mis);
const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
left_block_mode(m, i) : DC_PRED;
- write_kf_bmode(bc, m->bmi[i].as_mode.first,
- c->kf_bmode_prob[A][L]);
+ const int bm = m->bmi[i].as_mode.first;
+#ifdef ENTROPY_STATS
+ ++intra_mode_stats[A][L][bm];
+#endif
+ write_intra_mode(bc, bm, c->kf_y_mode_prob[A][L]);
}
}
}
- write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
-
- if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8 && c->txfm_mode == TX_MODE_SELECT) {
- TX_SIZE sz = m->mbmi.txfm_size;
- // FIXME(rbultje) code ternary symbol once all experiments are merged
- vp9_write(bc, sz != TX_4X4, c->prob_tx[0]);
- if (m->mbmi.sb_type >= BLOCK_SIZE_MB16X16 && sz != TX_4X4) {
- vp9_write(bc, sz != TX_8X8, c->prob_tx[1]);
- if (m->mbmi.sb_type >= BLOCK_SIZE_SB32X32 && sz != TX_8X8)
- vp9_write(bc, sz != TX_16X16, c->prob_tx[2]);
- }
- }
+ write_intra_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
}
static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
@@ -1045,7 +1014,7 @@ static void print_prob_tree(vp9_coeff_probs *coef_probs, int block_types) {
fclose(f);
}
-static void build_tree_distribution(vp9_coeff_probs *coef_probs,
+static void build_tree_distribution(vp9_coeff_probs_model *coef_probs,
vp9_coeff_count *coef_counts,
unsigned int (*eob_branch_ct)[REF_TYPES]
[COEF_BANDS]
@@ -1060,6 +1029,7 @@ static void build_tree_distribution(vp9_coeff_probs *coef_probs,
#ifdef ENTROPY_STATS
int t = 0;
#endif
+ vp9_prob full_probs[ENTROPY_NODES];
for (i = 0; i < block_types; ++i) {
for (j = 0; j < REF_TYPES; ++j) {
@@ -1068,14 +1038,24 @@ static void build_tree_distribution(vp9_coeff_probs *coef_probs,
if (l >= 3 && k == 0)
continue;
vp9_tree_probs_from_distribution(vp9_coef_tree,
- coef_probs[i][j][k][l],
+ full_probs,
coef_branch_ct[i][j][k][l],
coef_counts[i][j][k][l], 0);
+ vpx_memcpy(coef_probs[i][j][k][l], full_probs,
+ sizeof(vp9_prob) * UNCONSTRAINED_NODES);
+#if CONFIG_BALANCED_COEFTREE
+ coef_branch_ct[i][j][k][l][1][1] = eob_branch_ct[i][j][k][l] -
+ coef_branch_ct[i][j][k][l][1][0];
+ coef_probs[i][j][k][l][1] =
+ get_binary_prob(coef_branch_ct[i][j][k][l][1][0],
+ coef_branch_ct[i][j][k][l][1][1]);
+#else
coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
coef_branch_ct[i][j][k][l][0][0];
coef_probs[i][j][k][l][0] =
get_binary_prob(coef_branch_ct[i][j][k][l][0][0],
coef_branch_ct[i][j][k][l][0][1]);
+#endif
#ifdef ENTROPY_STATS
if (!cpi->dummy_packing) {
for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
@@ -1127,7 +1107,7 @@ static void update_coef_probs_common(
#ifdef ENTROPY_STATS
vp9_coeff_stats *tree_update_hist,
#endif
- vp9_coeff_probs *new_frame_coef_probs,
+ vp9_coeff_probs_model *new_frame_coef_probs,
vp9_coeff_probs_model *old_frame_coef_probs,
vp9_coeff_stats *frame_branch_ct,
TX_SIZE tx_size) {
@@ -1136,7 +1116,6 @@ static void update_coef_probs_common(
int savings;
const int entropy_nodes_update = UNCONSTRAINED_NODES;
- // vp9_prob bestupd = find_coef_update_prob(cpi);
const int tstart = 0;
   /* dry run to see if there is any update at all needed */
@@ -1150,7 +1129,7 @@ static void update_coef_probs_common(
vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t];
const vp9_prob upd = vp9_coef_update_prob[t];
- int s; // = prev_coef_savings[t];
+ int s;
int u = 0;
if (l >= 3 && k == 0)
@@ -1192,11 +1171,10 @@ static void update_coef_probs_common(
vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
const vp9_prob upd = vp9_coef_update_prob[t];
- int s; // = prev_coef_savings[t];
+ int s;
int u = 0;
if (l >= 3 && k == 0)
continue;
-
if (t == PIVOT_NODE)
s = prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0],
@@ -1278,37 +1256,6 @@ static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) {
}
}
-#ifdef PACKET_TESTING
-FILE *vpxlogc = 0;
-#endif
-
-static void decide_kf_ymode_entropy(VP9_COMP *cpi) {
- int mode_cost[MB_MODE_COUNT];
- int bestcost = INT_MAX;
- int bestindex = 0;
- int i, j;
-
- for (i = 0; i < 8; i++) {
- int cost = 0;
-
- vp9_cost_tokens(mode_cost, cpi->common.kf_ymode_prob[i], vp9_kf_ymode_tree);
-
- for (j = 0; j < VP9_YMODES; j++)
- cost += mode_cost[j] * cpi->ymode_count[j];
-
- vp9_cost_tokens(mode_cost, cpi->common.sb_kf_ymode_prob[i],
- vp9_sb_ymode_tree);
- for (j = 0; j < VP9_I32X32_MODES; j++)
- cost += mode_cost[j] * cpi->sb_ymode_count[j];
-
- if (cost < bestcost) {
- bestindex = i;
- bestcost = cost;
- }
- }
- cpi->common.kf_ymode_probs_index = bestindex;
-
-}
static void segment_reference_frames(VP9_COMP *cpi) {
VP9_COMMON *oci = &cpi->common;
MODE_INFO *mi = oci->mi;
@@ -1417,9 +1364,6 @@ static void encode_segmentation(VP9_COMP *cpi, vp9_writer *w) {
// Segmentation map
vp9_write_bit(w, xd->update_mb_segmentation_map);
-#if CONFIG_IMPLICIT_SEGMENTATION
- vp9_write_bit(w, xd->allow_implicit_segment_update);
-#endif
if (xd->update_mb_segmentation_map) {
// Select the coding strategy (temporal or spatial)
vp9_choose_segmap_coding_method(cpi);
@@ -1482,60 +1426,67 @@ static void encode_segmentation(VP9_COMP *cpi, vp9_writer *w) {
}
}
-void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
- int i;
- VP9_HEADER oh;
- VP9_COMMON *const pc = &cpi->common;
- vp9_writer header_bc, residual_bc;
- MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- int extra_bytes_packed = 0;
-
- uint8_t *cx_data = dest;
+void write_uncompressed_header(VP9_COMMON *cm,
+ struct vp9_write_bit_buffer *wb) {
+ const int scaling_active = cm->width != cm->display_width ||
+ cm->height != cm->display_height;
- oh.show_frame = (int) pc->show_frame;
- oh.type = (int)pc->frame_type;
- oh.version = pc->version;
- oh.first_partition_length_in_bytes = 0;
+ vp9_wb_write_bit(wb, cm->frame_type);
+ vp9_wb_write_literal(wb, cm->version, 3);
+ vp9_wb_write_bit(wb, cm->show_frame);
+ vp9_wb_write_bit(wb, scaling_active);
+ vp9_wb_write_bit(wb, cm->subsampling_x);
+ vp9_wb_write_bit(wb, cm->subsampling_y);
- cx_data += 3;
+ if (cm->frame_type == KEY_FRAME) {
+ vp9_wb_write_literal(wb, SYNC_CODE_0, 8);
+ vp9_wb_write_literal(wb, SYNC_CODE_1, 8);
+ vp9_wb_write_literal(wb, SYNC_CODE_2, 8);
+ }
-#if defined(SECTIONBITS_OUTPUT)
- Sectionbits[active_section = 1] += sizeof(VP9_HEADER) * 8 * 256;
-#endif
+ if (scaling_active) {
+ vp9_wb_write_literal(wb, cm->display_width, 16);
+ vp9_wb_write_literal(wb, cm->display_height, 16);
+ }
- compute_update_table();
+ vp9_wb_write_literal(wb, cm->width, 16);
+ vp9_wb_write_literal(wb, cm->height, 16);
- /* every keyframe send startcode, width, height, scale factor, clamp
- * and color type.
- */
- if (oh.type == KEY_FRAME) {
- // Start / synch code
- cx_data[0] = 0x49;
- cx_data[1] = 0x83;
- cx_data[2] = 0x42;
- extra_bytes_packed = 3;
- cx_data += extra_bytes_packed;
+ if (!cm->show_frame) {
+ vp9_wb_write_bit(wb, cm->intra_only);
}
- if (pc->width != pc->display_width || pc->height != pc->display_height) {
- write_le16(cx_data, pc->display_width);
- write_le16(cx_data + 2, pc->display_height);
- cx_data += 4;
- extra_bytes_packed += 4;
+ vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LG2);
+ vp9_wb_write_bit(wb, cm->clr_type);
+
+ vp9_wb_write_bit(wb, cm->error_resilient_mode);
+ if (!cm->error_resilient_mode) {
+ vp9_wb_write_bit(wb, cm->reset_frame_context);
+ vp9_wb_write_bit(wb, cm->refresh_frame_context);
+ vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
}
+}
- write_le16(cx_data, pc->width);
- write_le16(cx_data + 2, pc->height);
- extra_bytes_packed += 4;
- cx_data += 4;
+void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
+ int i, bytes_packed;
+ VP9_COMMON *const pc = &cpi->common;
+ vp9_writer header_bc, residual_bc;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- vp9_start_encode(&header_bc, cx_data);
+ uint8_t *cx_data = dest;
+ struct vp9_write_bit_buffer wb = {dest, 0};
+ struct vp9_write_bit_buffer first_partition_size_wb;
+
+ write_uncompressed_header(pc, &wb);
+ first_partition_size_wb = wb;
+ vp9_wb_write_literal(&wb, 0, 16);  // first partition size is not known yet
- // TODO(jkoleszar): remove these two unused bits?
- vp9_write_bit(&header_bc, pc->clr_type);
+ bytes_packed = vp9_rb_bytes_written(&wb);
+ cx_data += bytes_packed;
- // error resilient mode
- vp9_write_bit(&header_bc, pc->error_resilient_mode);
+ compute_update_table();
+
+ vp9_start_encode(&header_bc, cx_data);
encode_loopfilter(pc, xd, &header_bc);
@@ -1617,14 +1568,6 @@ void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
vp9_write_literal(&header_bc, (pc->mcomp_filter_type), 2);
}
- if (!pc->error_resilient_mode) {
- vp9_write_bit(&header_bc, pc->refresh_frame_context);
- vp9_write_bit(&header_bc, pc->frame_parallel_decoding_mode);
- }
-
- vp9_write_literal(&header_bc, pc->frame_context_idx,
- NUM_FRAME_CONTEXTS_LG2);
-
#ifdef ENTROPY_STATS
if (pc->frame_type == INTER_FRAME)
active_section = 0;
@@ -1694,7 +1637,7 @@ void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
// changes in the bitstream.
if (pc->frame_type != KEY_FRAME) {
int i, j;
- int new_context[INTER_MODE_CONTEXTS][4];
+ int new_context[INTER_MODE_CONTEXTS][VP9_MVREFS - 1];
if (!cpi->dummy_packing) {
update_inter_mode_probs(pc, new_context);
} else {
@@ -1704,7 +1647,7 @@ void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
}
for (i = 0; i < INTER_MODE_CONTEXTS; i++) {
- for (j = 0; j < 4; j++) {
+ for (j = 0; j < VP9_MVREFS - 1; j++) {
if (new_context[i][j] != pc->fc.vp9_mode_contexts[i][j]) {
vp9_write(&header_bc, 1, 252);
vp9_write_prob(&header_bc, new_context[i][j]);
@@ -1731,10 +1674,8 @@ void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
vp9_copy(cpi->common.fc.pre_coef_probs_32x32,
cpi->common.fc.coef_probs_32x32);
- vp9_copy(cpi->common.fc.pre_sb_ymode_prob, cpi->common.fc.sb_ymode_prob);
- vp9_copy(cpi->common.fc.pre_ymode_prob, cpi->common.fc.ymode_prob);
+ vp9_copy(cpi->common.fc.pre_y_mode_prob, cpi->common.fc.y_mode_prob);
vp9_copy(cpi->common.fc.pre_uv_mode_prob, cpi->common.fc.uv_mode_prob);
- vp9_copy(cpi->common.fc.pre_bmode_prob, cpi->common.fc.bmode_prob);
vp9_copy(cpi->common.fc.pre_partition_prob, cpi->common.fc.partition_prob);
cpi->common.fc.pre_nmvc = cpi->common.fc.nmvc;
vp9_zero(cpi->common.fc.mv_ref_ct);
@@ -1750,11 +1691,7 @@ void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
vp9_write_prob(&header_bc, pc->mbskip_pred_probs[i]);
}
- if (pc->frame_type == KEY_FRAME) {
- if (!pc->kf_ymode_probs_update) {
- vp9_write_literal(&header_bc, pc->kf_ymode_probs_index, 3);
- }
- } else {
+ if (pc->frame_type != KEY_FRAME) {
// Update the probabilities used to encode reference frame data
update_ref_probs(cpi);
@@ -1820,35 +1757,11 @@ void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
vp9_stop_encode(&header_bc);
- oh.first_partition_length_in_bytes = header_bc.pos;
-
- /* update frame tag */
- {
- int scaling = (pc->width != pc->display_width ||
- pc->height != pc->display_height);
- int v = (oh.first_partition_length_in_bytes << 8) |
- (pc->subsampling_y << 7) |
- (pc->subsampling_x << 6) |
- (scaling << 5) |
- (oh.show_frame << 4) |
- (oh.version << 1) |
- oh.type;
-
- assert(oh.first_partition_length_in_bytes <= 0xffff);
- dest[0] = v;
- dest[1] = v >> 8;
- dest[2] = v >> 16;
- }
-
- *size = VP9_HEADER_SIZE + extra_bytes_packed + header_bc.pos;
- if (pc->frame_type == KEY_FRAME) {
- decide_kf_ymode_entropy(cpi);
- } else {
- /* This is not required if the counts in cpi are consistent with the
- * final packing pass */
- // if (!cpi->dummy_packing) vp9_zero(cpi->NMVcount);
- }
+ // first partition size
+ assert(header_bc.pos <= 0xffff);
+ vp9_wb_write_literal(&first_partition_size_wb, header_bc.pos, 16);
+ *size = bytes_packed + header_bc.pos;
{
int tile_row, tile_col, total_size = 0;
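The rewritten header path above packs the uncompressed frame header with a plain MSB-first bit writer and back-patches the 16-bit first-partition size once header_bc is finished (writing zeros into the placeholder first keeps the later rewrite safe). A minimal sketch of such a writer, assuming only the {bit_buffer, bit_offset} layout visible in the hunk; the real vp9_wb_write_* helpers are not shown in this section and may differ in detail:

    #include <stddef.h>
    #include <stdint.h>

    struct wb_sketch {
      uint8_t *bit_buffer;   /* output buffer, assumed zero-initialized */
      size_t bit_offset;     /* number of bits written so far */
    };

    static void wb_write_bit(struct wb_sketch *wb, int bit) {
      const size_t p = wb->bit_offset / 8;            /* byte index */
      const int q = 7 - (int)(wb->bit_offset % 8);    /* MSB-first bit slot */
      if (q == 7)
        wb->bit_buffer[p] = (uint8_t)(bit << q);      /* start a new byte */
      else
        wb->bit_buffer[p] |= (uint8_t)(bit << q);     /* OR into current byte */
      wb->bit_offset++;
    }

    static void wb_write_literal(struct wb_sketch *wb, int data, int bits) {
      int bit;
      for (bit = bits - 1; bit >= 0; bit--)           /* most significant bit first */
        wb_write_bit(wb, (data >> bit) & 1);
    }

Because the saved copy still points at the zeroed placeholder bits, re-running the literal write on that copy with the final header_bc.pos fills in the real value, which is how first_partition_size_wb is used above.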
diff --git a/vp9/encoder/vp9_block.h b/vp9/encoder/vp9_block.h
index 211eca4b4..e6d36cdf8 100644
--- a/vp9/encoder/vp9_block.h
+++ b/vp9/encoder/vp9_block.h
@@ -114,10 +114,9 @@ struct macroblock {
int *nmvsadcost_hp[2];
int **mvsadcost;
- int mbmode_cost[2][MB_MODE_COUNT];
+ int mbmode_cost[MB_MODE_COUNT];
int intra_uv_mode_cost[2][MB_MODE_COUNT];
- int bmode_costs[VP9_BINTRAMODES][VP9_BINTRAMODES][VP9_BINTRAMODES];
- int inter_bmode_costs[INTRA_MODE_COUNT];
+ int y_mode_costs[VP9_INTRA_MODES][VP9_INTRA_MODES][VP9_INTRA_MODES];
int switchable_interp_costs[VP9_SWITCHABLE_FILTERS + 1]
[VP9_SWITCHABLE_FILTERS];
@@ -134,7 +133,11 @@ struct macroblock {
unsigned char *active_ptr;
+ // note that token_costs is the cost when the eob node is skipped
vp9_coeff_count token_costs[TX_SIZE_MAX_SB][BLOCK_TYPES];
+#if CONFIG_BALANCED_COEFTREE
+ vp9_coeff_count token_costs_noskip[TX_SIZE_MAX_SB][BLOCK_TYPES];
+#endif
int optimize;
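The y_mode_costs and mbmode_cost tables declared above hold per-mode bit costs derived from the probability tables by vp9_cost_tokens(), which sums fixed-point branch costs along the intra-mode tree. A rough stand-in for the per-branch cost, assuming the usual 256-units-per-bit fixed-point scale (the real code uses a precomputed lookup table rather than floating point):

    #include <math.h>

    /* Approximate cost, in 1/256-bit units, of coding `bit` with an 8-bit
     * probability `prob` (probability of a zero, out of 256). */
    static int approx_bit_cost(int prob, int bit) {
      const double p = bit ? (256 - prob) / 256.0 : prob / 256.0;
      return (int)(-log2(p) * 256.0 + 0.5);
    }

Summing approx_bit_cost() over the branches that encode a given intra mode gives a number comparable to the entries vp9_init_mode_costs() fills into y_mode_costs[above][left][mode].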
diff --git a/vp9/encoder/vp9_boolhuff.c b/vp9/encoder/vp9_boolhuff.c
index 0fcb2579f..86143ca57 100644
--- a/vp9/encoder/vp9_boolhuff.c
+++ b/vp9/encoder/vp9_boolhuff.c
@@ -10,6 +10,7 @@
#include <assert.h>
#include "vp9/encoder/vp9_boolhuff.h"
+#include "vp9/common/vp9_entropy.h"
#if defined(SECTIONBITS_OUTPUT)
unsigned __int64 Sectionbits[500];
diff --git a/vp9/encoder/vp9_dct.c b/vp9/encoder/vp9_dct.c
index d22644424..8d4eec139 100644
--- a/vp9/encoder/vp9_dct.c
+++ b/vp9/encoder/vp9_dct.c
@@ -606,14 +606,13 @@ void vp9_short_walsh4x4_c(short *input, short *output, int pitch) {
c1 = ip[2 * pitch_short];
d1 = ip[3 * pitch_short];
- b1 = a1 - b1;
- c1 += d1;
- e1 = (c1 - b1) >> 1;
- a1 += e1;
- d1 -= e1;
- c1 = a1 - c1;
- b1 -= d1;
-
+ a1 += b1;
+ d1 = d1 - c1;
+ e1 = (a1 - d1) >> 1;
+ b1 = e1 - b1;
+ c1 = e1 - c1;
+ a1 -= c1;
+ d1 += b1;
op[0] = a1;
op[4] = c1;
op[8] = d1;
@@ -631,14 +630,13 @@ void vp9_short_walsh4x4_c(short *input, short *output, int pitch) {
c1 = ip[2];
d1 = ip[3];
- b1 = a1 - b1;
- c1 += d1;
- e1 = (c1 - b1) >> 1;
- a1 += e1;
- d1 -= e1;
- c1 = a1 - c1;
- b1 -= d1;
-
+ a1 += b1;
+ d1 -= c1;
+ e1 = (a1 - d1) >> 1;
+ b1 = e1 - b1;
+ c1 = e1 - c1;
+ a1 -= c1;
+ d1 += b1;
op[0] = a1 << WHT_UPSCALE_FACTOR;
op[1] = c1 << WHT_UPSCALE_FACTOR;
op[2] = d1 << WHT_UPSCALE_FACTOR;
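Both hunks above swap in the same rewritten 1-D butterfly for the column and row passes of vp9_short_walsh4x4_c(). Restated as a standalone helper (a paraphrase of the added lines only; the scatter into op[] and the WHT_UPSCALE_FACTOR shift stay with the caller, as in the hunks):

    /* One pass of the rewritten 4-point Walsh-Hadamard butterfly. */
    static void wht4_butterfly(int *a, int *b, int *c, int *d) {
      int a1 = *a, b1 = *b, c1 = *c, d1 = *d, e1;
      a1 += b1;              /* a1 = a + b */
      d1 -= c1;              /* d1 = d - c */
      e1 = (a1 - d1) >> 1;   /* shared half-sum */
      b1 = e1 - b1;
      c1 = e1 - c1;
      a1 -= c1;
      d1 += b1;
      *a = a1;
      *b = b1;
      *c = c1;
      *d = d1;
    }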
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index ebee191ad..a38c1ffd3 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -10,6 +10,7 @@
#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
@@ -97,6 +98,8 @@ static unsigned int alt_activity_measure(VP9_COMP *cpi,
return vp9_encode_intra(cpi, x, use_dc_pred);
}
+DECLARE_ALIGNED(16, static const uint8_t, vp9_64x64_zeros[64*64]) = { 0 };
+
// Measure the activity of the current macroblock
// What we measure here is TBD so abstracted to this function
@@ -332,7 +335,9 @@ static void update_state(VP9_COMP *cpi,
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *mi = &ctx->mic;
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
- int mb_mode = mi->mbmi.mode;
+#if CONFIG_DEBUG || CONFIG_INTERNAL_STATS
+ MB_PREDICTION_MODE mb_mode = mi->mbmi.mode;
+#endif
int mb_mode_index = ctx->best_mode_index;
const int mis = cpi->common.mode_info_stride;
const int bh = 1 << mi_height_log2(bsize), bw = 1 << mi_width_log2(bsize);
@@ -362,7 +367,8 @@ static void update_state(VP9_COMP *cpi,
ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
}
- if (mb_mode == SPLITMV) {
+ if (mbmi->ref_frame != INTRA_FRAME &&
+ mbmi->sb_type < BLOCK_SIZE_SB8X8) {
vpx_memcpy(x->partition_info, &ctx->partition_info,
sizeof(PARTITION_INFO));
@@ -448,7 +454,8 @@ static void update_state(VP9_COMP *cpi,
*/
// Note how often each mode chosen as best
cpi->mode_chosen_counts[mb_mode_index]++;
- if (mbmi->mode == SPLITMV || mbmi->mode == NEWMV) {
+ if (mbmi->ref_frame != INTRA_FRAME &&
+ (mbmi->sb_type < BLOCK_SIZE_SB8X8 || mbmi->mode == NEWMV)) {
int_mv best_mv, best_second_mv;
MV_REFERENCE_FRAME rf = mbmi->ref_frame;
best_mv.as_int = ctx->best_ref_mv.as_int;
@@ -768,6 +775,35 @@ static void restore_context(VP9_COMP *cpi, int mi_row, int mi_col,
vpx_memcpy(cm->left_seg_context + (mi_row & MI_MASK), sl,
sizeof(PARTITION_CONTEXT) * mh);
}
+static void save_context(VP9_COMP *cpi, int mi_row, int mi_col,
+ ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
+ ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
+ PARTITION_CONTEXT sa[8],
+ PARTITION_CONTEXT sl[8],
+ BLOCK_SIZE_TYPE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int p;
+ int bwl = b_width_log2(bsize), bw = 1 << bwl;
+ int bhl = b_height_log2(bsize), bh = 1 << bhl;
+ int mwl = mi_width_log2(bsize), mw = 1 << mwl;
+ int mhl = mi_height_log2(bsize), mh = 1 << mhl;
+
+ // buffer the above/left context information of the block in search.
+ for (p = 0; p < MAX_MB_PLANE; ++p) {
+ vpx_memcpy(a + bw * p, cm->above_context[p] +
+ (mi_col * 2 >> xd->plane[p].subsampling_x),
+ sizeof(ENTROPY_CONTEXT) * bw >> xd->plane[p].subsampling_x);
+ vpx_memcpy(l + bh * p, cm->left_context[p] +
+ ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
+ sizeof(ENTROPY_CONTEXT) * bh >> xd->plane[p].subsampling_y);
+ }
+ vpx_memcpy(sa, cm->above_seg_context + mi_col,
+ sizeof(PARTITION_CONTEXT) * mw);
+ vpx_memcpy(sl, cm->left_seg_context + (mi_row & MI_MASK),
+ sizeof(PARTITION_CONTEXT) * mh);
+}
static void encode_b(VP9_COMP *cpi, TOKENEXTRA **tp,
int mi_row, int mi_col, int output_enabled,
@@ -860,6 +896,337 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
}
}
+static void set_partitioning(VP9_COMP *cpi, MODE_INFO *m,
+ BLOCK_SIZE_TYPE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ const int mis = cm->mode_info_stride;
+ int bsl = b_width_log2(bsize);
+ int bs = (1 << bsl) / 2;  // block size in mode-info (8x8) units
+ int block_row, block_col;
+ int row, col;
+
+ // this test function sets the entire 64x64 superblock to the same bsize
+ for (block_row = 0; block_row < 8; block_row += bs) {
+ for (block_col = 0; block_col < 8; block_col += bs) {
+ for (row = 0; row < bs; row++) {
+ for (col = 0; col < bs; col++) {
+ m[(block_row+row)*mis + block_col+col].mbmi.sb_type = bsize;
+ }
+ }
+ }
+ }
+}
+
+static void set_block_size(VP9_COMMON *const cm,
+ MODE_INFO *m, BLOCK_SIZE_TYPE bsize, int mis,
+ int mi_row, int mi_col) {
+ int row, col;
+ int bsl = b_width_log2(bsize);
+ int bs = (1 << bsl) / 2;  // block size in mode-info (8x8) units
+ MODE_INFO *m2 = m + mi_row * mis + mi_col;
+ for (row = 0; row < bs; row++) {
+ for (col = 0; col < bs; col++) {
+ if (mi_row + row >= cm->mi_rows || mi_col + col >= cm->mi_cols)
+ return;
+ m2[row*mis+col].mbmi.sb_type = bsize;
+ }
+ }
+}
+typedef struct {
+ int64_t sum_square_error;
+ int64_t sum_error;
+ int count;
+ int variance;
+} var;
+
+#define VT(TYPE, BLOCKSIZE) \
+ typedef struct { \
+ var none; \
+ var horz[2]; \
+ var vert[2]; \
+ BLOCKSIZE split[4]; } TYPE;
+
+VT(v8x8, var)
+VT(v16x16, v8x8)
+VT(v32x32, v16x16)
+VT(v64x64, v32x32)
+
+typedef enum {
+ V16X16,
+ V32X32,
+ V64X64,
+} TREE_LEVEL;
+
+// Set variance values given sum square error, sum error, count.
+static void fill_variance(var *v, int64_t s2, int64_t s, int c) {
+ v->sum_square_error = s2;
+ v->sum_error = s;
+ v->count = c;
+ v->variance = 256
+ * (v->sum_square_error - v->sum_error * v->sum_error / v->count)
+ / v->count;
+}
+
+// Fills a 16x16 variance tree node with the four 8x8 SSE/sum results.
+static void fill_16x16_variance(const unsigned char *s, int sp,
+ const unsigned char *d, int dp, v16x16 *vt) {
+ unsigned int sse;
+ int sum;
+ vp9_get_sse_sum_8x8(s, sp, d, dp, &sse, &sum);
+ fill_variance(&vt->split[0].none, sse, sum, 64);
+ vp9_get_sse_sum_8x8(s + 8, sp, d + 8, dp, &sse, &sum);
+ fill_variance(&vt->split[1].none, sse, sum, 64);
+ vp9_get_sse_sum_8x8(s + 8 * sp, sp, d + 8 * dp, dp, &sse, &sum);
+ fill_variance(&vt->split[2].none, sse, sum, 64);
+ vp9_get_sse_sum_8x8(s + 8 * sp + 8, sp, d + 8 + 8 * dp, dp, &sse, &sum);
+ fill_variance(&vt->split[3].none, sse, sum, 64);
+}
+
+// Combine 2 variance structures by summing the sum_error, sum_square_error,
+// and counts and then calculating the new variance.
+void sum_2_variances(var *r, var *a, var *b) {
+ fill_variance(r, a->sum_square_error + b->sum_square_error,
+ a->sum_error + b->sum_error, a->count + b->count);
+}
+// Fill one level of the variance tree by summing the split-node sums into
+// the horizontal, vertical and none entries and recomputing each variance.
+#define fill_variance_tree(VT) \
+ sum_2_variances(VT.horz[0], VT.split[0].none, VT.split[1].none); \
+ sum_2_variances(VT.horz[1], VT.split[2].none, VT.split[3].none); \
+ sum_2_variances(VT.vert[0], VT.split[0].none, VT.split[2].none); \
+ sum_2_variances(VT.vert[1], VT.split[1].none, VT.split[3].none); \
+ sum_2_variances(VT.none, VT.vert[0], VT.vert[1]);
+
+// Set the blocksize in the macroblock info structure if the variance is less
+// than our threshold to one of none, horz, vert.
+#define set_vt_size(VT, BLOCKSIZE, R, C, ACTION) \
+ if (VT.none.variance < threshold) { \
+ set_block_size(cm, m, BLOCKSIZE, mis, R, C); \
+ ACTION; \
+ } \
+ if (VT.horz[0].variance < threshold && VT.horz[1].variance < threshold ) { \
+ set_block_size(cm, m, get_subsize(BLOCKSIZE, PARTITION_HORZ), mis, R, C); \
+ ACTION; \
+ } \
+ if (VT.vert[0].variance < threshold && VT.vert[1].variance < threshold ) { \
+ set_block_size(cm, m, get_subsize(BLOCKSIZE, PARTITION_VERT), mis, R, C); \
+ ACTION; \
+ }
+
+static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
+ int mi_col) {
+ VP9_COMMON * const cm = &cpi->common;
+ MACROBLOCK *x = &cpi->mb;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ const int mis = cm->mode_info_stride;
+ // TODO(JBB): More experimentation or testing of this threshold.
+ int64_t threshold = 4;
+ int i, j, k;
+ v64x64 vt;
+ unsigned char * s;
+ int sp;
+ const unsigned char * d = xd->plane[0].pre->buf;
+ int dp = xd->plane[0].pre->stride;
+
+ set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
+ s = x->plane[0].src.buf;
+ sp = x->plane[0].src.stride;
+
+ // TODO(JBB): Clearly the higher the quantizer the fewer partitions we want
+ // but this needs more experimentation.
+ threshold = threshold * cpi->common.base_qindex * cpi->common.base_qindex;
+
+ // if ( cm->frame_type == KEY_FRAME ) {
+ d = vp9_64x64_zeros;
+ dp = 64;
+ // }
+ // Fill in the entire tree of 8x8 variances for splits.
+ for (i = 0; i < 4; i++) {
+ const int x32_idx = ((i & 1) << 5);
+ const int y32_idx = ((i >> 1) << 5);
+ for (j = 0; j < 4; j++) {
+ const int x_idx = x32_idx + ((j & 1) << 4);
+ const int y_idx = y32_idx + ((j >> 1) << 4);
+ fill_16x16_variance(s + y_idx * sp + x_idx, sp, d + y_idx * dp + x_idx,
+ dp, &vt.split[i].split[j]);
+ }
+ }
+ // Fill the rest of the variance tree by summing the split partition
+ // values.
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 4; j++) {
+ fill_variance_tree(&vt.split[i].split[j])
+ }
+ fill_variance_tree(&vt.split[i])
+ }
+ fill_variance_tree(&vt)
+
+ // Now go through the entire structure, splitting every blocksize until
+ // we get to one that's got a variance lower than our threshold, or we
+ // hit 8x8.
+ set_vt_size(vt, BLOCK_SIZE_SB64X64, mi_row, mi_col, return);
+ for (i = 0; i < 4; ++i) {
+ const int x32_idx = ((i & 1) << 2);
+ const int y32_idx = ((i >> 1) << 2);
+ set_vt_size(vt, BLOCK_SIZE_SB32X32, mi_row + y32_idx, mi_col + x32_idx,
+ continue);
+
+ for (j = 0; j < 4; ++j) {
+ const int x16_idx = ((j & 1) << 1);
+ const int y16_idx = ((j >> 1) << 1);
+ set_vt_size(vt, BLOCK_SIZE_MB16X16, mi_row + y32_idx + y16_idx,
+ mi_col + x32_idx + x16_idx, continue);
+
+ for (k = 0; k < 4; ++k) {
+ const int x8_idx = (k & 1);
+ const int y8_idx = (k >> 1);
+ set_block_size(cm, m, BLOCK_SIZE_SB8X8, mis,
+ mi_row + y32_idx + y16_idx + y8_idx,
+ mi_col + x32_idx + x16_idx + x8_idx);
+ }
+ }
+ }
+}
+static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
+ int mi_row, int mi_col, BLOCK_SIZE_TYPE bsize,
+ int *rate, int *dist) {
+ VP9_COMMON * const cm = &cpi->common;
+ MACROBLOCK * const x = &cpi->mb;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ const int mis = cm->mode_info_stride;
+ int bwl, bhl;
+ int bsl = b_width_log2(bsize);
+ int bs = (1 << bsl);
+ int bss = (1 << bsl)/4;
+ int i, pl;
+ PARTITION_TYPE partition;
+ BLOCK_SIZE_TYPE subsize;
+ ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
+ PARTITION_CONTEXT sl[8], sa[8];
+ int r = 0, d = 0;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+
+ bwl = b_width_log2(m->mbmi.sb_type);
+ bhl = b_height_log2(m->mbmi.sb_type);
+
+ // parse the partition type
+ if ((bwl == bsl) && (bhl == bsl))
+ partition = PARTITION_NONE;
+ else if ((bwl == bsl) && (bhl < bsl))
+ partition = PARTITION_HORZ;
+ else if ((bwl < bsl) && (bhl == bsl))
+ partition = PARTITION_VERT;
+ else if ((bwl < bsl) && (bhl < bsl))
+ partition = PARTITION_SPLIT;
+ else
+ assert(0);
+
+ subsize = get_subsize(bsize, partition);
+
+ // TODO(JBB): this restriction is here because pick_sb_modes can return
+ // r's that are INT_MAX, meaning we can't select a mode / mv for this block.
+ // When the code is made to work for sizes below sb8x8 we need to come up
+ // with a solution to this problem.
+ assert(subsize >= BLOCK_SIZE_SB8X8);
+
+ if (bsize >= BLOCK_SIZE_SB8X8) {
+ xd->left_seg_context = cm->left_seg_context + (mi_row & MI_MASK);
+ xd->above_seg_context = cm->above_seg_context + mi_col;
+ *(get_sb_partitioning(x, bsize)) = subsize;
+ }
+
+ pl = partition_plane_context(xd, bsize);
+ save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ switch (partition) {
+ case PARTITION_NONE:
+ pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, bsize,
+ get_block_context(x, bsize));
+ r += x->partition_cost[pl][PARTITION_NONE];
+ break;
+ case PARTITION_HORZ:
+ *(get_sb_index(xd, subsize)) = 0;
+ pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, subsize,
+ get_block_context(x, subsize));
+ if (mi_row + (bs >> 1) <= cm->mi_rows) {
+ int rt, dt;
+ update_state(cpi, get_block_context(x, subsize), subsize, 0);
+ encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
+ *(get_sb_index(xd, subsize)) = 1;
+ pick_sb_modes(cpi, mi_row + (bs >> 2), mi_col, tp, &rt, &dt, subsize,
+ get_block_context(x, subsize));
+ r += rt;
+ d += dt;
+ }
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ r += x->partition_cost[pl][PARTITION_HORZ];
+ break;
+ case PARTITION_VERT:
+ *(get_sb_index(xd, subsize)) = 0;
+ pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, subsize,
+ get_block_context(x, subsize));
+ if (mi_col + (bs >> 1) <= cm->mi_cols) {
+ int rt, dt;
+ update_state(cpi, get_block_context(x, subsize), subsize, 0);
+ encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
+ *(get_sb_index(xd, subsize)) = 1;
+ pick_sb_modes(cpi, mi_row, mi_col + (bs >> 2), tp, &rt, &dt, subsize,
+ get_block_context(x, subsize));
+ r += rt;
+ d += dt;
+ }
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ r += x->partition_cost[pl][PARTITION_VERT];
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ break;
+ case PARTITION_SPLIT:
+ for (i = 0; i < 4; i++) {
+ int x_idx = (i & 1) * (bs >> 2);
+ int y_idx = (i >> 1) * (bs >> 2);
+ int jj = i >> 1, ii = i & 0x01;
+ int rt, dt;
+
+ if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
+ continue;
+
+ *(get_sb_index(xd, subsize)) = i;
+
+ rd_use_partition(cpi, m + jj * bss * mis + ii * bss, tp, mi_row + y_idx,
+ mi_col + x_idx, subsize, &rt, &dt);
+ r += rt;
+ d += dt;
+ }
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ r += x->partition_cost[pl][PARTITION_SPLIT];
+ break;
+ default:
+ assert(0);
+ }
+
+ // update partition context
+#if CONFIG_AB4X4
+ if (bsize >= BLOCK_SIZE_SB8X8 &&
+ (bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
+#else
+ if (bsize > BLOCK_SIZE_SB8X8
+ && (bsize == BLOCK_SIZE_MB16X16 || partition != PARTITION_SPLIT)) {
+#endif
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ update_partition_context(xd, subsize, bsize);
+ }
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+
+ if (r < INT_MAX && d < INT_MAX)
+ encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
+ *rate = r;
+ *dist = d;
+}
+
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previously rate-distortion optimization
@@ -876,7 +1243,7 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp,
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
TOKENEXTRA *tp_orig = *tp;
- int i, p, pl;
+ int i, pl;
BLOCK_SIZE_TYPE subsize;
int srate = INT_MAX, sdist = INT_MAX;
@@ -888,19 +1255,7 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp,
}
assert(mi_height_log2(bsize) == mi_width_log2(bsize));
- // buffer the above/left context information of the block in search.
- for (p = 0; p < MAX_MB_PLANE; ++p) {
- vpx_memcpy(a + bs * p, cm->above_context[p] +
- (mi_col * 2 >> xd->plane[p].subsampling_x),
- sizeof(ENTROPY_CONTEXT) * bs >> xd->plane[p].subsampling_x);
- vpx_memcpy(l + bs * p, cm->left_context[p] +
- ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
- sizeof(ENTROPY_CONTEXT) * bs >> xd->plane[p].subsampling_y);
- }
- vpx_memcpy(sa, cm->above_seg_context + mi_col,
- sizeof(PARTITION_CONTEXT) * ms);
- vpx_memcpy(sl, cm->left_seg_context + (mi_row & MI_MASK),
- sizeof(PARTITION_CONTEXT) * ms);
+ save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
// PARTITION_SPLIT
if (bsize >= BLOCK_SIZE_SB8X8) {
@@ -1028,6 +1383,8 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp,
*rate = srate;
*dist = sdist;
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+
if (srate < INT_MAX && sdist < INT_MAX)
encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
@@ -1053,8 +1410,22 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row,
for (mi_col = cm->cur_tile_mi_col_start;
mi_col < cm->cur_tile_mi_col_end; mi_col += 8) {
int dummy_rate, dummy_dist;
- rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
- &dummy_rate, &dummy_dist);
+ // TODO(JBB): remove the border conditions for 64x64 blocks once it's fixed;
+ // without this border check, choose_partitioning() will fail on every
+ // block at the border that is not a full 64x64.
+ if (cpi->speed < 5 ||
+ mi_col + 8 > cm->cur_tile_mi_col_end ||
+ mi_row + 8 > cm->cur_tile_mi_row_end) {
+ rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+ &dummy_rate, &dummy_dist);
+ } else {
+ const int idx_str = cm->mode_info_stride * mi_row + mi_col;
+ MODE_INFO *m = cm->mi + idx_str;
+ // set_partitioning(cpi, m, BLOCK_SIZE_SB8X8);
+ choose_partitioning(cpi, cm->mi, mi_row, mi_col);
+ rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+ &dummy_rate, &dummy_dist);
+ }
}
}
@@ -1093,11 +1464,9 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) {
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
vp9_zero(cpi->count_mb_ref_frame_usage)
- vp9_zero(cpi->bmode_count)
- vp9_zero(cpi->ymode_count)
+ vp9_zero(cpi->y_mode_count)
vp9_zero(cpi->y_uv_mode_count)
vp9_zero(cpi->common.fc.mv_ref_ct)
- vp9_zero(cpi->sb_ymode_count)
vp9_zero(cpi->partition_count);
// Note: this memset assumes above_context[0], [1] and [2]
@@ -1550,20 +1919,17 @@ static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
+ ++cpi->y_uv_mode_count[m][uvm];
if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
- ++cpi->sb_ymode_count[m];
+ ++cpi->y_mode_count[m];
} else {
- ++cpi->ymode_count[m];
- }
- ++cpi->y_uv_mode_count[m][uvm];
- if (m == I4X4_PRED) {
int idx, idy;
int bw = 1 << b_width_log2(xd->mode_info_context->mbmi.sb_type);
int bh = 1 << b_height_log2(xd->mode_info_context->mbmi.sb_type);
for (idy = 0; idy < 2; idy += bh) {
for (idx = 0; idx < 2; idx += bw) {
int m = xd->mode_info_context->bmi[idy * 2 + idx].as_mode.first;
- ++cpi->sb_ymode_count[m];
+ ++cpi->y_mode_count[m];
}
}
}
@@ -1627,7 +1993,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
- } else if (mbmi->mode == SPLITMV) {
+ } else if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
cpi->zbin_mode_boost = SPLIT_MV_ZBIN_BOOST;
} else {
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
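The new choose_partitioning() path added in this file builds a 64x64 variance tree from 8x8 SSE/sum pairs and keeps a block unsplit as soon as its variance drops below a threshold scaled by base_qindex squared. A self-contained sketch of the per-node statistic and the decision it feeds, using the same formula as fill_variance() above (names here are illustrative, not part of the patch):

    #include <stdint.h>

    typedef struct {
      int64_t sum_square_error;
      int64_t sum_error;
      int count;
      int variance;
    } var_sketch;

    /* variance = 256 * (sum_sq - sum*sum/count) / count, i.e. the residual
     * variance scaled by 256, computed in integer arithmetic. */
    static void fill_variance_sketch(var_sketch *v, int64_t s2, int64_t s, int c) {
      v->sum_square_error = s2;
      v->sum_error = s;
      v->count = c;
      v->variance = (int)(256 * (s2 - s * s / c) / c);
    }

    /* Mirrors the set_vt_size() test: a low-variance node stops splitting. */
    static int keep_unsplit(const var_sketch *v, int64_t threshold) {
      return v->variance < threshold;
    }

As in choose_partitioning(), the threshold starts at 4 and is multiplied by base_qindex twice, so flat, coarsely quantized areas settle on large partitions while detailed areas fall through to 8x8.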
diff --git a/vp9/encoder/vp9_encodeintra.c b/vp9/encoder/vp9_encodeintra.c
index 5d7c244f1..57041a90b 100644
--- a/vp9/encoder/vp9_encodeintra.c
+++ b/vp9/encoder/vp9_encodeintra.c
@@ -16,72 +16,18 @@
#include "vp9/common/vp9_invtrans.h"
#include "vp9/encoder/vp9_encodeintra.h"
-static void encode_intra4x4block(MACROBLOCK *x, int ib, BLOCK_SIZE_TYPE bs);
-
int vp9_encode_intra(VP9_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
(void) cpi;
-
+ mbmi->mode = DC_PRED;
+ mbmi->ref_frame = INTRA_FRAME;
if (use_16x16_pred) {
- mbmi->mode = DC_PRED;
- mbmi->uv_mode = DC_PRED;
- mbmi->ref_frame = INTRA_FRAME;
-
- vp9_encode_intra16x16mby(&cpi->common, x);
+ mbmi->txfm_size = TX_16X16;
+ vp9_encode_intra_block_y(&cpi->common, x, BLOCK_SIZE_MB16X16);
} else {
- int i;
-
- for (i = 0; i < 16; i++) {
- encode_intra4x4block(x, i, BLOCK_SIZE_MB16X16);
- }
+ mbmi->txfm_size = TX_4X4;
+ vp9_encode_intra_block_y(&cpi->common, x, BLOCK_SIZE_MB16X16);
}
return vp9_get_mb_ss(x->plane[0].src_diff);
}
-
-// This function is used only by the firstpass encoding.
-static void encode_intra4x4block(MACROBLOCK *x, int ib,
- BLOCK_SIZE_TYPE bsize) {
- MACROBLOCKD * const xd = &x->e_mbd;
- uint8_t* const src =
- raster_block_offset_uint8(xd, bsize, 0, ib,
- x->plane[0].src.buf, x->plane[0].src.stride);
- uint8_t* const dst =
- raster_block_offset_uint8(xd, bsize, 0, ib,
- xd->plane[0].dst.buf, xd->plane[0].dst.stride);
- int16_t* const src_diff =
- raster_block_offset_int16(xd, bsize, 0, ib,
- x->plane[0].src_diff);
- int16_t* const coeff = BLOCK_OFFSET(x->plane[0].coeff, ib, 16);
- const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
-
- assert(ib < (1 << (bwl + bhl)));
-
- vp9_intra4x4_predict(&x->e_mbd, ib, bsize, DC_PRED,
- dst, xd->plane[0].dst.stride);
- vp9_subtract_block(4, 4, src_diff, 4 << bwl,
- src, x->plane[0].src.stride,
- dst, xd->plane[0].dst.stride);
-
- x->fwd_txm4x4(src_diff, coeff, 8 << bwl);
- x->quantize_b_4x4(x, ib, DCT_DCT, 16);
- vp9_inverse_transform_b_4x4_add(&x->e_mbd, xd->plane[0].eobs[ib],
- BLOCK_OFFSET(xd->plane[0].dqcoeff, ib, 16),
- dst, xd->plane[0].dst.stride);
-}
-
-void vp9_encode_intra16x16mby(VP9_COMMON *const cm, MACROBLOCK *x) {
- MACROBLOCKD *xd = &x->e_mbd;
-
- vp9_build_intra_predictors_sby_s(xd, BLOCK_SIZE_MB16X16);
- vp9_encode_sby(cm, x, BLOCK_SIZE_MB16X16);
-}
-
-void vp9_encode_intra16x16mbuv(VP9_COMMON *const cm, MACROBLOCK *x) {
- MACROBLOCKD *xd = &x->e_mbd;
-
- vp9_build_intra_predictors_sbuv_s(xd, BLOCK_SIZE_MB16X16);
- vp9_encode_sbuv(cm, x, BLOCK_SIZE_MB16X16);
-}
-
-
diff --git a/vp9/encoder/vp9_encodeintra.h b/vp9/encoder/vp9_encodeintra.h
index 7da164c6a..14d144b74 100644
--- a/vp9/encoder/vp9_encodeintra.h
+++ b/vp9/encoder/vp9_encodeintra.h
@@ -14,8 +14,6 @@
#include "vp9/encoder/vp9_onyx_int.h"
int vp9_encode_intra(VP9_COMP *cpi, MACROBLOCK *x, int use_16x16_pred);
-void vp9_encode_intra16x16mby(VP9_COMMON *const cm, MACROBLOCK *x);
-void vp9_encode_intra16x16mbuv(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_encode_intra_block_y(VP9_COMMON *const cm, MACROBLOCK *mb,
BLOCK_SIZE_TYPE bs);
void vp9_encode_intra_block_uv(VP9_COMMON *const cm, MACROBLOCK *mb,
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index b7f60b127..98ea98031 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -20,6 +20,9 @@
#include "vp9/common/vp9_systemdependent.h"
#include "vp9_rtcd.h"
+DECLARE_ALIGNED(16, extern const uint8_t,
+ vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
+
void vp9_subtract_block(int rows, int cols,
int16_t *diff_ptr, int diff_stride,
const uint8_t *src_ptr, int src_stride,
@@ -105,7 +108,7 @@ static int trellis_get_coeff_context(const int *scan,
uint8_t *token_cache,
int pad, int l) {
int bak = token_cache[scan[idx]], pt;
- token_cache[scan[idx]] = token;
+ token_cache[scan[idx]] = vp9_pt_energy_class[token];
pt = vp9_get_coef_context(scan, nb, pad, token_cache, idx + 1, l);
token_cache[scan[idx]] = bak;
return pt;
@@ -189,7 +192,8 @@ static void optimize_b(VP9_COMMON *const cm, MACROBLOCK *mb,
*(tokens[eob] + 1) = *(tokens[eob] + 0);
next = eob;
for (i = 0; i < eob; i++)
- token_cache[scan[i]] = vp9_dct_value_tokens_ptr[qcoeff_ptr[scan[i]]].token;
+ token_cache[scan[i]] = vp9_pt_energy_class[vp9_dct_value_tokens_ptr[
+ qcoeff_ptr[scan[i]]].token];
nb = vp9_get_coef_neighbors_handle(scan, &pad);
for (i = eob; i-- > i0;) {
@@ -211,10 +215,21 @@ static void optimize_b(VP9_COMMON *const cm, MACROBLOCK *mb,
band = get_coef_band(band_translate, i + 1);
pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache,
pad, default_eob);
+#if CONFIG_BALANCED_COEFTREE
+ rate0 +=
+ mb->token_costs_noskip[tx_size][type][ref][band][pt]
+ [tokens[next][0].token];
+ rate1 +=
+ mb->token_costs_noskip[tx_size][type][ref][band][pt]
+ [tokens[next][1].token];
+#else
rate0 +=
- mb->token_costs[tx_size][type][ref][band][pt][tokens[next][0].token];
+ mb->token_costs[tx_size][type][ref][band][pt]
+ [tokens[next][0].token];
rate1 +=
- mb->token_costs[tx_size][type][ref][band][pt][tokens[next][1].token];
+ mb->token_costs[tx_size][type][ref][band][pt]
+ [tokens[next][1].token];
+#endif
}
UPDATE_RD_COST();
/* And pick the best. */
@@ -262,14 +277,32 @@ static void optimize_b(VP9_COMMON *const cm, MACROBLOCK *mb,
if (t0 != DCT_EOB_TOKEN) {
pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache,
pad, default_eob);
+#if CONFIG_BALANCED_COEFTREE
+ if (!x)
+ rate0 += mb->token_costs[tx_size][type][ref][band][pt][
+ tokens[next][0].token];
+ else
+ rate0 += mb->token_costs_noskip[tx_size][type][ref][band][pt][
+ tokens[next][0].token];
+#else
rate0 += mb->token_costs[tx_size][type][ref][band][pt][
tokens[next][0].token];
+#endif
}
if (t1 != DCT_EOB_TOKEN) {
pt = trellis_get_coeff_context(scan, nb, i, t1, token_cache,
pad, default_eob);
+#if CONFIG_BALANCED_COEFTREE
+ if (!x)
+ rate1 += mb->token_costs[tx_size][type][ref][band][pt][
+ tokens[next][1].token];
+ else
+ rate1 += mb->token_costs_noskip[tx_size][type][ref][band][pt][
+ tokens[next][1].token];
+#else
rate1 += mb->token_costs[tx_size][type][ref][band][pt][
tokens[next][1].token];
+#endif
}
}
@@ -322,8 +355,13 @@ static void optimize_b(VP9_COMMON *const cm, MACROBLOCK *mb,
error1 = tokens[next][1].error;
t0 = tokens[next][0].token;
t1 = tokens[next][1].token;
+#if CONFIG_BALANCED_COEFTREE
+ rate0 += mb->token_costs_noskip[tx_size][type][ref][band][pt][t0];
+ rate1 += mb->token_costs_noskip[tx_size][type][ref][band][pt][t1];
+#else
rate0 += mb->token_costs[tx_size][type][ref][band][pt][t0];
rate1 += mb->token_costs[tx_size][type][ref][band][pt][t1];
+#endif
UPDATE_RD_COST();
best = rd_cost1 < rd_cost0;
final_eob = i0 - 1;
@@ -610,6 +648,7 @@ static void encode_block_intra(int plane, int block, BLOCK_SIZE_TYPE bsize,
struct encode_b_args* const args = arg;
MACROBLOCK* const x = args->x;
MACROBLOCKD* const xd = &x->e_mbd;
+ MB_MODE_INFO* const mbmi = &xd->mode_info_context->mbmi;
const TX_SIZE tx_size = (TX_SIZE)(ss_txfrm_size / 2);
const int bw = 4 << (b_width_log2(bsize) - xd->plane[plane].subsampling_x);
const int raster_block = txfrm_block_to_raster_block(xd, bsize, plane,
@@ -634,9 +673,9 @@ static void encode_block_intra(int plane, int block, BLOCK_SIZE_TYPE bsize,
TX_TYPE tx_type;
int mode, b_mode;
- mode = plane == 0? xd->mode_info_context->mbmi.mode:
- xd->mode_info_context->mbmi.uv_mode;
- if (bsize <= BLOCK_SIZE_SB8X8 && mode == I4X4_PRED && plane == 0)
+ mode = plane == 0? mbmi->mode: mbmi->uv_mode;
+ if (mbmi->sb_type < BLOCK_SIZE_SB8X8 && plane == 0 &&
+ mbmi->ref_frame == INTRA_FRAME)
b_mode = xd->mode_info_context->bmi[ib].as_mode.first;
else
b_mode = mode;
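Under CONFIG_BALANCED_COEFTREE the trellis above (and cost_coeffs() in vp9_rdopt.c later in this patch) keeps two coefficient cost tables: token_costs, valid when the eob node can be skipped, and token_costs_noskip, charged for the first coefficient and whenever the previously coded coefficient is non-zero in the energy-class cache. A sketch of that selection rule, with costs / costs_noskip standing in for rows of the two tables (a hypothetical helper, not code from the patch):

    /* Pick the per-token cost row depending on whether the eob node must be
     * coded at position c of the scan. */
    static int coeff_token_cost(int c, const unsigned char *token_cache,
                                const int *scan, int token,
                                const unsigned int *costs,
                                const unsigned int *costs_noskip) {
      if (c == 0 || token_cache[scan[c - 1]])
        return (int)costs_noskip[token];   /* eob node cannot be skipped here */
      return (int)costs[token];            /* eob node already resolved */
    }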
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index 5ec696604..f57c8be6c 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -523,6 +523,7 @@ void vp9_first_pass(VP9_COMP *cpi) {
xd->left_available = (mb_col != 0);
xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_MB16X16;
+ xd->mode_info_context->mbmi.ref_frame = INTRA_FRAME;
// do intra 16x16 prediction
this_error = vp9_encode_intra(cpi, x, use_dc_pred);
@@ -619,6 +620,7 @@ void vp9_first_pass(VP9_COMP *cpi) {
this_error = motion_error;
vp9_set_mbmode_and_mvs(x, NEWMV, &mv);
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
+ xd->mode_info_context->mbmi.ref_frame = LAST_FRAME;
vp9_build_inter_predictors_sby(xd, mb_row << 1,
mb_col << 1,
BLOCK_SIZE_MB16X16);
diff --git a/vp9/encoder/vp9_modecosts.c b/vp9/encoder/vp9_modecosts.c
index 171b44bf9..099a04404 100644
--- a/vp9/encoder/vp9_modecosts.c
+++ b/vp9/encoder/vp9_modecosts.c
@@ -17,29 +17,23 @@
void vp9_init_mode_costs(VP9_COMP *c) {
VP9_COMMON *x = &c->common;
- const vp9_tree_p T = vp9_bmode_tree;
- const vp9_tree_p KT = vp9_bmode_tree;
+ const vp9_tree_p KT = vp9_intra_mode_tree;
int i, j;
- for (i = 0; i < VP9_BINTRAMODES; i++) {
- for (j = 0; j < VP9_BINTRAMODES; j++) {
- vp9_cost_tokens((int *)c->mb.bmode_costs[i][j],
- x->kf_bmode_prob[i][j], KT);
+ for (i = 0; i < VP9_INTRA_MODES; i++) {
+ for (j = 0; j < VP9_INTRA_MODES; j++) {
+ vp9_cost_tokens((int *)c->mb.y_mode_costs[i][j],
+ x->kf_y_mode_prob[i][j], KT);
}
}
- vp9_cost_tokens((int *)c->mb.inter_bmode_costs, x->fc.bmode_prob, T);
-
// TODO(rbultje) separate tables for superblock costing?
- vp9_cost_tokens(c->mb.mbmode_cost[1], x->fc.sb_ymode_prob,
- vp9_sb_ymode_tree);
- vp9_cost_tokens(c->mb.mbmode_cost[0],
- x->sb_kf_ymode_prob[c->common.kf_ymode_probs_index],
- vp9_sb_ymode_tree);
+ vp9_cost_tokens(c->mb.mbmode_cost, x->fc.y_mode_prob,
+ vp9_intra_mode_tree);
vp9_cost_tokens(c->mb.intra_uv_mode_cost[1],
- x->fc.uv_mode_prob[VP9_YMODES - 1], vp9_uv_mode_tree);
+ x->fc.uv_mode_prob[VP9_INTRA_MODES - 1], vp9_intra_mode_tree);
vp9_cost_tokens(c->mb.intra_uv_mode_cost[0],
- x->kf_uv_mode_prob[VP9_YMODES - 1], vp9_uv_mode_tree);
+ x->kf_uv_mode_prob[VP9_INTRA_MODES - 1], vp9_intra_mode_tree);
for (i = 0; i <= VP9_SWITCHABLE_FILTERS; ++i)
vp9_cost_tokens((int *)c->mb.switchable_interp_costs[i],
diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index 4bbb4152b..9c0609ed1 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -103,9 +103,9 @@ extern int skip_false_count;
#ifdef ENTROPY_STATS
-extern int intra_mode_stats[VP9_KF_BINTRAMODES]
- [VP9_KF_BINTRAMODES]
- [VP9_KF_BINTRAMODES];
+extern int intra_mode_stats[VP9_INTRA_MODES]
+ [VP9_INTRA_MODES]
+ [VP9_INTRA_MODES];
#endif
#ifdef NMV_STATS
@@ -258,9 +258,6 @@ void vp9_initialize_enc() {
init_done = 1;
}
}
-#ifdef PACKET_TESTING
-extern FILE *vpxlogc;
-#endif
static void setup_features(VP9_COMP *cpi) {
MACROBLOCKD *xd = &cpi->mb.e_mbd;
@@ -270,9 +267,6 @@ static void setup_features(VP9_COMP *cpi) {
xd->update_mb_segmentation_map = 0;
xd->update_mb_segmentation_data = 0;
-#if CONFIG_IMPLICIT_SEGMENTATION
- xd->allow_implicit_segment_update = 0;
-#endif
vpx_memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
vp9_clearall_segfeatures(xd);
@@ -287,7 +281,6 @@ static void setup_features(VP9_COMP *cpi) {
set_default_lf_deltas(cpi);
}
-
static void dealloc_compressor_data(VP9_COMP *cpi) {
// Delete segmentation map
vpx_free(cpi->segmentation_map);
@@ -358,9 +351,6 @@ static void configure_static_seg_features(VP9_COMP *cpi) {
vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
xd->update_mb_segmentation_map = 0;
xd->update_mb_segmentation_data = 0;
-#if CONFIG_IMPLICIT_SEGMENTATION
- xd->allow_implicit_segment_update = 0;
-#endif
cpi->static_mb_pct = 0;
// Disable segmentation
@@ -374,9 +364,6 @@ static void configure_static_seg_features(VP9_COMP *cpi) {
vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
xd->update_mb_segmentation_map = 0;
xd->update_mb_segmentation_data = 0;
-#if CONFIG_IMPLICIT_SEGMENTATION
- xd->allow_implicit_segment_update = 0;
-#endif
cpi->static_mb_pct = 0;
// Disable segmentation and individual segment features by default
@@ -475,53 +462,6 @@ static void configure_static_seg_features(VP9_COMP *cpi) {
}
}
-#if CONFIG_IMPLICIT_SEGMENTATION
-static double implict_seg_q_modifiers[MAX_MB_SEGMENTS] =
- {1.0, 0.95, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
-static void configure_implicit_segmentation(VP9_COMP *cpi, int frame_qindex) {
- VP9_COMMON *cm = &cpi->common;
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
- int i;
- int qi_delta;
- double q_baseline = vp9_convert_qindex_to_q(frame_qindex);
-
- // Set the flags to allow implicit segment update but disallow explicit update
- xd->segmentation_enabled = 1;
- xd->allow_implicit_segment_update = 1;
- xd->update_mb_segmentation_map = 0;
-
- // For key frames clear down the segment map to a default state.
- if (cm->frame_type == KEY_FRAME) {
- // Clear down the global segmentation map
- vpx_memset(cpi->segmentation_map, 0, (cm->mb_rows * cm->mb_cols));
-
- // Clear down the segment features.
- vp9_clearall_segfeatures(xd);
-
- xd->update_mb_segmentation_data = 0;
-
- // Update the segment data if it is an arf or non overlay gf.
- } else if (cpi->refresh_alt_ref_frame ||
- (cpi->refresh_golden_frame && !cpi->is_src_frame_alt_ref)) {
- xd->update_mb_segmentation_data = 1;
-
- // Enable use of q deltas on segments 1 and up
- // Segment 0 is treated as a neutral segment with no changes
- for (i = 1; i < MAX_MB_SEGMENTS; ++i) {
- qi_delta = compute_qdelta(cpi, q_baseline,
- implict_seg_q_modifiers[i] * q_baseline);
- vp9_set_segdata(xd, i, SEG_LVL_ALT_Q, qi_delta);
- vp9_enable_segfeature(xd, i, SEG_LVL_ALT_Q);
- }
-
- // Where relevant assume segment data is delta data
- xd->mb_segment_abs_delta = SEGMENT_DELTADATA;
- } else {
- xd->update_mb_segmentation_data = 0;
- }
-}
-#endif
-
#ifdef ENTROPY_STATS
void vp9_update_mode_context_stats(VP9_COMP *cpi) {
VP9_COMMON *cm = &cpi->common;
@@ -541,7 +481,7 @@ void vp9_update_mode_context_stats(VP9_COMP *cpi) {
// Add in the values for this frame
for (i = 0; i < INTER_MODE_CONTEXTS; i++) {
- for (j = 0; j < 4; j++) {
+ for (j = 0; j < VP9_MVREFS - 1; j++) {
mv_ref_stats[i][j][0] += (int64_t)mv_ref_ct[i][j][0];
mv_ref_stats[i][j][1] += (int64_t)mv_ref_ct[i][j][1];
}
@@ -558,12 +498,13 @@ void print_mode_context(VP9_COMP *cpi) {
int i, j;
fprintf(f, "#include \"vp9_entropy.h\"\n");
- fprintf(f, "const int vp9_mode_contexts[INTER_MODE_CONTEXTS][4] =");
+ fprintf(f,
+ "const int vp9_mode_contexts[INTER_MODE_CONTEXTS][VP9_MVREFS - 1] =");
fprintf(f, "{\n");
for (j = 0; j < INTER_MODE_CONTEXTS; j++) {
fprintf(f, " {/* %d */ ", j);
fprintf(f, " ");
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < VP9_MVREFS - 1; i++) {
int this_prob;
int64_t count = cpi->mv_ref_stats[j][i][0] + cpi->mv_ref_stats[j][i][1];
if (count)
@@ -700,6 +641,25 @@ static void set_rd_speed_thresholds(VP9_COMP *cpi, int mode, int speed) {
sf->thresh_mult[THR_COMP_SPLITGA ] += speed_multiplier * 4500;
sf->thresh_mult[THR_COMP_SPLITLG ] += speed_multiplier * 4500;
+ if (speed > 4) {
+ for (i = 0; i < MAX_MODES; ++i)
+ sf->thresh_mult[i] = INT_MAX;
+
+ sf->thresh_mult[THR_DC ] = 0;
+ sf->thresh_mult[THR_TM ] = 0;
+ sf->thresh_mult[THR_NEWMV ] = 4000;
+ sf->thresh_mult[THR_NEWG ] = 4000;
+ sf->thresh_mult[THR_NEWA ] = 4000;
+ sf->thresh_mult[THR_NEARESTMV] = 0;
+ sf->thresh_mult[THR_NEARESTG ] = 0;
+ sf->thresh_mult[THR_NEARESTA ] = 0;
+ sf->thresh_mult[THR_NEARMV ] = 2000;
+ sf->thresh_mult[THR_NEARG ] = 2000;
+ sf->thresh_mult[THR_NEARA ] = 2000;
+ sf->thresh_mult[THR_COMP_NEARESTLA] = 2000;
+ sf->recode_loop = 0;
+ }
+
/* disable frame modes if flags not set */
if (!(cpi->ref_frame_flags & VP9_LAST_FLAG)) {
sf->thresh_mult[THR_NEWMV ] = INT_MAX;
@@ -778,16 +738,12 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
sf->optimize_coefficients = !cpi->oxcf.lossless;
sf->first_step = 0;
sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
- sf->comp_inter_joint_serach = 1;
+ sf->comp_inter_joint_search = 1;
#if CONFIG_MULTIPLE_ARF
// Switch segmentation off.
sf->static_segmentation = 0;
#else
-#if CONFIG_IMPLICIT_SEGMENTATION
sf->static_segmentation = 0;
-#else
- sf->static_segmentation = 0;
-#endif
#endif
sf->mb16_breakout = 0;
@@ -801,55 +757,16 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
// Switch segmentation off.
sf->static_segmentation = 0;
#else
-#if CONFIG_IMPLICIT_SEGMENTATION
- sf->static_segmentation = 0;
-#else
sf->static_segmentation = 0;
#endif
-#endif
sf->mb16_breakout = 0;
if (speed > 0) {
- /* Disable coefficient optimization above speed 0 */
sf->optimize_coefficients = 0;
sf->no_skip_block4x4_search = 0;
- sf->comp_inter_joint_serach = 0;
-
+ sf->comp_inter_joint_search = 0;
sf->first_step = 1;
-
- cpi->mode_check_freq[THR_SPLITG] = 2;
- cpi->mode_check_freq[THR_SPLITA] = 2;
- cpi->mode_check_freq[THR_SPLITMV] = 0;
-
- cpi->mode_check_freq[THR_COMP_SPLITGA] = 2;
- cpi->mode_check_freq[THR_COMP_SPLITLG] = 2;
- cpi->mode_check_freq[THR_COMP_SPLITLA] = 0;
}
-
- if (speed > 1) {
- cpi->mode_check_freq[THR_SPLITG] = 4;
- cpi->mode_check_freq[THR_SPLITA] = 4;
- cpi->mode_check_freq[THR_SPLITMV] = 2;
-
- cpi->mode_check_freq[THR_COMP_SPLITGA] = 4;
- cpi->mode_check_freq[THR_COMP_SPLITLG] = 4;
- cpi->mode_check_freq[THR_COMP_SPLITLA] = 2;
- }
-
- if (speed > 2) {
- cpi->mode_check_freq[THR_SPLITG] = 15;
- cpi->mode_check_freq[THR_SPLITA] = 15;
- cpi->mode_check_freq[THR_SPLITMV] = 7;
-
- cpi->mode_check_freq[THR_COMP_SPLITGA] = 15;
- cpi->mode_check_freq[THR_COMP_SPLITLG] = 15;
- cpi->mode_check_freq[THR_COMP_SPLITLA] = 7;
-
- // Only do recode loop on key frames, golden frames and
- // alt ref frames
- sf->recode_loop = 2;
- }
-
break;
}; /* switch */
@@ -1194,6 +1111,7 @@ void vp9_change_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
cpi->refresh_golden_frame = 0;
cpi->refresh_last_frame = 1;
cm->refresh_frame_context = 1;
+ cm->reset_frame_context = 0;
setup_features(cpi);
cpi->mb.e_mbd.allow_high_precision_mv = 0; // Default mv precision adaptation
@@ -1756,18 +1674,18 @@ void vp9_remove_compressor(VP9_PTR *ptr) {
fprintf(fmode, "\n#include \"vp9_entropymode.h\"\n\n");
fprintf(fmode, "const unsigned int vp9_kf_default_bmode_counts ");
- fprintf(fmode, "[VP9_KF_BINTRAMODES][VP9_KF_BINTRAMODES]"
- "[VP9_KF_BINTRAMODES] =\n{\n");
+ fprintf(fmode, "[VP9_INTRA_MODES][VP9_INTRA_MODES]"
+ "[VP9_INTRA_MODES] =\n{\n");
- for (i = 0; i < VP9_KF_BINTRAMODES; i++) {
+ for (i = 0; i < VP9_INTRA_MODES; i++) {
fprintf(fmode, " { // Above Mode : %d\n", i);
- for (j = 0; j < VP9_KF_BINTRAMODES; j++) {
+ for (j = 0; j < VP9_INTRA_MODES; j++) {
fprintf(fmode, " {");
- for (k = 0; k < VP9_KF_BINTRAMODES; k++) {
+ for (k = 0; k < VP9_INTRA_MODES; k++) {
if (!intra_mode_stats[i][j][k])
fprintf(fmode, " %5d, ", 1);
else
@@ -2621,6 +2539,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
(cpi->oxcf.frame_parallel_decoding_mode != 0);
if (cm->error_resilient_mode) {
cm->frame_parallel_decoding_mode = 1;
+ cm->reset_frame_context = 0;
cm->refresh_frame_context = 0;
}
}
@@ -2931,16 +2850,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
}
}
-#if CONFIG_IMPLICIT_SEGMENTATION
- if (!cm->error_resilient_mode && !cpi->sf.static_segmentation) {
- configure_implicit_segmentation(cpi, q);
- }
-#endif
-
// transform / motion compensation build reconstruction frame
- if (cm->frame_type == KEY_FRAME) {
- vp9_default_coef_probs(cm);
- }
vp9_encode_frame(cpi);
@@ -3183,15 +3093,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
cpi->dummy_packing = 0;
vp9_pack_bitstream(cpi, dest, size);
-#if CONFIG_IMPLICIT_SEGMENTATION
- // Should we allow implicit update of the segment map.
- if (xd->allow_implicit_segment_update && !cm->error_resilient_mode) {
- vp9_implicit_segment_map_update(cm);
- // or has there been an explicit update
- } else if (xd->update_mb_segmentation_map) {
-#else
if (xd->update_mb_segmentation_map) {
-#endif
update_reference_segmentation_map(cpi);
}
@@ -3212,10 +3114,8 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
}
if (cpi->common.frame_type != KEY_FRAME) {
- vp9_copy(cpi->common.fc.sb_ymode_counts, cpi->sb_ymode_count);
- vp9_copy(cpi->common.fc.ymode_counts, cpi->ymode_count);
+ vp9_copy(cpi->common.fc.y_mode_counts, cpi->y_mode_count);
vp9_copy(cpi->common.fc.uv_mode_counts, cpi->y_uv_mode_count);
- vp9_copy(cpi->common.fc.bmode_counts, cpi->bmode_count);
vp9_copy(cpi->common.fc.partition_counts, cpi->partition_count);
cpi->common.fc.NMVcount = cpi->NMVcount;
if (!cpi->common.error_resilient_mode &&
@@ -3680,6 +3580,7 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags,
}
cm->show_frame = 0;
+ cm->intra_only = 0;
cpi->refresh_alt_ref_frame = 1;
cpi->refresh_golden_frame = 0;
cpi->refresh_last_frame = 0;
@@ -3884,6 +3785,7 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags,
cpi->droppable = !frame_is_reference(cpi);
// return to normal state
+ cm->reset_frame_context = 0;
cm->refresh_frame_context = 1;
cpi->refresh_alt_ref_frame = 0;
cpi->refresh_golden_frame = 0;
diff --git a/vp9/encoder/vp9_onyx_int.h b/vp9/encoder/vp9_onyx_int.h
index 15f9571bb..9e259762d 100644
--- a/vp9/encoder/vp9_onyx_int.h
+++ b/vp9/encoder/vp9_onyx_int.h
@@ -81,17 +81,15 @@ typedef struct {
vp9_coeff_probs_model coef_probs_16x16[BLOCK_TYPES];
vp9_coeff_probs_model coef_probs_32x32[BLOCK_TYPES];
- vp9_prob sb_ymode_prob[VP9_I32X32_MODES - 1];
- vp9_prob ymode_prob[VP9_YMODES - 1]; /* interframe intra mode probs */
- vp9_prob uv_mode_prob[VP9_YMODES][VP9_UV_MODES - 1];
- vp9_prob bmode_prob[VP9_BINTRAMODES - 1];
+ vp9_prob y_mode_prob[VP9_INTRA_MODES - 1]; /* interframe intra mode probs */
+ vp9_prob uv_mode_prob[VP9_INTRA_MODES][VP9_INTRA_MODES - 1];
vp9_prob partition_prob[NUM_PARTITION_CONTEXTS][PARTITION_TYPES - 1];
vp9_prob switchable_interp_prob[VP9_SWITCHABLE_FILTERS + 1]
[VP9_SWITCHABLE_FILTERS - 1];
- int mv_ref_ct[INTER_MODE_CONTEXTS][4][2];
- int vp9_mode_contexts[INTER_MODE_CONTEXTS][4];
+ int mv_ref_ct[INTER_MODE_CONTEXTS][VP9_MVREFS - 1][2];
+ int vp9_mode_contexts[INTER_MODE_CONTEXTS][VP9_MVREFS - 1];
} CODING_CONTEXT;
@@ -115,8 +113,7 @@ typedef struct {
double new_mv_count;
double duration;
double count;
-}
-FIRSTPASS_STATS;
+} FIRSTPASS_STATS;
typedef struct {
int frames_so_far;
@@ -128,7 +125,6 @@ typedef struct {
double frame_mvr_abs;
double frame_mvc;
double frame_mvc_abs;
-
} ONEPASS_FRAMESTATS;
typedef struct {
@@ -200,8 +196,7 @@ typedef enum {
THR_COMP_SPLITLG,
THR_COMP_SPLITLA,
THR_COMP_SPLITGA,
-}
-THR_MODES;
+} THR_MODES;
typedef enum {
DIAMOND = 0,
@@ -225,7 +220,7 @@ typedef struct {
int search_best_filter;
int mb16_breakout;
int static_segmentation;
- int comp_inter_joint_serach;
+ int comp_inter_joint_search;
} SPEED_FEATURES;
enum BlockSize {
@@ -336,8 +331,8 @@ typedef struct VP9_COMP {
int single_pred_count[COMP_PRED_CONTEXTS];
// FIXME contextualize
int txfm_count_32x32p[TX_SIZE_MAX_SB];
- int txfm_count_16x16p[TX_SIZE_MAX_MB];
- int txfm_count_8x8p[TX_SIZE_MAX_MB - 1];
+ int txfm_count_16x16p[TX_SIZE_MAX_SB - 1];
+ int txfm_count_8x8p[TX_SIZE_MAX_SB - 2];
int64_t rd_tx_select_diff[NB_TXFM_MODES];
int rd_tx_select_threshes[4][NB_TXFM_MODES];
@@ -413,28 +408,26 @@ typedef struct VP9_COMP {
int cq_target_quality;
- int sb_ymode_count [VP9_I32X32_MODES];
- int ymode_count[VP9_YMODES]; /* intra MB type cts this frame */
- int bmode_count[VP9_BINTRAMODES];
- int y_uv_mode_count[VP9_YMODES][VP9_UV_MODES];
+ int y_mode_count[VP9_INTRA_MODES];
+ int y_uv_mode_count[VP9_INTRA_MODES][VP9_INTRA_MODES];
unsigned int partition_count[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
nmv_context_counts NMVcount;
vp9_coeff_count coef_counts_4x4[BLOCK_TYPES];
- vp9_coeff_probs frame_coef_probs_4x4[BLOCK_TYPES];
+ vp9_coeff_probs_model frame_coef_probs_4x4[BLOCK_TYPES];
vp9_coeff_stats frame_branch_ct_4x4[BLOCK_TYPES];
vp9_coeff_count coef_counts_8x8[BLOCK_TYPES];
- vp9_coeff_probs frame_coef_probs_8x8[BLOCK_TYPES];
+ vp9_coeff_probs_model frame_coef_probs_8x8[BLOCK_TYPES];
vp9_coeff_stats frame_branch_ct_8x8[BLOCK_TYPES];
vp9_coeff_count coef_counts_16x16[BLOCK_TYPES];
- vp9_coeff_probs frame_coef_probs_16x16[BLOCK_TYPES];
+ vp9_coeff_probs_model frame_coef_probs_16x16[BLOCK_TYPES];
vp9_coeff_stats frame_branch_ct_16x16[BLOCK_TYPES];
vp9_coeff_count coef_counts_32x32[BLOCK_TYPES];
- vp9_coeff_probs frame_coef_probs_32x32[BLOCK_TYPES];
+ vp9_coeff_probs_model frame_coef_probs_32x32[BLOCK_TYPES];
vp9_coeff_stats frame_branch_ct_32x32[BLOCK_TYPES];
int gfu_boost;
@@ -619,7 +612,7 @@ typedef struct VP9_COMP {
#endif
#ifdef ENTROPY_STATS
- int64_t mv_ref_stats[INTER_MODE_CONTEXTS][4][2];
+ int64_t mv_ref_stats[INTER_MODE_CONTEXTS][VP9_MVREFS - 1][2];
#endif
} VP9_COMP;
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index cf1132e3a..f4426adaa 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -124,9 +124,7 @@ void vp9_save_coding_context(VP9_COMP *cpi) {
vp9_copy(cc->vp9_mode_contexts, cm->fc.vp9_mode_contexts);
- vp9_copy(cc->ymode_prob, cm->fc.ymode_prob);
- vp9_copy(cc->sb_ymode_prob, cm->fc.sb_ymode_prob);
- vp9_copy(cc->bmode_prob, cm->fc.bmode_prob);
+ vp9_copy(cc->y_mode_prob, cm->fc.y_mode_prob);
vp9_copy(cc->uv_mode_prob, cm->fc.uv_mode_prob);
vp9_copy(cc->partition_prob, cm->fc.partition_prob);
@@ -163,9 +161,7 @@ void vp9_restore_coding_context(VP9_COMP *cpi) {
vp9_copy(cm->fc.vp9_mode_contexts, cc->vp9_mode_contexts);
- vp9_copy(cm->fc.ymode_prob, cc->ymode_prob);
- vp9_copy(cm->fc.sb_ymode_prob, cc->sb_ymode_prob);
- vp9_copy(cm->fc.bmode_prob, cc->bmode_prob);
+ vp9_copy(cm->fc.y_mode_prob, cc->y_mode_prob);
vp9_copy(cm->fc.uv_mode_prob, cc->uv_mode_prob);
vp9_copy(cm->fc.partition_prob, cc->partition_prob);
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index aa0557735..862e72f24 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -46,6 +46,12 @@
/* Factor to weigh the rate for switchable interp filters */
#define SWITCHABLE_INTERP_RATE_FACTOR 1
+DECLARE_ALIGNED(16, extern const uint8_t,
+ vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
+
+#define I4X4_PRED 0x8000
+#define SPLITMV 0x10000
+
const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
{ZEROMV, LAST_FRAME, NONE},
{DC_PRED, INTRA_FRAME, NONE},
@@ -81,7 +87,7 @@ const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
{SPLITMV, GOLDEN_FRAME, NONE},
{SPLITMV, ALTREF_FRAME, NONE},
- {I4X4_PRED, INTRA_FRAME, NONE},
+ {I4X4_PRED, INTRA_FRAME, NONE},
/* compound prediction modes */
{ZEROMV, LAST_FRAME, GOLDEN_FRAME},
@@ -105,11 +111,31 @@ const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
{SPLITMV, GOLDEN_FRAME, ALTREF_FRAME},
};
+#if CONFIG_BALANCED_COEFTREE
+static void fill_token_costs(vp9_coeff_count *c,
+ vp9_coeff_count *cnoskip,
+ vp9_coeff_probs_model *p,
+ TX_SIZE tx_size) {
+ int i, j, k, l;
+ for (i = 0; i < BLOCK_TYPES; i++)
+ for (j = 0; j < REF_TYPES; j++)
+ for (k = 0; k < COEF_BANDS; k++)
+ for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
+ vp9_prob probs[ENTROPY_NODES];
+ vp9_model_to_full_probs(p[i][j][k][l], probs);
+ vp9_cost_tokens((int *)cnoskip[i][j][k][l], probs,
+ vp9_coef_tree);
+ // Replace the eob node prob with a very small value so that the
+ // cost approximately equals the cost without the eob node
+ probs[1] = 1;
+ vp9_cost_tokens((int *)c[i][j][k][l], probs, vp9_coef_tree);
+ }
+}
+#else
static void fill_token_costs(vp9_coeff_count *c,
vp9_coeff_probs_model *p,
TX_SIZE tx_size) {
int i, j, k, l;
-
for (i = 0; i < BLOCK_TYPES; i++)
for (j = 0; j < REF_TYPES; j++)
for (k = 0; k < COEF_BANDS; k++)
@@ -120,6 +146,7 @@ static void fill_token_costs(vp9_coeff_count *c,
vp9_coef_tree);
}
}
+#endif
static int rd_iifactor[32] = { 4, 4, 3, 2, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -210,6 +237,20 @@ void vp9_initialize_rd_consts(VP9_COMP *cpi, int qindex) {
}
}
+#if CONFIG_BALANCED_COEFTREE
+ fill_token_costs(cpi->mb.token_costs[TX_4X4],
+ cpi->mb.token_costs_noskip[TX_4X4],
+ cpi->common.fc.coef_probs_4x4, TX_4X4);
+ fill_token_costs(cpi->mb.token_costs[TX_8X8],
+ cpi->mb.token_costs_noskip[TX_8X8],
+ cpi->common.fc.coef_probs_8x8, TX_8X8);
+ fill_token_costs(cpi->mb.token_costs[TX_16X16],
+ cpi->mb.token_costs_noskip[TX_16X16],
+ cpi->common.fc.coef_probs_16x16, TX_16X16);
+ fill_token_costs(cpi->mb.token_costs[TX_32X32],
+ cpi->mb.token_costs_noskip[TX_32X32],
+ cpi->common.fc.coef_probs_32x32, TX_32X32);
+#else
fill_token_costs(cpi->mb.token_costs[TX_4X4],
cpi->common.fc.coef_probs_4x4, TX_4X4);
fill_token_costs(cpi->mb.token_costs[TX_8X8],
@@ -218,6 +259,7 @@ void vp9_initialize_rd_consts(VP9_COMP *cpi, int qindex) {
cpi->common.fc.coef_probs_16x16, TX_16X16);
fill_token_costs(cpi->mb.token_costs[TX_32X32],
cpi->common.fc.coef_probs_32x32, TX_32X32);
+#endif
for (i = 0; i < NUM_PARTITION_CONTEXTS; i++)
vp9_cost_tokens(cpi->mb.partition_cost[i],
@@ -225,7 +267,6 @@ void vp9_initialize_rd_consts(VP9_COMP *cpi, int qindex) {
vp9_partition_tree);
/*rough estimate for costing*/
- cpi->common.kf_ymode_probs_index = cpi->common.base_qindex >> 4;
vp9_init_mode_costs(cpi);
if (cpi->common.frame_type != KEY_FRAME) {
@@ -271,7 +312,13 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
TX_TYPE tx_type = DCT_DCT;
const int segment_id = xd->mode_info_context->mbmi.segment_id;
+#if CONFIG_BALANCED_COEFTREE
+ unsigned int (*token_costs_noskip)[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS] =
+ mb->token_costs_noskip[tx_size][type][ref];
+#else
vp9_prob coef_probs[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
+#endif
+
int seg_eob, default_eob;
uint8_t token_cache[1024];
const uint8_t * band_translate;
@@ -291,8 +338,10 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
get_tx_type_4x4(xd, block) : DCT_DCT;
above_ec = A[0] != 0;
left_ec = L[0] != 0;
+#if !CONFIG_BALANCED_COEFTREE
vp9_model_to_full_probs_sb(cm->fc.coef_probs_4x4[type][ref],
coef_probs);
+#endif
seg_eob = 16;
scan = get_scan_4x4(tx_type);
band_translate = vp9_coefband_trans_4x4;
@@ -307,8 +356,10 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
above_ec = (A[0] + A[1]) != 0;
left_ec = (L[0] + L[1]) != 0;
scan = get_scan_8x8(tx_type);
+#if !CONFIG_BALANCED_COEFTREE
vp9_model_to_full_probs_sb(cm->fc.coef_probs_8x8[type][ref],
coef_probs);
+#endif
seg_eob = 64;
band_translate = vp9_coefband_trans_8x8plus;
break;
@@ -320,8 +371,10 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
get_tx_type_16x16(xd, y + (x >> 2)) : DCT_DCT;
scan = get_scan_16x16(tx_type);
+#if !CONFIG_BALANCED_COEFTREE
vp9_model_to_full_probs_sb(cm->fc.coef_probs_16x16[type][ref],
coef_probs);
+#endif
seg_eob = 256;
above_ec = (A[0] + A[1] + A[2] + A[3]) != 0;
left_ec = (L[0] + L[1] + L[2] + L[3]) != 0;
@@ -330,8 +383,10 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
}
case TX_32X32:
scan = vp9_default_scan_32x32;
+#if !CONFIG_BALANCED_COEFTREE
vp9_model_to_full_probs_sb(cm->fc.coef_probs_32x32[type][ref],
coef_probs);
+#endif
seg_eob = 1024;
above_ec = (A[0] + A[1] + A[2] + A[3] + A[4] + A[5] + A[6] + A[7]) != 0;
left_ec = (L[0] + L[1] + L[2] + L[3] + L[4] + L[5] + L[6] + L[7]) != 0;
@@ -362,18 +417,30 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
if (c)
pt = vp9_get_coef_context(scan, nb, pad, token_cache, c, default_eob);
+#if CONFIG_BALANCED_COEFTREE
+ if (!c || token_cache[scan[c - 1]]) // do not skip eob
+ cost += token_costs_noskip[band][pt][t] + vp9_dct_value_cost_ptr[v];
+ else
+ cost += token_costs[band][pt][t] + vp9_dct_value_cost_ptr[v];
+#else
cost += token_costs[band][pt][t] + vp9_dct_value_cost_ptr[v];
-
if (!c || token_cache[scan[c - 1]])
cost += vp9_cost_bit(coef_probs[band][pt][0], 1);
- token_cache[scan[c]] = t;
+#endif
+ token_cache[scan[c]] = vp9_pt_energy_class[t];
}
if (c < seg_eob) {
if (c)
pt = vp9_get_coef_context(scan, nb, pad, token_cache, c, default_eob);
+#if CONFIG_BALANCED_COEFTREE
+ cost += mb->token_costs_noskip[tx_size][type][ref]
+ [get_coef_band(band_translate, c)]
+ [pt][DCT_EOB_TOKEN];
+#else
cost += mb->token_costs[tx_size][type][ref]
[get_coef_band(band_translate, c)]
[pt][DCT_EOB_TOKEN];
+#endif
}
}
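In cost_coeffs the balanced-coeftree path then chooses between those two tables per coefficient: the first coefficient of a block, and any coefficient whose scan-order predecessor was non-zero, cannot skip the EOB decision and are charged from the noskip table. A small sketch of that selection rule, using flat illustrative arrays instead of the real band/context-indexed tables:

/* Illustrative selection rule only; the real tables are also indexed by
 * transform size, plane type, reference type, band and context. */
static int coeff_token_cost(const int *costs_noskip, const int *costs_skip,
                            const unsigned char *token_cache, const int *scan,
                            int c, int token) {
  /* Position 0, and any position following a non-zero coefficient, must pay
   * for the end-of-block decision, so it uses the "noskip" table. */
  const int cannot_skip_eob = (c == 0) || (token_cache[scan[c - 1]] != 0);
  return cannot_skip_eob ? costs_noskip[token] : costs_skip[token];
}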
@@ -556,9 +623,25 @@ static void super_block_yrd(VP9_COMP *cpi,
int64_t txfm_cache[NB_TXFM_MODES]) {
VP9_COMMON *const cm = &cpi->common;
int r[TX_SIZE_MAX_SB][2], d[TX_SIZE_MAX_SB], s[TX_SIZE_MAX_SB];
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
vp9_subtract_sby(x, bs);
+ if (cpi->speed > 4) {
+ if (bs >= BLOCK_SIZE_SB32X32) {
+ mbmi->txfm_size = TX_32X32;
+ } else if (bs >= BLOCK_SIZE_MB16X16) {
+ mbmi->txfm_size = TX_16X16;
+ } else if (bs >= BLOCK_SIZE_SB8X8) {
+ mbmi->txfm_size = TX_8X8;
+ } else {
+ mbmi->txfm_size = TX_4X4;
+ }
+ super_block_yrd_for_txfm(cm, x, rate, distortion, skip, bs,
+ mbmi->txfm_size);
+ return;
+ }
if (bs >= BLOCK_SIZE_SB32X32)
super_block_yrd_for_txfm(cm, x, &r[TX_32X32][0], &d[TX_32X32], &s[TX_32X32],
bs, TX_32X32);
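At speed > 4 the shortcut added above skips the per-transform-size RD search and simply takes the largest transform that fits the block. A standalone sketch of that mapping, with simplified stand-in enums for the block and transform sizes:

/* Stand-in enums; the real BLOCK_SIZE_* / TX_* values live in vp9 headers. */
enum block_size { B4X4, SB8X8, MB16X16, SB32X32, SB64X64 };
enum tx_size    { TX4X4, TX8X8, TX16X16, TX32X32 };

static enum tx_size fast_tx_size(enum block_size bs) {
  if (bs >= SB32X32) return TX32X32;
  if (bs >= MB16X16) return TX16X16;
  if (bs >= SB8X8)   return TX8X8;
  return TX4X4;
}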
@@ -611,11 +694,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
int64_t this_rd;
int ratey = 0;
- xd->mode_info_context->bmi[ib].as_mode.first = mode;
- if (cm->frame_type == KEY_FRAME)
- rate = bmode_costs[mode];
- else
- rate = x->mbmode_cost[cm->frame_type][mode];
+ rate = bmode_costs[mode];
distortion = 0;
vpx_memcpy(tempa, ta, sizeof(ta));
@@ -653,9 +732,6 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
distortion += vp9_block_error(coeff, BLOCK_OFFSET(xd->plane[0].dqcoeff,
block, 16), 16) >> 2;
- vp9_intra4x4_predict(xd, block, BLOCK_SIZE_SB8X8, mode,
- dst, xd->plane[0].dst.stride);
-
if (best_tx_type != DCT_DCT)
vp9_short_iht4x4_add(BLOCK_OFFSET(xd->plane[0].dqcoeff, block, 16),
dst, xd->plane[0].dst.stride, best_tx_type);
@@ -726,16 +802,15 @@ static int64_t rd_pick_intra4x4mby_modes(VP9_COMP *cpi, MACROBLOCK *mb,
int64_t total_rd = 0;
ENTROPY_CONTEXT t_above[4], t_left[4];
int *bmode_costs;
+ MODE_INFO *const mic = xd->mode_info_context;
vpx_memcpy(t_above, xd->plane[0].above_context, sizeof(t_above));
vpx_memcpy(t_left, xd->plane[0].left_context, sizeof(t_left));
- xd->mode_info_context->mbmi.mode = I4X4_PRED;
- bmode_costs = mb->inter_bmode_costs;
+ bmode_costs = mb->mbmode_cost;
for (idy = 0; idy < 2; idy += bh) {
for (idx = 0; idx < 2; idx += bw) {
- MODE_INFO *const mic = xd->mode_info_context;
const int mis = xd->mode_info_stride;
MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry);
@@ -747,7 +822,7 @@ static int64_t rd_pick_intra4x4mby_modes(VP9_COMP *cpi, MACROBLOCK *mb,
const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
left_block_mode(mic, i) : DC_PRED;
- bmode_costs = mb->bmode_costs[A][L];
+ bmode_costs = mb->y_mode_costs[A][L];
}
total_rd += rd_pick_intra4x4block(cpi, mb, i, &best_mode, bmode_costs,
@@ -774,6 +849,7 @@ static int64_t rd_pick_intra4x4mby_modes(VP9_COMP *cpi, MACROBLOCK *mb,
*Rate = cost;
*rate_y = tot_rate_y;
*Distortion = distortion;
+ xd->mode_info_context->mbmi.mode = mic->bmi[3].as_mode.first;
return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
}
@@ -785,12 +861,13 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
int64_t txfm_cache[NB_TXFM_MODES]) {
MB_PREDICTION_MODE mode;
MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
- MACROBLOCKD *xd = &x->e_mbd;
+ MACROBLOCKD *const xd = &x->e_mbd;
int this_rate, this_rate_tokenonly;
int this_distortion, s;
int64_t best_rd = INT64_MAX, this_rd;
TX_SIZE UNINITIALIZED_IS_SAFE(best_tx);
int i;
+ int *bmode_costs = x->mbmode_cost;
if (bsize < BLOCK_SIZE_SB8X8) {
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
@@ -805,17 +882,19 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
int64_t local_txfm_cache[NB_TXFM_MODES];
MODE_INFO *const mic = xd->mode_info_context;
const int mis = xd->mode_info_stride;
- const MB_PREDICTION_MODE A = above_block_mode(mic, 0, mis);
- const MB_PREDICTION_MODE L = xd->left_available ?
- left_block_mode(mic, 0) : DC_PRED;
-
- int *bmode_costs = x->bmode_costs[A][L];
+ if (cpi->common.frame_type == KEY_FRAME) {
+ const MB_PREDICTION_MODE A = above_block_mode(mic, 0, mis);
+ const MB_PREDICTION_MODE L = xd->left_available ?
+ left_block_mode(mic, 0) : DC_PRED;
+ bmode_costs = x->y_mode_costs[A][L];
+ }
x->e_mbd.mode_info_context->mbmi.mode = mode;
vp9_build_intra_predictors_sby_s(&x->e_mbd, bsize);
super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
bsize, local_txfm_cache);
+
this_rate = this_rate_tokenonly + bmode_costs[mode];
this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
@@ -925,10 +1004,10 @@ int vp9_cost_mv_ref(VP9_COMP *cpi,
VP9_COMMON *pc = &cpi->common;
vp9_prob p [VP9_MVREFS - 1];
- assert(NEARESTMV <= m && m <= SPLITMV);
+ assert(NEARESTMV <= m && m <= NEWMV);
vp9_mv_ref_probs(pc, p, mode_context);
- return cost_token(vp9_mv_ref_tree, p,
- vp9_mv_ref_encoding_array - NEARESTMV + m);
+ return cost_token(vp9_sb_mv_ref_tree, p,
+ vp9_sb_mv_ref_encoding_array - NEARESTMV + m);
} else
return 0;
}
@@ -938,19 +1017,18 @@ void vp9_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) {
x->e_mbd.mode_info_context->mbmi.mv[0].as_int = mv->as_int;
}
-static int labels2mode(MACROBLOCK *x,
- int const *labelings, int which_label,
+static int labels2mode(MACROBLOCK *x, int i,
MB_PREDICTION_MODE this_mode,
int_mv *this_mv, int_mv *this_second_mv,
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
- int_mv seg_mvs[MAX_REF_FRAMES - 1],
+ int_mv seg_mvs[MAX_REF_FRAMES],
int_mv *best_ref_mv,
int_mv *second_best_ref_mv,
int *mvjcost, int *mvcost[2], VP9_COMP *cpi) {
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *const mic = xd->mode_info_context;
MB_MODE_INFO * mbmi = &mic->mbmi;
- int i, cost = 0, thismvcost = 0;
+ int cost = 0, thismvcost = 0;
int idx, idy;
int bw = 1 << b_width_log2(mbmi->sb_type);
int bh = 1 << b_height_log2(mbmi->sb_type);
@@ -958,72 +1036,61 @@ static int labels2mode(MACROBLOCK *x,
/* We have to be careful retrieving previously-encoded motion vectors.
Ones from this macroblock have to be pulled from the BLOCKD array
as they have not yet made it to the bmi array in our MB_MODE_INFO. */
- for (i = 0; i < 4; ++i) {
- MB_PREDICTION_MODE m;
-
- if (labelings[i] != which_label)
- continue;
+ MB_PREDICTION_MODE m;
- {
- // the only time we should do costing for new motion vector or mode
- // is when we are on a new label (jbb May 08, 2007)
- switch (m = this_mode) {
- case NEWMV:
- if (mbmi->second_ref_frame > 0) {
- this_mv->as_int = seg_mvs[mbmi->ref_frame - 1].as_int;
- this_second_mv->as_int =
- seg_mvs[mbmi->second_ref_frame - 1].as_int;
- }
-
- thismvcost = vp9_mv_bit_cost(this_mv, best_ref_mv, mvjcost, mvcost,
- 102, xd->allow_high_precision_mv);
- if (mbmi->second_ref_frame > 0) {
- thismvcost += vp9_mv_bit_cost(this_second_mv, second_best_ref_mv,
- mvjcost, mvcost, 102,
- xd->allow_high_precision_mv);
- }
- break;
- case NEARESTMV:
- this_mv->as_int = frame_mv[NEARESTMV][mbmi->ref_frame].as_int;
- if (mbmi->second_ref_frame > 0)
- this_second_mv->as_int =
- frame_mv[NEARESTMV][mbmi->second_ref_frame].as_int;
- break;
- case NEARMV:
- this_mv->as_int = frame_mv[NEARMV][mbmi->ref_frame].as_int;
- if (mbmi->second_ref_frame > 0)
- this_second_mv->as_int =
- frame_mv[NEARMV][mbmi->second_ref_frame].as_int;
- break;
- case ZEROMV:
- this_mv->as_int = 0;
- if (mbmi->second_ref_frame > 0)
- this_second_mv->as_int = 0;
- break;
- default:
- break;
+ // the only time we should do costing for new motion vector or mode
+ // is when we are on a new label (jbb May 08, 2007)
+ switch (m = this_mode) {
+ case NEWMV:
+ this_mv->as_int = seg_mvs[mbmi->ref_frame].as_int;
+ thismvcost = vp9_mv_bit_cost(this_mv, best_ref_mv, mvjcost, mvcost,
+ 102, xd->allow_high_precision_mv);
+ if (mbmi->second_ref_frame > 0) {
+ this_second_mv->as_int = seg_mvs[mbmi->second_ref_frame].as_int;
+ thismvcost += vp9_mv_bit_cost(this_second_mv, second_best_ref_mv,
+ mvjcost, mvcost, 102,
+ xd->allow_high_precision_mv);
}
+ break;
+ case NEARESTMV:
+ this_mv->as_int = frame_mv[NEARESTMV][mbmi->ref_frame].as_int;
+ if (mbmi->second_ref_frame > 0)
+ this_second_mv->as_int =
+ frame_mv[NEARESTMV][mbmi->second_ref_frame].as_int;
+ break;
+ case NEARMV:
+ this_mv->as_int = frame_mv[NEARMV][mbmi->ref_frame].as_int;
+ if (mbmi->second_ref_frame > 0)
+ this_second_mv->as_int =
+ frame_mv[NEARMV][mbmi->second_ref_frame].as_int;
+ break;
+ case ZEROMV:
+ this_mv->as_int = 0;
+ if (mbmi->second_ref_frame > 0)
+ this_second_mv->as_int = 0;
+ break;
+ default:
+ break;
+ }
- cost = vp9_cost_mv_ref(cpi, this_mode,
- mbmi->mb_mode_context[mbmi->ref_frame]);
- }
+ cost = vp9_cost_mv_ref(cpi, this_mode,
+ mbmi->mb_mode_context[mbmi->ref_frame]);
- mic->bmi[i].as_mv[0].as_int = this_mv->as_int;
- if (mbmi->second_ref_frame > 0)
- mic->bmi[i].as_mv[1].as_int = this_second_mv->as_int;
+ mic->bmi[i].as_mv[0].as_int = this_mv->as_int;
+ if (mbmi->second_ref_frame > 0)
+ mic->bmi[i].as_mv[1].as_int = this_second_mv->as_int;
- x->partition_info->bmi[i].mode = m;
- x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
- if (mbmi->second_ref_frame > 0)
- x->partition_info->bmi[i].second_mv.as_int = this_second_mv->as_int;
- for (idy = 0; idy < bh; ++idy) {
- for (idx = 0; idx < bw; ++idx) {
- vpx_memcpy(&mic->bmi[i + idy * 2 + idx],
- &mic->bmi[i], sizeof(mic->bmi[i]));
- vpx_memcpy(&x->partition_info->bmi[i + idy * 2 + idx],
- &x->partition_info->bmi[i],
- sizeof(x->partition_info->bmi[i]));
- }
+ x->partition_info->bmi[i].mode = m;
+ x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
+ if (mbmi->second_ref_frame > 0)
+ x->partition_info->bmi[i].second_mv.as_int = this_second_mv->as_int;
+ for (idy = 0; idy < bh; ++idy) {
+ for (idx = 0; idx < bw; ++idx) {
+ vpx_memcpy(&mic->bmi[i + idy * 2 + idx],
+ &mic->bmi[i], sizeof(mic->bmi[i]));
+ vpx_memcpy(&x->partition_info->bmi[i + idy * 2 + idx],
+ &x->partition_info->bmi[i],
+ sizeof(x->partition_info->bmi[i]));
}
}
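The copy loop above propagates the mode and motion vectors chosen for sub-block i to every 4x4 position it covers inside the 8x8 unit. A standalone sketch of the same raster replication over a simplified per-position record:

/* Simplified per-4x4 record; the real code copies mic->bmi[] and
 * x->partition_info->bmi[] entries. */
struct sub_info { int mode; int mv; };

static void broadcast_sub_info(struct sub_info *bmi, int i, int bw, int bh) {
  int idy, idx;
  for (idy = 0; idy < bh; ++idy)
    for (idx = 0; idx < bw; ++idx)
      bmi[i + idy * 2 + idx] = bmi[i];   /* 2 = 4x4 units per 8x8 row */
}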
@@ -1033,90 +1100,86 @@ static int labels2mode(MACROBLOCK *x,
static int64_t encode_inter_mb_segment(VP9_COMMON *const cm,
MACROBLOCK *x,
- int const *labels,
- int which_label,
+ int i,
int *labelyrate,
int *distortion,
ENTROPY_CONTEXT *ta,
ENTROPY_CONTEXT *tl) {
- int i, k;
+ int k;
MACROBLOCKD *xd = &x->e_mbd;
BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
int bwl = b_width_log2(bsize), bw = 1 << bwl;
int bhl = b_height_log2(bsize), bh = 1 << bhl;
int idx, idy;
+ const int src_stride = x->plane[0].src.stride;
+ uint8_t* const src =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+ x->plane[0].src.buf, src_stride);
+ int16_t* src_diff =
+ raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, i,
+ x->plane[0].src_diff);
+ int16_t* coeff = BLOCK_OFFSET(x->plane[0].coeff, 16, i);
+ uint8_t* const pre =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+ xd->plane[0].pre[0].buf,
+ xd->plane[0].pre[0].stride);
+ uint8_t* const dst =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+ xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride);
+ int thisdistortion = 0;
+ int thisrate = 0;
*labelyrate = 0;
*distortion = 0;
- for (i = 0; i < 4; i++) {
- if (labels[i] == which_label) {
- const int src_stride = x->plane[0].src.stride;
- uint8_t* const src =
- raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
- x->plane[0].src.buf, src_stride);
- int16_t* src_diff =
- raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, i,
- x->plane[0].src_diff);
- int16_t* coeff = BLOCK_OFFSET(x->plane[0].coeff, 16, i);
- uint8_t* const pre =
- raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
- xd->plane[0].pre[0].buf,
- xd->plane[0].pre[0].stride);
- uint8_t* const dst =
- raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
- xd->plane[0].dst.buf,
- xd->plane[0].dst.stride);
- int thisdistortion = 0;
- int thisrate = 0;
-
- vp9_build_inter_predictor(pre,
- xd->plane[0].pre[0].stride,
- dst,
- xd->plane[0].dst.stride,
- &xd->mode_info_context->bmi[i].as_mv[0],
- &xd->scale_factor[0],
- 4 * bw, 4 * bh, 0 /* no avg */, &xd->subpix);
-
- // TODO(debargha): Make this work properly with the
- // implicit-compoundinter-weight experiment when implicit
- // weighting for splitmv modes is turned on.
- if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
- uint8_t* const second_pre =
- raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
- xd->plane[0].pre[1].buf,
- xd->plane[0].pre[1].stride);
- vp9_build_inter_predictor(second_pre, xd->plane[0].pre[1].stride,
- dst, xd->plane[0].dst.stride,
- &xd->mode_info_context->bmi[i].as_mv[1],
- &xd->scale_factor[1], 4 * bw, 4 * bh, 1,
- &xd->subpix);
- }
- vp9_subtract_block(4 * bh, 4 * bw, src_diff, 8,
- src, src_stride,
- dst, xd->plane[0].dst.stride);
+ vp9_build_inter_predictor(pre,
+ xd->plane[0].pre[0].stride,
+ dst,
+ xd->plane[0].dst.stride,
+ &xd->mode_info_context->bmi[i].as_mv[0],
+ &xd->scale_factor[0],
+ 4 * bw, 4 * bh, 0 /* no avg */, &xd->subpix);
+
+ // TODO(debargha): Make this work properly with the
+ // implicit-compoundinter-weight experiment when implicit
+ // weighting for splitmv modes is turned on.
+ if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
+ uint8_t* const second_pre =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+ xd->plane[0].pre[1].buf,
+ xd->plane[0].pre[1].stride);
+ vp9_build_inter_predictor(second_pre, xd->plane[0].pre[1].stride,
+ dst, xd->plane[0].dst.stride,
+ &xd->mode_info_context->bmi[i].as_mv[1],
+ &xd->scale_factor[1], 4 * bw, 4 * bh, 1,
+ &xd->subpix);
+ }
- k = i;
- for (idy = 0; idy < bh; ++idy) {
- for (idx = 0; idx < bw; ++idx) {
- k += (idy * 2 + idx);
- src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, k,
- x->plane[0].src_diff);
- coeff = BLOCK_OFFSET(x->plane[0].coeff, 16, k);
- x->fwd_txm4x4(src_diff, coeff, 16);
- x->quantize_b_4x4(x, k, DCT_DCT, 16);
- thisdistortion += vp9_block_error(coeff,
- BLOCK_OFFSET(xd->plane[0].dqcoeff,
- k, 16), 16);
- thisrate += cost_coeffs(cm, x, 0, k, PLANE_TYPE_Y_WITH_DC,
- ta + (k & 1),
- tl + (k >> 1), TX_4X4, 16);
- }
- }
- *distortion += thisdistortion;
- *labelyrate += thisrate;
+ vp9_subtract_block(4 * bh, 4 * bw, src_diff, 8,
+ src, src_stride,
+ dst, xd->plane[0].dst.stride);
+
+ k = i;
+ for (idy = 0; idy < bh; ++idy) {
+ for (idx = 0; idx < bw; ++idx) {
+ k += (idy * 2 + idx);
+ src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, k,
+ x->plane[0].src_diff);
+ coeff = BLOCK_OFFSET(x->plane[0].coeff, 16, k);
+ x->fwd_txm4x4(src_diff, coeff, 16);
+ x->quantize_b_4x4(x, k, DCT_DCT, 16);
+ thisdistortion += vp9_block_error(coeff,
+ BLOCK_OFFSET(xd->plane[0].dqcoeff,
+ k, 16), 16);
+ thisrate += cost_coeffs(cm, x, 0, k, PLANE_TYPE_Y_WITH_DC,
+ ta + (k & 1),
+ tl + (k >> 1), TX_4X4, 16);
}
}
+ *distortion += thisdistortion;
+ *labelyrate += thisrate;
+
*distortion >>= 2;
return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
}
@@ -1188,11 +1251,45 @@ static enum BlockSize get_block_size(int bw, int bh) {
return -1;
}
+static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
+ x->plane[0].src.buf =
+ raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+ x->plane[0].src.buf,
+ x->plane[0].src.stride);
+ assert(((intptr_t)x->e_mbd.plane[0].pre[0].buf & 0x7) == 0);
+ x->e_mbd.plane[0].pre[0].buf =
+ raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+ x->e_mbd.plane[0].pre[0].buf,
+ x->e_mbd.plane[0].pre[0].stride);
+ if (mbmi->second_ref_frame)
+ x->e_mbd.plane[0].pre[1].buf =
+ raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+ x->e_mbd.plane[0].pre[1].buf,
+ x->e_mbd.plane[0].pre[1].stride);
+}
+
+static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
+ struct buf_2d orig_pre[2]) {
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
+ x->plane[0].src = orig_src;
+ x->e_mbd.plane[0].pre[0] = orig_pre[0];
+ if (mbmi->second_ref_frame)
+ x->e_mbd.plane[0].pre[1] = orig_pre[1];
+}
+
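mi_buf_shift and mi_buf_restore temporarily retarget the source and prediction buffers at the 4x4 sub-block being searched, and put the originals back once the search is done. The save/shift/search/restore pattern, sketched with a hypothetical buffer type and callback:

/* Hypothetical types; the real helpers operate on MACROBLOCK plane buffers. */
struct buf2d { unsigned char *buf; int stride; };

static void search_subblock(struct buf2d *src, struct buf2d *pre, int i,
                            void (*do_search)(void *ctx), void *ctx) {
  const struct buf2d orig_src = *src, orig_pre = *pre;    /* save */
  src->buf += (i & 1) * 4 + (i >> 1) * 4 * src->stride;   /* mi_buf_shift */
  pre->buf += (i & 1) * 4 + (i >> 1) * 4 * pre->stride;
  do_search(ctx);
  *src = orig_src;                                        /* mi_buf_restore */
  *pre = orig_pre;
}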
+static void iterative_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize,
+ int_mv *frame_mv,
+ YV12_BUFFER_CONFIG **scaled_ref_frame,
+ int mi_row, int mi_col,
+ int_mv single_newmv[MAX_REF_FRAMES]);
+
static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
BEST_SEG_INFO *bsi,
- int_mv seg_mvs[4][MAX_REF_FRAMES - 1]) {
+ int_mv seg_mvs[4][MAX_REF_FRAMES],
+ int mi_row, int mi_col) {
int i, j;
- static const int labels[4] = { 0, 1, 2, 3 };
int br = 0, bd = 0;
MB_PREDICTION_MODE this_mode;
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
@@ -1208,7 +1305,7 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
int bhl = b_height_log2(bsize), bh = 1 << bhl;
int idx, idy;
vp9_variance_fn_ptr_t *v_fn_ptr;
-
+ YV12_BUFFER_CONFIG *scaled_ref_frame[2] = {NULL, NULL};
ENTROPY_CONTEXT t_above[4], t_left[4];
ENTROPY_CONTEXT t_above_b[4], t_left_b[4];
@@ -1255,18 +1352,21 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
int distortion;
int labelyrate;
ENTROPY_CONTEXT t_above_s[4], t_left_s[4];
+ const struct buf_2d orig_src = x->plane[0].src;
+ struct buf_2d orig_pre[2];
+
+ vpx_memcpy(orig_pre, x->e_mbd.plane[0].pre, sizeof(orig_pre));
vpx_memcpy(t_above_s, t_above, sizeof(t_above_s));
vpx_memcpy(t_left_s, t_left, sizeof(t_left_s));
// motion search for newmv (single predictor case only)
if (mbmi->second_ref_frame <= 0 && this_mode == NEWMV) {
- int sseshift, n;
int step_param = 0;
int further_steps;
int thissme, bestsme = INT_MAX;
- const struct buf_2d orig_src = x->plane[0].src;
- const struct buf_2d orig_pre = x->e_mbd.plane[0].pre[0];
+ int sadpb = x->sadperbit4;
+ int_mv mvp_full;
      /* Is the best so far sufficiently good that we can't justify doing
       * a new motion search. */
@@ -1287,55 +1387,35 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
- {
- int sadpb = x->sadperbit4;
- int_mv mvp_full;
-
- mvp_full.as_mv.row = bsi->mvp.as_mv.row >> 3;
- mvp_full.as_mv.col = bsi->mvp.as_mv.col >> 3;
-
- // find first label
- n = i;
-
- // adjust src pointer for this segment
- x->plane[0].src.buf =
- raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, n,
- x->plane[0].src.buf,
- x->plane[0].src.stride);
- assert(((intptr_t)x->e_mbd.plane[0].pre[0].buf & 0x7) == 0);
- x->e_mbd.plane[0].pre[0].buf =
- raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, n,
- x->e_mbd.plane[0].pre[0].buf,
- x->e_mbd.plane[0].pre[0].stride);
-
- bestsme = vp9_full_pixel_diamond(cpi, x, &mvp_full, step_param,
- sadpb, further_steps, 0, v_fn_ptr,
- bsi->ref_mv, &mode_mv[NEWMV]);
-
- sseshift = 0;
-
- // Should we do a full search (best quality only)
- if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000) {
- /* Check if mvp_full is within the range. */
- clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max,
- x->mv_row_min, x->mv_row_max);
-
- thissme = cpi->full_search_sad(x, &mvp_full,
- sadpb, 16, v_fn_ptr,
- x->nmvjointcost, x->mvcost,
- bsi->ref_mv,
- n);
-
- if (thissme < bestsme) {
- bestsme = thissme;
- mode_mv[NEWMV].as_int =
- x->e_mbd.mode_info_context->bmi[n].as_mv[0].as_int;
- } else {
- /* The full search result is actually worse so re-instate the
- * previous best vector */
- x->e_mbd.mode_info_context->bmi[n].as_mv[0].as_int =
- mode_mv[NEWMV].as_int;
- }
+ mvp_full.as_mv.row = bsi->mvp.as_mv.row >> 3;
+ mvp_full.as_mv.col = bsi->mvp.as_mv.col >> 3;
+
+ // adjust src pointer for this block
+ mi_buf_shift(x, i);
+ bestsme = vp9_full_pixel_diamond(cpi, x, &mvp_full, step_param,
+ sadpb, further_steps, 0, v_fn_ptr,
+ bsi->ref_mv, &mode_mv[NEWMV]);
+
+ // Should we do a full search (best quality only)
+ if (cpi->compressor_speed == 0) {
+ /* Check if mvp_full is within the range. */
+ clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max,
+ x->mv_row_min, x->mv_row_max);
+
+ thissme = cpi->full_search_sad(x, &mvp_full,
+ sadpb, 16, v_fn_ptr,
+ x->nmvjointcost, x->mvcost,
+ bsi->ref_mv, i);
+
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ mode_mv[NEWMV].as_int =
+ x->e_mbd.mode_info_context->bmi[i].as_mv[0].as_int;
+ } else {
+ /* The full search result is actually worse so re-instate the
+ * previous best vector */
+ x->e_mbd.mode_info_context->bmi[i].as_mv[0].as_int =
+ mode_mv[NEWMV].as_int;
}
}
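At best quality (compressor_speed == 0) the diamond search is now always followed by an exhaustive SAD search, and whichever motion vector scored better is kept. The keep-the-better-result pattern in isolation:

/* Stand-in for the diamond-search / full-search comparison above. Returns
 * the better error and leaves *best_mv holding the matching vector. */
typedef struct { short row, col; } mv_t;

static int keep_better_mv(int best_err, mv_t *best_mv,
                          int full_err, const mv_t *full_mv) {
  if (full_err < best_err) {
    *best_mv = *full_mv;   /* exhaustive search won */
    return full_err;
  }
  return best_err;         /* re-instate the previous best vector */
}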
@@ -1348,23 +1428,32 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
&distortion, &sse);
      // save motion search result for use in compound prediction
- seg_mvs[i][mbmi->ref_frame - 1].as_int = mode_mv[NEWMV].as_int;
+ seg_mvs[i][mbmi->ref_frame].as_int = mode_mv[NEWMV].as_int;
}
// restore src pointers
- x->plane[0].src = orig_src;
- x->e_mbd.plane[0].pre[0] = orig_pre;
+ mi_buf_restore(x, orig_src, orig_pre);
} else if (mbmi->second_ref_frame > 0 && this_mode == NEWMV) {
- /* NEW4X4 */
- /* motion search not completed? Then skip newmv for this block with
- * comppred */
- if (seg_mvs[i][mbmi->second_ref_frame - 1].as_int == INVALID_MV ||
- seg_mvs[i][mbmi->ref_frame - 1].as_int == INVALID_MV) {
+ if (seg_mvs[i][mbmi->second_ref_frame].as_int == INVALID_MV ||
+ seg_mvs[i][mbmi->ref_frame ].as_int == INVALID_MV)
continue;
+
+ // adjust src pointers
+ mi_buf_shift(x, i);
+ if (cpi->sf.comp_inter_joint_search) {
+ iterative_motion_search(cpi, x, bsize, frame_mv[this_mode],
+ scaled_ref_frame,
+ mi_row, mi_col, seg_mvs[i]);
+ seg_mvs[i][mbmi->ref_frame].as_int =
+ frame_mv[this_mode][mbmi->ref_frame].as_int;
+ seg_mvs[i][mbmi->second_ref_frame].as_int =
+ frame_mv[this_mode][mbmi->second_ref_frame].as_int;
}
+ // restore src pointers
+ mi_buf_restore(x, orig_src, orig_pre);
}
- rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
+ rate = labels2mode(x, i, this_mode, &mode_mv[this_mode],
&second_mode_mv[this_mode], frame_mv, seg_mvs[i],
bsi->ref_mv, bsi->second_ref_mv, x->nmvjointcost,
x->mvcost, cpi);
@@ -1381,7 +1470,7 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
continue;
this_rd = encode_inter_mb_segment(&cpi->common,
- x, labels, i, &labelyrate,
+ x, i, &labelyrate,
&distortion, t_above_s, t_left_s);
this_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
rate += labelyrate;
@@ -1392,10 +1481,7 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
bestlabelyrate = labelyrate;
mode_selected = this_mode;
best_label_rd = this_rd;
- for (j = 0; j < 4; j++)
- if (labels[j] == i)
- best_eobs[j] = x->e_mbd.plane[0].eobs[j];
-
+ best_eobs[i] = x->e_mbd.plane[0].eobs[i];
vpx_memcpy(t_above_b, t_above_s, sizeof(t_above_s));
vpx_memcpy(t_left_b, t_left_s, sizeof(t_left_s));
}
@@ -1404,7 +1490,7 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
vpx_memcpy(t_above, t_above_b, sizeof(t_above));
vpx_memcpy(t_left, t_left_b, sizeof(t_left));
- labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
+ labels2mode(x, i, mode_selected, &mode_mv[mode_selected],
&second_mode_mv[mode_selected], frame_mv, seg_mvs[i],
bsi->ref_mv, bsi->second_ref_mv, x->nmvjointcost,
x->mvcost, cpi);
@@ -1443,12 +1529,6 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
}
}
-static void rd_check_segment(VP9_COMP *cpi, MACROBLOCK *x,
- BEST_SEG_INFO *bsi,
- int_mv seg_mvs[4][MAX_REF_FRAMES - 1]) {
- rd_check_segment_txsize(cpi, x, bsi, seg_mvs);
-}
-
static int rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
int_mv *best_ref_mv,
int_mv *second_best_ref_mv,
@@ -1457,7 +1537,8 @@ static int rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
int *returnyrate,
int *returndistortion,
int *skippable, int mvthresh,
- int_mv seg_mvs[4][MAX_REF_FRAMES - 1]) {
+ int_mv seg_mvs[4][MAX_REF_FRAMES],
+ int mi_row, int mi_col) {
int i;
BEST_SEG_INFO bsi;
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
@@ -1473,7 +1554,7 @@ static int rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
for (i = 0; i < 4; i++)
bsi.modes[i] = ZEROMV;
- rd_check_segment(cpi, x, &bsi, seg_mvs);
+ rd_check_segment_txsize(cpi, x, &bsi, seg_mvs, mi_row, mi_col);
/* set it to the best */
for (i = 0; i < 4; i++) {
@@ -1504,6 +1585,7 @@ static int rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
*returndistortion = bsi.d;
*returnyrate = bsi.segment_yrate;
*skippable = vp9_sby_is_skippable(&x->e_mbd, BLOCK_SIZE_SB8X8);
+ mbmi->mode = bsi.modes[3];
return (int)(bsi.segment_rd);
}
@@ -1878,6 +1960,154 @@ static INLINE int get_switchable_rate(VP9_COMMON *cm, MACROBLOCK *x) {
return SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs[c][m];
}
+static void iterative_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize,
+ int_mv *frame_mv,
+ YV12_BUFFER_CONFIG **scaled_ref_frame,
+ int mi_row, int mi_col,
+ int_mv single_newmv[MAX_REF_FRAMES]) {
+ int pw = 4 << b_width_log2(bsize), ph = 4 << b_height_log2(bsize);
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ int refs[2] = { mbmi->ref_frame,
+ (mbmi->second_ref_frame < 0 ? 0 : mbmi->second_ref_frame) };
+ int_mv ref_mv[2];
+ const enum BlockSize block_size = get_plane_block_size(bsize, &xd->plane[0]);
+ int ite;
+ // Prediction buffer from second frame.
+ uint8_t *second_pred = vpx_memalign(16, pw * ph * sizeof(uint8_t));
+
+ // Do joint motion search in compound mode to get more accurate mv.
+ struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0}};
+ struct buf_2d backup_second_yv12[MAX_MB_PLANE] = {{0}};
+ struct buf_2d scaled_first_yv12;
+ int last_besterr[2] = {INT_MAX, INT_MAX};
+
+ ref_mv[0] = mbmi->ref_mvs[refs[0]][0];
+ ref_mv[1] = mbmi->ref_mvs[refs[1]][0];
+
+ if (scaled_ref_frame[0]) {
+ int i;
+ // Swap out the reference frame for a version that's been scaled to
+ // match the resolution of the current frame, allowing the existing
+ // motion search code to be used without additional modifications.
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ backup_yv12[i] = xd->plane[i].pre[0];
+ setup_pre_planes(xd, scaled_ref_frame[0], NULL, mi_row, mi_col,
+ NULL, NULL);
+ }
+
+ if (scaled_ref_frame[1]) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ backup_second_yv12[i] = xd->plane[i].pre[1];
+
+ setup_pre_planes(xd, scaled_ref_frame[1], NULL, mi_row, mi_col,
+ NULL, NULL);
+ }
+
+ xd->scale_factor[0].set_scaled_offsets(&xd->scale_factor[0],
+ mi_row, mi_col);
+ xd->scale_factor[1].set_scaled_offsets(&xd->scale_factor[1],
+ mi_row, mi_col);
+ scaled_first_yv12 = xd->plane[0].pre[0];
+
+ // Initialize mv using single prediction mode result.
+ frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
+ frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
+
+ // Allow joint search multiple times iteratively for each ref frame
+ // and break out the search loop if it couldn't find better mv.
+ for (ite = 0; ite < 4; ite++) {
+ struct buf_2d ref_yv12[2];
+ int bestsme = INT_MAX;
+ int sadpb = x->sadperbit16;
+ int_mv tmp_mv;
+ int search_range = 3;
+
+ int tmp_col_min = x->mv_col_min;
+ int tmp_col_max = x->mv_col_max;
+ int tmp_row_min = x->mv_row_min;
+ int tmp_row_max = x->mv_row_max;
+ int id = ite % 2;
+
+ // Initialized here because of compiler problem in Visual Studio.
+ ref_yv12[0] = xd->plane[0].pre[0];
+ ref_yv12[1] = xd->plane[0].pre[1];
+
+ // Get pred block from second frame.
+ vp9_build_inter_predictor(ref_yv12[!id].buf,
+ ref_yv12[!id].stride,
+ second_pred, pw,
+ &frame_mv[refs[!id]],
+ &xd->scale_factor[!id],
+ pw, ph, 0,
+ &xd->subpix);
+
+ // Compound motion search on first ref frame.
+ if (id)
+ xd->plane[0].pre[0] = ref_yv12[id];
+ vp9_clamp_mv_min_max(x, &ref_mv[id]);
+
+ // Use mv result from single mode as mvp.
+ tmp_mv.as_int = frame_mv[refs[id]].as_int;
+
+ tmp_mv.as_mv.col >>= 3;
+ tmp_mv.as_mv.row >>= 3;
+
+ // Small-range full-pixel motion search
+ bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb,
+ search_range,
+ &cpi->fn_ptr[block_size],
+ x->nmvjointcost, x->mvcost,
+ &ref_mv[id], second_pred,
+ pw, ph);
+
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
+
+ if (bestsme < INT_MAX) {
+ int dis; /* TODO: use dis in distortion calculation later. */
+ unsigned int sse;
+
+ bestsme = vp9_find_best_sub_pixel_comp(x, &tmp_mv,
+ &ref_mv[id],
+ x->errorperbit,
+ &cpi->fn_ptr[block_size],
+ x->nmvjointcost, x->mvcost,
+ &dis, &sse, second_pred,
+ pw, ph);
+ }
+
+ if (id)
+ xd->plane[0].pre[0] = scaled_first_yv12;
+
+ if (bestsme < last_besterr[id]) {
+ frame_mv[refs[id]].as_int = tmp_mv.as_int;
+ last_besterr[id] = bestsme;
+ } else {
+ break;
+ }
+ }
+
+ // restore the predictor
+ if (scaled_ref_frame[0]) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ xd->plane[i].pre[0] = backup_yv12[i];
+ }
+
+ if (scaled_ref_frame[1]) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ xd->plane[i].pre[1] = backup_second_yv12[i];
+ }
+
+ vpx_free(second_pred);
+}
+
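iterative_motion_search, now a standalone function so the sub-8x8 path can reuse it, alternates between the two references: each iteration builds a prediction from the reference that is not being refined, refines the other reference's motion vector against it, and stops once an iteration fails to improve the error. A control-flow sketch with the actual search abstracted behind a callback:

#include <limits.h>

typedef struct { int row, col; } mv_t;

/* search_one(mv, other_mv) refines *mv against a prediction built from
 * other_mv and returns the resulting error; it stands in for the compound
 * full-pixel + sub-pixel search above. */
static void joint_search(mv_t mv[2],
                         int (*search_one)(mv_t *mv, const mv_t *other_mv)) {
  int best_err[2] = { INT_MAX, INT_MAX };
  int ite;
  for (ite = 0; ite < 4; ++ite) {
    const int id = ite & 1;                 /* which reference to refine */
    mv_t trial = mv[id];
    const int err = search_one(&trial, &mv[!id]);
    if (err >= best_err[id])
      break;                                /* no improvement: stop */
    mv[id] = trial;
    best_err[id] = err;
  }
}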
static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize,
int64_t txfm_cache[],
@@ -1920,145 +2150,13 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
ref_mv[1] = mbmi->ref_mvs[refs[1]][0];
if (is_comp_pred) {
- if (cpi->sf.comp_inter_joint_serach) {
- int pw = 4 << b_width_log2(bsize), ph = 4 << b_height_log2(bsize);
- int ite;
- // Prediction buffer from second frame.
- uint8_t *second_pred = vpx_memalign(16, pw * ph * sizeof(uint8_t));
-
- // Do joint motion search in compound mode to get more accurate mv.
- struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0}};
- struct buf_2d backup_second_yv12[MAX_MB_PLANE] = {{0}};
- struct buf_2d scaled_first_yv12;
- int last_besterr[2] = {INT_MAX, INT_MAX};
-
- if (scaled_ref_frame[0]) {
- int i;
-
- // Swap out the reference frame for a version that's been scaled to
- // match the resolution of the current frame, allowing the existing
- // motion search code to be used without additional modifications.
- for (i = 0; i < MAX_MB_PLANE; i++)
- backup_yv12[i] = xd->plane[i].pre[0];
-
- setup_pre_planes(xd, scaled_ref_frame[0], NULL, mi_row, mi_col,
- NULL, NULL);
- }
-
- if (scaled_ref_frame[1]) {
- int i;
-
- for (i = 0; i < MAX_MB_PLANE; i++)
- backup_second_yv12[i] = xd->plane[i].pre[1];
-
- setup_pre_planes(xd, scaled_ref_frame[1], NULL, mi_row, mi_col,
- NULL, NULL);
- }
- xd->scale_factor[0].set_scaled_offsets(&xd->scale_factor[0],
- mi_row, mi_col);
- xd->scale_factor[1].set_scaled_offsets(&xd->scale_factor[1],
- mi_row, mi_col);
-
- scaled_first_yv12 = xd->plane[0].pre[0];
-
- // Initialize mv using single prediction mode result.
- frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
- frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
-
- // Allow joint search multiple times iteratively for each ref frame
- // and break out the search loop if it couldn't find better mv.
- for (ite = 0; ite < 4; ite++) {
- struct buf_2d ref_yv12[2];
- int bestsme = INT_MAX;
- int sadpb = x->sadperbit16;
- int_mv tmp_mv;
- int search_range = 3;
-
- int tmp_col_min = x->mv_col_min;
- int tmp_col_max = x->mv_col_max;
- int tmp_row_min = x->mv_row_min;
- int tmp_row_max = x->mv_row_max;
- int id = ite % 2;
-
- // Initialized here because of compiler problem in Visual Studio.
- ref_yv12[0] = xd->plane[0].pre[0];
- ref_yv12[1] = xd->plane[0].pre[1];
-
- // Get pred block from second frame.
- vp9_build_inter_predictor(ref_yv12[!id].buf,
- ref_yv12[!id].stride,
- second_pred, pw,
- &frame_mv[refs[!id]],
- &xd->scale_factor[!id],
- pw, ph, 0,
- &xd->subpix);
-
- // Compound motion search on first ref frame.
- if (id)
- xd->plane[0].pre[0] = ref_yv12[id];
- vp9_clamp_mv_min_max(x, &ref_mv[id]);
-
- // Use mv result from single mode as mvp.
- tmp_mv.as_int = frame_mv[refs[id]].as_int;
-
- tmp_mv.as_mv.col >>= 3;
- tmp_mv.as_mv.row >>= 3;
-
- // Small-range full-pixel motion search
- bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb,
- search_range,
- &cpi->fn_ptr[block_size],
- x->nmvjointcost, x->mvcost,
- &ref_mv[id], second_pred,
- pw, ph);
-
- x->mv_col_min = tmp_col_min;
- x->mv_col_max = tmp_col_max;
- x->mv_row_min = tmp_row_min;
- x->mv_row_max = tmp_row_max;
-
- if (bestsme < INT_MAX) {
- int dis; /* TODO: use dis in distortion calculation later. */
- unsigned int sse;
-
- bestsme = vp9_find_best_sub_pixel_comp(x, &tmp_mv,
- &ref_mv[id],
- x->errorperbit,
- &cpi->fn_ptr[block_size],
- x->nmvjointcost, x->mvcost,
- &dis, &sse, second_pred,
- pw, ph);
- }
-
- if (id)
- xd->plane[0].pre[0] = scaled_first_yv12;
-
- if (bestsme < last_besterr[id]) {
- frame_mv[refs[id]].as_int =
- xd->mode_info_context->bmi[0].as_mv[1].as_int = tmp_mv.as_int;
- last_besterr[id] = bestsme;
- } else {
- break;
- }
- }
-
- // restore the predictor
- if (scaled_ref_frame[0]) {
- int i;
-
- for (i = 0; i < MAX_MB_PLANE; i++)
- xd->plane[i].pre[0] = backup_yv12[i];
- }
+ // Initialize mv using single prediction mode result.
+ frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
+ frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
- if (scaled_ref_frame[1]) {
- int i;
-
- for (i = 0; i < MAX_MB_PLANE; i++)
- xd->plane[i].pre[1] = backup_second_yv12[i];
- }
-
- vpx_free(second_pred);
- }
+ if (cpi->sf.comp_inter_joint_search)
+ iterative_motion_search(cpi, x, bsize, frame_mv, scaled_ref_frame,
+ mi_row, mi_col, single_newmv);
if (frame_mv[refs[0]].as_int == INVALID_MV ||
frame_mv[refs[1]].as_int == INVALID_MV)
@@ -2134,8 +2232,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
x->nmvjointcost, x->mvcost,
&dis, &sse);
}
- frame_mv[refs[0]].as_int =
- xd->mode_info_context->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
+ frame_mv[refs[0]].as_int = tmp_mv.as_int;
single_newmv[refs[0]].as_int = tmp_mv.as_int;
// Add the new motion vector cost to our rolling cost variable
@@ -2191,7 +2288,9 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
(mbmi->mv[1].as_mv.col & 15) == 0;
// Search for best switchable filter by checking the variance of
// pred error irrespective of whether the filter will be used
- if (1) {
+ if (cpi->speed > 4) {
+ *best_filter = EIGHTTAP;
+ } else {
int i, newbest;
int tmp_rate_sum = 0, tmp_dist_sum = 0;
for (i = 0; i < VP9_SWITCHABLE_FILTERS; ++i) {
@@ -2328,6 +2427,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
// Y cost and distortion
super_block_yrd(cpi, x, rate_y, distortion_y, &skippable_y,
bsize, txfm_cache);
+
*rate2 += *rate_y;
*distortion += *distortion_y;
@@ -2393,16 +2493,14 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
*returnrate = rate4x4_y + rate_uv +
vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
*returndist = dist4x4_y + (dist_uv >> 2);
- for (i = 0; i < NB_TXFM_MODES; i++) {
- ctx->txfm_rd_diff[i] = MIN(err4x4, err - txfm_cache[i]);
- }
+ vpx_memset(ctx->txfm_rd_diff, 0, sizeof(ctx->txfm_rd_diff));
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
} else {
*returnrate = rate_y + rate_uv +
vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
*returndist = dist_y + (dist_uv >> 2);
for (i = 0; i < NB_TXFM_MODES; i++) {
- ctx->txfm_rd_diff[i] = MIN(err4x4, err - txfm_cache[i]);
+ ctx->txfm_rd_diff[i] = txfm_cache[i] - txfm_cache[cm->txfm_mode];
}
xd->mode_info_context->mbmi.txfm_size = txfm_size;
xd->mode_info_context->mbmi.mode = mode;
@@ -2457,14 +2555,14 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
int64_t frame_distortions[MAX_REF_FRAMES] = {-1};
int intra_cost_penalty = 20 * vp9_dc_quant(cpi->common.base_qindex,
cpi->common.y_dc_delta_q);
- int_mv seg_mvs[4][MAX_REF_FRAMES - 1];
+ int_mv seg_mvs[4][MAX_REF_FRAMES];
union b_mode_info best_bmodes[4];
PARTITION_INFO best_partition;
for (i = 0; i < 4; i++) {
int j;
- for (j = 0; j < MAX_REF_FRAMES - 1; j++)
+ for (j = 0; j < MAX_REF_FRAMES; j++)
seg_mvs[i][j].as_int = INVALID_MV;
}
// Everywhere the flag is set the error is much higher than its neighbors.
@@ -2563,11 +2661,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
this_mode = vp9_mode_order[mode_index].mode;
ref_frame = vp9_mode_order[mode_index].ref_frame;
- if (!(ref_frame == INTRA_FRAME
- || (cpi->ref_frame_flags & flag_list[ref_frame]))) {
- continue;
- }
-
if (cpi->speed > 0 && bsize >= BLOCK_SIZE_SB8X8) {
if (!(ref_frame_mask & (1 << ref_frame))) {
continue;
@@ -2585,6 +2678,15 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
mbmi->ref_frame = ref_frame;
mbmi->second_ref_frame = vp9_mode_order[mode_index].second_ref_frame;
+ if (!(ref_frame == INTRA_FRAME
+ || (cpi->ref_frame_flags & flag_list[ref_frame]))) {
+ continue;
+ }
+ if (!(mbmi->second_ref_frame == NONE
+ || (cpi->ref_frame_flags & flag_list[mbmi->second_ref_frame]))) {
+ continue;
+ }
+
// TODO(jingning, jkoleszar): scaling reference frame not supported for
// SPLITMV.
if (mbmi->ref_frame > 0 &&
@@ -2680,8 +2782,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
if (this_mode == I4X4_PRED) {
int rate;
- // Note the rate value returned here includes the cost of coding
- // the I4X4_PRED mode : x->mbmode_cost[xd->frame_type][I4X4_PRED];
mbmi->txfm_size = TX_4X4;
rd_pick_intra4x4mby_modes(cpi, x, &rate, &rate_y,
&distortion_y, INT64_MAX);
@@ -2716,7 +2816,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
skippable = skippable && skip_uv[uv_tx];
mbmi->uv_mode = mode_uv[uv_tx];
- rate2 = rate_y + x->mbmode_cost[cm->frame_type][mbmi->mode] + rate_uv;
+ rate2 = rate_y + x->mbmode_cost[mbmi->mode] + rate_uv;
if (mbmi->mode != DC_PRED && mbmi->mode != TM_PRED)
rate2 += intra_cost_penalty;
distortion2 = distortion_y + distortion_uv;
@@ -2755,7 +2855,8 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
second_ref, INT64_MAX,
&rate, &rate_y, &distortion,
&skippable,
- (int)this_rd_thresh, seg_mvs);
+ (int)this_rd_thresh, seg_mvs,
+ mi_row, mi_col);
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
const int rs = get_switchable_rate(cm, x);
tmp_rd += RDCOST(x->rdmult, x->rddiv, rs, 0);
@@ -2794,7 +2895,8 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
second_ref, INT64_MAX,
&rate, &rate_y, &distortion,
&skippable,
- (int)this_rd_thresh, seg_mvs);
+ (int)this_rd_thresh, seg_mvs,
+ mi_row, mi_col);
} else {
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
int rs = get_switchable_rate(cm, x);
@@ -2843,7 +2945,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
compmode_cost =
vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_COMP), is_comp_pred);
- mbmi->mode = this_mode;
} else {
YV12_BUFFER_CONFIG *scaled_ref_frame[2] = {NULL, NULL};
int fb = get_ref_frame_idx(cpi, mbmi->ref_frame);
@@ -2938,14 +3039,16 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
best_mode = this_mode;
}
- // Store the respective mode distortions for later use.
- if (mode_distortions[this_mode] == -1
- || distortion2 < mode_distortions[this_mode]) {
- mode_distortions[this_mode] = distortion2;
- }
- if (frame_distortions[mbmi->ref_frame] == -1
- || distortion2 < frame_distortions[mbmi->ref_frame]) {
- frame_distortions[mbmi->ref_frame] = distortion2;
+ if (this_mode != I4X4_PRED && this_mode != SPLITMV) {
+ // Store the respective mode distortions for later use.
+ if (mode_distortions[this_mode] == -1
+ || distortion2 < mode_distortions[this_mode]) {
+ mode_distortions[this_mode] = distortion2;
+ }
+ if (frame_distortions[mbmi->ref_frame] == -1
+ || distortion2 < frame_distortions[mbmi->ref_frame]) {
+ frame_distortions[mbmi->ref_frame] = distortion2;
+ }
}
// Did this mode help.. i.e. is it the new best mode
@@ -2954,7 +3057,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
// Note index of best mode so far
best_mode_index = mode_index;
- if (this_mode <= I4X4_PRED) {
+ if (ref_frame == INTRA_FRAME) {
/* required for left and above block mv */
mbmi->mv[0].as_int = 0;
}
@@ -3052,8 +3155,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
// Flag all modes that have a distortion thats > 2x the best we found at
// this level.
for (mode_index = 0; mode_index < MB_MODE_COUNT; ++mode_index) {
- if (mode_index == NEARESTMV || mode_index == NEARMV || mode_index == NEWMV
- || mode_index == SPLITMV)
+ if (mode_index == NEARESTMV || mode_index == NEARMV || mode_index == NEWMV)
continue;
if (mode_distortions[mode_index] > 2 * *returndistortion) {
@@ -3077,7 +3179,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
assert((cm->mcomp_filter_type == SWITCHABLE) ||
(cm->mcomp_filter_type == best_mbmode.interp_filter) ||
- (best_mbmode.mode <= I4X4_PRED));
+ (best_mbmode.ref_frame == INTRA_FRAME));
// Accumulate filter usage stats
// TODO(agrange): Use RD criteria to select interpolation filter mode.
@@ -3129,13 +3231,15 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
// macroblock modes
vpx_memcpy(mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
- if (best_mbmode.mode == I4X4_PRED) {
+ if (best_mbmode.ref_frame == INTRA_FRAME &&
+ best_mbmode.sb_type < BLOCK_SIZE_SB8X8) {
for (i = 0; i < 4; i++) {
xd->mode_info_context->bmi[i].as_mode = best_bmodes[i].as_mode;
}
}
- if (best_mbmode.mode == SPLITMV) {
+ if (best_mbmode.ref_frame != INTRA_FRAME &&
+ best_mbmode.sb_type < BLOCK_SIZE_SB8X8) {
for (i = 0; i < 4; i++)
xd->mode_info_context->bmi[i].as_mv[0].as_int =
best_bmodes[i].as_mv[0].as_int;
diff --git a/vp9/encoder/vp9_tokenize.c b/vp9/encoder/vp9_tokenize.c
index 08efc84d4..79f72bb4b 100644
--- a/vp9/encoder/vp9_tokenize.c
+++ b/vp9/encoder/vp9_tokenize.c
@@ -36,6 +36,9 @@ extern vp9_coeff_stats tree_update_hist_16x16[BLOCK_TYPES];
extern vp9_coeff_stats tree_update_hist_32x32[BLOCK_TYPES];
#endif /* ENTROPY_STATS */
+DECLARE_ALIGNED(16, extern const uint8_t,
+ vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
+
static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2];
const TOKENVALUE *vp9_dct_value_tokens_ptr;
static int dct_value_cost[DCT_MAX_VALUE * 2];
@@ -221,14 +224,24 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE_TYPE bsize,
t->token = token;
t->context_tree = coef_probs[type][ref][band][pt];
t->skip_eob_node = (c > 0) && (token_cache[scan[c - 1]] == 0);
+
+#if CONFIG_BALANCED_COEFTREE
+ assert(token <= ZERO_TOKEN ||
+ vp9_coef_encodings[t->token].len - t->skip_eob_node > 0);
+#else
assert(vp9_coef_encodings[t->token].len - t->skip_eob_node > 0);
+#endif
if (!dry_run) {
++counts[type][ref][band][pt][token];
+#if CONFIG_BALANCED_COEFTREE
+ if (!t->skip_eob_node && token > ZERO_TOKEN)
+#else
if (!t->skip_eob_node)
+#endif
++cpi->common.fc.eob_branch_counts[tx_size][type][ref][band][pt];
}
- token_cache[scan[c]] = token;
+ token_cache[scan[c]] = vp9_pt_energy_class[token];
++t;
} while (c < eob && ++c < seg_eob);
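tokenize_b (like cost_coeffs above) now stores a coarse energy class in token_cache instead of the raw token index, so coefficient contexts depend only on rough magnitude. The real mapping is the exported vp9_pt_energy_class table; the sketch below only illustrates the idea and its values are made up:

/* Illustrative only: collapse token indices into a few magnitude classes.
 * The actual classes are defined by vp9_pt_energy_class in vp9_entropy.c. */
static unsigned char energy_class(int token) {
  if (token == 0) return 0;   /* zero token */
  if (token == 1) return 1;   /* +/-1 */
  if (token <= 4) return 2;   /* small magnitudes */
  return 3;                   /* large-magnitude / category tokens */
}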
diff --git a/vp9/encoder/vp9_variance_c.c b/vp9/encoder/vp9_variance_c.c
index 7231dcf22..e9b36f356 100644
--- a/vp9/encoder/vp9_variance_c.c
+++ b/vp9/encoder/vp9_variance_c.c
@@ -318,6 +318,11 @@ unsigned int vp9_variance16x8_c(const uint8_t *src_ptr,
return (var - (((unsigned int)avg * avg) >> 7));
}
+void vp9_get_sse_sum_8x8_c(const uint8_t *src_ptr, int source_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ unsigned int *sse, int *sum) {
+ variance(src_ptr, source_stride, ref_ptr, ref_stride, 8, 8, sse, sum);
+}
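vp9_get_sse_sum_8x8_c exposes both the sum of squared errors and the plain sum for an 8x8 block in one pass, which is exactly what per-block variance needs (var = sse - sum^2 / 64). A self-contained version of the same computation:

static void sse_sum_8x8(const unsigned char *src, int src_stride,
                        const unsigned char *ref, int ref_stride,
                        unsigned int *sse, int *sum) {
  int r, c;
  *sse = 0;
  *sum = 0;
  for (r = 0; r < 8; ++r) {
    for (c = 0; c < 8; ++c) {
      const int diff = src[c] - ref[c];
      *sum += diff;
      *sse += (unsigned int)(diff * diff);
    }
    src += src_stride;
    ref += ref_stride;
  }
  /* variance of the block is then *sse - ((long long)*sum * *sum) / 64 */
}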
unsigned int vp9_variance8x8_c(const uint8_t *src_ptr,
int source_stride,
diff --git a/vp9/encoder/vp9_write_bit_buffer.h b/vp9/encoder/vp9_write_bit_buffer.h
new file mode 100644
index 000000000..18cf40366
--- /dev/null
+++ b/vp9/encoder/vp9_write_bit_buffer.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_BIT_WRITE_BUFFER_H_
+#define VP9_BIT_WRITE_BUFFER_H_
+
+#include "vpx/vpx_integer.h"
+
+struct vp9_write_bit_buffer {
+ uint8_t *bit_buffer;
+ size_t bit_offset;
+};
+
+static size_t vp9_rb_bytes_written(struct vp9_write_bit_buffer *wb) {
+ return wb->bit_offset / CHAR_BIT + (wb->bit_offset % CHAR_BIT > 0);
+}
+
+static void vp9_wb_write_bit(struct vp9_write_bit_buffer *wb, int bit) {
+ const int off = wb->bit_offset;
+ const int p = off / CHAR_BIT;
+ const int q = CHAR_BIT - 1 - off % CHAR_BIT;
+ wb->bit_buffer[p] &= ~(1 << q);
+ wb->bit_buffer[p] |= bit << q;
+ wb->bit_offset = off + 1;
+}
+
+static void vp9_wb_write_literal(struct vp9_write_bit_buffer *wb,
+ int data, int bits) {
+ int bit;
+ for (bit = bits - 1; bit >= 0; bit--)
+ vp9_wb_write_bit(wb, (data >> bit) & 1);
+}
+
+
+#endif // VP9_BIT_WRITE_BUFFER_H_
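The new writer packs bits MSB-first into a caller-provided byte array; note that it relies on size_t and CHAR_BIT being visible through earlier includes (stddef.h / limits.h). A small usage sketch under that assumption:

#include <stdio.h>
#include <limits.h>
#include <stddef.h>
#include "vpx/vpx_integer.h"
#include "vp9/encoder/vp9_write_bit_buffer.h"

int main(void) {
  uint8_t buf[8] = { 0 };
  struct vp9_write_bit_buffer wb = { buf, 0 };

  vp9_wb_write_literal(&wb, 2, 2);     /* a 2-bit field */
  vp9_wb_write_literal(&wb, 0x49, 8);  /* an 8-bit literal */

  /* 10 bits written -> 2 bytes reported */
  printf("%d bytes used, first byte 0x%02x\n",
         (int)vp9_rb_bytes_written(&wb), buf[0]);
  return 0;
}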
diff --git a/vp9/vp9_common.mk b/vp9/vp9_common.mk
index 147804d03..29e832384 100644
--- a/vp9/vp9_common.mk
+++ b/vp9/vp9_common.mk
@@ -18,7 +18,6 @@ VP9_COMMON_SRCS-yes += common/vp9_asm_com_offsets.c
VP9_COMMON_SRCS-yes += common/vp9_coefupdateprobs.h
VP9_COMMON_SRCS-yes += common/vp9_convolve.c
VP9_COMMON_SRCS-yes += common/vp9_convolve.h
-VP9_COMMON_SRCS-yes += common/vp9_debugmodes.c
VP9_COMMON_SRCS-yes += common/vp9_default_coef_probs.h
VP9_COMMON_SRCS-yes += common/vp9_entropy.c
VP9_COMMON_SRCS-yes += common/vp9_entropymode.c
@@ -38,7 +37,6 @@ VP9_COMMON_SRCS-yes += common/vp9_entropymv.h
VP9_COMMON_SRCS-yes += common/vp9_enums.h
VP9_COMMON_SRCS-yes += common/vp9_extend.h
VP9_COMMON_SRCS-yes += common/vp9_findnearmv.h
-VP9_COMMON_SRCS-yes += common/vp9_header.h
VP9_COMMON_SRCS-yes += common/vp9_idct.h
VP9_COMMON_SRCS-yes += common/vp9_invtrans.h
VP9_COMMON_SRCS-yes += common/vp9_loopfilter.h
diff --git a/vp9/vp9_cx_iface.c b/vp9/vp9_cx_iface.c
index 9326165ed..e5b50894c 100644
--- a/vp9/vp9_cx_iface.c
+++ b/vp9/vp9_cx_iface.c
@@ -20,7 +20,7 @@
#include <stdlib.h>
#include <string.h>
-struct vp8_extracfg {
+struct vp9_extracfg {
struct vpx_codec_pkt_list *pkt_list;
int cpu_used; /** available cpu percentage in 1/16*/
  unsigned int enable_auto_alt_ref; /** if encoder decides to use alternate reference frame */
@@ -42,7 +42,7 @@ struct vp8_extracfg {
struct extraconfig_map {
int usage;
- struct vp8_extracfg cfg;
+ struct vp9_extracfg cfg;
};
static const struct extraconfig_map extracfg_map[] = {
@@ -73,7 +73,7 @@ static const struct extraconfig_map extracfg_map[] = {
struct vpx_codec_alg_priv {
vpx_codec_priv_t base;
vpx_codec_enc_cfg_t cfg;
- struct vp8_extracfg vp8_cfg;
+ struct vp9_extracfg vp8_cfg;
VP9_CONFIG oxcf;
VP9_PTR cpi;
unsigned char *cx_data;
@@ -131,7 +131,7 @@ update_error_state(vpx_codec_alg_priv_t *ctx,
static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
const vpx_codec_enc_cfg_t *cfg,
- const struct vp8_extracfg *vp8_cfg) {
+ const struct vp9_extracfg *vp8_cfg) {
RANGE_CHECK(cfg, g_w, 1, 65535); /* 16 bits available */
RANGE_CHECK(cfg, g_h, 1, 65535); /* 16 bits available */
RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000);
@@ -226,9 +226,9 @@ static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx,
}
-static vpx_codec_err_t set_vp8e_config(VP9_CONFIG *oxcf,
+static vpx_codec_err_t set_vp9e_config(VP9_CONFIG *oxcf,
vpx_codec_enc_cfg_t cfg,
- struct vp8_extracfg vp8_cfg) {
+ struct vp9_extracfg vp8_cfg) {
oxcf->version = cfg.g_profile | (vp8_cfg.experimental ? 0x4 : 0);
oxcf->width = cfg.g_w;
oxcf->height = cfg.g_h;
@@ -351,7 +351,7 @@ static vpx_codec_err_t set_vp8e_config(VP9_CONFIG *oxcf,
return VPX_CODEC_OK;
}
-static vpx_codec_err_t vp8e_set_config(vpx_codec_alg_priv_t *ctx,
+static vpx_codec_err_t vp9e_set_config(vpx_codec_alg_priv_t *ctx,
const vpx_codec_enc_cfg_t *cfg) {
vpx_codec_err_t res;
@@ -370,7 +370,7 @@ static vpx_codec_err_t vp8e_set_config(vpx_codec_alg_priv_t *ctx,
if (!res) {
ctx->cfg = *cfg;
- set_vp8e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg);
+ set_vp9e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg);
vp9_change_config(ctx->cpi, &ctx->oxcf);
}
@@ -406,7 +406,7 @@ static vpx_codec_err_t set_param(vpx_codec_alg_priv_t *ctx,
int ctrl_id,
va_list args) {
vpx_codec_err_t res = VPX_CODEC_OK;
- struct vp8_extracfg xcfg = ctx->vp8_cfg;
+ struct vp9_extracfg xcfg = ctx->vp8_cfg;
#define MAP(id, var) case id: var = CAST(id, args); break;
@@ -433,7 +433,7 @@ static vpx_codec_err_t set_param(vpx_codec_alg_priv_t *ctx,
if (!res) {
ctx->vp8_cfg = xcfg;
- set_vp8e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg);
+ set_vp9e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg);
vp9_change_config(ctx->cpi, &ctx->oxcf);
}
@@ -442,7 +442,7 @@ static vpx_codec_err_t set_param(vpx_codec_alg_priv_t *ctx,
}
-static vpx_codec_err_t vp8e_common_init(vpx_codec_ctx_t *ctx,
+static vpx_codec_err_t vp9e_common_init(vpx_codec_ctx_t *ctx,
int experimental) {
vpx_codec_err_t res = VPX_CODEC_OK;
struct vpx_codec_alg_priv *priv;
@@ -488,7 +488,7 @@ static vpx_codec_err_t vp8e_common_init(vpx_codec_ctx_t *ctx,
priv->vp8_cfg.experimental = experimental;
// TODO(agrange) Check the limits set on this buffer, or the check that is
- // applied in vp8e_encode.
+ // applied in vp9e_encode.
priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 8;
// priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 2;
@@ -505,7 +505,7 @@ static vpx_codec_err_t vp8e_common_init(vpx_codec_ctx_t *ctx,
res = validate_config(priv, &priv->cfg, &priv->vp8_cfg);
if (!res) {
- set_vp8e_config(&ctx->priv->alg_priv->oxcf,
+ set_vp9e_config(&ctx->priv->alg_priv->oxcf,
ctx->priv->alg_priv->cfg,
ctx->priv->alg_priv->vp8_cfg);
optr = vp9_create_compressor(&ctx->priv->alg_priv->oxcf);
@@ -521,21 +521,21 @@ static vpx_codec_err_t vp8e_common_init(vpx_codec_ctx_t *ctx,
}
-static vpx_codec_err_t vp8e_init(vpx_codec_ctx_t *ctx,
+static vpx_codec_err_t vp9e_init(vpx_codec_ctx_t *ctx,
vpx_codec_priv_enc_mr_cfg_t *data) {
- return vp8e_common_init(ctx, 0);
+ return vp9e_common_init(ctx, 0);
}
#if CONFIG_EXPERIMENTAL
-static vpx_codec_err_t vp8e_exp_init(vpx_codec_ctx_t *ctx,
+static vpx_codec_err_t vp9e_exp_init(vpx_codec_ctx_t *ctx,
vpx_codec_priv_enc_mr_cfg_t *data) {
- return vp8e_common_init(ctx, 1);
+ return vp9e_common_init(ctx, 1);
}
#endif
-static vpx_codec_err_t vp8e_destroy(vpx_codec_alg_priv_t *ctx) {
+static vpx_codec_err_t vp9e_destroy(vpx_codec_alg_priv_t *ctx) {
free(ctx->cx_data);
vp9_remove_compressor(&ctx->cpi);
@@ -608,7 +608,7 @@ static int write_superframe_index(vpx_codec_alg_priv_t *ctx) {
return index_sz;
}
-static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
+static vpx_codec_err_t vp9e_encode(vpx_codec_alg_priv_t *ctx,
const vpx_image_t *img,
vpx_codec_pts_t pts,
unsigned long duration,
@@ -831,12 +831,12 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
}
-static const vpx_codec_cx_pkt_t *vp8e_get_cxdata(vpx_codec_alg_priv_t *ctx,
+static const vpx_codec_cx_pkt_t *vp9e_get_cxdata(vpx_codec_alg_priv_t *ctx,
vpx_codec_iter_t *iter) {
return vpx_codec_pkt_list_get(&ctx->pkt_list.head, iter);
}
-static vpx_codec_err_t vp8e_set_reference(vpx_codec_alg_priv_t *ctx,
+static vpx_codec_err_t vp9e_set_reference(vpx_codec_alg_priv_t *ctx,
int ctr_id,
va_list args) {
vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
@@ -853,7 +853,7 @@ static vpx_codec_err_t vp8e_set_reference(vpx_codec_alg_priv_t *ctx,
}
-static vpx_codec_err_t vp8e_copy_reference(vpx_codec_alg_priv_t *ctx,
+static vpx_codec_err_t vp9e_copy_reference(vpx_codec_alg_priv_t *ctx,
int ctr_id,
va_list args) {
@@ -886,7 +886,7 @@ static vpx_codec_err_t get_reference(vpx_codec_alg_priv_t *ctx,
}
}
-static vpx_codec_err_t vp8e_set_previewpp(vpx_codec_alg_priv_t *ctx,
+static vpx_codec_err_t vp9e_set_previewpp(vpx_codec_alg_priv_t *ctx,
int ctr_id,
va_list args) {
#if CONFIG_POSTPROC
@@ -907,7 +907,7 @@ static vpx_codec_err_t vp8e_set_previewpp(vpx_codec_alg_priv_t *ctx,
}
-static vpx_image_t *vp8e_get_preview(vpx_codec_alg_priv_t *ctx) {
+static vpx_image_t *vp9e_get_preview(vpx_codec_alg_priv_t *ctx) {
YV12_BUFFER_CONFIG sd;
vp9_ppflags_t flags = {0};
@@ -925,7 +925,7 @@ static vpx_image_t *vp8e_get_preview(vpx_codec_alg_priv_t *ctx) {
return NULL;
}
-static vpx_codec_err_t vp8e_update_entropy(vpx_codec_alg_priv_t *ctx,
+static vpx_codec_err_t vp9e_update_entropy(vpx_codec_alg_priv_t *ctx,
int ctr_id,
va_list args) {
int update = va_arg(args, int);
@@ -934,7 +934,7 @@ static vpx_codec_err_t vp8e_update_entropy(vpx_codec_alg_priv_t *ctx,
}
-static vpx_codec_err_t vp8e_update_reference(vpx_codec_alg_priv_t *ctx,
+static vpx_codec_err_t vp9e_update_reference(vpx_codec_alg_priv_t *ctx,
int ctr_id,
va_list args) {
int update = va_arg(args, int);
@@ -942,7 +942,7 @@ static vpx_codec_err_t vp8e_update_reference(vpx_codec_alg_priv_t *ctx,
return VPX_CODEC_OK;
}
-static vpx_codec_err_t vp8e_use_reference(vpx_codec_alg_priv_t *ctx,
+static vpx_codec_err_t vp9e_use_reference(vpx_codec_alg_priv_t *ctx,
int ctr_id,
va_list args) {
int reference_flag = va_arg(args, int);
@@ -950,7 +950,7 @@ static vpx_codec_err_t vp8e_use_reference(vpx_codec_alg_priv_t *ctx,
return VPX_CODEC_OK;
}
-static vpx_codec_err_t vp8e_set_roi_map(vpx_codec_alg_priv_t *ctx,
+static vpx_codec_err_t vp9e_set_roi_map(vpx_codec_alg_priv_t *ctx,
int ctr_id,
va_list args) {
vpx_roi_map_t *data = va_arg(args, vpx_roi_map_t *);
@@ -968,7 +968,7 @@ static vpx_codec_err_t vp8e_set_roi_map(vpx_codec_alg_priv_t *ctx,
}
-static vpx_codec_err_t vp8e_set_activemap(vpx_codec_alg_priv_t *ctx,
+static vpx_codec_err_t vp9e_set_activemap(vpx_codec_alg_priv_t *ctx,
int ctr_id,
va_list args) {
vpx_active_map_t *data = va_arg(args, vpx_active_map_t *);
@@ -985,7 +985,7 @@ static vpx_codec_err_t vp8e_set_activemap(vpx_codec_alg_priv_t *ctx,
return VPX_CODEC_INVALID_PARAM;
}
-static vpx_codec_err_t vp8e_set_scalemode(vpx_codec_alg_priv_t *ctx,
+static vpx_codec_err_t vp9e_set_scalemode(vpx_codec_alg_priv_t *ctx,
int ctr_id,
va_list args) {
@@ -1006,16 +1006,16 @@ static vpx_codec_err_t vp8e_set_scalemode(vpx_codec_alg_priv_t *ctx,
}
-static vpx_codec_ctrl_fn_map_t vp8e_ctf_maps[] = {
- {VP8_SET_REFERENCE, vp8e_set_reference},
- {VP8_COPY_REFERENCE, vp8e_copy_reference},
- {VP8_SET_POSTPROC, vp8e_set_previewpp},
- {VP8E_UPD_ENTROPY, vp8e_update_entropy},
- {VP8E_UPD_REFERENCE, vp8e_update_reference},
- {VP8E_USE_REFERENCE, vp8e_use_reference},
- {VP8E_SET_ROI_MAP, vp8e_set_roi_map},
- {VP8E_SET_ACTIVEMAP, vp8e_set_activemap},
- {VP8E_SET_SCALEMODE, vp8e_set_scalemode},
+static vpx_codec_ctrl_fn_map_t vp9e_ctf_maps[] = {
+ {VP8_SET_REFERENCE, vp9e_set_reference},
+ {VP8_COPY_REFERENCE, vp9e_copy_reference},
+ {VP8_SET_POSTPROC, vp9e_set_previewpp},
+ {VP8E_UPD_ENTROPY, vp9e_update_entropy},
+ {VP8E_UPD_REFERENCE, vp9e_update_reference},
+ {VP8E_USE_REFERENCE, vp9e_use_reference},
+ {VP8E_SET_ROI_MAP, vp9e_set_roi_map},
+ {VP8E_SET_ACTIVEMAP, vp9e_set_activemap},
+ {VP8E_SET_SCALEMODE, vp9e_set_scalemode},
{VP8E_SET_CPUUSED, set_param},
{VP8E_SET_NOISE_SENSITIVITY, set_param},
{VP8E_SET_ENABLEAUTOALTREF, set_param},
@@ -1036,7 +1036,7 @@ static vpx_codec_ctrl_fn_map_t vp8e_ctf_maps[] = {
{ -1, NULL},
};
-static vpx_codec_enc_cfg_map_t vp8e_usage_cfg_map[] = {
+static vpx_codec_enc_cfg_map_t vp9e_usage_cfg_map[] = {
{
0,
{
@@ -1101,9 +1101,9 @@ CODEC_INTERFACE(vpx_codec_vp9_cx) = {
VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR |
VPX_CODEC_CAP_OUTPUT_PARTITION,
/* vpx_codec_caps_t caps; */
- vp8e_init, /* vpx_codec_init_fn_t init; */
- vp8e_destroy, /* vpx_codec_destroy_fn_t destroy; */
- vp8e_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
+ vp9e_init, /* vpx_codec_init_fn_t init; */
+ vp9e_destroy, /* vpx_codec_destroy_fn_t destroy; */
+ vp9e_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
NOT_IMPLEMENTED, /* vpx_codec_get_mmap_fn_t get_mmap; */
NOT_IMPLEMENTED, /* vpx_codec_set_mmap_fn_t set_mmap; */
{
@@ -1113,12 +1113,12 @@ CODEC_INTERFACE(vpx_codec_vp9_cx) = {
NOT_IMPLEMENTED, /* vpx_codec_frame_get_fn_t frame_get; */
},
{
- vp8e_usage_cfg_map, /* vpx_codec_enc_cfg_map_t peek_si; */
- vp8e_encode, /* vpx_codec_encode_fn_t encode; */
- vp8e_get_cxdata, /* vpx_codec_get_cx_data_fn_t frame_get; */
- vp8e_set_config,
+ vp9e_usage_cfg_map, /* vpx_codec_enc_cfg_map_t peek_si; */
+ vp9e_encode, /* vpx_codec_encode_fn_t encode; */
+ vp9e_get_cxdata, /* vpx_codec_get_cx_data_fn_t frame_get; */
+ vp9e_set_config,
NOT_IMPLEMENTED,
- vp8e_get_preview,
+ vp9e_get_preview,
} /* encoder functions */
};
@@ -1130,9 +1130,9 @@ CODEC_INTERFACE(vpx_codec_vp9x_cx) = {
VPX_CODEC_INTERNAL_ABI_VERSION,
VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR,
/* vpx_codec_caps_t caps; */
- vp8e_exp_init, /* vpx_codec_init_fn_t init; */
- vp8e_destroy, /* vpx_codec_destroy_fn_t destroy; */
- vp8e_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
+ vp9e_exp_init, /* vpx_codec_init_fn_t init; */
+ vp9e_destroy, /* vpx_codec_destroy_fn_t destroy; */
+ vp9e_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
NOT_IMPLEMENTED, /* vpx_codec_get_mmap_fn_t get_mmap; */
NOT_IMPLEMENTED, /* vpx_codec_set_mmap_fn_t set_mmap; */
{
@@ -1142,12 +1142,12 @@ CODEC_INTERFACE(vpx_codec_vp9x_cx) = {
NOT_IMPLEMENTED, /* vpx_codec_frame_get_fn_t frame_get; */
},
{
- vp8e_usage_cfg_map, /* vpx_codec_enc_cfg_map_t peek_si; */
- vp8e_encode, /* vpx_codec_encode_fn_t encode; */
- vp8e_get_cxdata, /* vpx_codec_get_cx_data_fn_t frame_get; */
- vp8e_set_config,
+ vp9e_usage_cfg_map, /* vpx_codec_enc_cfg_map_t peek_si; */
+ vp9e_encode, /* vpx_codec_encode_fn_t encode; */
+ vp9e_get_cxdata, /* vpx_codec_get_cx_data_fn_t frame_get; */
+ vp9e_set_config,
NOT_IMPLEMENTED,
- vp8e_get_preview,
+ vp9e_get_preview,
} /* encoder functions */
};
#endif
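
Note on the renames above: the control table still keys the handlers to the VP8-named control IDs, so application code reaches the renamed vp9e_* functions exactly as before. A minimal sketch of that call path, assuming the usual vpx_codec_vp9_cx() interface accessor and vpx_codec_enc_config_default()/vpx_codec_enc_init() setup (the configuration values here are illustrative, not part of this change):

  #include "vpx/vpx_encoder.h"
  #include "vpx/vp8cx.h"  /* VP8E_* control IDs shared by the VP9 encoder */

  /* Illustrative only: initialize the VP9 encoder and exercise two controls
   * dispatched through vp9e_ctf_maps. */
  static int open_vp9_encoder(vpx_codec_ctx_t *codec, unsigned int w,
                              unsigned int h) {
    vpx_codec_enc_cfg_t cfg;

    if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), &cfg, 0))
      return -1;
    cfg.g_w = w;
    cfg.g_h = h;

    if (vpx_codec_enc_init(codec, vpx_codec_vp9_cx(), &cfg, 0))
      return -1;

    /* Both IDs keep their VP8E_ names and route to set_param() in the map. */
    vpx_codec_control(codec, VP8E_SET_CPUUSED, 2);
    vpx_codec_control(codec, VP8E_SET_ENABLEAUTOALTREF, 1);
    return 0;
  }
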
diff --git a/vp9/vp9_dx_iface.c b/vp9/vp9_dx_iface.c
index ee1130402..b5aa10d52 100644
--- a/vp9/vp9_dx_iface.c
+++ b/vp9/vp9_dx_iface.c
@@ -215,26 +215,19 @@ static vpx_codec_err_t vp8_peek_si(const uint8_t *data,
if (data + data_sz <= data)
res = VPX_CODEC_INVALID_PARAM;
else {
- /* Parse uncompresssed part of key frame header.
- * 3 bytes:- including version, frame type and an offset
- * 3 bytes:- sync code (0x49, 0x83, 0x42)
- * 4 bytes:- including image width and height in the lowest 14 bits
- * of each 2-byte value.
- */
si->is_kf = 0;
- if (data_sz >= 10 && !(data[0] & 0x01)) { /* I-Frame */
- const uint8_t *c = data + 3;
+ if (data_sz >= 8 && !(data[0] & 0x80)) { /* I-Frame */
+ const uint8_t *c = data + 1;
si->is_kf = 1;
- /* vet via sync code */
- if (c[0] != 0x49 || c[1] != 0x83 || c[2] != 0x42)
+ if (c[0] != SYNC_CODE_0 || c[1] != SYNC_CODE_1 || c[2] != SYNC_CODE_2)
res = VPX_CODEC_UNSUP_BITSTREAM;
- si->w = (c[3] | (c[4] << 8));
- si->h = (c[5] | (c[6] << 8));
+ si->w = (c[3] << 8) | c[4];
+ si->h = (c[5] << 8) | c[6];
- /*printf("w=%d, h=%d\n", si->w, si->h);*/
+ // printf("w=%d, h=%d\n", si->w, si->h);
if (!(si->h | si->w))
res = VPX_CODEC_UNSUP_BITSTREAM;
} else
@@ -242,7 +235,6 @@ static vpx_codec_err_t vp8_peek_si(const uint8_t *data,
}
return res;
-
}
static vpx_codec_err_t vp8_get_si(vpx_codec_alg_priv_t *ctx,
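
The reworked peek above drops the VP8-style 3-byte frame tag: the key-frame path is taken when the top bit of the first byte is clear, the three sync bytes follow at data + 1, and width and height are read as big-endian 16-bit values. A self-contained sketch of the same parse, assuming SYNC_CODE_0/1/2 equal the literals in the removed check (0x49, 0x83, 0x42):

  #include <stddef.h>
  #include <stdint.h>

  /* Assumed to match the literals in the check this change removes. */
  #define SYNC_CODE_0 0x49
  #define SYNC_CODE_1 0x83
  #define SYNC_CODE_2 0x42

  /* Illustrative restatement of the new key-frame peek: returns 0 and fills
   * *w / *h on success, -1 on anything the decoder would reject. */
  static int peek_vp9_keyframe(const uint8_t *data, size_t data_sz,
                               unsigned int *w, unsigned int *h) {
    const uint8_t *c;

    if (data_sz < 8 || (data[0] & 0x80))    /* key-frame bit must be clear */
      return -1;

    c = data + 1;
    if (c[0] != SYNC_CODE_0 || c[1] != SYNC_CODE_1 || c[2] != SYNC_CODE_2)
      return -1;                            /* unsupported bitstream */

    *w = (c[3] << 8) | c[4];                /* big-endian 16-bit width */
    *h = (c[5] << 8) | c[6];                /* big-endian 16-bit height */
    return (*w | *h) ? 0 : -1;              /* both zero -> unsupported */
  }
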
diff --git a/vp9/vp9cx.mk b/vp9/vp9cx.mk
index 42ab02d31..86fd08850 100644
--- a/vp9/vp9cx.mk
+++ b/vp9/vp9cx.mk
@@ -28,6 +28,7 @@ VP9_CX_SRCS-yes += encoder/vp9_encodemv.c
VP9_CX_SRCS-yes += encoder/vp9_firstpass.c
VP9_CX_SRCS-yes += encoder/vp9_block.h
VP9_CX_SRCS-yes += encoder/vp9_boolhuff.h
+VP9_CX_SRCS-yes += encoder/vp9_write_bit_buffer.h
VP9_CX_SRCS-yes += encoder/vp9_bitstream.h
VP9_CX_SRCS-yes += encoder/vp9_encodeintra.h
VP9_CX_SRCS-yes += encoder/vp9_encodemb.h
diff --git a/vp9/vp9dx.mk b/vp9/vp9dx.mk
index 3be0b6dde..7ae3219ca 100644
--- a/vp9/vp9dx.mk
+++ b/vp9/vp9dx.mk
@@ -24,6 +24,7 @@ VP9_DX_SRCS-yes += decoder/vp9_decodframe.c
VP9_DX_SRCS-yes += decoder/vp9_decodframe.h
VP9_DX_SRCS-yes += decoder/vp9_detokenize.c
VP9_DX_SRCS-yes += decoder/vp9_dboolhuff.h
+VP9_DX_SRCS-yes += decoder/vp9_read_bit_buffer.h
VP9_DX_SRCS-yes += decoder/vp9_decodemv.h
VP9_DX_SRCS-yes += decoder/vp9_detokenize.h
VP9_DX_SRCS-yes += decoder/vp9_onyxd.h
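
The two makefile additions ship a small bit-buffer header on each side: a writer for the encoder and a reader for the decoder, used for the byte-aligned uncompressed frame header. The real structs and function names live in vp9_write_bit_buffer.h / vp9_read_bit_buffer.h, which this diff does not show; the sketch below is only an assumed shape of such an MSB-first bit I/O pair:

  #include <stddef.h>
  #include <stdint.h>

  /* Assumed shape only; names and layout are illustrative, not taken from
   * the new headers themselves. */
  struct wb { uint8_t *buf; size_t bit_offset; };
  struct rb { const uint8_t *buf; size_t bit_offset; };

  static void wb_write_bit(struct wb *wb, int bit) {
    const size_t off = wb->bit_offset++;
    const size_t byte = off / 8;
    const int shift = 7 - (int)(off % 8);        /* MSB-first packing */
    if (shift == 7)
      wb->buf[byte] = (uint8_t)(bit << shift);   /* start a fresh byte */
    else
      wb->buf[byte] |= (uint8_t)(bit << shift);
  }

  static int rb_read_bit(struct rb *rb) {
    const size_t off = rb->bit_offset++;
    return (rb->buf[off / 8] >> (7 - off % 8)) & 1;
  }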