Diffstat (limited to 'vp8')
-rw-r--r--  vp8/common/alloccommon.c                     4
-rw-r--r--  vp8/common/arm/arm_systemdependent.c        20
-rw-r--r--  vp8/common/arm/loopfilter_arm.h             52
-rw-r--r--  vp8/common/blockd.h                         17
-rw-r--r--  vp8/common/coefupdateprobs.h                 3
-rw-r--r--  vp8/common/default_coef_probs.h              2
-rw-r--r--  vp8/common/entropy.c                         6
-rw-r--r--  vp8/common/entropy.h                         6
-rw-r--r--  vp8/common/findnearmv.c                     15
-rw-r--r--  vp8/common/generic/systemdependent.c        63
-rw-r--r--  vp8/common/idct.h                            4
-rw-r--r--  vp8/common/idctllm.c                         2
-rw-r--r--  vp8/common/invtrans.c                      148
-rw-r--r--  vp8/common/invtrans.h                       15
-rw-r--r--  vp8/common/loopfilter.c                    252
-rw-r--r--  vp8/common/loopfilter.h                     66
-rw-r--r--  vp8/common/loopfilter_filters.c             84
-rw-r--r--  vp8/common/onyxc_int.h                      23
-rw-r--r--  vp8/common/pred_common.c                    22
-rw-r--r--  vp8/common/pred_common.h                    20
-rw-r--r--  vp8/common/recon.c                          59
-rw-r--r--  vp8/common/recon.h                         273
-rw-r--r--  vp8/common/reconinter.c                     53
-rw-r--r--  vp8/common/reconinter.h                      7
-rw-r--r--  vp8/common/reconintra.c                      8
-rw-r--r--  vp8/common/reconintra4x4.c                   9
-rw-r--r--  vp8/common/rtcd_defs.sh                    157
-rw-r--r--  vp8/common/seg_common.c                      6
-rw-r--r--  vp8/common/seg_common.h                      6
-rw-r--r--  vp8/common/x86/loopfilter_x86.c             16
-rw-r--r--  vp8/common/x86/loopfilter_x86.h             54
-rw-r--r--  vp8/common/x86/recon_wrapper_sse2.c          3
-rw-r--r--  vp8/common/x86/recon_x86.h                  84
-rw-r--r--  vp8/common/x86/x86_systemdependent.c        33
-rw-r--r--  vp8/decoder/decodemv.c                     151
-rw-r--r--  vp8/decoder/decodemv.h                       1
-rw-r--r--  vp8/decoder/decodframe.c                   215
-rw-r--r--  vp8/decoder/dequantize.c                     2
-rw-r--r--  vp8/decoder/dequantize.h                     4
-rw-r--r--  vp8/decoder/detokenize.c                    39
-rw-r--r--  vp8/decoder/detokenize.h                     2
-rw-r--r--  vp8/decoder/generic/dsystemdependent.c       2
-rw-r--r--  vp8/decoder/onyxd_if.c                      34
-rw-r--r--  vp8/decoder/onyxd_int.h                     11
-rw-r--r--  vp8/decoder/reconintra_mt.c                  1
-rw-r--r--  vp8/encoder/bitstream.c                    524
-rw-r--r--  vp8/encoder/block.h                         17
-rw-r--r--  vp8/encoder/dct.c                            2
-rw-r--r--  vp8/encoder/dct.h                            5
-rw-r--r--  vp8/encoder/encodeframe.c                  334
-rw-r--r--  vp8/encoder/encodeintra.c                  236
-rw-r--r--  vp8/encoder/encodemb.c                     500
-rw-r--r--  vp8/encoder/encodemb.h                      16
-rw-r--r--  vp8/encoder/firstpass.c                      3
-rw-r--r--  vp8/encoder/generic/csystemdependent.c      14
-rw-r--r--  vp8/encoder/mbgraph.c                        2
-rw-r--r--  vp8/encoder/onyx_if.c                       25
-rw-r--r--  vp8/encoder/onyx_int.h                      19
-rw-r--r--  vp8/encoder/picklpf.c                        4
-rw-r--r--  vp8/encoder/quantize.c                     126
-rw-r--r--  vp8/encoder/quantize.h                      40
-rw-r--r--  vp8/encoder/ratectrl.c                      27
-rw-r--r--  vp8/encoder/rdopt.c                        838
-rw-r--r--  vp8/encoder/temporal_filter.c                6
-rw-r--r--  vp8/encoder/tokenize.c                    1066
-rw-r--r--  vp8/encoder/tokenize.h                       8
-rw-r--r--  vp8/encoder/x86/x86_csystemdependent.c      14
-rw-r--r--  vp8/vp8_common.mk                            2
-rw-r--r--  vp8/vp8cx.mk                                 2
69 files changed, 2365 insertions, 3519 deletions
diff --git a/vp8/common/alloccommon.c b/vp8/common/alloccommon.c
index 2c5b64cee..4323fdf4c 100644
--- a/vp8/common/alloccommon.c
+++ b/vp8/common/alloccommon.c
@@ -21,7 +21,7 @@
extern void vp8_init_scan_order_mask();
-static void update_mode_info_border(VP8_COMMON *cpi, MODE_INFO *mi_base) {
+void update_mode_info_border(VP8_COMMON *cpi, MODE_INFO *mi_base) {
int stride = cpi->mode_info_stride;
int i;
@@ -33,7 +33,7 @@ static void update_mode_info_border(VP8_COMMON *cpi, MODE_INFO *mi_base) {
vpx_memset(&mi_base[i * stride], 0, sizeof(MODE_INFO));
}
}
-static void update_mode_info_in_image(VP8_COMMON *cpi, MODE_INFO *mi) {
+void update_mode_info_in_image(VP8_COMMON *cpi, MODE_INFO *mi) {
int i, j;
// For each in image mode_info element set the in image flag to 1
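Dropping `static` from update_mode_info_border() and update_mode_info_in_image() gives both helpers external linkage so other translation units can call them. The matching declarations would look like the following sketch (the header they land in is an assumption, not shown in this diff):

    /* exported by alloccommon.c after this change (declaration site assumed) */
    void update_mode_info_border(VP8_COMMON *cpi, MODE_INFO *mi_base);
    void update_mode_info_in_image(VP8_COMMON *cpi, MODE_INFO *mi);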
diff --git a/vp8/common/arm/arm_systemdependent.c b/vp8/common/arm/arm_systemdependent.c
index 5fd5f5b1b..65a518188 100644
--- a/vp8/common/arm/arm_systemdependent.c
+++ b/vp8/common/arm/arm_systemdependent.c
@@ -49,17 +49,6 @@ void vp8_arch_arm_common_init(VP8_COMMON *ctx) {
// rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_v6;
// rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_v6;
- rtcd->loopfilter.normal_mb_v = vp8_loop_filter_mbv_armv6;
- rtcd->loopfilter.normal_b_v = vp8_loop_filter_bv_armv6;
- rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_armv6;
- rtcd->loopfilter.normal_b_h = vp8_loop_filter_bh_armv6;
- rtcd->loopfilter.simple_mb_v =
- vp8_loop_filter_simple_vertical_edge_armv6;
- rtcd->loopfilter.simple_b_v = vp8_loop_filter_bvs_armv6;
- rtcd->loopfilter.simple_mb_h =
- vp8_loop_filter_simple_horizontal_edge_armv6;
- rtcd->loopfilter.simple_b_h = vp8_loop_filter_bhs_armv6;
-
rtcd->recon.copy16x16 = vp8_copy_mem16x16_v6;
rtcd->recon.copy8x8 = vp8_copy_mem8x8_v6;
rtcd->recon.copy8x4 = vp8_copy_mem8x4_v6;
@@ -86,15 +75,6 @@ void vp8_arch_arm_common_init(VP8_COMMON *ctx) {
// rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_neon;
// rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_neon;
- rtcd->loopfilter.normal_mb_v = vp8_loop_filter_mbv_neon;
- rtcd->loopfilter.normal_b_v = vp8_loop_filter_bv_neon;
- rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_neon;
- rtcd->loopfilter.normal_b_h = vp8_loop_filter_bh_neon;
- rtcd->loopfilter.simple_mb_v = vp8_loop_filter_mbvs_neon;
- rtcd->loopfilter.simple_b_v = vp8_loop_filter_bvs_neon;
- rtcd->loopfilter.simple_mb_h = vp8_loop_filter_mbhs_neon;
- rtcd->loopfilter.simple_b_h = vp8_loop_filter_bhs_neon;
-
rtcd->recon.copy16x16 = vp8_copy_mem16x16_neon;
rtcd->recon.copy8x8 = vp8_copy_mem8x8_neon;
rtcd->recon.copy8x4 = vp8_copy_mem8x4_neon;
diff --git a/vp8/common/arm/loopfilter_arm.h b/vp8/common/arm/loopfilter_arm.h
index 390a547b0..5df2a181b 100644
--- a/vp8/common/arm/loopfilter_arm.h
+++ b/vp8/common/arm/loopfilter_arm.h
@@ -24,32 +24,6 @@ extern prototype_simple_loopfilter(vp8_loop_filter_bhs_armv6);
extern prototype_simple_loopfilter(vp8_loop_filter_simple_horizontal_edge_armv6);
extern prototype_simple_loopfilter(vp8_loop_filter_simple_vertical_edge_armv6);
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_lf_normal_mb_v
-#define vp8_lf_normal_mb_v vp8_loop_filter_mbv_armv6
-
-#undef vp8_lf_normal_b_v
-#define vp8_lf_normal_b_v vp8_loop_filter_bv_armv6
-
-#undef vp8_lf_normal_mb_h
-#define vp8_lf_normal_mb_h vp8_loop_filter_mbh_armv6
-
-#undef vp8_lf_normal_b_h
-#define vp8_lf_normal_b_h vp8_loop_filter_bh_armv6
-
-#undef vp8_lf_simple_mb_v
-#define vp8_lf_simple_mb_v vp8_loop_filter_simple_vertical_edge_armv6
-
-#undef vp8_lf_simple_b_v
-#define vp8_lf_simple_b_v vp8_loop_filter_bvs_armv6
-
-#undef vp8_lf_simple_mb_h
-#define vp8_lf_simple_mb_h vp8_loop_filter_simple_horizontal_edge_armv6
-
-#undef vp8_lf_simple_b_h
-#define vp8_lf_simple_b_h vp8_loop_filter_bhs_armv6
-#endif /* !CONFIG_RUNTIME_CPU_DETECT */
-
#endif /* HAVE_ARMV6 */
#if HAVE_ARMV7
@@ -62,32 +36,6 @@ extern prototype_simple_loopfilter(vp8_loop_filter_bvs_neon);
extern prototype_simple_loopfilter(vp8_loop_filter_mbhs_neon);
extern prototype_simple_loopfilter(vp8_loop_filter_bhs_neon);
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_lf_normal_mb_v
-#define vp8_lf_normal_mb_v vp8_loop_filter_mbv_neon
-
-#undef vp8_lf_normal_b_v
-#define vp8_lf_normal_b_v vp8_loop_filter_bv_neon
-
-#undef vp8_lf_normal_mb_h
-#define vp8_lf_normal_mb_h vp8_loop_filter_mbh_neon
-
-#undef vp8_lf_normal_b_h
-#define vp8_lf_normal_b_h vp8_loop_filter_bh_neon
-
-#undef vp8_lf_simple_mb_v
-#define vp8_lf_simple_mb_v vp8_loop_filter_mbvs_neon
-
-#undef vp8_lf_simple_b_v
-#define vp8_lf_simple_b_v vp8_loop_filter_bvs_neon
-
-#undef vp8_lf_simple_mb_h
-#define vp8_lf_simple_mb_h vp8_loop_filter_mbhs_neon
-
-#undef vp8_lf_simple_b_h
-#define vp8_lf_simple_b_h vp8_loop_filter_bhs_neon
-#endif /* !CONFIG_RUNTIME_CPU_DETECT */
-
#endif /* HAVE_ARMV7 */
#endif /* LOOPFILTER_ARM_H */
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index 6f980ad65..092d9ff21 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -52,10 +52,12 @@ typedef struct {
int r, c;
} POS;
-#define PLANE_TYPE_Y_NO_DC 0
-#define PLANE_TYPE_Y2 1
-#define PLANE_TYPE_UV 2
-#define PLANE_TYPE_Y_WITH_DC 3
+typedef enum PlaneType {
+ PLANE_TYPE_Y_NO_DC = 0,
+ PLANE_TYPE_Y2,
+ PLANE_TYPE_UV,
+ PLANE_TYPE_Y_WITH_DC,
+} PLANE_TYPE;
typedef char ENTROPY_CONTEXT;
typedef struct {
@@ -129,9 +131,7 @@ typedef enum {
typedef enum {
TX_4X4, // 4x4 dct transform
TX_8X8, // 8x8 dct transform
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
TX_16X16, // 16x16 dct transform
-#endif
TX_SIZE_MAX // Number of different transforms available
} TX_SIZE;
@@ -237,7 +237,6 @@ union b_mode_info {
struct {
B_PREDICTION_MODE first;
#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
- B_PREDICTION_MODE test;
TX_TYPE tx_type;
#endif
@@ -261,10 +260,6 @@ typedef enum {
typedef struct {
MB_PREDICTION_MODE mode, uv_mode;
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
- MB_PREDICTION_MODE mode_rdopt;
-#endif
-
#if CONFIG_COMP_INTRA_PRED
MB_PREDICTION_MODE second_mode, second_uv_mode;
#endif
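Converting the PLANE_TYPE #defines to an enum keeps the same numeric values but gives uses a real type, so a switch over a PLANE_TYPE can be checked for exhaustiveness (e.g. by -Wswitch). A minimal sketch under that assumption; the helper below is hypothetical and not part of the patch:

    static const char *plane_type_name(PLANE_TYPE pt) {
      switch (pt) {
        case PLANE_TYPE_Y_NO_DC:   return "Y (DC coded in Y2)";
        case PLANE_TYPE_Y2:        return "Y2 (second-order)";
        case PLANE_TYPE_UV:        return "chroma";
        case PLANE_TYPE_Y_WITH_DC: return "Y (DC included)";
      }
      return "unknown";  /* unreachable once every member is handled */
    }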
diff --git a/vp8/common/coefupdateprobs.h b/vp8/common/coefupdateprobs.h
index 0610356cc..185bc6d84 100644
--- a/vp8/common/coefupdateprobs.h
+++ b/vp8/common/coefupdateprobs.h
@@ -13,7 +13,4 @@
Generated file included by entropy.c */
#define COEF_UPDATE_PROB 252
#define COEF_UPDATE_PROB_8X8 252
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
#define COEF_UPDATE_PROB_16X16 252
-#endif
-
diff --git a/vp8/common/default_coef_probs.h b/vp8/common/default_coef_probs.h
index 848104b12..5e21195ee 100644
--- a/vp8/common/default_coef_probs.h
+++ b/vp8/common/default_coef_probs.h
@@ -966,7 +966,6 @@ default_hybrid_coef_probs_8x8[BLOCK_TYPES_8X8]
};
#endif
-#if CONFIG_TX16X16
static const vp8_prob
default_coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS]
@@ -1382,4 +1381,3 @@ static const vp8_prob
}
};
#endif
-#endif
diff --git a/vp8/common/entropy.c b/vp8/common/entropy.c
index 67242d5ea..90f7a52c2 100644
--- a/vp8/common/entropy.c
+++ b/vp8/common/entropy.c
@@ -97,7 +97,6 @@ DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]) = {
58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63,
};
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
// Table can be optimized.
DECLARE_ALIGNED(16, const int, vp8_coef_bands_16x16[256]) = {
0, 1, 2, 3, 5, 4, 4, 5, 5, 3, 6, 3, 5, 4, 6, 6,
@@ -135,7 +134,6 @@ DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d_16x16[256]) = {
203, 218, 233, 248, 249, 234, 219, 204, 189, 174, 159, 175, 190, 205, 220, 235,
250, 251, 236, 221, 206, 191, 207, 222, 237, 252, 253, 238, 223, 239, 254, 255,
};
-#endif
/* Array indices are identical to previously-existing CONTEXT_NODE indices */
@@ -222,7 +220,6 @@ void vp8_default_coef_probs(VP8_COMMON *pc) {
sizeof(pc->fc.hybrid_coef_probs_8x8));
#endif
-#if CONFIG_TX16X16
vpx_memcpy(pc->fc.coef_probs_16x16, default_coef_probs_16x16,
sizeof(pc->fc.coef_probs_16x16));
#if CONFIG_HYBRIDTRANSFORM16X16
@@ -230,7 +227,6 @@ void vp8_default_coef_probs(VP8_COMMON *pc) {
default_hybrid_coef_probs_16x16,
sizeof(pc->fc.hybrid_coef_probs_16x16));
#endif
-#endif
}
void vp8_coef_tree_initialize() {
@@ -419,7 +415,6 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
}
#endif
-#if CONFIG_TX16X16
for (i = 0; i < BLOCK_TYPES_16X16; ++i)
for (j = 0; j < COEF_BANDS; ++j)
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -464,5 +459,4 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
}
}
#endif
-#endif
}
diff --git a/vp8/common/entropy.h b/vp8/common/entropy.h
index b3d3eff9f..b9dfb344f 100644
--- a/vp8/common/entropy.h
+++ b/vp8/common/entropy.h
@@ -37,9 +37,9 @@ extern const int vp8_i8x8_block[4];
#define DCT_VAL_CATEGORY5 9 /* 35-66 Extra Bits 5+1 */
#define DCT_VAL_CATEGORY6 10 /* 67+ Extra Bits 13+1 */
#define DCT_EOB_TOKEN 11 /* EOB Extra Bits 0+0 */
-
#define MAX_ENTROPY_TOKENS 12
#define ENTROPY_NODES 11
+#define EOSB_TOKEN 127 /* Not signalled, encoder only */
extern const vp8_tree_index vp8_coef_tree[];
@@ -74,9 +74,7 @@ extern vp8_extra_bit_struct vp8_extra_bits[12]; /* indexed by token value */
#define COEF_BANDS 8
extern DECLARE_ALIGNED(16, const int, vp8_coef_bands[16]);
extern DECLARE_ALIGNED(64, const int, vp8_coef_bands_8x8[64]);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
extern DECLARE_ALIGNED(16, const int, vp8_coef_bands_16x16[256]);
-#endif
/* Inside dimension is 3-valued measure of nearby complexity, that is,
the extent to which nearby coefficients are nonzero. For the first
@@ -115,9 +113,7 @@ extern short vp8_default_zig_zag_mask[16];
extern DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]);
void vp8_coef_tree_initialize(void);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d_16x16[256]);
-#endif
void vp8_adapt_coef_probs(struct VP8Common *);
#endif
diff --git a/vp8/common/findnearmv.c b/vp8/common/findnearmv.c
index 235ca46ce..285aabdb6 100644
--- a/vp8/common/findnearmv.c
+++ b/vp8/common/findnearmv.c
@@ -217,7 +217,7 @@ void vp8_find_best_ref_mvs(MACROBLOCKD *xd,
unsigned char *above_ref;
unsigned char *left_ref;
int sad;
- int sad_scores[MAX_MV_REFS];
+ int sad_scores[MAX_MV_REFS] = {0};
int_mv sorted_mvs[MAX_MV_REFS];
int zero_seen = FALSE;
@@ -259,12 +259,13 @@ void vp8_find_best_ref_mvs(MACROBLOCKD *xd,
((this_mv.as_mv.col + 3) >> 3):((this_mv.as_mv.col + 4) >> 3);
offset = ref_y_stride * row_offset + col_offset;
- sad = vp8_sad16x3_c(above_src, xd->dst.y_stride,
- above_ref + offset, ref_y_stride, INT_MAX);
-
- sad += vp8_sad3x16_c(left_src, xd->dst.y_stride,
- left_ref + offset, ref_y_stride, INT_MAX);
-
+ sad = 0;
+ if (xd->up_available)
+ sad += vp8_sad16x3_c(above_src, xd->dst.y_stride,
+ above_ref + offset, ref_y_stride, INT_MAX);
+ if (xd->left_available)
+ sad += vp8_sad3x16_c(left_src, xd->dst.y_stride,
+ left_ref + offset, ref_y_stride, INT_MAX);
// Add the entry to our list and then resort the list on score.
sad_scores[i] = sad;
sorted_mvs[i].as_int = this_mv.as_int;
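Two fixes here: the up_available/left_available guards stop edge macroblocks from computing a SAD against above/left border pixels that were never written, and the `= {0}` initializer zero-fills the whole sad_scores array (in C, the remaining elements of a partially initialized aggregate are zero-initialized), so candidates whose SAD is never accumulated still carry a defined score into the sort. A one-line illustration of the initializer:

    int sad_scores[MAX_MV_REFS] = {0};  /* every element is 0, not only the first */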
diff --git a/vp8/common/generic/systemdependent.c b/vp8/common/generic/systemdependent.c
index 32b5e5a6c..0acae30f5 100644
--- a/vp8/common/generic/systemdependent.c
+++ b/vp8/common/generic/systemdependent.c
@@ -14,7 +14,6 @@
#include "vp8/common/g_common.h"
#include "vp8/common/subpixel.h"
#include "vp8/common/loopfilter.h"
-#include "vp8/common/recon.h"
#include "vp8/common/idct.h"
#include "vp8/common/onyxc_int.h"
@@ -33,60 +32,7 @@ void vp8_machine_specific_config(VP8_COMMON *ctx) {
rtcd->idct.idct8 = vp8_short_idct8x8_c;
rtcd->idct.idct1_scalar_add_8x8 = vp8_dc_only_idct_add_8x8_c;
rtcd->idct.ihaar2 = vp8_short_ihaar2x2_c;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
rtcd->idct.idct16x16 = vp8_short_idct16x16_c;
-#endif
- rtcd->recon.copy16x16 = vp8_copy_mem16x16_c;
- rtcd->recon.copy8x8 = vp8_copy_mem8x8_c;
- rtcd->recon.avg16x16 = vp8_avg_mem16x16_c;
- rtcd->recon.avg8x8 = vp8_avg_mem8x8_c;
- rtcd->recon.copy8x4 = vp8_copy_mem8x4_c;
- rtcd->recon.recon = vp8_recon_b_c;
- rtcd->recon.recon_uv = vp8_recon_uv_b_c;
- rtcd->recon.recon2 = vp8_recon2b_c;
- rtcd->recon.recon4 = vp8_recon4b_c;
- rtcd->recon.recon_mb = vp8_recon_mb_c;
- rtcd->recon.recon_mby = vp8_recon_mby_c;
-#if CONFIG_SUPERBLOCKS
- rtcd->recon.build_intra_predictors_sby_s =
- vp8_build_intra_predictors_sby_s;
- rtcd->recon.build_intra_predictors_sbuv_s =
- vp8_build_intra_predictors_sbuv_s;
-#endif
- rtcd->recon.build_intra_predictors_mby =
- vp8_build_intra_predictors_mby;
-#if CONFIG_COMP_INTRA_PRED
- rtcd->recon.build_comp_intra_predictors_mby =
- vp8_build_comp_intra_predictors_mby;
-#endif
- rtcd->recon.build_intra_predictors_mby_s =
- vp8_build_intra_predictors_mby_s;
- rtcd->recon.build_intra_predictors_mbuv =
- vp8_build_intra_predictors_mbuv;
- rtcd->recon.build_intra_predictors_mbuv_s =
- vp8_build_intra_predictors_mbuv_s;
-#if CONFIG_COMP_INTRA_PRED
- rtcd->recon.build_comp_intra_predictors_mbuv =
- vp8_build_comp_intra_predictors_mbuv;
-#endif
- rtcd->recon.intra4x4_predict =
- vp8_intra4x4_predict;
-#if CONFIG_COMP_INTRA_PRED
- rtcd->recon.comp_intra4x4_predict =
- vp8_comp_intra4x4_predict;
-#endif
- rtcd->recon.intra8x8_predict =
- vp8_intra8x8_predict;
-#if CONFIG_COMP_INTRA_PRED
- rtcd->recon.comp_intra8x8_predict =
- vp8_comp_intra8x8_predict;
-#endif
- rtcd->recon.intra_uv4x4_predict =
- vp8_intra_uv4x4_predict;
-#if CONFIG_COMP_INTRA_PRED
- rtcd->recon.comp_intra_uv4x4_predict =
- vp8_comp_intra_uv4x4_predict;
-#endif
rtcd->subpix.eighttap16x16 = vp8_eighttap_predict16x16_c;
rtcd->subpix.eighttap8x8 = vp8_eighttap_predict8x8_c;
@@ -118,15 +64,6 @@ void vp8_machine_specific_config(VP8_COMMON *ctx) {
rtcd->subpix.bilinear4x4 = vp8_bilinear_predict4x4_c;
rtcd->subpix.bilinear_avg4x4 = vp8_bilinear_predict_avg4x4_c;
- rtcd->loopfilter.normal_mb_v = vp8_loop_filter_mbv_c;
- rtcd->loopfilter.normal_b_v = vp8_loop_filter_bv_c;
- rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_c;
- rtcd->loopfilter.normal_b_h = vp8_loop_filter_bh_c;
- rtcd->loopfilter.simple_mb_v = vp8_loop_filter_simple_vertical_edge_c;
- rtcd->loopfilter.simple_b_v = vp8_loop_filter_bvs_c;
- rtcd->loopfilter.simple_mb_h = vp8_loop_filter_simple_horizontal_edge_c;
- rtcd->loopfilter.simple_b_h = vp8_loop_filter_bhs_c;
-
#if CONFIG_POSTPROC || (CONFIG_VP8_ENCODER && CONFIG_INTERNAL_STATS)
rtcd->postproc.down = vp8_mbpost_proc_down_c;
rtcd->postproc.across = vp8_mbpost_proc_across_ip_c;
diff --git a/vp8/common/idct.h b/vp8/common/idct.h
index a4246c2a7..d096e8182 100644
--- a/vp8/common/idct.h
+++ b/vp8/common/idct.h
@@ -43,12 +43,10 @@
#define Y2_WHT_UPSCALE_FACTOR 2
#endif
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
#ifndef vp8_idct_idct16x16
#define vp8_idct_idct16x16 vp8_short_idct16x16_c
#endif
extern prototype_idct(vp8_idct_idct16x16);
-#endif
#ifndef vp8_idct_idct8
#define vp8_idct_idct8 vp8_short_idct8x8_c
@@ -136,9 +134,7 @@ typedef struct {
vp8_idct_fn_t ihaar2;
vp8_idct_fn_t ihaar2_1;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
vp8_idct_fn_t idct16x16;
-#endif
} vp8_idct_rtcd_vtable_t;
#if CONFIG_RUNTIME_CPU_DETECT
diff --git a/vp8/common/idctllm.c b/vp8/common/idctllm.c
index 5eb9d0c8a..7b3ac36a5 100644
--- a/vp8/common/idctllm.c
+++ b/vp8/common/idctllm.c
@@ -779,7 +779,6 @@ void vp8_short_ihaar2x2_c(short *input, short *output, int pitch) {
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
#if 0
// Keep a really bad float version as reference for now.
void vp8_short_idct16x16_c(short *input, short *output, int pitch) {
@@ -1070,4 +1069,3 @@ void vp8_short_idct16x16_c(short *input, short *output, int pitch) {
}
vp8_clear_system_state(); // Make it simd safe : __asm emms;
}
-#endif
diff --git a/vp8/common/invtrans.c b/vp8/common/invtrans.c
index 9fc94eab8..e0f4d6f19 100644
--- a/vp8/common/invtrans.c
+++ b/vp8/common/invtrans.c
@@ -8,11 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include "invtrans.h"
-
-
static void recon_dcblock(MACROBLOCKD *xd) {
BLOCKD *b = &xd->block[24];
int i;
@@ -20,103 +17,78 @@ static void recon_dcblock(MACROBLOCKD *xd) {
for (i = 0; i < 16; i++) {
xd->block[i].dqcoeff[0] = b->diff[i];
}
-
}
+
static void recon_dcblock_8x8(MACROBLOCKD *xd) {
BLOCKD *b = &xd->block[24]; // for coeff 0, 2, 8, 10
+
xd->block[0].dqcoeff[0] = b->diff[0];
xd->block[4].dqcoeff[0] = b->diff[1];
xd->block[8].dqcoeff[0] = b->diff[4];
xd->block[12].dqcoeff[0] = b->diff[8];
-
-}
-
-#if CONFIG_HYBRIDTRANSFORM
-void vp8_inverse_htransform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch) {
- vp8_ihtllm_c(b->dqcoeff, b->diff, pitch, b->bmi.as_mode.tx_type, 4);
}
-#endif
-void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch) {
+void vp8_inverse_transform_b_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
+ BLOCKD *b, int pitch) {
if (b->eob <= 1)
IDCT_INVOKE(rtcd, idct1)(b->dqcoeff, b->diff, pitch);
else
IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->diff, pitch);
}
-
-void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd,
- MACROBLOCKD *xd) {
- int i;
- BLOCKD *blockd = xd->block;
-
- /* do 2nd order transform on the dc block */
- IDCT_INVOKE(rtcd, iwalsh16)(blockd[24].dqcoeff, blockd[24].diff);
-
- recon_dcblock(xd);
-
- for (i = 0; i < 16; i++) {
- vp8_inverse_transform_b(rtcd, &blockd[i], 32);
- }
-
-}
-void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd,
- MACROBLOCKD *xd) {
- int i;
- BLOCKD *blockd = xd->block;
-
- for (i = 16; i < 24; i++) {
- vp8_inverse_transform_b(rtcd, &blockd[i], 16);
- }
-
-}
-
-
-void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd,
- MACROBLOCKD *xd) {
+void vp8_inverse_transform_mby_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
+ MACROBLOCKD *xd) {
int i;
BLOCKD *blockd = xd->block;
- if (xd->mode_info_context->mbmi.mode != B_PRED &&
- xd->mode_info_context->mbmi.mode != I8X8_PRED &&
- xd->mode_info_context->mbmi.mode != SPLITMV) {
+ if (xd->mode_info_context->mbmi.mode != SPLITMV) {
/* do 2nd order transform on the dc block */
-
- IDCT_INVOKE(rtcd, iwalsh16)(&blockd[24].dqcoeff[0], blockd[24].diff);
+ IDCT_INVOKE(rtcd, iwalsh16)(blockd[24].dqcoeff, blockd[24].diff);
recon_dcblock(xd);
}
for (i = 0; i < 16; i++) {
- vp8_inverse_transform_b(rtcd, &blockd[i], 32);
+ vp8_inverse_transform_b_4x4(rtcd, &blockd[i], 32);
}
+}
+void vp8_inverse_transform_mbuv_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
+ MACROBLOCKD *xd) {
+ int i;
+ BLOCKD *blockd = xd->block;
for (i = 16; i < 24; i++) {
- vp8_inverse_transform_b(rtcd, &blockd[i], 16);
+ vp8_inverse_transform_b_4x4(rtcd, &blockd[i], 16);
}
-
}
+void vp8_inverse_transform_mb_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
+ MACROBLOCKD *xd) {
+ vp8_inverse_transform_mby_4x4(rtcd, xd);
+ vp8_inverse_transform_mbuv_4x4(rtcd, xd);
+}
-void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch) { // pay attention to use when 8x8
+void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
+ short *input_dqcoeff, short *output_coeff,
+ int pitch) {
// int b,i;
// if (b->eob > 1)
IDCT_INVOKE(rtcd, idct8)(input_dqcoeff, output_coeff, pitch);
// else
// IDCT_INVOKE(rtcd, idct8_1)(b->dqcoeff, b->diff, pitch);//pitch
-
}
-
void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
int i;
BLOCKD *blockd = xd->block;
- // do 2nd order transform on the dc block
- IDCT_INVOKE(rtcd, ihaar2)(blockd[24].dqcoeff, blockd[24].diff, 8);
+ if (xd->mode_info_context->mbmi.mode != SPLITMV) {
+ // do 2nd order transform on the dc block
+ IDCT_INVOKE(rtcd, ihaar2)(blockd[24].dqcoeff, blockd[24].diff, 8);
+ recon_dcblock_8x8(xd); // need to change for 8x8
+ }
- recon_dcblock_8x8(xd); // need to change for 8x8
for (i = 0; i < 9; i += 8) {
vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
&blockd[i].diff[0], 32);
@@ -125,8 +97,8 @@ void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
vp8_inverse_transform_b_8x8(rtcd, &blockd[i + 2].dqcoeff[0],
&blockd[i].diff[0], 32);
}
-
}
+
void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
int i;
@@ -136,42 +108,14 @@ void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
&blockd[i].diff[0], 16);
}
-
}
-
void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
- int i;
- BLOCKD *blockd = xd->block;
-
- if (xd->mode_info_context->mbmi.mode != B_PRED &&
- xd->mode_info_context->mbmi.mode != SPLITMV) {
- // do 2nd order transform on the dc block
-
- IDCT_INVOKE(rtcd, ihaar2)(&blockd[24].dqcoeff[0],
- blockd[24].diff, 8);// dqcoeff[0]
- recon_dcblock_8x8(xd); // need to change for 8x8
-
- }
-
- for (i = 0; i < 9; i += 8) {
- vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
- &blockd[i].diff[0], 32);
- }
- for (i = 2; i < 11; i += 8) {
- vp8_inverse_transform_b_8x8(rtcd, &blockd[i + 2].dqcoeff[0],
- &blockd[i].diff[0], 32);
- }
-
- for (i = 16; i < 24; i += 4) {
- vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
- &blockd[i].diff[0], 16);
- }
-
+ vp8_inverse_transform_mby_8x8(rtcd, xd);
+ vp8_inverse_transform_mbuv_8x8(rtcd, xd);
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void vp8_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
short *input_dqcoeff,
short *output_coeff, int pitch) {
@@ -180,34 +124,12 @@ void vp8_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
void vp8_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
- vp8_inverse_transform_b_16x16(rtcd, &xd->block[0].dqcoeff[0],
- &xd->block[0].diff[0], 32);
-}
-
-// U,V blocks are 8x8 per macroblock, so just run 8x8
-void vp8_inverse_transform_mbuv_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
- MACROBLOCKD *xd) {
- int i;
- BLOCKD *blockd = xd->block;
-
- for (i = 16; i < 24; i += 4)
- vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
- &blockd[i].diff[0], 16);
+ vp8_inverse_transform_b_16x16(rtcd, &xd->block[0].dqcoeff[0],
+ &xd->block[0].diff[0], 32);
}
void vp8_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
- int i;
- BLOCKD *blockd = xd->block;
-
- // Luma
- vp8_inverse_transform_b_16x16(rtcd, &blockd[0].dqcoeff[0],
- &blockd[0].diff[0], 32);
-
- // U, V
- // Chroma blocks are downscaled, so run an 8x8 on them.
- for (i = 16; i < 24; i+= 4)
- vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
- &blockd[i].diff[0], 16);
+ vp8_inverse_transform_mby_16x16(rtcd, xd);
+ vp8_inverse_transform_mbuv_8x8(rtcd, xd);
}
-#endif
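After this refactor each macroblock-level inverse transform is a thin composition of its luma and chroma helpers instead of a re-implementation. The resulting call structure, as read from the patch:

    vp8_inverse_transform_mb_4x4:   mby_4x4   then mbuv_4x4
    vp8_inverse_transform_mb_8x8:   mby_8x8   then mbuv_8x8
    vp8_inverse_transform_mb_16x16: mby_16x16 then mbuv_8x8  /* chroma is 8x8 per MB, so it keeps the 8x8 path */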
diff --git a/vp8/common/invtrans.h b/vp8/common/invtrans.h
index 2097c368c..47a9caaf2 100644
--- a/vp8/common/invtrans.h
+++ b/vp8/common/invtrans.h
@@ -16,26 +16,19 @@
#include "idct.h"
#include "blockd.h"
-#if CONFIG_HYBRIDTRANSFORM
-extern void vp8_inverse_htransform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
-#endif
-
-extern void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
-extern void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
-extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
-extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_b_4x4(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
+extern void vp8_inverse_transform_mb_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_mby_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_mbuv_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
extern void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch);
extern void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
extern void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
extern void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
extern void vp8_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
short *input_dqcoeff, short *output_coeff,
int pitch);
extern void vp8_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
extern void vp8_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
-extern void vp8_inverse_transform_mbuv_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
-#endif
#endif
diff --git a/vp8/common/loopfilter.c b/vp8/common/loopfilter.c
index 727326cba..b34ee0272 100644
--- a/vp8/common/loopfilter.c
+++ b/vp8/common/loopfilter.c
@@ -16,102 +16,6 @@
#include "vp8/common/seg_common.h"
-typedef unsigned char uc;
-
-prototype_loopfilter(vp8_loop_filter_horizontal_edge_c);
-prototype_loopfilter(vp8_loop_filter_vertical_edge_c);
-
-
-prototype_loopfilter(vp8_mbloop_filter_horizontal_edge_c);
-prototype_loopfilter(vp8_mbloop_filter_vertical_edge_c);
-
-prototype_simple_loopfilter(vp8_loop_filter_simple_horizontal_edge_c);
-prototype_simple_loopfilter(vp8_loop_filter_simple_vertical_edge_c);
-
-/* Horizontal MB filtering */
-void vp8_loop_filter_mbh_c(unsigned char *y_ptr, unsigned char *u_ptr,
- unsigned char *v_ptr, int y_stride, int uv_stride,
- loop_filter_info *lfi) {
- vp8_mbloop_filter_horizontal_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
-
- if (u_ptr)
- vp8_mbloop_filter_horizontal_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
-
- if (v_ptr)
- vp8_mbloop_filter_horizontal_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
-}
-
-/* Vertical MB Filtering */
-void vp8_loop_filter_mbv_c(unsigned char *y_ptr, unsigned char *u_ptr,
- unsigned char *v_ptr, int y_stride, int uv_stride,
- loop_filter_info *lfi) {
- vp8_mbloop_filter_vertical_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
-
- if (u_ptr)
- vp8_mbloop_filter_vertical_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
-
- if (v_ptr)
- vp8_mbloop_filter_vertical_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
-}
-
-/* Horizontal B Filtering */
-void vp8_loop_filter_bh_c(unsigned char *y_ptr, unsigned char *u_ptr,
- unsigned char *v_ptr, int y_stride, int uv_stride,
- loop_filter_info *lfi) {
- vp8_loop_filter_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
- vp8_loop_filter_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
- vp8_loop_filter_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
-
- if (u_ptr)
- vp8_loop_filter_horizontal_edge_c(u_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
-
- if (v_ptr)
- vp8_loop_filter_horizontal_edge_c(v_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
-}
-
-void vp8_loop_filter_bh8x8_c(unsigned char *y_ptr, unsigned char *u_ptr,
- unsigned char *v_ptr, int y_stride, int uv_stride,
- loop_filter_info *lfi) {
- vp8_mbloop_filter_horizontal_edge_c(
- y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
-}
-
-void vp8_loop_filter_bhs_c(unsigned char *y_ptr, int y_stride,
- const unsigned char *blimit) {
- vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, blimit);
- vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, blimit);
- vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, blimit);
-}
-
-/* Vertical B Filtering */
-void vp8_loop_filter_bv_c(unsigned char *y_ptr, unsigned char *u_ptr,
- unsigned char *v_ptr, int y_stride, int uv_stride,
- loop_filter_info *lfi) {
- vp8_loop_filter_vertical_edge_c(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
- vp8_loop_filter_vertical_edge_c(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
- vp8_loop_filter_vertical_edge_c(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
-
- if (u_ptr)
- vp8_loop_filter_vertical_edge_c(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
-
- if (v_ptr)
- vp8_loop_filter_vertical_edge_c(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
-}
-
-void vp8_loop_filter_bv8x8_c(unsigned char *y_ptr, unsigned char *u_ptr,
- unsigned char *v_ptr, int y_stride, int uv_stride,
- loop_filter_info *lfi) {
- vp8_mbloop_filter_vertical_edge_c(
- y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
-}
-
-void vp8_loop_filter_bvs_c(unsigned char *y_ptr, int y_stride,
- const unsigned char *blimit) {
- vp8_loop_filter_simple_vertical_edge_c(y_ptr + 4, y_stride, blimit);
- vp8_loop_filter_simple_vertical_edge_c(y_ptr + 8, y_stride, blimit);
- vp8_loop_filter_simple_vertical_edge_c(y_ptr + 12, y_stride, blimit);
-}
-
static void lf_init_lut(loop_filter_info_n *lfi) {
int filt_lvl;
@@ -281,7 +185,7 @@ void vp8_loop_filter_frame
) {
YV12_BUFFER_CONFIG *post = cm->frame_to_show;
loop_filter_info_n *lfi_n = &cm->lf_info;
- loop_filter_info lfi;
+ struct loop_filter_info lfi;
FRAME_TYPE frame_type = cm->frame_type;
@@ -332,20 +236,16 @@ void vp8_loop_filter_frame
mode_info_context[-1].mbmi.mb_skip_coeff)
#endif
)
- vp8_loop_filter_mbv_c
- (y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
+ vp8_loop_filter_mbv(y_ptr, u_ptr, v_ptr, post->y_stride,
+ post->uv_stride, &lfi);
- if (!skip_lf
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- && tx_type != TX_16X16
-#endif
- ) {
+ if (!skip_lf && tx_type != TX_16X16) {
if (tx_type == TX_8X8)
- vp8_loop_filter_bv8x8_c
- (y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
+ vp8_loop_filter_bv8x8(y_ptr, u_ptr, v_ptr, post->y_stride,
+ post->uv_stride, &lfi);
else
- LF_INVOKE(&cm->rtcd.loopfilter, normal_b_v)
- (y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
+ vp8_loop_filter_bv(y_ptr, u_ptr, v_ptr, post->y_stride,
+ post->uv_stride, &lfi);
}
@@ -357,20 +257,16 @@ void vp8_loop_filter_frame
mode_info_context[-cm->mode_info_stride].mbmi.mb_skip_coeff)
#endif
)
- vp8_loop_filter_mbh_c
- (y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
+ vp8_loop_filter_mbh(y_ptr, u_ptr, v_ptr, post->y_stride,
+ post->uv_stride, &lfi);
- if (!skip_lf
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- && tx_type != TX_16X16
-#endif
- ) {
+ if (!skip_lf && tx_type != TX_16X16) {
if (tx_type == TX_8X8)
- vp8_loop_filter_bh8x8_c
- (y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
+ vp8_loop_filter_bh8x8(y_ptr, u_ptr, v_ptr, post->y_stride,
+ post->uv_stride, &lfi);
else
- LF_INVOKE(&cm->rtcd.loopfilter, normal_b_h)
- (y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
+ vp8_loop_filter_bh(y_ptr, u_ptr, v_ptr, post->y_stride,
+ post->uv_stride, &lfi);
}
} else {
// FIXME: Not 8x8 aware
@@ -381,12 +277,12 @@ void vp8_loop_filter_frame
mode_info_context[-1].mbmi.mb_skip_coeff)
#endif
)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_mb_v)
- (y_ptr, post->y_stride, lfi_n->mblim[filter_level]);
+ vp8_loop_filter_simple_mbv(y_ptr, post->y_stride,
+ lfi_n->mblim[filter_level]);
if (!skip_lf)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_b_v)
- (y_ptr, post->y_stride, lfi_n->blim[filter_level]);
+ vp8_loop_filter_simple_bv(y_ptr, post->y_stride,
+ lfi_n->blim[filter_level]);
/* don't apply across umv border */
if (mb_row > 0
@@ -396,12 +292,12 @@ void vp8_loop_filter_frame
mode_info_context[-cm->mode_info_stride].mbmi.mb_skip_coeff)
#endif
)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_mb_h)
- (y_ptr, post->y_stride, lfi_n->mblim[filter_level]);
+ vp8_loop_filter_simple_mbh(y_ptr, post->y_stride,
+ lfi_n->mblim[filter_level]);
if (!skip_lf)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_b_h)
- (y_ptr, post->y_stride, lfi_n->blim[filter_level]);
+ vp8_loop_filter_simple_bh(y_ptr, post->y_stride,
+ lfi_n->blim[filter_level]);
}
}
@@ -433,7 +329,7 @@ void vp8_loop_filter_frame_yonly
int mb_col;
loop_filter_info_n *lfi_n = &cm->lf_info;
- loop_filter_info lfi;
+ struct loop_filter_info lfi;
int filter_level;
FRAME_TYPE frame_type = cm->frame_type;
@@ -475,57 +371,43 @@ void vp8_loop_filter_frame_yonly
lfi.hev_thr = lfi_n->hev_thr[hev_index];
if (mb_col > 0)
- vp8_loop_filter_mbv_c
- (y_ptr, 0, 0, post->y_stride, 0, &lfi);
+ vp8_loop_filter_mbv(y_ptr, 0, 0, post->y_stride, 0, &lfi);
- if (!skip_lf
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- && tx_type != TX_16X16
-#endif
- ) {
+ if (!skip_lf && tx_type != TX_16X16) {
if (tx_type == TX_8X8)
- vp8_loop_filter_bv8x8_c
- (y_ptr, 0, 0, post->y_stride, 0, &lfi);
+ vp8_loop_filter_bv8x8(y_ptr, 0, 0, post->y_stride, 0, &lfi);
else
- LF_INVOKE(&cm->rtcd.loopfilter, normal_b_v)
- (y_ptr, 0, 0, post->y_stride, 0, &lfi);
+ vp8_loop_filter_bv(y_ptr, 0, 0, post->y_stride, 0, &lfi);
}
/* don't apply across umv border */
if (mb_row > 0)
- vp8_loop_filter_mbh_c
- (y_ptr, 0, 0, post->y_stride, 0, &lfi);
+ vp8_loop_filter_mbh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
- if (!skip_lf
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- && tx_type != TX_16X16
-#endif
- ) {
+ if (!skip_lf && tx_type != TX_16X16) {
if (tx_type == TX_8X8)
- vp8_loop_filter_bh8x8_c
- (y_ptr, 0, 0, post->y_stride, 0, &lfi);
+ vp8_loop_filter_bh8x8(y_ptr, 0, 0, post->y_stride, 0, &lfi);
else
- LF_INVOKE(&cm->rtcd.loopfilter, normal_b_h)
- (y_ptr, 0, 0, post->y_stride, 0, &lfi);
+ vp8_loop_filter_bh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
}
} else {
// FIXME: Not 8x8 aware
if (mb_col > 0)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_mb_v)
- (y_ptr, post->y_stride, lfi_n->mblim[filter_level]);
+ vp8_loop_filter_simple_mbv(y_ptr, post->y_stride,
+ lfi_n->mblim[filter_level]);
if (!skip_lf)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_b_v)
- (y_ptr, post->y_stride, lfi_n->blim[filter_level]);
+ vp8_loop_filter_simple_bv(y_ptr, post->y_stride,
+ lfi_n->blim[filter_level]);
/* don't apply across umv border */
if (mb_row > 0)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_mb_h)
- (y_ptr, post->y_stride, lfi_n->mblim[filter_level]);
+ vp8_loop_filter_simple_mbh(y_ptr, post->y_stride,
+ lfi_n->mblim[filter_level]);
if (!skip_lf)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_b_h)
- (y_ptr, post->y_stride, lfi_n->blim[filter_level]);
+ vp8_loop_filter_simple_bh(y_ptr, post->y_stride,
+ lfi_n->blim[filter_level]);
}
}
@@ -552,7 +434,7 @@ void vp8_loop_filter_frame_segment(VP8_COMMON *cm, MACROBLOCKD *xd,
int mb_col;
loop_filter_info_n *lfi_n = &cm->lf_info;
- loop_filter_info lfi;
+ struct loop_filter_info lfi;
int filter_level;
FRAME_TYPE frame_type = cm->frame_type;
@@ -605,41 +487,35 @@ void vp8_loop_filter_frame_segment(VP8_COMMON *cm, MACROBLOCKD *xd,
lfi.hev_thr = lfi_n->hev_thr[hev_index];
if (mb_col > 0)
- vp8_loop_filter_mbv_c(y_ptr, 0, 0, post->y_stride, 0,
+ vp8_loop_filter_mbv(y_ptr, 0, 0, post->y_stride, 0,
&lfi);
if (!skip_lf)
- LF_INVOKE(&cm->rtcd.loopfilter, normal_b_v)(
- y_ptr, 0, 0, post->y_stride, 0, &lfi);
+ vp8_loop_filter_bv(y_ptr, 0, 0, post->y_stride, 0, &lfi);
/* don't apply across umv border */
if (mb_row > 0)
- vp8_loop_filter_mbh_c(y_ptr, 0, 0, post->y_stride, 0,
+ vp8_loop_filter_mbh(y_ptr, 0, 0, post->y_stride, 0,
&lfi);
if (!skip_lf)
- LF_INVOKE(&cm->rtcd.loopfilter, normal_b_h)(
- y_ptr, 0, 0, post->y_stride, 0, &lfi);
+ vp8_loop_filter_bh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
} else {
if (mb_col > 0)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_mb_v)(
- y_ptr, post->y_stride,
+ vp8_loop_filter_simple_mbv(y_ptr, post->y_stride,
lfi_n->mblim[filter_level]);
if (!skip_lf)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_b_v)(
- y_ptr, post->y_stride,
+ vp8_loop_filter_simple_bv(y_ptr, post->y_stride,
lfi_n->blim[filter_level]);
/* don't apply across umv border */
if (mb_row > 0)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_mb_h)(
- y_ptr, post->y_stride,
+ vp8_loop_filter_simple_mbh(y_ptr, post->y_stride,
lfi_n->mblim[filter_level]);
if (!skip_lf)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_b_h)(
- y_ptr, post->y_stride,
+ vp8_loop_filter_simple_bh(y_ptr, post->y_stride,
lfi_n->blim[filter_level]);
}
}
@@ -672,7 +548,7 @@ void vp8_loop_filter_partial_frame
int linestocopy, i;
loop_filter_info_n *lfi_n = &cm->lf_info;
- loop_filter_info lfi;
+ struct loop_filter_info lfi;
int filter_level;
int alt_flt_enabled = xd->segmentation_enabled;
@@ -737,34 +613,30 @@ void vp8_loop_filter_partial_frame
lfi.hev_thr = lfi_n->hev_thr[hev_index];
if (mb_col > 0)
- LF_INVOKE(&cm->rtcd.loopfilter, normal_mb_v)
- (y_ptr, 0, 0, post->y_stride, 0, &lfi);
+ vp8_loop_filter_mbv(y_ptr, 0, 0, post->y_stride, 0, &lfi);
if (!skip_lf)
- LF_INVOKE(&cm->rtcd.loopfilter, normal_b_v)
- (y_ptr, 0, 0, post->y_stride, 0, &lfi);
+ vp8_loop_filter_bv(y_ptr, 0, 0, post->y_stride, 0, &lfi);
- LF_INVOKE(&cm->rtcd.loopfilter, normal_mb_h)
- (y_ptr, 0, 0, post->y_stride, 0, &lfi);
+ vp8_loop_filter_mbh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
if (!skip_lf)
- LF_INVOKE(&cm->rtcd.loopfilter, normal_b_h)
- (y_ptr, 0, 0, post->y_stride, 0, &lfi);
+ vp8_loop_filter_bh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
} else {
if (mb_col > 0)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_mb_v)
- (y_ptr, post->y_stride, lfi_n->mblim[filter_level]);
+ vp8_loop_filter_simple_mbv (y_ptr, post->y_stride,
+ lfi_n->mblim[filter_level]);
if (!skip_lf)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_b_v)
- (y_ptr, post->y_stride, lfi_n->blim[filter_level]);
+ vp8_loop_filter_simple_bv(y_ptr, post->y_stride,
+ lfi_n->blim[filter_level]);
- LF_INVOKE(&cm->rtcd.loopfilter, simple_mb_h)
- (y_ptr, post->y_stride, lfi_n->mblim[filter_level]);
+ vp8_loop_filter_simple_mbh(y_ptr, post->y_stride,
+ lfi_n->mblim[filter_level]);
if (!skip_lf)
- LF_INVOKE(&cm->rtcd.loopfilter, simple_b_h)
- (y_ptr, post->y_stride, lfi_n->blim[filter_level]);
+ vp8_loop_filter_simple_bh(y_ptr, post->y_stride,
+ lfi_n->blim[filter_level]);
}
}
diff --git a/vp8/common/loopfilter.h b/vp8/common/loopfilter.h
index b2ba95d5f..df78b8c12 100644
--- a/vp8/common/loopfilter.h
+++ b/vp8/common/loopfilter.h
@@ -42,12 +42,12 @@ typedef struct {
unsigned char mode_lf_lut[MB_MODE_COUNT];
} loop_filter_info_n;
-typedef struct {
+struct loop_filter_info {
const unsigned char *mblim;
const unsigned char *blim;
const unsigned char *lim;
const unsigned char *hev_thr;
-} loop_filter_info;
+};
#define prototype_loopfilter(sym) \
@@ -56,7 +56,7 @@ typedef struct {
#define prototype_loopfilter_block(sym) \
void sym(unsigned char *y, unsigned char *u, unsigned char *v, \
- int ystride, int uv_stride, loop_filter_info *lfi)
+ int ystride, int uv_stride, struct loop_filter_info *lfi)
#define prototype_simple_loopfilter(sym) \
void sym(unsigned char *y, int ystride, const unsigned char *blimit)
@@ -69,66 +69,6 @@ typedef struct {
#include "arm/loopfilter_arm.h"
#endif
-#ifndef vp8_lf_normal_mb_v
-#define vp8_lf_normal_mb_v vp8_loop_filter_mbv_c
-#endif
-extern prototype_loopfilter_block(vp8_lf_normal_mb_v);
-
-#ifndef vp8_lf_normal_b_v
-#define vp8_lf_normal_b_v vp8_loop_filter_bv_c
-#endif
-extern prototype_loopfilter_block(vp8_lf_normal_b_v);
-
-#ifndef vp8_lf_normal_mb_h
-#define vp8_lf_normal_mb_h vp8_loop_filter_mbh_c
-#endif
-extern prototype_loopfilter_block(vp8_lf_normal_mb_h);
-
-#ifndef vp8_lf_normal_b_h
-#define vp8_lf_normal_b_h vp8_loop_filter_bh_c
-#endif
-extern prototype_loopfilter_block(vp8_lf_normal_b_h);
-
-#ifndef vp8_lf_simple_mb_v
-#define vp8_lf_simple_mb_v vp8_loop_filter_simple_vertical_edge_c
-#endif
-extern prototype_simple_loopfilter(vp8_lf_simple_mb_v);
-
-#ifndef vp8_lf_simple_b_v
-#define vp8_lf_simple_b_v vp8_loop_filter_bvs_c
-#endif
-extern prototype_simple_loopfilter(vp8_lf_simple_b_v);
-
-#ifndef vp8_lf_simple_mb_h
-#define vp8_lf_simple_mb_h vp8_loop_filter_simple_horizontal_edge_c
-#endif
-extern prototype_simple_loopfilter(vp8_lf_simple_mb_h);
-
-#ifndef vp8_lf_simple_b_h
-#define vp8_lf_simple_b_h vp8_loop_filter_bhs_c
-#endif
-extern prototype_simple_loopfilter(vp8_lf_simple_b_h);
-
-typedef prototype_loopfilter_block((*vp8_lf_block_fn_t));
-typedef prototype_simple_loopfilter((*vp8_slf_block_fn_t));
-
-typedef struct {
- vp8_lf_block_fn_t normal_mb_v;
- vp8_lf_block_fn_t normal_b_v;
- vp8_lf_block_fn_t normal_mb_h;
- vp8_lf_block_fn_t normal_b_h;
- vp8_slf_block_fn_t simple_mb_v;
- vp8_slf_block_fn_t simple_b_v;
- vp8_slf_block_fn_t simple_mb_h;
- vp8_slf_block_fn_t simple_b_h;
-} vp8_loopfilter_rtcd_vtable_t;
-
-#if CONFIG_RUNTIME_CPU_DETECT
-#define LF_INVOKE(ctx,fn) (ctx)->fn
-#else
-#define LF_INVOKE(ctx,fn) vp8_lf_##fn
-#endif
-
typedef void loop_filter_uvfunction
(
unsigned char *u, /* source pointer */
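With the vp8_loopfilter_rtcd_vtable_t vtable and the LF_INVOKE macro gone, the bare names used in loopfilter.c (vp8_loop_filter_mbv and friends) are expected to resolve through the generated vpx_rtcd.h, driven by rtcd_defs.sh (also touched by this commit). A hedged sketch of the two dispatch shapes that generator typically emits; the generated header, not this sketch, is authoritative:

    /* no runtime CPU detection: the name is a compile-time alias */
    #define vp8_loop_filter_mbv vp8_loop_filter_mbv_c

    /* with runtime CPU detection: the name is a pointer filled in at init */
    extern void (*vp8_loop_filter_mbv)(unsigned char *y, unsigned char *u,
                                       unsigned char *v, int y_stride,
                                       int uv_stride,
                                       struct loop_filter_info *lfi);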
diff --git a/vp8/common/loopfilter_filters.c b/vp8/common/loopfilter_filters.c
index ef69ffecc..3f97d2101 100644
--- a/vp8/common/loopfilter_filters.c
+++ b/vp8/common/loopfilter_filters.c
@@ -379,3 +379,87 @@ void vp8_loop_filter_simple_vertical_edge_c
} while (++i < 16);
}
+
+/* Vertical MB Filtering */
+void vp8_loop_filter_mbv_c(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ struct loop_filter_info *lfi) {
+ vp8_mbloop_filter_vertical_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
+
+ if (u_ptr)
+ vp8_mbloop_filter_vertical_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+
+ if (v_ptr)
+ vp8_mbloop_filter_vertical_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+}
+
+/* Vertical B Filtering */
+void vp8_loop_filter_bv_c(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ struct loop_filter_info *lfi) {
+ vp8_loop_filter_vertical_edge_c(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
+ vp8_loop_filter_vertical_edge_c(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
+ vp8_loop_filter_vertical_edge_c(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
+
+ if (u_ptr)
+ vp8_loop_filter_vertical_edge_c(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
+
+ if (v_ptr)
+ vp8_loop_filter_vertical_edge_c(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
+}
+
+/* Horizontal MB filtering */
+void vp8_loop_filter_mbh_c(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ struct loop_filter_info *lfi) {
+ vp8_mbloop_filter_horizontal_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
+
+ if (u_ptr)
+ vp8_mbloop_filter_horizontal_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+
+ if (v_ptr)
+ vp8_mbloop_filter_horizontal_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+}
+
+/* Horizontal B Filtering */
+void vp8_loop_filter_bh_c(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ struct loop_filter_info *lfi) {
+ vp8_loop_filter_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
+ vp8_loop_filter_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
+ vp8_loop_filter_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
+
+ if (u_ptr)
+ vp8_loop_filter_horizontal_edge_c(u_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
+
+ if (v_ptr)
+ vp8_loop_filter_horizontal_edge_c(v_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
+}
+
+void vp8_loop_filter_bh8x8_c(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ struct loop_filter_info *lfi) {
+ vp8_mbloop_filter_horizontal_edge_c(
+ y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
+}
+
+void vp8_loop_filter_bhs_c(unsigned char *y_ptr, int y_stride,
+ const unsigned char *blimit) {
+ vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, blimit);
+ vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, blimit);
+ vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, blimit);
+}
+
+void vp8_loop_filter_bv8x8_c(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ struct loop_filter_info *lfi) {
+ vp8_mbloop_filter_vertical_edge_c(
+ y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
+}
+
+void vp8_loop_filter_bvs_c(unsigned char *y_ptr, int y_stride,
+ const unsigned char *blimit) {
+ vp8_loop_filter_simple_vertical_edge_c(y_ptr + 4, y_stride, blimit);
+ vp8_loop_filter_simple_vertical_edge_c(y_ptr + 8, y_stride, blimit);
+ vp8_loop_filter_simple_vertical_edge_c(y_ptr + 12, y_stride, blimit);
+}
diff --git a/vp8/common/onyxc_int.h b/vp8/common/onyxc_int.h
index 7c6093b41..0396a7087 100644
--- a/vp8/common/onyxc_int.h
+++ b/vp8/common/onyxc_int.h
@@ -14,12 +14,12 @@
#include "vpx_config.h"
#include "vpx/internal/vpx_codec_internal.h"
+#include "vpx_rtcd.h"
#include "loopfilter.h"
#include "entropymv.h"
#include "entropy.h"
#include "entropymode.h"
#include "idct.h"
-#include "recon.h"
#if CONFIG_POSTPROC
#include "postproc.h"
#endif
@@ -58,12 +58,10 @@ typedef struct frame_contexts {
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_prob hybrid_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
-#if CONFIG_TX16X16
vp8_prob coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_prob hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
-#endif
#if CONFIG_NEWMVENTROPY
nmv_context nmvc;
@@ -101,14 +99,12 @@ typedef struct frame_contexts {
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
-#if CONFIG_TX16X16
vp8_prob pre_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_prob pre_hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
-#endif
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
@@ -124,14 +120,12 @@ typedef struct frame_contexts {
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
-#if CONFIG_TX16X16
unsigned int coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#if CONFIG_HYBRIDTRANSFORM16X16
unsigned int hybrid_coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
-#endif
#if CONFIG_NEWMVENTROPY
nmv_context_counts NMVcount;
@@ -163,18 +157,20 @@ typedef enum {
NB_PREDICTION_TYPES = 3,
} COMPPREDMODE_TYPE;
-/* TODO: allows larger transform */
typedef enum {
ONLY_4X4 = 0,
- ALLOW_8X8 = 1
+ ALLOW_8X8 = 1,
+ ALLOW_16X16 = 2,
+#if CONFIG_TX_SELECT
+ TX_MODE_SELECT = 3,
+#endif
+ NB_TXFM_MODES = 3 + CONFIG_TX_SELECT,
} TXFM_MODE;
typedef struct VP8_COMMON_RTCD {
#if CONFIG_RUNTIME_CPU_DETECT
vp8_idct_rtcd_vtable_t idct;
- vp8_recon_rtcd_vtable_t recon;
vp8_subpix_rtcd_vtable_t subpix;
- vp8_loopfilter_rtcd_vtable_t loopfilter;
#if CONFIG_POSTPROC
vp8_postproc_rtcd_vtable_t postproc;
#endif
@@ -306,6 +302,11 @@ typedef struct VP8Common {
vp8_prob prob_comppred[COMP_PRED_CONTEXTS];
+#if CONFIG_TX_SELECT
+ // FIXME contextualize
+ vp8_prob prob_tx[TX_SIZE_MAX - 1];
+#endif
+
vp8_prob mbskip_pred_probs[MBSKIP_CONTEXTS];
FRAME_CONTEXT lfc_a; /* last alt ref entropy */
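The TXFM_MODE change folds the mode count into the enum itself: with CONFIG_TX_SELECT defined to 1 the members are ONLY_4X4=0, ALLOW_8X8=1, ALLOW_16X16=2, TX_MODE_SELECT=3 and NB_TXFM_MODES=4; with it defined to 0, TX_MODE_SELECT is compiled out and NB_TXFM_MODES=3. That invariant could be pinned with the classic negative-array-size trick (hypothetical check, not in the patch; this C89-era tree predates _Static_assert):

    typedef char nb_txfm_modes_ok[(NB_TXFM_MODES == 3 + CONFIG_TX_SELECT) ? 1 : -1];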
diff --git a/vp8/common/pred_common.c b/vp8/common/pred_common.c
index cb80a0f7e..a32389433 100644
--- a/vp8/common/pred_common.c
+++ b/vp8/common/pred_common.c
@@ -15,8 +15,8 @@
// TBD prediction functions for various bitstream signals
// Returns a context number for the given MB prediction signal
-unsigned char get_pred_context(VP8_COMMON *const cm,
- MACROBLOCKD *const xd,
+unsigned char get_pred_context(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd,
PRED_ID pred_id) {
int pred_context;
MODE_INFO *m = xd->mode_info_context;
@@ -106,8 +106,8 @@ unsigned char get_pred_context(VP8_COMMON *const cm,
// This function returns a context probability for coding a given
// prediction signal
-vp8_prob get_pred_prob(VP8_COMMON *const cm,
- MACROBLOCKD *const xd,
+vp8_prob get_pred_prob(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd,
PRED_ID pred_id) {
vp8_prob pred_probability;
int pred_context;
@@ -146,10 +146,10 @@ vp8_prob get_pred_prob(VP8_COMMON *const cm,
// This function returns a context probability ptr for coding a given
// prediction signal
-vp8_prob *get_pred_probs(VP8_COMMON *const cm,
- MACROBLOCKD *const xd,
+const vp8_prob *get_pred_probs(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd,
PRED_ID pred_id) {
- vp8_prob *pred_probability;
+ const vp8_prob *pred_probability;
int pred_context;
// Get the appropriate prediction context
@@ -191,7 +191,7 @@ vp8_prob *get_pred_probs(VP8_COMMON *const cm,
// This function returns the status of the given prediction signal.
// I.e. is the predicted value for the given signal correct.
-unsigned char get_pred_flag(MACROBLOCKD *const xd,
+unsigned char get_pred_flag(const MACROBLOCKD *const xd,
PRED_ID pred_id) {
unsigned char pred_flag = 0;
@@ -260,14 +260,14 @@ void set_pred_flag(MACROBLOCKD *const xd,
// peredict various bitstream signals.
[sic: "peredict" should read "predict"]
// Macroblock segment id prediction function
-unsigned char get_pred_mb_segid(VP8_COMMON *const cm, int MbIndex) {
+unsigned char get_pred_mb_segid(const VP8_COMMON *const cm, int MbIndex) {
// Currently the prediction for the macroblock segment ID is
// the value stored for this macroblock in the previous frame.
return cm->last_frame_seg_map[MbIndex];
}
-MV_REFERENCE_FRAME get_pred_ref(VP8_COMMON *const cm,
- MACROBLOCKD *const xd) {
+MV_REFERENCE_FRAME get_pred_ref(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd) {
MODE_INFO *m = xd->mode_info_context;
MV_REFERENCE_FRAME left;
diff --git a/vp8/common/pred_common.h b/vp8/common/pred_common.h
index f4992f555..402e0235f 100644
--- a/vp8/common/pred_common.h
+++ b/vp8/common/pred_common.h
@@ -28,19 +28,19 @@ typedef enum {
} PRED_ID;
-extern unsigned char get_pred_context(VP8_COMMON *const cm,
- MACROBLOCKD *const xd,
+extern unsigned char get_pred_context(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd,
PRED_ID pred_id);
-extern vp8_prob get_pred_prob(VP8_COMMON *const cm,
- MACROBLOCKD *const xd,
+extern vp8_prob get_pred_prob(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd,
PRED_ID pred_id);
-extern vp8_prob *get_pred_probs(VP8_COMMON *const cm,
- MACROBLOCKD *const xd,
+extern const vp8_prob *get_pred_probs(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd,
PRED_ID pred_id);
-extern unsigned char get_pred_flag(MACROBLOCKD *const xd,
+extern unsigned char get_pred_flag(const MACROBLOCKD *const xd,
PRED_ID pred_id);
extern void set_pred_flag(MACROBLOCKD *const xd,
@@ -48,10 +48,10 @@ extern void set_pred_flag(MACROBLOCKD *const xd,
unsigned char pred_flag);
-extern unsigned char get_pred_mb_segid(VP8_COMMON *const cm, int MbIndex);
+extern unsigned char get_pred_mb_segid(const VP8_COMMON *const cm, int MbIndex);
-extern MV_REFERENCE_FRAME get_pred_ref(VP8_COMMON *const cm,
- MACROBLOCKD *const xd);
+extern MV_REFERENCE_FRAME get_pred_ref(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd);
extern void compute_mod_refprobs(VP8_COMMON *const cm);
#endif /* __INC_PRED_COMMON_H__ */
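This const-correctness pass lets read-only callers use the prediction accessors without casts, and get_pred_probs now also returns a pointer to const so callers cannot scribble on the probability tables through it. A hypothetical use under the new signatures (PRED_REF is assumed to be one of the PRED_ID values):

    void log_ref_prediction(const VP8_COMMON *cm, const MACROBLOCKD *xd) {
      const vp8_prob *p = get_pred_probs(cm, xd, PRED_REF);  /* assumed PRED_ID member */
      (void)p;  /* e.g. feed into a debug trace */
    }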
diff --git a/vp8/common/recon.c b/vp8/common/recon.c
index cf2d2fb85..ce35e4b12 100644
--- a/vp8/common/recon.c
+++ b/vp8/common/recon.c
@@ -10,7 +10,7 @@
#include "vpx_ports/config.h"
-#include "recon.h"
+#include "vpx_rtcd.h"
#include "blockd.h"
void vp8_recon_b_c
@@ -125,7 +125,7 @@ void vp8_recon2b_c
}
#if CONFIG_SUPERBLOCKS
-void vp8_recon_mby_s_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd, uint8_t *dst) {
+void vp8_recon_mby_s_c(MACROBLOCKD *xd, uint8_t *dst) {
int x, y;
BLOCKD *b = &xd->block[0];
int stride = b->dst_stride;
@@ -145,7 +145,7 @@ void vp8_recon_mby_s_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd, uin
}
}
-void vp8_recon_mbuv_s_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
+void vp8_recon_mbuv_s_c(MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
int x, y, i;
uint8_t *dst = udst;
@@ -170,71 +170,28 @@ void vp8_recon_mbuv_s_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd, ui
}
#endif
-void vp8_recon_mby_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd) {
-#if ARCH_ARM
- BLOCKD *b = &xd->block[0];
- RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
-
- /*b = &xd->block[4];*/
- b += 4;
- RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
-
- /*b = &xd->block[8];*/
- b += 4;
- RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
-
- /*b = &xd->block[12];*/
- b += 4;
- RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
-#else
+void vp8_recon_mby_c(MACROBLOCKD *xd) {
int i;
for (i = 0; i < 16; i += 4) {
BLOCKD *b = &xd->block[i];
- RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+ vp8_recon4b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
-#endif
}
-void vp8_recon_mb_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd) {
-#if ARCH_ARM
- BLOCKD *b = &xd->block[0];
-
- RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
- b += 4;
- RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
- b += 4;
- RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
- b += 4;
- RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
- b += 4;
-
- /*b = &xd->block[16];*/
-
- RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
- b++;
- b++;
- RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
- b++;
- b++;
- RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
- b++;
- b++;
- RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
-#else
+void vp8_recon_mb_c(MACROBLOCKD *xd) {
int i;
for (i = 0; i < 16; i += 4) {
BLOCKD *b = &xd->block[i];
- RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+ vp8_recon4b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
for (i = 16; i < 24; i += 2) {
BLOCKD *b = &xd->block[i];
- RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+ vp8_recon2b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
-#endif
}
diff --git a/vp8/common/recon.h b/vp8/common/recon.h
deleted file mode 100644
index 0bb5c8863..000000000
--- a/vp8/common/recon.h
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef __INC_RECON_H
-#define __INC_RECON_H
-
-#include "blockd.h"
-
-#define prototype_copy_block(sym) \
- void sym(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch)
-
-#define prototype_recon_block(sym) \
- void sym(unsigned char *pred, short *diff, unsigned char *dst, int pitch)
-
-#define prototype_recon_macroblock(sym) \
- void sym(const struct vp8_recon_rtcd_vtable *rtcd, MACROBLOCKD *xd)
-
-#define prototype_build_intra_predictors(sym) \
- void sym(MACROBLOCKD *xd)
-
-#define prototype_intra4x4_predict(sym) \
- void sym(BLOCKD *x, int b_mode, unsigned char *predictor)
-
-#if CONFIG_COMP_INTRA_PRED
-#define prototype_comp_intra4x4_predict(sym) \
- void sym(BLOCKD *x, int b_mode, int mode2, unsigned char *predictor)
-#endif
-
-struct vp8_recon_rtcd_vtable;
-
-#if ARCH_X86 || ARCH_X86_64
-#include "x86/recon_x86.h"
-#endif
-
-#if ARCH_ARM
-#include "arm/recon_arm.h"
-#endif
-
-#ifndef vp8_recon_copy16x16
-#define vp8_recon_copy16x16 vp8_copy_mem16x16_c
-#endif
-extern prototype_copy_block(vp8_recon_copy16x16);
-
-#ifndef vp8_recon_copy8x8
-#define vp8_recon_copy8x8 vp8_copy_mem8x8_c
-#endif
-extern prototype_copy_block(vp8_recon_copy8x8);
-
-#ifndef vp8_recon_avg16x16
-#define vp8_recon_avg16x16 vp8_avg_mem16x16_c
-#endif
-extern prototype_copy_block(vp8_recon_avg16x16);
-
-#ifndef vp8_recon_avg8x8
-#define vp8_recon_avg8x8 vp8_avg_mem8x8_c
-#endif
-extern prototype_copy_block(vp8_recon_avg8x8);
-
-#ifndef vp8_recon_copy8x4
-#define vp8_recon_copy8x4 vp8_copy_mem8x4_c
-#endif
-extern prototype_copy_block(vp8_recon_copy8x4);
-
-#ifndef vp8_recon_recon
-#define vp8_recon_recon vp8_recon_b_c
-#endif
-extern prototype_recon_block(vp8_recon_recon);
-
-#ifndef vp8_recon_recon_uv
-#define vp8_recon_recon_uv vp8_recon_uv_b_c
-#endif
-extern prototype_recon_block(vp8_recon_recon_uv);
-
-extern prototype_recon_block(vp8_recon_recon);
-#ifndef vp8_recon_recon2
-#define vp8_recon_recon2 vp8_recon2b_c
-#endif
-extern prototype_recon_block(vp8_recon_recon2);
-
-#ifndef vp8_recon_recon4
-#define vp8_recon_recon4 vp8_recon4b_c
-#endif
-extern prototype_recon_block(vp8_recon_recon4);
-
-#ifndef vp8_recon_recon_mb
-#define vp8_recon_recon_mb vp8_recon_mb_c
-#endif
-extern prototype_recon_macroblock(vp8_recon_recon_mb);
-
-#ifndef vp8_recon_recon_mby
-#define vp8_recon_recon_mby vp8_recon_mby_c
-#endif
-extern prototype_recon_macroblock(vp8_recon_recon_mby);
-
-#ifndef vp8_recon_build_intra_predictors_sby_s
-#define vp8_recon_build_intra_predictors_sby_s vp8_build_intra_predictors_sby_s
-#endif
-extern prototype_build_intra_predictors(vp8_recon_build_intra_predictors_sby_s);
-
-#ifndef vp8_recon_build_intra_predictors_mby
-#define vp8_recon_build_intra_predictors_mby vp8_build_intra_predictors_mby
-#endif
-extern prototype_build_intra_predictors\
-(vp8_recon_build_intra_predictors_mby);
-
-#if CONFIG_COMP_INTRA_PRED
-#ifndef vp8_recon_build_comp_intra_predictors_mby
-#define vp8_recon_build_comp_intra_predictors_mby vp8_build_comp_intra_predictors_mby
-#endif
-extern prototype_build_intra_predictors\
-(vp8_recon_build_comp_intra_predictors_mby);
-#endif
-
-#ifndef vp8_recon_build_intra8x8_predictors_mby
-#define vp8_recon_build_intra8x8_predictors_mby vp8_build_intra8x8_predictors_mby
-#endif
-extern prototype_build_intra_predictors\
-(vp8_recon_build_intra8x8_predictors_mby);
-
-#ifndef vp8_recon_build_intra_predictors_mby_s
-#define vp8_recon_build_intra_predictors_mby_s vp8_build_intra_predictors_mby_s
-#endif
-extern prototype_build_intra_predictors\
-(vp8_recon_build_intra_predictors_mby_s);
-
-#ifndef vp8_recon_build_intra_predictors_sbuv_s
-#define vp8_recon_build_intra_predictors_sbuv_s vp8_build_intra_predictors_sbuv_s
-#endif
-extern prototype_build_intra_predictors(vp8_recon_build_intra_predictors_sbuv_s);
-
-#ifndef vp8_recon_build_intra_predictors_mbuv
-#define vp8_recon_build_intra_predictors_mbuv vp8_build_intra_predictors_mbuv
-#endif
-extern prototype_build_intra_predictors\
-(vp8_recon_build_intra_predictors_mbuv);
-
-#ifndef vp8_recon_build_intra8x8_predictors_mbuv
-#define vp8_recon_build_intra8x8_predictors_mbuv vp8_build_intra8x8_predictors_mbuv
-#endif
-extern prototype_build_intra_predictors\
-(vp8_recon_build_intra8x8_predictors_mbuv);
-
-#ifndef vp8_recon_build_intra_predictors_mbuv_s
-#define vp8_recon_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s
-#endif
-extern prototype_build_intra_predictors\
-(vp8_recon_build_intra_predictors_mbuv_s);
-
-#if CONFIG_COMP_INTRA_PRED
-#ifndef vp8_recon_build_comp_intra_predictors_mbuv
-#define vp8_recon_build_comp_intra_predictors_mbuv vp8_build_comp_intra_predictors_mbuv
-#endif
-extern prototype_build_intra_predictors\
-(vp8_recon_build_comp_intra_predictors_mbuv);
-#endif
-
-#ifndef vp8_recon_intra4x4_predict
-#define vp8_recon_intra4x4_predict vp8_intra4x4_predict
-#endif
-extern prototype_intra4x4_predict\
-(vp8_recon_intra4x4_predict);
-
-#if CONFIG_COMP_INTRA_PRED
-#ifndef vp8_recon_comp_intra4x4_predict
-#define vp8_recon_comp_intra4x4_predict vp8_comp_intra4x4_predict
-#endif
-extern prototype_comp_intra4x4_predict\
-(vp8_recon_comp_intra4x4_predict);
-#endif
-
-#ifndef vp8_recon_intra8x8_predict
-#define vp8_recon_intra8x8_predict vp8_intra8x8_predict
-#endif
-extern prototype_intra4x4_predict\
-(vp8_recon_intra8x8_predict);
-
-#if CONFIG_COMP_INTRA_PRED
-#ifndef vp8_recon_comp_intra8x8_predict
-#define vp8_recon_comp_intra8x8_predict vp8_comp_intra8x8_predict
-#endif
-extern prototype_comp_intra4x4_predict\
-(vp8_recon_comp_intra8x8_predict);
-#endif
-
-#ifndef vp8_recon_intra_uv4x4_predict
-#define vp8_recon_intra_uv4x4_predict vp8_intra_uv4x4_predict
-#endif
-extern prototype_intra4x4_predict\
-(vp8_recon_intra_uv4x4_predict);
-
-#if CONFIG_COMP_INTRA_PRED
-#ifndef vp8_recon_comp_intra_uv4x4_predict
-#define vp8_recon_comp_intra_uv4x4_predict vp8_comp_intra_uv4x4_predict
-#endif
-extern prototype_comp_intra4x4_predict\
-(vp8_recon_comp_intra_uv4x4_predict);
-#endif
-
-typedef prototype_copy_block((*vp8_copy_block_fn_t));
-typedef prototype_recon_block((*vp8_recon_fn_t));
-typedef prototype_recon_macroblock((*vp8_recon_mb_fn_t));
-typedef prototype_build_intra_predictors((*vp8_build_intra_pred_fn_t));
-typedef prototype_intra4x4_predict((*vp8_intra4x4_pred_fn_t));
-#if CONFIG_COMP_INTRA_PRED
-typedef prototype_comp_intra4x4_predict((*vp8_comp_intra4x4_pred_fn_t));
-#endif
-typedef struct vp8_recon_rtcd_vtable {
- vp8_copy_block_fn_t copy16x16;
- vp8_copy_block_fn_t copy8x8;
- vp8_copy_block_fn_t avg16x16;
- vp8_copy_block_fn_t avg8x8;
- vp8_copy_block_fn_t copy8x4;
- vp8_recon_fn_t recon;
- vp8_recon_fn_t recon_uv;
- vp8_recon_fn_t recon2;
- vp8_recon_fn_t recon4;
- vp8_recon_mb_fn_t recon_mb;
- vp8_recon_mb_fn_t recon_mby;
-#if CONFIG_SUPERBLOCKS
- vp8_build_intra_pred_fn_t build_intra_predictors_sby_s;
-#endif
- vp8_build_intra_pred_fn_t build_intra_predictors_mby_s;
- vp8_build_intra_pred_fn_t build_intra_predictors_mby;
-#if CONFIG_COMP_INTRA_PRED
- vp8_build_intra_pred_fn_t build_comp_intra_predictors_mby;
-#endif
-#if CONFIG_SUPERBLOCKS
- vp8_build_intra_pred_fn_t build_intra_predictors_sbuv_s;
-#endif
- vp8_build_intra_pred_fn_t build_intra_predictors_mbuv_s;
- vp8_build_intra_pred_fn_t build_intra_predictors_mbuv;
-#if CONFIG_COMP_INTRA_PRED
- vp8_build_intra_pred_fn_t build_comp_intra_predictors_mbuv;
-#endif
- vp8_intra4x4_pred_fn_t intra4x4_predict;
-#if CONFIG_COMP_INTRA_PRED
- vp8_comp_intra4x4_pred_fn_t comp_intra4x4_predict;
-#endif
- vp8_intra4x4_pred_fn_t intra8x8_predict;
-#if CONFIG_COMP_INTRA_PRED
- vp8_comp_intra4x4_pred_fn_t comp_intra8x8_predict;
-#endif
- vp8_intra4x4_pred_fn_t intra_uv4x4_predict;
-#if CONFIG_COMP_INTRA_PRED
- vp8_comp_intra4x4_pred_fn_t comp_intra_uv4x4_predict;
-#endif
-} vp8_recon_rtcd_vtable_t;
-
-#if CONFIG_RUNTIME_CPU_DETECT
-#define RECON_INVOKE(ctx,fn) (ctx)->fn
-#else
-#define RECON_INVOKE(ctx,fn) vp8_recon_##fn
-#endif
-
-void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd,
- MACROBLOCKD *xd);
-
-#if CONFIG_SUPERBLOCKS
-extern void vp8_recon_mby_s_c(const vp8_recon_rtcd_vtable_t *rtcd,
- MACROBLOCKD *xd, uint8_t *dst);
-extern void vp8_recon_mbuv_s_c(const vp8_recon_rtcd_vtable_t *rtcd,
- MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst);
-#endif
-
-#endif
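
For context on this deletion: the RECON_INVOKE macro removed above was the old dispatch seam, and the generated vpx_rtcd.h now plays the same role behind plain function names. A minimal sketch of the two behaviors the deleted macro provided (condensed from the lines above; not a verbatim copy of any header):

  /* Old scheme: one macro, two build modes. */
  #if CONFIG_RUNTIME_CPU_DETECT
  #define RECON_INVOKE(ctx, fn) (ctx)->fn       /* pointer chase per call */
  #else
  #define RECON_INVOKE(ctx, fn) vp8_recon_##fn  /* bound at compile time  */
  #endif

  /* New scheme: the rtcd generator hides the same choice behind the
   * function name itself, so a call site is simply: */
  vp8_recon4b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);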
diff --git a/vp8/common/reconinter.c b/vp8/common/reconinter.c
index 647b3ada7..a41d233ab 100644
--- a/vp8/common/reconinter.c
+++ b/vp8/common/reconinter.c
@@ -11,7 +11,6 @@
#include "vpx_ports/config.h"
#include "vpx/vpx_integer.h"
-#include "recon.h"
#include "subpixel.h"
#include "blockd.h"
#include "reconinter.h"
@@ -295,7 +294,7 @@ void vp8_build_2nd_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf
}
}
-static void build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
+void vp8_build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
unsigned char *ptr_base;
unsigned char *ptr;
unsigned char *pred_ptr = d->predictor;
@@ -310,8 +309,7 @@ static void build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
xd->subpixel_predict8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
(mv.as_mv.row & 7) << 1, pred_ptr, pitch);
} else {
- RECON_INVOKE(&xd->rtcd->recon, copy8x8)
- (ptr, d->pre_stride, pred_ptr, pitch);
+ vp8_copy_mem8x8(ptr, d->pre_stride, pred_ptr, pitch);
}
}
@@ -321,8 +319,8 @@ static void build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
* come from an earlier call to build_inter_predictors_4b()) with the
* predictor of the second reference frame / motion vector.
*/
-static void build_2nd_inter_predictors4b(MACROBLOCKD *xd,
- BLOCKD *d, int pitch) {
+void vp8_build_2nd_inter_predictors4b(MACROBLOCKD *xd,
+ BLOCKD *d, int pitch) {
unsigned char *ptr_base;
unsigned char *ptr;
unsigned char *pred_ptr = d->predictor;
@@ -337,8 +335,7 @@ static void build_2nd_inter_predictors4b(MACROBLOCKD *xd,
xd->subpixel_predict_avg8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
(mv.as_mv.row & 7) << 1, pred_ptr, pitch);
} else {
- RECON_INVOKE(&xd->rtcd->recon, avg8x8)
- (ptr, d->pre_stride, pred_ptr, pitch);
+ vp8_avg_mem8x8(ptr, d->pre_stride, pred_ptr, pitch);
}
}
@@ -357,7 +354,7 @@ static void build_inter_predictors2b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
xd->subpixel_predict8x4(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
(mv.as_mv.row & 7) << 1, pred_ptr, pitch);
} else {
- RECON_INVOKE(&xd->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
+ vp8_copy_mem8x4(ptr, d->pre_stride, pred_ptr, pitch);
}
}
@@ -662,8 +659,7 @@ void vp8_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
(ymv.as_mv.row & 7) << 1,
dst_y, dst_ystride);
} else {
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)
- (ptr, pre_stride, dst_y, dst_ystride);
+ vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
}
}
@@ -742,10 +738,8 @@ void vp8_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
xd->subpixel_predict8x8(vptr, pre_stride, _o16x16mv.as_mv.col & 15,
_o16x16mv.as_mv.row & 15, dst_v, dst_uvstride);
} else {
- RECON_INVOKE(&xd->rtcd->recon, copy8x8)
- (uptr, pre_stride, dst_u, dst_uvstride);
- RECON_INVOKE(&xd->rtcd->recon, copy8x8)
- (vptr, pre_stride, dst_v, dst_uvstride);
+ vp8_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
+ vp8_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
}
}
@@ -864,8 +858,7 @@ void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
} else {
// TODO Needs to AVERAGE with the dst_y
// For now, do not apply the prediction filter in these cases!
- RECON_INVOKE(&xd->rtcd->recon, avg16x16)(ptr, pre_stride, dst_y,
- dst_ystride);
+ vp8_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
}
} else
#endif // CONFIG_PRED_FILTER
@@ -874,8 +867,7 @@ void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
xd->subpixel_predict_avg16x16(ptr, pre_stride, (mv_col & 7) << 1,
(mv_row & 7) << 1, dst_y, dst_ystride);
} else {
- RECON_INVOKE(&xd->rtcd->recon, avg16x16)(ptr, pre_stride, dst_y,
- dst_ystride);
+ vp8_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
}
}
}
@@ -938,8 +930,7 @@ void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
} else {
// TODO Needs to AVERAGE with the dst_[u|v]
// For now, do not apply the prediction filter here!
- RECON_INVOKE(&xd->rtcd->recon, avg8x8)(pSrc, pre_stride, pDst,
- dst_uvstride);
+ vp8_avg_mem8x8(pSrc, pre_stride, pDst, dst_uvstride);
}
// V
@@ -954,8 +945,8 @@ void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
xd->subpixel_predict_avg8x8(vptr, pre_stride, omv_col & 15,
omv_row & 15, dst_v, dst_uvstride);
} else {
- RECON_INVOKE(&xd->rtcd->recon, avg8x8)(uptr, pre_stride, dst_u, dst_uvstride);
- RECON_INVOKE(&xd->rtcd->recon, avg8x8)(vptr, pre_stride, dst_v, dst_uvstride);
+ vp8_avg_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
+ vp8_avg_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
}
}
@@ -994,16 +985,16 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
}
- build_inter_predictors4b(xd, &blockd[ 0], 16);
- build_inter_predictors4b(xd, &blockd[ 2], 16);
- build_inter_predictors4b(xd, &blockd[ 8], 16);
- build_inter_predictors4b(xd, &blockd[10], 16);
+ vp8_build_inter_predictors4b(xd, &blockd[ 0], 16);
+ vp8_build_inter_predictors4b(xd, &blockd[ 2], 16);
+ vp8_build_inter_predictors4b(xd, &blockd[ 8], 16);
+ vp8_build_inter_predictors4b(xd, &blockd[10], 16);
if (mbmi->second_ref_frame) {
- build_2nd_inter_predictors4b(xd, &blockd[ 0], 16);
- build_2nd_inter_predictors4b(xd, &blockd[ 2], 16);
- build_2nd_inter_predictors4b(xd, &blockd[ 8], 16);
- build_2nd_inter_predictors4b(xd, &blockd[10], 16);
+ vp8_build_2nd_inter_predictors4b(xd, &blockd[ 0], 16);
+ vp8_build_2nd_inter_predictors4b(xd, &blockd[ 2], 16);
+ vp8_build_2nd_inter_predictors4b(xd, &blockd[ 8], 16);
+ vp8_build_2nd_inter_predictors4b(xd, &blockd[10], 16);
}
} else {
for (i = 0; i < 16; i += 2) {
diff --git a/vp8/common/reconinter.h b/vp8/common/reconinter.h
index 37e34b5e1..c78611920 100644
--- a/vp8/common/reconinter.h
+++ b/vp8/common/reconinter.h
@@ -12,9 +12,7 @@
#ifndef __INC_RECONINTER_H
#define __INC_RECONINTER_H
-#if CONFIG_RUNTIME_CPU_DETECT
#include "onyxc_int.h"
-#endif
extern void vp8_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
unsigned char *dst_y,
@@ -61,6 +59,11 @@ extern void vp8_build_inter_predictors_b(BLOCKD *d, int pitch,
extern void vp8_build_2nd_inter_predictors_b(BLOCKD *d, int pitch,
vp8_subpix_fn_t sppf);
+extern void vp8_build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d,
+ int pitch);
+extern void vp8_build_2nd_inter_predictors4b(MACROBLOCKD *xd,
+ BLOCKD *d, int pitch);
+
extern void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd);
extern void vp8_setup_interp_filters(MACROBLOCKD *xd,
INTERPOLATIONFILTERTYPE filter,
diff --git a/vp8/common/reconintra.c b/vp8/common/reconintra.c
index cad9652b7..9bee3a7da 100644
--- a/vp8/common/reconintra.c
+++ b/vp8/common/reconintra.c
@@ -10,7 +10,7 @@
#include <stdio.h>
#include "vpx_ports/config.h"
-#include "recon.h"
+#include "vpx_rtcd.h"
#include "reconintra.h"
#include "vpx_mem/vpx_mem.h"
@@ -196,14 +196,12 @@ void d153_predictor(unsigned char *ypred_ptr, int y_stride, int n,
}
}
-void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd,
- MACROBLOCKD *xd) {
+void vp8_recon_intra_mbuv(MACROBLOCKD *xd) {
int i;
for (i = 16; i < 24; i += 2) {
BLOCKD *b = &xd->block[i];
- RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff,
- *(b->base_dst) + b->dst, b->dst_stride);
+    vp8_recon2b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
}
diff --git a/vp8/common/reconintra4x4.c b/vp8/common/reconintra4x4.c
index 69faab09d..0ba0a2cff 100644
--- a/vp8/common/reconintra4x4.c
+++ b/vp8/common/reconintra4x4.c
@@ -10,13 +10,12 @@
#include "vpx_ports/config.h"
-#include "recon.h"
#include "vpx_mem/vpx_mem.h"
#include "reconintra.h"
+#include "vpx_rtcd.h"
-void vp8_intra4x4_predict(BLOCKD *x,
- int b_mode,
- unsigned char *predictor) {
+void vp8_intra4x4_predict_c(BLOCKD *x, int b_mode,
+ unsigned char *predictor) {
int i, r, c;
unsigned char *Above = *(x->base_dst) + x->dst - x->dst_stride;
@@ -276,7 +275,7 @@ void vp8_intra4x4_predict(BLOCKD *x,
}
#if CONFIG_COMP_INTRA_PRED
-void vp8_comp_intra4x4_predict(BLOCKD *x,
+void vp8_comp_intra4x4_predict_c(BLOCKD *x,
int b_mode, int b_mode2,
unsigned char *out_predictor) {
unsigned char predictor[2][4 * 16];
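
The _c suffix added to vp8_intra4x4_predict and vp8_comp_intra4x4_predict follows the rtcd naming convention: the C reference implementation owns the suffixed name, while the generic name is defined by the generated header (see the rtcd_defs.sh hunk below). A sketch of that mapping when no SIMD specializations are registered, as the bare "specialize" lines indicate (illustrative; the real declarations are generated):

  /* vpx_rtcd.h (generated, sketch): the generic name resolves straight
   * to the C version when nothing else is specialized. */
  void vp8_intra4x4_predict_c(BLOCKD *x, int b_mode, unsigned char *predictor);
  #define vp8_intra4x4_predict vp8_intra4x4_predict_c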
diff --git a/vp8/common/rtcd_defs.sh b/vp8/common/rtcd_defs.sh
index 66029f88e..ef272df90 100644
--- a/vp8/common/rtcd_defs.sh
+++ b/vp8/common/rtcd_defs.sh
@@ -1,6 +1,13 @@
common_forward_decls() {
cat <<EOF
-struct blockd;
+#include "vp8/common/blockd.h"
+
+struct loop_filter_info;
+
+/* Encoder forward decls */
+struct variance_vtable;
+union int_mv;
+struct yv12_buffer_config;
EOF
}
forward_decls common_forward_decls
@@ -19,3 +26,151 @@ if [ "$CONFIG_GCC" = "yes" ]; then
specialize vp8_filter_block2d_8x8_8 sse4_1 sse2
specialize vp8_filter_block2d_16x16_8 sse4_1 sse2
fi
+
+
+#
+# RECON
+#
+prototype void vp8_copy_mem16x16 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
+specialize vp8_copy_mem16x16 mmx sse2 media neon dspr2
+vp8_copy_mem16x16_media=vp8_copy_mem16x16_v6
+vp8_copy_mem16x16_dspr2=vp8_copy_mem16x16_dspr2
+
+prototype void vp8_copy_mem8x8 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
+specialize vp8_copy_mem8x8 mmx media neon dspr2
+vp8_copy_mem8x8_media=vp8_copy_mem8x8_v6
+vp8_copy_mem8x8_dspr2=vp8_copy_mem8x8_dspr2
+
+prototype void vp8_avg_mem16x16 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
+specialize vp8_avg_mem16x16
+
+prototype void vp8_avg_mem8x8 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
+specialize vp8_avg_mem8x8
+
+prototype void vp8_copy_mem8x4 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
+specialize vp8_copy_mem8x4 mmx media neon dspr2
+vp8_copy_mem8x4_media=vp8_copy_mem8x4_v6
+vp8_copy_mem8x4_dspr2=vp8_copy_mem8x4_dspr2
+
+prototype void vp8_recon_b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
+specialize vp8_recon_b
+
+prototype void vp8_recon_uv_b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
+specialize vp8_recon_uv_b
+
+prototype void vp8_recon2b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
+specialize vp8_recon2b sse2
+
+prototype void vp8_recon4b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
+specialize vp8_recon4b sse2
+
+prototype void vp8_recon_mb "MACROBLOCKD *x"
+specialize vp8_recon_mb
+
+prototype void vp8_recon_mby "MACROBLOCKD *x"
+specialize vp8_recon_mby
+
+prototype void vp8_build_intra_predictors_mby_s "MACROBLOCKD *x"
+specialize vp8_build_intra_predictors_mby_s
+
+prototype void vp8_build_intra_predictors_sby_s "MACROBLOCKD *x"
+specialize vp8_build_intra_predictors_sby_s;
+
+prototype void vp8_build_intra_predictors_sbuv_s "MACROBLOCKD *x"
+specialize vp8_build_intra_predictors_sbuv_s;
+
+prototype void vp8_build_intra_predictors_mby "MACROBLOCKD *x"
+specialize vp8_build_intra_predictors_mby;
+
+prototype void vp8_build_comp_intra_predictors_mby "MACROBLOCKD *x"
+specialize vp8_build_comp_intra_predictors_mby;
+
+prototype void vp8_build_intra_predictors_mbuv "MACROBLOCKD *x"
+specialize vp8_build_intra_predictors_mbuv;
+
+prototype void vp8_build_intra_predictors_mbuv_s "MACROBLOCKD *x"
+specialize vp8_build_intra_predictors_mbuv_s;
+
+prototype void vp8_build_comp_intra_predictors_mbuv "MACROBLOCKD *x"
+specialize vp8_build_comp_intra_predictors_mbuv;
+
+prototype void vp8_intra4x4_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
+specialize vp8_intra4x4_predict;
+
+prototype void vp8_comp_intra4x4_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
+specialize vp8_comp_intra4x4_predict;
+
+prototype void vp8_intra8x8_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
+specialize vp8_intra8x8_predict;
+
+prototype void vp8_comp_intra8x8_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
+specialize vp8_comp_intra8x8_predict;
+
+prototype void vp8_intra_uv4x4_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
+specialize vp8_intra_uv4x4_predict;
+
+prototype void vp8_comp_intra_uv4x4_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
+specialize vp8_comp_intra_uv4x4_predict;
+
+#
+# Loopfilter
+#
+prototype void vp8_loop_filter_mbv "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
+specialize vp8_loop_filter_mbv;
+
+prototype void vp8_loop_filter_bv "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
+specialize vp8_loop_filter_bv;
+
+prototype void vp8_loop_filter_bv8x8 "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
+specialize vp8_loop_filter_bv8x8;
+
+prototype void vp8_loop_filter_mbh "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
+specialize vp8_loop_filter_mbh;
+
+prototype void vp8_loop_filter_bh "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
+specialize vp8_loop_filter_bh;
+
+prototype void vp8_loop_filter_bh8x8 "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
+specialize vp8_loop_filter_bh8x8;
+
+prototype void vp8_loop_filter_simple_mbv "unsigned char *y, int ystride, const unsigned char *blimit"
+specialize vp8_loop_filter_simple_mbv mmx sse2 media neon
+vp8_loop_filter_simple_mbv_c=vp8_loop_filter_simple_vertical_edge_c
+vp8_loop_filter_simple_mbv_mmx=vp8_loop_filter_simple_vertical_edge_mmx
+vp8_loop_filter_simple_mbv_sse2=vp8_loop_filter_simple_vertical_edge_sse2
+vp8_loop_filter_simple_mbv_media=vp8_loop_filter_simple_vertical_edge_armv6
+vp8_loop_filter_simple_mbv_neon=vp8_loop_filter_mbvs_neon
+
+prototype void vp8_loop_filter_simple_mbh "unsigned char *y, int ystride, const unsigned char *blimit"
+specialize vp8_loop_filter_simple_mbh mmx sse2 media neon
+vp8_loop_filter_simple_mbh_c=vp8_loop_filter_simple_horizontal_edge_c
+vp8_loop_filter_simple_mbh_mmx=vp8_loop_filter_simple_horizontal_edge_mmx
+vp8_loop_filter_simple_mbh_sse2=vp8_loop_filter_simple_horizontal_edge_sse2
+vp8_loop_filter_simple_mbh_media=vp8_loop_filter_simple_horizontal_edge_armv6
+vp8_loop_filter_simple_mbh_neon=vp8_loop_filter_mbhs_neon
+
+prototype void vp8_loop_filter_simple_bv "unsigned char *y, int ystride, const unsigned char *blimit"
+specialize vp8_loop_filter_simple_bv mmx sse2 media neon
+vp8_loop_filter_simple_bv_c=vp8_loop_filter_bvs_c
+vp8_loop_filter_simple_bv_mmx=vp8_loop_filter_bvs_mmx
+vp8_loop_filter_simple_bv_sse2=vp8_loop_filter_bvs_sse2
+vp8_loop_filter_simple_bv_media=vp8_loop_filter_bvs_armv6
+vp8_loop_filter_simple_bv_neon=vp8_loop_filter_bvs_neon
+
+prototype void vp8_loop_filter_simple_bh "unsigned char *y, int ystride, const unsigned char *blimit"
+specialize vp8_loop_filter_simple_bh mmx sse2 media neon
+vp8_loop_filter_simple_bh_c=vp8_loop_filter_bhs_c
+vp8_loop_filter_simple_bh_mmx=vp8_loop_filter_bhs_mmx
+vp8_loop_filter_simple_bh_sse2=vp8_loop_filter_bhs_sse2
+vp8_loop_filter_simple_bh_media=vp8_loop_filter_bhs_armv6
+vp8_loop_filter_simple_bh_neon=vp8_loop_filter_bhs_neon
+
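
Each prototype/specialize pair above is input to the rtcd code generator, which emits the vpx_rtcd.h that the C files in this patch now include. A sketch of the kind of output produced for vp8_copy_mem16x16, assuming the generator's usual shape (RTCD_EXTERN and the exact layout are conventions of that generator, not verified against this tree):

  /* vpx_rtcd.h (generated, sketch) */
  void vp8_copy_mem16x16_c(unsigned char *src, int src_pitch,
                           unsigned char *dst, int dst_pitch);
  void vp8_copy_mem16x16_sse2(unsigned char *src, int src_pitch,
                              unsigned char *dst, int dst_pitch);

  #if CONFIG_RUNTIME_CPU_DETECT
  /* one function pointer per prototype, filled in once by vpx_rtcd() */
  RTCD_EXTERN void (*vp8_copy_mem16x16)(unsigned char *src, int src_pitch,
                                        unsigned char *dst, int dst_pitch);
  #else
  /* static build: the best compiled-in version is chosen at generation time */
  #define vp8_copy_mem16x16 vp8_copy_mem16x16_sse2
  #endif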
diff --git a/vp8/common/seg_common.c b/vp8/common/seg_common.c
index b616391ba..a11fe87e9 100644
--- a/vp8/common/seg_common.c
+++ b/vp8/common/seg_common.c
@@ -19,7 +19,7 @@ const int vp8_seg_feature_data_bits[SEG_LVL_MAX] =
// the coding mechanism is still subject to change so these provide a
// convenient single point of change.
-int segfeature_active(MACROBLOCKD *xd,
+int segfeature_active(const MACROBLOCKD *xd,
int segment_id,
SEG_LVL_FEATURES feature_id) {
// Return true if mask bit set and segmentation enabled.
@@ -66,7 +66,7 @@ void set_segdata(MACROBLOCKD *xd,
xd->segment_feature_data[segment_id][feature_id] = seg_data;
}
-int get_segdata(MACROBLOCKD *xd,
+int get_segdata(const MACROBLOCKD *xd,
int segment_id,
SEG_LVL_FEATURES feature_id) {
return xd->segment_feature_data[segment_id][feature_id];
@@ -126,7 +126,7 @@ void set_segref(MACROBLOCKD *xd,
(1 << ref_frame);
}
-int check_segref(MACROBLOCKD *xd,
+int check_segref(const MACROBLOCKD *xd,
int segment_id,
MV_REFERENCE_FRAME ref_frame) {
return (xd->segment_feature_data[segment_id][SEG_LVL_REF_FRAME] &
diff --git a/vp8/common/seg_common.h b/vp8/common/seg_common.h
index 74131926f..59f40d112 100644
--- a/vp8/common/seg_common.h
+++ b/vp8/common/seg_common.h
@@ -15,7 +15,7 @@
#ifndef __INC_SEG_COMMON_H__
#define __INC_SEG_COMMON_H__ 1
-int segfeature_active(MACROBLOCKD *xd,
+int segfeature_active(const MACROBLOCKD *xd,
int segment_id,
SEG_LVL_FEATURES feature_id);
@@ -42,7 +42,7 @@ void set_segdata(MACROBLOCKD *xd,
SEG_LVL_FEATURES feature_id,
int seg_data);
-int get_segdata(MACROBLOCKD *xd,
+int get_segdata(const MACROBLOCKD *xd,
int segment_id,
SEG_LVL_FEATURES feature_id);
@@ -73,7 +73,7 @@ void set_segref(MACROBLOCKD *xd,
int segment_id,
MV_REFERENCE_FRAME ref_frame);
-int check_segref(MACROBLOCKD *xd,
+int check_segref(const MACROBLOCKD *xd,
int segment_id,
MV_REFERENCE_FRAME ref_frame);
diff --git a/vp8/common/x86/loopfilter_x86.c b/vp8/common/x86/loopfilter_x86.c
index add888835..e7239818e 100644
--- a/vp8/common/x86/loopfilter_x86.c
+++ b/vp8/common/x86/loopfilter_x86.c
@@ -30,7 +30,7 @@ extern loop_filter_uvfunction vp8_mbloop_filter_vertical_edge_uv_sse2;
#if HAVE_MMX
/* Horizontal MB filtering */
void vp8_loop_filter_mbh_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
- int y_stride, int uv_stride, loop_filter_info *lfi) {
+ int y_stride, int uv_stride, struct loop_filter_info *lfi) {
vp8_mbloop_filter_horizontal_edge_mmx(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
@@ -43,7 +43,7 @@ void vp8_loop_filter_mbh_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigne
/* Vertical MB Filtering */
void vp8_loop_filter_mbv_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
- int y_stride, int uv_stride, loop_filter_info *lfi) {
+ int y_stride, int uv_stride, struct loop_filter_info *lfi) {
vp8_mbloop_filter_vertical_edge_mmx(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
@@ -56,7 +56,7 @@ void vp8_loop_filter_mbv_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigne
/* Horizontal B Filtering */
void vp8_loop_filter_bh_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
- int y_stride, int uv_stride, loop_filter_info *lfi) {
+ int y_stride, int uv_stride, struct loop_filter_info *lfi) {
vp8_loop_filter_horizontal_edge_mmx(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_horizontal_edge_mmx(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_horizontal_edge_mmx(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
@@ -78,7 +78,7 @@ void vp8_loop_filter_bhs_mmx(unsigned char *y_ptr, int y_stride, const unsigned
/* Vertical B Filtering */
void vp8_loop_filter_bv_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
- int y_stride, int uv_stride, loop_filter_info *lfi) {
+ int y_stride, int uv_stride, struct loop_filter_info *lfi) {
vp8_loop_filter_vertical_edge_mmx(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_vertical_edge_mmx(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_vertical_edge_mmx(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
@@ -102,7 +102,7 @@ void vp8_loop_filter_bvs_mmx(unsigned char *y_ptr, int y_stride, const unsigned
/* Horizontal MB filtering */
#if HAVE_SSE2
void vp8_loop_filter_mbh_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
- int y_stride, int uv_stride, loop_filter_info *lfi) {
+ int y_stride, int uv_stride, struct loop_filter_info *lfi) {
vp8_mbloop_filter_horizontal_edge_sse2(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
@@ -112,7 +112,7 @@ void vp8_loop_filter_mbh_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsign
/* Vertical MB Filtering */
void vp8_loop_filter_mbv_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
- int y_stride, int uv_stride, loop_filter_info *lfi) {
+ int y_stride, int uv_stride, struct loop_filter_info *lfi) {
vp8_mbloop_filter_vertical_edge_sse2(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
@@ -122,7 +122,7 @@ void vp8_loop_filter_mbv_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsign
/* Horizontal B Filtering */
void vp8_loop_filter_bh_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
- int y_stride, int uv_stride, loop_filter_info *lfi) {
+ int y_stride, int uv_stride, struct loop_filter_info *lfi) {
vp8_loop_filter_horizontal_edge_sse2(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_horizontal_edge_sse2(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_horizontal_edge_sse2(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
@@ -141,7 +141,7 @@ void vp8_loop_filter_bhs_sse2(unsigned char *y_ptr, int y_stride, const unsigned
/* Vertical B Filtering */
void vp8_loop_filter_bv_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
- int y_stride, int uv_stride, loop_filter_info *lfi) {
+ int y_stride, int uv_stride, struct loop_filter_info *lfi) {
vp8_loop_filter_vertical_edge_sse2(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_vertical_edge_sse2(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_vertical_edge_sse2(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
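
The loop_filter_info to struct loop_filter_info change in every signature here is forced by the new header layout: rtcd only forward-declares the type (the "struct loop_filter_info;" line added to common_forward_decls above), and a forward declaration introduces the struct tag, not the typedef name. A minimal illustration:

  struct loop_filter_info;                /* forward declaration: tag only */
  void ok(struct loop_filter_info *lfi);  /* fine with just the tag */
  /* void bad(loop_filter_info *lfi);        error: typedef not in scope */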
diff --git a/vp8/common/x86/loopfilter_x86.h b/vp8/common/x86/loopfilter_x86.h
index 1ed6c213f..3cbea8f04 100644
--- a/vp8/common/x86/loopfilter_x86.h
+++ b/vp8/common/x86/loopfilter_x86.h
@@ -28,33 +28,6 @@ extern prototype_simple_loopfilter(vp8_loop_filter_simple_vertical_edge_mmx);
extern prototype_simple_loopfilter(vp8_loop_filter_bvs_mmx);
extern prototype_simple_loopfilter(vp8_loop_filter_simple_horizontal_edge_mmx);
extern prototype_simple_loopfilter(vp8_loop_filter_bhs_mmx);
-
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_lf_normal_mb_v
-#define vp8_lf_normal_mb_v vp8_loop_filter_mbv_mmx
-
-#undef vp8_lf_normal_b_v
-#define vp8_lf_normal_b_v vp8_loop_filter_bv_mmx
-
-#undef vp8_lf_normal_mb_h
-#define vp8_lf_normal_mb_h vp8_loop_filter_mbh_mmx
-
-#undef vp8_lf_normal_b_h
-#define vp8_lf_normal_b_h vp8_loop_filter_bh_mmx
-
-#undef vp8_lf_simple_mb_v
-#define vp8_lf_simple_mb_v vp8_loop_filter_simple_vertical_edge_mmx
-
-#undef vp8_lf_simple_b_v
-#define vp8_lf_simple_b_v vp8_loop_filter_bvs_mmx
-
-#undef vp8_lf_simple_mb_h
-#define vp8_lf_simple_mb_h vp8_loop_filter_simple_horizontal_edge_mmx
-
-#undef vp8_lf_simple_b_h
-#define vp8_lf_simple_b_h vp8_loop_filter_bhs_mmx
-#endif
#endif
@@ -67,33 +40,6 @@ extern prototype_simple_loopfilter(vp8_loop_filter_simple_vertical_edge_sse2);
extern prototype_simple_loopfilter(vp8_loop_filter_bvs_sse2);
extern prototype_simple_loopfilter(vp8_loop_filter_simple_horizontal_edge_sse2);
extern prototype_simple_loopfilter(vp8_loop_filter_bhs_sse2);
-
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_lf_normal_mb_v
-#define vp8_lf_normal_mb_v vp8_loop_filter_mbv_sse2
-
-#undef vp8_lf_normal_b_v
-#define vp8_lf_normal_b_v vp8_loop_filter_bv_sse2
-
-#undef vp8_lf_normal_mb_h
-#define vp8_lf_normal_mb_h vp8_loop_filter_mbh_sse2
-
-#undef vp8_lf_normal_b_h
-#define vp8_lf_normal_b_h vp8_loop_filter_bh_sse2
-
-#undef vp8_lf_simple_mb_v
-#define vp8_lf_simple_mb_v vp8_loop_filter_simple_vertical_edge_sse2
-
-#undef vp8_lf_simple_b_v
-#define vp8_lf_simple_b_v vp8_loop_filter_bvs_sse2
-
-#undef vp8_lf_simple_mb_h
-#define vp8_lf_simple_mb_h vp8_loop_filter_simple_horizontal_edge_sse2
-
-#undef vp8_lf_simple_b_h
-#define vp8_lf_simple_b_h vp8_loop_filter_bhs_sse2
-#endif
#endif
diff --git a/vp8/common/x86/recon_wrapper_sse2.c b/vp8/common/x86/recon_wrapper_sse2.c
index e304d055b..681aff81e 100644
--- a/vp8/common/x86/recon_wrapper_sse2.c
+++ b/vp8/common/x86/recon_wrapper_sse2.c
@@ -9,9 +9,8 @@
*/
#include "vpx_ports/config.h"
-#include "vp8/common/recon.h"
-#include "recon_x86.h"
#include "vpx_mem/vpx_mem.h"
+#include "vp8/common/blockd.h"
#define build_intra_predictors_mbuv_prototype(sym) \
void sym(unsigned char *dst, int dst_stride, \
diff --git a/vp8/common/x86/recon_x86.h b/vp8/common/x86/recon_x86.h
deleted file mode 100644
index fe0f8f0bc..000000000
--- a/vp8/common/x86/recon_x86.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef RECON_X86_H
-#define RECON_X86_H
-
-/* Note:
- *
- * This platform is commonly built for runtime CPU detection. If you modify
- * any of the function mappings present in this file, be sure to also update
- * them in the function pointer initialization code
- */
-
-#if HAVE_MMX
-extern prototype_recon_block(vp8_recon_b_mmx);
-extern prototype_copy_block(vp8_copy_mem8x8_mmx);
-extern prototype_copy_block(vp8_copy_mem8x4_mmx);
-extern prototype_copy_block(vp8_copy_mem16x16_mmx);
-
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_recon_recon
-#define vp8_recon_recon vp8_recon_b_mmx
-
-#undef vp8_recon_copy8x8
-#define vp8_recon_copy8x8 vp8_copy_mem8x8_mmx
-
-#undef vp8_recon_copy8x4
-#define vp8_recon_copy8x4 vp8_copy_mem8x4_mmx
-
-#undef vp8_recon_copy16x16
-#define vp8_recon_copy16x16 vp8_copy_mem16x16_mmx
-
-#endif
-#endif
-
-#if HAVE_SSE2
-extern prototype_recon_block(vp8_recon2b_sse2);
-extern prototype_recon_block(vp8_recon4b_sse2);
-extern prototype_copy_block(vp8_copy_mem16x16_sse2);
-extern prototype_build_intra_predictors(vp8_build_intra_predictors_mbuv_sse2);
-extern prototype_build_intra_predictors(vp8_build_intra_predictors_mbuv_s_sse2);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_recon_recon2
-#define vp8_recon_recon2 vp8_recon2b_sse2
-
-#undef vp8_recon_recon4
-#define vp8_recon_recon4 vp8_recon4b_sse2
-
-#undef vp8_recon_copy16x16
-#define vp8_recon_copy16x16 vp8_copy_mem16x16_sse2
-
-#undef vp8_recon_build_intra_predictors_mbuv
-#define vp8_recon_build_intra_predictors_mbuv vp8_build_intra_predictors_mbuv_sse2
-
-#undef vp8_recon_build_intra_predictors_mbuv_s
-#define vp8_recon_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_sse2
-
-#endif
-#endif
-
-#if HAVE_SSSE3
-extern prototype_build_intra_predictors(vp8_build_intra_predictors_mbuv_ssse3);
-extern prototype_build_intra_predictors(vp8_build_intra_predictors_mbuv_s_ssse3);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_recon_build_intra_predictors_mbuv
-#define vp8_recon_build_intra_predictors_mbuv vp8_build_intra_predictors_mbuv_ssse3
-
-#undef vp8_recon_build_intra_predictors_mbuv_s
-#define vp8_recon_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_ssse3
-
-#endif
-#endif
-#endif
diff --git a/vp8/common/x86/x86_systemdependent.c b/vp8/common/x86/x86_systemdependent.c
index 2c66d9fb7..b6d058fbf 100644
--- a/vp8/common/x86/x86_systemdependent.c
+++ b/vp8/common/x86/x86_systemdependent.c
@@ -14,7 +14,6 @@
#include "vp8/common/g_common.h"
#include "vp8/common/subpixel.h"
#include "vp8/common/loopfilter.h"
-#include "vp8/common/recon.h"
#include "vp8/common/idct.h"
#include "vp8/common/pragmas.h"
#include "vp8/common/onyxc_int.h"
@@ -41,11 +40,6 @@ void vp8_arch_x86_common_init(VP8_COMMON *ctx) {
// rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_mmx;
// rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_mmx;
- rtcd->recon.recon = vp8_recon_b_mmx;
- rtcd->recon.copy8x8 = vp8_copy_mem8x8_mmx;
- rtcd->recon.copy8x4 = vp8_copy_mem8x4_mmx;
- rtcd->recon.copy16x16 = vp8_copy_mem16x16_mmx;
-
/* Disabled due to unsupported enhanced interpolation/high_prec mv
rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_mmx;
rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_mmx;
@@ -57,15 +51,6 @@ void vp8_arch_x86_common_init(VP8_COMMON *ctx) {
rtcd->subpix.bilinear8x4 = vp8_bilinear_predict8x4_mmx;
rtcd->subpix.bilinear4x4 = vp8_bilinear_predict4x4_mmx;
- rtcd->loopfilter.normal_mb_v = vp8_loop_filter_mbv_mmx;
- rtcd->loopfilter.normal_b_v = vp8_loop_filter_bv_mmx;
- rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_mmx;
- rtcd->loopfilter.normal_b_h = vp8_loop_filter_bh_mmx;
- rtcd->loopfilter.simple_mb_v = vp8_loop_filter_simple_vertical_edge_mmx;
- rtcd->loopfilter.simple_b_v = vp8_loop_filter_bvs_mmx;
- rtcd->loopfilter.simple_mb_h = vp8_loop_filter_simple_horizontal_edge_mmx;
- rtcd->loopfilter.simple_b_h = vp8_loop_filter_bhs_mmx;
-
#if CONFIG_POSTPROC
rtcd->postproc.down = vp8_mbpost_proc_down_mmx;
/*rtcd->postproc.across = vp8_mbpost_proc_across_ip_c;*/
@@ -78,16 +63,7 @@ void vp8_arch_x86_common_init(VP8_COMMON *ctx) {
#if HAVE_SSE2
if (flags & HAS_SSE2) {
- rtcd->recon.recon2 = vp8_recon2b_sse2;
- rtcd->recon.recon4 = vp8_recon4b_sse2;
- rtcd->recon.copy16x16 = vp8_copy_mem16x16_sse2;
- /* these are disable because of unsupported diagonal pred modes
- rtcd->recon.build_intra_predictors_mbuv =
- vp8_build_intra_predictors_mbuv_sse2;
- rtcd->recon.build_intra_predictors_mbuv_s =
- vp8_build_intra_predictors_mbuv_s_sse2;
- */
// rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_sse2;
@@ -99,15 +75,6 @@ void vp8_arch_x86_common_init(VP8_COMMON *ctx) {
rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_sse2;
rtcd->subpix.bilinear8x8 = vp8_bilinear_predict8x8_sse2;
- rtcd->loopfilter.normal_mb_v = vp8_loop_filter_mbv_sse2;
- rtcd->loopfilter.normal_b_v = vp8_loop_filter_bv_sse2;
- rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_sse2;
- rtcd->loopfilter.normal_b_h = vp8_loop_filter_bh_sse2;
- rtcd->loopfilter.simple_mb_v = vp8_loop_filter_simple_vertical_edge_sse2;
- rtcd->loopfilter.simple_b_v = vp8_loop_filter_bvs_sse2;
- rtcd->loopfilter.simple_mb_h = vp8_loop_filter_simple_horizontal_edge_sse2;
- rtcd->loopfilter.simple_b_h = vp8_loop_filter_bhs_sse2;
-
#if CONFIG_POSTPROC
rtcd->postproc.down = vp8_mbpost_proc_down_xmm;
rtcd->postproc.across = vp8_mbpost_proc_across_ip_xmm;
diff --git a/vp8/decoder/decodemv.c b/vp8/decoder/decodemv.c
index 7f7567e4e..f72fc3f1d 100644
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -78,7 +78,7 @@ static void vp8_kfread_modes(VP8D_COMP *pbi,
int mb_row,
int mb_col) {
VP8_COMMON *const cm = & pbi->common;
- vp8_reader *const bc = & pbi->bc;
+ vp8_reader *const bc = pbi->mb.current_bc;
const int mis = pbi->common.mode_info_stride;
int map_index = mb_row * pbi->common.mb_cols + mb_col;
MB_PREDICTION_MODE y_mode;
@@ -170,6 +170,22 @@ static void vp8_kfread_modes(VP8D_COMP *pbi,
m->mbmi.second_uv_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
#endif
+#if CONFIG_TX_SELECT
+ if (cm->txfm_mode == TX_MODE_SELECT && m->mbmi.mb_skip_coeff == 0 &&
+ m->mbmi.mode <= I8X8_PRED) {
+ // FIXME(rbultje) code ternary symbol once all experiments are merged
+ m->mbmi.txfm_size = vp8_read(bc, cm->prob_tx[0]);
+ if (m->mbmi.txfm_size != TX_4X4 && m->mbmi.mode != I8X8_PRED)
+ m->mbmi.txfm_size += vp8_read(bc, cm->prob_tx[1]);
+ } else
+#endif
+ if (cm->txfm_mode >= ALLOW_16X16 && m->mbmi.mode <= TM_PRED) {
+ m->mbmi.txfm_size = TX_16X16;
+ } else if (cm->txfm_mode >= ALLOW_8X8 && m->mbmi.mode != B_PRED) {
+ m->mbmi.txfm_size = TX_8X8;
+ } else {
+ m->mbmi.txfm_size = TX_4X4;
+ }
}
#if CONFIG_NEWMVENTROPY
@@ -596,9 +612,8 @@ static void read_switchable_interp_probs(VP8D_COMP *pbi) {
}
#endif
-static void mb_mode_mv_init(VP8D_COMP *pbi) {
+static void mb_mode_mv_init(VP8D_COMP *pbi, vp8_reader *bc) {
VP8_COMMON *const cm = & pbi->common;
- vp8_reader *const bc = & pbi->bc;
#if CONFIG_NEWMVENTROPY
nmv_context *const nmvc = &pbi->common.fc.nmvc;
#else
@@ -607,14 +622,10 @@ static void mb_mode_mv_init(VP8D_COMP *pbi) {
#endif
MACROBLOCKD *const xd = & pbi->mb;
- vpx_memset(cm->mbskip_pred_probs, 0, sizeof(cm->mbskip_pred_probs));
- if (pbi->common.mb_no_coeff_skip) {
- int k;
- for (k = 0; k < MBSKIP_CONTEXTS; ++k)
- cm->mbskip_pred_probs[k] = (vp8_prob)vp8_read_literal(bc, 8);
- }
-
- if (cm->frame_type != KEY_FRAME) {
+ if (cm->frame_type == KEY_FRAME) {
+ if (!cm->kf_ymode_probs_update)
+ cm->kf_ymode_probs_index = vp8_read_literal(bc, 3);
+ } else {
#if CONFIG_PRED_FILTER
cm->pred_filter_mode = (vp8_prob)vp8_read_literal(bc, 2);
@@ -666,9 +677,9 @@ static void mb_mode_mv_init(VP8D_COMP *pbi) {
// value
static void read_mb_segment_id(VP8D_COMP *pbi,
int mb_row, int mb_col) {
- vp8_reader *const bc = & pbi->bc;
VP8_COMMON *const cm = & pbi->common;
MACROBLOCKD *const xd = & pbi->mb;
+ vp8_reader *const bc = xd->current_bc;
MODE_INFO *mi = xd->mode_info_context;
MB_MODE_INFO *mbmi = &mi->mbmi;
int index = mb_row * pbi->common.mb_cols + mb_col;
@@ -739,7 +750,6 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
MODE_INFO *prev_mi,
int mb_row, int mb_col) {
VP8_COMMON *const cm = & pbi->common;
- vp8_reader *const bc = & pbi->bc;
#if CONFIG_NEWMVENTROPY
nmv_context *const nmvc = &pbi->common.fc.nmvc;
#else
@@ -748,6 +758,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
#endif
const int mis = pbi->common.mode_info_stride;
MACROBLOCKD *const xd = & pbi->mb;
+ vp8_reader *const bc = xd->current_bc;
int_mv *const mv = & mbmi->mv;
int mb_to_left_edge;
@@ -1263,98 +1274,40 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
#endif
}
-}
-
-void vp8_decode_mode_mvs(VP8D_COMP *pbi) {
- int i;
- VP8_COMMON *cm = &pbi->common;
- MODE_INFO *mi = cm->mi;
- MACROBLOCKD *const xd = &pbi->mb;
- int sb_row, sb_col;
- int sb_rows = (cm->mb_rows + 1) >> 1;
- int sb_cols = (cm->mb_cols + 1) >> 1;
- int row_delta[4] = { 0, +1, 0, -1};
- int col_delta[4] = { +1, -1, +1, +1};
-
- MODE_INFO *prev_mi = cm->prev_mi;
-
- mb_mode_mv_init(pbi);
-
- if (cm->frame_type == KEY_FRAME && !cm->kf_ymode_probs_update) {
- cm->kf_ymode_probs_index = vp8_read_literal(&pbi->bc, 3);
- }
-
- for (sb_row = 0; sb_row < sb_rows; sb_row++) {
- int mb_col = 0;
- int mb_row = (sb_row << 1);
-
- for (sb_col = 0; sb_col < sb_cols; sb_col++) {
-#if CONFIG_SUPERBLOCKS
- mi->mbmi.encoded_as_sb = vp8_read(&pbi->bc, cm->sb_coded);
-#endif
- for (i = 0; i < 4; i++) {
-
- int dy = row_delta[i];
- int dx = col_delta[i];
- int offset_extended = dy * cm->mode_info_stride + dx;
-
- if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols)) {
- /* next macroblock */
- mb_row += dy;
- mb_col += dx;
- mi += offset_extended;
- prev_mi += offset_extended;
- continue;
- }
-#if CONFIG_SUPERBLOCKS
- if (i)
- mi->mbmi.encoded_as_sb = 0;
-#endif
-
- // Make sure the MacroBlockD mode info pointer is set correctly
- xd->mode_info_context = mi;
- xd->prev_mode_info_context = prev_mi;
-
- pbi->mb.mb_to_top_edge = -((mb_row * 16)) << 3;
- pbi->mb.mb_to_bottom_edge =
- ((pbi->common.mb_rows - 1 - mb_row) * 16) << 3;
-
- if (cm->frame_type == KEY_FRAME)
- vp8_kfread_modes(pbi, mi, mb_row, mb_col);
- else
- read_mb_modes_mv(pbi, mi, &mi->mbmi, prev_mi, mb_row,
- mb_col);
-
-#if CONFIG_SUPERBLOCKS
- if (mi->mbmi.encoded_as_sb) {
- assert(!i);
- mb_col += 2;
- mi[1] = mi[cm->mode_info_stride] =
- mi[cm->mode_info_stride + 1] = mi[0];
- mi += 2;
- prev_mi += 2;
- break;
- }
+#if CONFIG_TX_SELECT
+ if (cm->txfm_mode == TX_MODE_SELECT && mbmi->mb_skip_coeff == 0 &&
+ ((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= I8X8_PRED) ||
+ (mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
+ // FIXME(rbultje) code ternary symbol once all experiments are merged
+ mbmi->txfm_size = vp8_read(bc, cm->prob_tx[0]);
+ if (mbmi->txfm_size != TX_4X4 && mbmi->mode != I8X8_PRED)
+ mbmi->txfm_size += vp8_read(bc, cm->prob_tx[1]);
+ } else
#endif
-
- /* next macroblock */
- mb_row += dy;
- mb_col += dx;
- mi += offset_extended;
- prev_mi += offset_extended;
- }
- }
-
- mi += cm->mode_info_stride + (1 - (cm->mb_cols & 0x1));
- prev_mi += cm->mode_info_stride + (1 - (cm->mb_cols & 0x1));
+ if (cm->txfm_mode >= ALLOW_16X16 &&
+ ((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= TM_PRED) ||
+ (mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
+ mbmi->txfm_size = TX_16X16;
+ } else if (cm->txfm_mode >= ALLOW_8X8 &&
+ ((mbmi->ref_frame == INTRA_FRAME && mbmi->mode != B_PRED) ||
+ (mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
+ mbmi->txfm_size = TX_8X8;
+ } else {
+ mbmi->txfm_size = TX_4X4;
}
}
void vpx_decode_mode_mvs_init(VP8D_COMP *pbi){
VP8_COMMON *cm = &pbi->common;
- mb_mode_mv_init(pbi);
- if (cm->frame_type == KEY_FRAME && !cm->kf_ymode_probs_update)
- cm->kf_ymode_probs_index = vp8_read_literal(&pbi->bc, 3);
+
+ vpx_memset(cm->mbskip_pred_probs, 0, sizeof(cm->mbskip_pred_probs));
+ if (pbi->common.mb_no_coeff_skip) {
+ int k;
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k)
+ cm->mbskip_pred_probs[k] = (vp8_prob)vp8_read_literal(&pbi->bc, 8);
+ }
+
+ mb_mode_mv_init(pbi, &pbi->bc);
}
void vpx_decode_mb_mode_mv(VP8D_COMP *pbi,
MACROBLOCKD *xd,
diff --git a/vp8/decoder/decodemv.h b/vp8/decoder/decodemv.h
index 17bbb5b67..9629f952b 100644
--- a/vp8/decoder/decodemv.h
+++ b/vp8/decoder/decodemv.h
@@ -11,7 +11,6 @@
#include "onyxd_int.h"
-void vp8_decode_mode_mvs(VP8D_COMP *);
void vpx_decode_mb_mode_mv(VP8D_COMP *pbi,
MACROBLOCKD *xd,
int mb_row,
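
On the txfm_size reads added to vp8_kfread_modes() and read_mb_modes_mv() above: under TX_MODE_SELECT the transform size is coded as a tiny tree, one bit at prob_tx[0] separating TX_4X4 from the larger sizes and, when the mode allows 16x16 (I8X8_PRED is capped at 8x8), a second bit at prob_tx[1] separating TX_8X8 from TX_16X16. A sketch of the decode, assuming TX_4X4/TX_8X8/TX_16X16 are consecutive enum values as the += idiom in the patch requires (the helper name is hypothetical):

  /* Hypothetical helper mirroring the inline logic in this patch. */
  static TX_SIZE read_selected_txfm_size(vp8_reader *bc,
                                         const vp8_prob prob_tx[2],
                                         MB_PREDICTION_MODE mode) {
    TX_SIZE sz = (TX_SIZE)vp8_read(bc, prob_tx[0]);  /* TX_4X4 or TX_8X8 */
    if (sz != TX_4X4 && mode != I8X8_PRED)
      sz += vp8_read(bc, prob_tx[1]);                /* maybe TX_16X16 */
    return sz;
  }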
diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c
index 34b43ce3b..a46b0de34 100644
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -13,7 +13,6 @@
#include "vp8/common/header.h"
#include "vp8/common/reconintra.h"
#include "vp8/common/reconintra4x4.h"
-#include "vp8/common/recon.h"
#include "vp8/common/reconinter.h"
#include "dequantize.h"
#include "detokenize.h"
@@ -35,6 +34,7 @@
#include "vp8/common/seg_common.h"
#include "vp8/common/entropy.h"
+#include "vpx_rtcd.h"
#include <assert.h>
#include <stdio.h>
@@ -173,14 +173,12 @@ static void skip_recon_mb(VP8D_COMP *pbi, MACROBLOCKD *xd) {
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
- RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_sbuv_s)(xd);
- RECON_INVOKE(&pbi->common.rtcd.recon,
- build_intra_predictors_sby_s)(xd);
+ vp8_build_intra_predictors_sbuv_s(xd);
+ vp8_build_intra_predictors_sby_s(xd);
} else {
#endif
- RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mbuv_s)(xd);
- RECON_INVOKE(&pbi->common.rtcd.recon,
- build_intra_predictors_mby_s)(xd);
+ vp8_build_intra_predictors_mbuv_s(xd);
+ vp8_build_intra_predictors_mby_s(xd);
#if CONFIG_SUPERBLOCKS
}
#endif
@@ -231,39 +229,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
if (xd->segmentation_enabled)
mb_init_dequantizer(pbi, xd);
- if (pbi->common.frame_type == KEY_FRAME) {
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (xd->mode_info_context->mbmi.mode <= TM_PRED ||
- xd->mode_info_context->mbmi.mode == NEWMV ||
- xd->mode_info_context->mbmi.mode == ZEROMV ||
- xd->mode_info_context->mbmi.mode == NEARMV ||
- xd->mode_info_context->mbmi.mode == NEARESTMV)
- xd->mode_info_context->mbmi.txfm_size = TX_16X16;
- else
-#endif
- if (pbi->common.txfm_mode == ALLOW_8X8 &&
- xd->mode_info_context->mbmi.mode != B_PRED)
- xd->mode_info_context->mbmi.txfm_size = TX_8X8;
- else
- xd->mode_info_context->mbmi.txfm_size = TX_4X4;
- } else {
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (xd->mode_info_context->mbmi.mode <= TM_PRED ||
- xd->mode_info_context->mbmi.mode == NEWMV ||
- xd->mode_info_context->mbmi.mode == ZEROMV ||
- xd->mode_info_context->mbmi.mode == NEARMV ||
- xd->mode_info_context->mbmi.mode == NEARESTMV)
- xd->mode_info_context->mbmi.txfm_size = TX_16X16;
- else
-#endif
- if (pbi->common.txfm_mode == ALLOW_8X8 &&
- xd->mode_info_context->mbmi.mode != B_PRED &&
- xd->mode_info_context->mbmi.mode != SPLITMV)
- xd->mode_info_context->mbmi.txfm_size = TX_8X8;
- else
- xd->mode_info_context->mbmi.txfm_size = TX_4X4;
- }
-
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
xd->mode_info_context->mbmi.txfm_size = TX_8X8;
@@ -323,12 +288,9 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
xd->block[i].eob = 0;
xd->eobs[i] = 0;
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16)
eobtotal = vp8_decode_mb_tokens_16x16(pbi, xd);
- else
-#endif
- if (tx_type == TX_8X8)
+ else if (tx_type == TX_8X8)
eobtotal = vp8_decode_mb_tokens_8x8(pbi, xd);
else
eobtotal = vp8_decode_mb_tokens(pbi, xd);
@@ -367,15 +329,14 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
- RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_sby_s)(xd);
- RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_sbuv_s)(xd);
+ vp8_build_intra_predictors_sby_s(xd);
+ vp8_build_intra_predictors_sbuv_s(xd);
} else
#endif
if (mode != I8X8_PRED) {
- RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mbuv)(xd);
+ vp8_build_intra_predictors_mbuv(xd);
if (mode != B_PRED) {
- RECON_INVOKE(&pbi->common.rtcd.recon,
- build_intra_predictors_mby)(xd);
+ vp8_build_intra_predictors_mby(xd);
}
#if 0
// Intra-modes requiring recon data from top-right
@@ -400,10 +361,8 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
if (mode == I8X8_PRED) {
for (i = 0; i < 4; i++) {
int ib = vp8_i8x8_block[i];
-#if !CONFIG_HYBRIDTRANSFORM8X8
const int iblock[4] = {0, 1, 4, 5};
int j;
-#endif
int i8x8mode;
BLOCKD *b;
@@ -417,30 +376,37 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
b = &xd->block[ib];
i8x8mode = b->bmi.as_mode.first;
- RECON_INVOKE(RTCD_VTABLE(recon), intra8x8_predict)
- (b, i8x8mode, b->predictor);
+ vp8_intra8x8_predict(b, i8x8mode, b->predictor);
+ if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
#if CONFIG_HYBRIDTRANSFORM8X8
- vp8_ht_dequant_idct_add_8x8_c(b->bmi.as_mode.tx_type,
- q, dq, pre, dst, 16, stride);
- q += 64;
+ vp8_ht_dequant_idct_add_8x8_c(b->bmi.as_mode.tx_type,
+ q, dq, pre, dst, 16, stride);
+ q += 64;
#else
- vp8_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride);
- q += 64;
+ vp8_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride);
+ q += 64;
#endif
+ } else {
+ for (j = 0; j < 4; j++) {
+ b = &xd->block[ib + iblock[j]];
+ vp8_dequant_idct_add_c(b->qcoeff, b->dequant, b->predictor,
+ *(b->base_dst) + b->dst, 16, b->dst_stride);
+ }
+ }
b = &xd->block[16 + i];
- RECON_INVOKE(RTCD_VTABLE(recon), intra_uv4x4_predict)
- (b, i8x8mode, b->predictor);
- DEQUANT_INVOKE(&pbi->dequant, idct_add)
- (b->qcoeff, b->dequant, b->predictor,
- *(b->base_dst) + b->dst, 8, b->dst_stride);
+ vp8_intra_uv4x4_predict(b, i8x8mode, b->predictor);
+ DEQUANT_INVOKE(&pbi->dequant, idct_add)(b->qcoeff, b->dequant,
+ b->predictor,
+ *(b->base_dst) + b->dst, 8,
+ b->dst_stride);
b = &xd->block[20 + i];
- RECON_INVOKE(RTCD_VTABLE(recon), intra_uv4x4_predict)
- (b, i8x8mode, b->predictor);
- DEQUANT_INVOKE(&pbi->dequant, idct_add)
- (b->qcoeff, b->dequant, b->predictor,
- *(b->base_dst) + b->dst, 8, b->dst_stride);
+ vp8_intra_uv4x4_predict(b, i8x8mode, b->predictor);
+ DEQUANT_INVOKE(&pbi->dequant, idct_add)(b->qcoeff, b->dequant,
+ b->predictor,
+ *(b->base_dst) + b->dst, 8,
+ b->dst_stride);
}
} else if (mode == B_PRED) {
for (i = 0; i < 16; i++) {
@@ -451,12 +417,10 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
if (b_mode2 == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
- RECON_INVOKE(RTCD_VTABLE(recon), intra4x4_predict)
- (b, b_mode, b->predictor);
+ vp8_intra4x4_predict(b, b_mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
- RECON_INVOKE(RTCD_VTABLE(recon), comp_intra4x4_predict)
- (b, b_mode, b_mode2, b->predictor);
+ vp8_comp_intra4x4_predict(b, b_mode, b_mode2, b->predictor);
}
#endif
@@ -490,7 +454,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
} else {
BLOCKD *b = &xd->block[24];
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16) {
#if CONFIG_HYBRIDTRANSFORM16X16
if (mode < I8X8_PRED && active_ht16) {
@@ -513,9 +476,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
16, xd->dst.y_stride);
#endif
}
- else
-#endif
- if (tx_type == TX_8X8) {
+ else if (tx_type == TX_8X8) {
#if CONFIG_SUPERBLOCKS
void *orig = xd->mode_info_context;
int n, num = xd->mode_info_context->mbmi.encoded_as_sb ? 4 : 1;
@@ -602,9 +563,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
#endif
if ((tx_type == TX_8X8 &&
xd->mode_info_context->mbmi.mode != I8X8_PRED)
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
|| tx_type == TX_16X16
-#endif
)
DEQUANT_INVOKE(&pbi->dequant, idct_add_uv_block_8x8) //
(xd->qcoeff + 16 * 16, xd->block[16].dequant,
@@ -668,8 +627,7 @@ decode_sb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mbrow, MACROBLOCKD *xd) {
MODE_INFO *mi = xd->mode_info_context;
#if CONFIG_SUPERBLOCKS
- if (pbi->interleaved_decoding)
- mi->mbmi.encoded_as_sb = vp8_read(&pbi->bc, pc->sb_coded);
+ mi->mbmi.encoded_as_sb = vp8_read(&pbi->bc, pc->sb_coded);
#endif
// Process the 4 MBs within the SB in the order:
@@ -718,8 +676,7 @@ decode_sb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mbrow, MACROBLOCKD *xd) {
if (i)
mi->mbmi.encoded_as_sb = 0;
#endif
- if(pbi->interleaved_decoding)
- vpx_decode_mb_mode_mv(pbi, xd, mb_row, mb_col);
+ vpx_decode_mb_mode_mv(pbi, xd, mb_row, mb_col);
update_blockd_bmi(xd);
@@ -858,6 +815,7 @@ static void init_frame(VP8D_COMP *pbi) {
vp8_init_mv_probs(pc);
vp8_init_mbmode_probs(pc);
+ vp8_default_bmode_probs(pc->fc.bmode_prob);
vp8_default_coef_probs(pc);
vp8_kf_default_bmode_probs(pc->kf_bmode_prob);
@@ -891,6 +849,14 @@ static void init_frame(VP8D_COMP *pbi) {
vpx_memcpy(pbi->common.fc.vp8_mode_contexts,
pbi->common.fc.mode_context,
sizeof(pbi->common.fc.mode_context));
+    vpx_memset(pc->prev_mip, 0,
+               (pc->mb_cols + 1) * (pc->mb_rows + 1) * sizeof(MODE_INFO));
+    vpx_memset(pc->mip, 0,
+               (pc->mb_cols + 1) * (pc->mb_rows + 1) * sizeof(MODE_INFO));
+
+ update_mode_info_border(pc, pc->mip);
+ update_mode_info_in_image(pc, pc->mi);
+
} else {
if (!pc->use_bilinear_mc_filter)
@@ -1006,7 +972,7 @@ static void read_coef_probs(VP8D_COMP *pbi) {
}
#endif
- if (pbi->common.txfm_mode == ALLOW_8X8 && vp8_read_bit(bc)) {
+ if (pbi->common.txfm_mode != ONLY_4X4 && vp8_read_bit(bc)) {
// read coef probability tree
for (i = 0; i < BLOCK_TYPES_8X8; i++)
for (j = !i; j < COEF_BANDS; j++)
@@ -1025,7 +991,7 @@ static void read_coef_probs(VP8D_COMP *pbi) {
}
}
#if CONFIG_HYBRIDTRANSFORM8X8
- if (pbi->common.txfm_mode == ALLOW_8X8 && vp8_read_bit(bc)) {
+ if (pbi->common.txfm_mode != ONLY_4X4 && vp8_read_bit(bc)) {
// read coef probability tree
for (i = 0; i < BLOCK_TYPES_8X8; i++)
for (j = !i; j < COEF_BANDS; j++)
@@ -1045,9 +1011,8 @@ static void read_coef_probs(VP8D_COMP *pbi) {
}
#endif
-#if CONFIG_TX16X16
// 16x16
- if (vp8_read_bit(bc)) {
+ if (pbi->common.txfm_mode > ALLOW_8X8 && vp8_read_bit(bc)) {
// read coef probability tree
for (i = 0; i < BLOCK_TYPES_16X16; ++i)
for (j = !i; j < COEF_BANDS; ++j)
@@ -1066,7 +1031,7 @@ static void read_coef_probs(VP8D_COMP *pbi) {
}
}
#if CONFIG_HYBRIDTRANSFORM16X16
- if (vp8_read_bit(bc)) {
+ if (pbi->common.txfm_mode > ALLOW_8X8 && vp8_read_bit(bc)) {
// read coef probability tree
for (i = 0; i < BLOCK_TYPES_16X16; ++i)
for (j = !i; j < COEF_BANDS; ++j)
@@ -1085,7 +1050,6 @@ static void read_coef_probs(VP8D_COMP *pbi) {
}
}
#endif
-#endif
}
int vp8_decode_frame(VP8D_COMP *pbi) {
@@ -1177,7 +1141,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
init_frame(pbi);
- if (vp8dx_start_decode(bc, data, data_end - data))
+ if (vp8dx_start_decode(bc, data, first_partition_length_in_bytes))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder 0");
if (pc->frame_type == KEY_FRAME) {
@@ -1194,9 +1158,27 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
xd->update_mb_segmentation_map = (unsigned char)vp8_read_bit(bc);
// If so what method will be used.
- if (xd->update_mb_segmentation_map)
- pc->temporal_update = (unsigned char)vp8_read_bit(bc);
+ if (xd->update_mb_segmentation_map) {
+ // Which macro block level features are enabled
+ // Read the probs used to decode the segment id for each macro
+ // block.
+ for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) {
+ xd->mb_segment_tree_probs[i] = vp8_read_bit(bc) ?
+ (vp8_prob)vp8_read_literal(bc, 8) : 255;
+ }
+
+ // Read the prediction probs needed to decode the segment id
+ pc->temporal_update = (unsigned char)vp8_read_bit(bc);
+ for (i = 0; i < PREDICTION_PROBS; i++) {
+ if (pc->temporal_update) {
+ pc->segment_pred_probs[i] = vp8_read_bit(bc) ?
+ (vp8_prob)vp8_read_literal(bc, 8) : 255;
+ } else {
+ pc->segment_pred_probs[i] = 255;
+ }
+ }
+ }
// Is the segment data being updated
xd->update_mb_segmentation_data = (unsigned char)vp8_read_bit(bc);
@@ -1261,38 +1243,6 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
}
}
}
-
- if (xd->update_mb_segmentation_map) {
- // Which macro block level features are enabled
- vpx_memset(xd->mb_segment_tree_probs, 255,
- sizeof(xd->mb_segment_tree_probs));
- vpx_memset(pc->segment_pred_probs, 255,
- sizeof(pc->segment_pred_probs));
-
- // Read the probs used to decode the segment id for each macro
- // block.
- for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) {
- // If not explicitly set value is defaulted to 255 by
- // memset above
- if (vp8_read_bit(bc))
- xd->mb_segment_tree_probs[i] =
- (vp8_prob)vp8_read_literal(bc, 8);
- }
-
- // If predictive coding of segment map is enabled read the
- // prediction probabilities.
- if (pc->temporal_update) {
- // Read the prediction probs needed to decode the segment id
- // when predictive coding enabled
- for (i = 0; i < PREDICTION_PROBS; i++) {
- // If not explicitly set value is defaulted to 255 by
- // memset above
- if (vp8_read_bit(bc))
- pc->segment_pred_probs[i] =
- (vp8_prob)vp8_read_literal(bc, 8);
- }
- }
- }
}
// Read common prediction model status flag probability updates for the
@@ -1314,7 +1264,17 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
#endif
/* Read the loop filter level and type */
+#if CONFIG_TX_SELECT
+ pc->txfm_mode = vp8_read_literal(bc, 2);
+ if (pc->txfm_mode == TX_MODE_SELECT) {
+ pc->prob_tx[0] = vp8_read_literal(bc, 8);
+ pc->prob_tx[1] = vp8_read_literal(bc, 8);
+ }
+#else
pc->txfm_mode = (TXFM_MODE) vp8_read_bit(bc);
+ if (pc->txfm_mode == ALLOW_8X8)
+ pc->txfm_mode = ALLOW_16X16;
+#endif
pc->filter_type = (LOOPFILTERTYPE) vp8_read_bit(bc);
pc->filter_level = vp8_read_literal(bc, 6);
@@ -1458,14 +1418,12 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
vp8_copy(pbi->common.fc.pre_hybrid_coef_probs_8x8,
pbi->common.fc.hybrid_coef_probs_8x8);
#endif
-#if CONFIG_TX16X16
vp8_copy(pbi->common.fc.pre_coef_probs_16x16,
pbi->common.fc.coef_probs_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(pbi->common.fc.pre_hybrid_coef_probs_16x16,
pbi->common.fc.hybrid_coef_probs_16x16);
#endif
-#endif
vp8_copy(pbi->common.fc.pre_ymode_prob, pbi->common.fc.ymode_prob);
vp8_copy(pbi->common.fc.pre_uv_mode_prob, pbi->common.fc.uv_mode_prob);
vp8_copy(pbi->common.fc.pre_bmode_prob, pbi->common.fc.bmode_prob);
@@ -1486,12 +1444,10 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_zero(pbi->common.fc.hybrid_coef_counts_8x8);
#endif
-#if CONFIG_TX16X16
vp8_zero(pbi->common.fc.coef_counts_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_zero(pbi->common.fc.hybrid_coef_counts_16x16);
#endif
-#endif
vp8_zero(pbi->common.fc.ymode_counts);
vp8_zero(pbi->common.fc.uv_mode_counts);
vp8_zero(pbi->common.fc.bmode_counts);
@@ -1530,10 +1486,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
/* Read the mb_no_coeff_skip flag */
pc->mb_no_coeff_skip = (int)vp8_read_bit(bc);
- if(pbi->interleaved_decoding)
- vpx_decode_mode_mvs_init(pbi);
- else
- vp8_decode_mode_mvs(pbi);
+ vpx_decode_mode_mvs_init(pbi);
vpx_memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
diff --git a/vp8/decoder/dequantize.c b/vp8/decoder/dequantize.c
index 95a3ae272..e55da754b 100644
--- a/vp8/decoder/dequantize.c
+++ b/vp8/decoder/dequantize.c
@@ -509,7 +509,6 @@ void vp8_ht_dequant_idct_add_16x16_c(TX_TYPE tx_type, short *input, short *dq,
}
#endif
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void vp8_dequant_idct_add_16x16_c(short *input, short *dq, unsigned char *pred,
unsigned char *dest, int pitch, int stride) {
short output[256];
@@ -544,4 +543,3 @@ void vp8_dequant_idct_add_16x16_c(short *input, short *dq, unsigned char *pred,
pred += pitch;
}
}
-#endif
diff --git a/vp8/decoder/dequantize.h b/vp8/decoder/dequantize.h
index e739ff842..5b7b21598 100644
--- a/vp8/decoder/dequantize.h
+++ b/vp8/decoder/dequantize.h
@@ -145,12 +145,10 @@ extern prototype_dequant_idct_add_y_block_8x8(vp8_dequant_idct_add_y_block_8x8);
#endif
extern prototype_dequant_idct_add_uv_block_8x8(vp8_dequant_idct_add_uv_block_8x8);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
#ifndef vp8_dequant_idct_add_16x16
#define vp8_dequant_idct_add_16x16 vp8_dequant_idct_add_16x16_c
#endif
extern prototype_dequant_idct_add(vp8_dequant_idct_add_16x16);
-#endif
typedef prototype_dequant_block((*vp8_dequant_block_fn_t));
@@ -184,9 +182,7 @@ typedef struct {
vp8_dequant_dc_idct_add_y_block_fn_t_8x8 dc_idct_add_y_block_8x8;
vp8_dequant_idct_add_y_block_fn_t_8x8 idct_add_y_block_8x8;
vp8_dequant_idct_add_uv_block_fn_t_8x8 idct_add_uv_block_8x8;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
vp8_dequant_idct_add_fn_t idct_add_16x16;
-#endif
} vp8_dequant_rtcd_vtable_t;
#if CONFIG_RUNTIME_CPU_DETECT
diff --git a/vp8/decoder/detokenize.c b/vp8/decoder/detokenize.c
index cfb076862..5b5ec7e2a 100644
--- a/vp8/decoder/detokenize.c
+++ b/vp8/decoder/detokenize.c
@@ -39,7 +39,6 @@ DECLARE_ALIGNED(16, const int, coef_bands_x_8x8[64]) = {
7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X,
};
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
DECLARE_ALIGNED(16, const int, coef_bands_x_16x16[256]) = {
0 * OCB_X, 1 * OCB_X, 2 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 4 * OCB_X, 5 * OCB_X, 5 * OCB_X, 3 * OCB_X, 6 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 6 * OCB_X, 6 * OCB_X,
6 * OCB_X, 5 * OCB_X, 5 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X,
@@ -58,7 +57,6 @@ DECLARE_ALIGNED(16, const int, coef_bands_x_16x16[256]) = {
7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X,
7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X
};
-#endif
#define EOB_CONTEXT_NODE 0
#define ZERO_CONTEXT_NODE 1
@@ -105,9 +103,7 @@ void vp8_reset_mb_tokens_context(MACROBLOCKD *xd) {
if ((xd->mode_info_context->mbmi.mode != B_PRED &&
xd->mode_info_context->mbmi.mode != I8X8_PRED &&
xd->mode_info_context->mbmi.mode != SPLITMV)
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
|| xd->mode_info_context->mbmi.txfm_size == TX_16X16
-#endif
) {
vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
@@ -141,7 +137,7 @@ int get_token(int v) {
#if CONFIG_HYBRIDTRANSFORM
void static count_tokens_adaptive_scan(const MACROBLOCKD *xd, INT16 *qcoeff_ptr,
- int block, int type,
+ int block, PLANE_TYPE type,
TX_TYPE tx_type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
int eob, int seg_eob,
@@ -186,7 +182,7 @@ void static count_tokens_adaptive_scan(const MACROBLOCKD *xd, INT16 *qcoeff_ptr,
}
#endif
-void static count_tokens(INT16 *qcoeff_ptr, int block, int type,
+void static count_tokens(INT16 *qcoeff_ptr, int block, PLANE_TYPE type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
int eob, int seg_eob, FRAME_CONTEXT *const fc) {
int c, pt, token, band;
@@ -205,7 +201,7 @@ void static count_tokens(INT16 *qcoeff_ptr, int block, int type,
}
}
-void static count_tokens_8x8(INT16 *qcoeff_ptr, int block, int type,
+void static count_tokens_8x8(INT16 *qcoeff_ptr, int block, PLANE_TYPE type,
#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type,
#endif
@@ -237,8 +233,7 @@ void static count_tokens_8x8(INT16 *qcoeff_ptr, int block, int type,
}
}
-#if CONFIG_TX16X16
-void static count_tokens_16x16(INT16 *qcoeff_ptr, int block, int type,
+void static count_tokens_16x16(INT16 *qcoeff_ptr, int block, PLANE_TYPE type,
#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type,
#endif
@@ -269,8 +264,6 @@ void static count_tokens_16x16(INT16 *qcoeff_ptr, int block, int type,
fc->coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN]++;
}
}
-#endif
-
static int vp8_get_signed(BOOL_DECODER *br, int value_to_sign) {
const int split = (br->range + 1) >> 1;
@@ -310,7 +303,8 @@ static int vp8_get_signed(BOOL_DECODER *br, int value_to_sign) {
} while (0);
static int vp8_decode_coefs(VP8D_COMP *dx, const MACROBLOCKD *xd,
- ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l, int type,
+ ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
+ PLANE_TYPE type,
#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type,
#endif
@@ -319,7 +313,7 @@ static int vp8_decode_coefs(VP8D_COMP *dx, const MACROBLOCKD *xd,
const int *coef_bands) {
FRAME_CONTEXT *const fc = &dx->common.fc;
BOOL_DECODER *br = xd->current_bc;
- int tmp, c = (type == 0);
+ int tmp, c = (type == PLANE_TYPE_Y_NO_DC);
const vp8_prob *prob, *coef_probs;
switch (block_type) {
@@ -338,7 +332,6 @@ static int vp8_decode_coefs(VP8D_COMP *dx, const MACROBLOCKD *xd,
#endif
fc->coef_probs_8x8[type][0][0];
break;
-#if CONFIG_TX16X16
case TX_16X16:
coef_probs =
#if CONFIG_HYBRIDTRANSFORM16X16
@@ -346,7 +339,6 @@ static int vp8_decode_coefs(VP8D_COMP *dx, const MACROBLOCKD *xd,
#endif
fc->coef_probs_16x16[type][0][0];
break;
-#endif
}
VP8_COMBINEENTROPYCONTEXTS(tmp, *a, *l);
@@ -445,24 +437,22 @@ SKIP_START:
tx_type,
#endif
a, l, c, seg_eob, fc);
-#if CONFIG_TX16X16
else
count_tokens_16x16(qcoeff_ptr, i, type,
#if CONFIG_HYBRIDTRANSFORM16X16
tx_type,
#endif
a, l, c, seg_eob, fc);
-#endif
return c;
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
int vp8_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd) {
ENTROPY_CONTEXT* const A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT* const L = (ENTROPY_CONTEXT *)xd->left_context;
char* const eobs = xd->eobs;
- int c, i, type, eobtotal = 0, seg_eob;
+ PLANE_TYPE type;
+ int c, i, eobtotal = 0, seg_eob;
const int segment_id = xd->mode_info_context->mbmi.segment_id;
const int seg_active = segfeature_active(xd, segment_id, SEG_LVL_EOB);
INT16 *qcoeff_ptr = &xd->qcoeff[0];
@@ -483,7 +473,6 @@ int vp8_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd) {
// Luma block
{
const int* const scan = vp8_default_zig_zag1d_16x16;
- //printf("16: %d\n", tx_type);
c = vp8_decode_coefs(pbi, xd, A, L, type,
#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
tx_type,
@@ -514,7 +503,6 @@ int vp8_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd) {
ENTROPY_CONTEXT* const l = L + vp8_block2left_8x8[i];
const int* const scan = vp8_default_zig_zag1d_8x8;
- //printf("8: %d\n", tx_type);
c = vp8_decode_coefs(pbi, xd, a, l, type,
#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
tx_type,
@@ -532,15 +520,14 @@ int vp8_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd) {
vpx_memset(&L[8], 0, sizeof(L[8]));
return eobtotal;
}
-#endif
-
int vp8_decode_mb_tokens_8x8(VP8D_COMP *pbi, MACROBLOCKD *xd) {
ENTROPY_CONTEXT *const A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *const L = (ENTROPY_CONTEXT *)xd->left_context;
char *const eobs = xd->eobs;
- int c, i, type, eobtotal = 0, seg_eob;
+ PLANE_TYPE type;
+ int c, i, eobtotal = 0, seg_eob;
const int segment_id = xd->mode_info_context->mbmi.segment_id;
const int seg_active = segfeature_active(xd, segment_id, SEG_LVL_EOB);
INT16 *qcoeff_ptr = &xd->qcoeff[0];
@@ -647,8 +634,8 @@ int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *xd) {
char *const eobs = xd->eobs;
const int *scan = vp8_default_zig_zag1d;
-
- int c, i, type, eobtotal = 0, seg_eob = 16;
+ PLANE_TYPE type;
+ int c, i, eobtotal = 0, seg_eob = 16;
INT16 *qcoeff_ptr = &xd->qcoeff[0];
int segment_id = xd->mode_info_context->mbmi.segment_id;
diff --git a/vp8/decoder/detokenize.h b/vp8/decoder/detokenize.h
index 5fb1a7f3e..d02d4cae1 100644
--- a/vp8/decoder/detokenize.h
+++ b/vp8/decoder/detokenize.h
@@ -17,8 +17,6 @@
void vp8_reset_mb_tokens_context(MACROBLOCKD *xd);
int vp8_decode_mb_tokens(VP8D_COMP *, MACROBLOCKD *);
int vp8_decode_mb_tokens_8x8(VP8D_COMP *, MACROBLOCKD *);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
int vp8_decode_mb_tokens_16x16(VP8D_COMP *, MACROBLOCKD *);
-#endif
#endif /* DETOKENIZE_H */
diff --git a/vp8/decoder/generic/dsystemdependent.c b/vp8/decoder/generic/dsystemdependent.c
index ccd6764ac..b7b65d902 100644
--- a/vp8/decoder/generic/dsystemdependent.c
+++ b/vp8/decoder/generic/dsystemdependent.c
@@ -22,9 +22,7 @@ void vp8_dmachine_specific_config(VP8D_COMP *pbi) {
pbi->mb.rtcd = &pbi->common.rtcd;
pbi->dequant.block_2x2 = vp8_dequantize_b_2x2_c;
pbi->dequant.idct_add_8x8 = vp8_dequant_idct_add_8x8_c;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
pbi->dequant.idct_add_16x16 = vp8_dequant_idct_add_16x16_c;
-#endif
pbi->dequant.dc_idct_add_8x8 = vp8_dequant_dc_idct_add_8x8_c;
pbi->dequant.dc_idct_add_y_block_8x8 = vp8_dequant_dc_idct_add_y_block_8x8_c;
pbi->dequant.idct_add_y_block_8x8 = vp8_dequant_idct_add_y_block_8x8_c;
diff --git a/vp8/decoder/onyxd_if.c b/vp8/decoder/onyxd_if.c
index 2e1364817..2e7751325 100644
--- a/vp8/decoder/onyxd_if.c
+++ b/vp8/decoder/onyxd_if.c
@@ -149,8 +149,6 @@ VP8D_PTR vp8dx_create_decompressor(VP8D_CONFIG *oxcf) {
pbi->decoded_key_frame = 0;
- pbi->interleaved_decoding = CONFIG_NEWBESTREFMV || CONFIG_SUPERBLOCKS;
-
return (VP8D_PTR) pbi;
}
@@ -471,38 +469,6 @@ int vp8dx_receive_compressed_data(VP8D_PTR ptr, unsigned long size, const unsign
pbi->last_time_stamp = time_stamp;
pbi->source_sz = 0;
-#if 0
- {
- int i;
- int64_t earliest_time = pbi->dr[0].time_stamp;
- int64_t latest_time = pbi->dr[0].time_stamp;
- int64_t time_diff = 0;
- int bytes = 0;
-
- pbi->dr[pbi->common.current_video_frame & 0xf].size = pbi->bc.pos + pbi->bc2.pos + 4;;
- pbi->dr[pbi->common.current_video_frame & 0xf].time_stamp = time_stamp;
-
- for (i = 0; i < 16; i++) {
-
- bytes += pbi->dr[i].size;
-
- if (pbi->dr[i].time_stamp < earliest_time)
- earliest_time = pbi->dr[i].time_stamp;
-
- if (pbi->dr[i].time_stamp > latest_time)
- latest_time = pbi->dr[i].time_stamp;
- }
-
- time_diff = latest_time - earliest_time;
-
- if (time_diff > 0) {
- pbi->common.bitrate = 80000.00 * bytes / time_diff;
- pbi->common.framerate = 160000000.00 / time_diff;
- }
-
- }
-#endif
-
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
if (cm->rtcd.flags & HAS_NEON)
diff --git a/vp8/decoder/onyxd_int.h b/vp8/decoder/onyxd_int.h
index 0cb00a4c5..b757f7764 100644
--- a/vp8/decoder/onyxd_int.h
+++ b/vp8/decoder/onyxd_int.h
@@ -33,11 +33,6 @@ typedef struct {
} MB_ROW_DEC;
typedef struct {
- int64_t time_stamp;
- int size;
-} DATARATE;
-
-typedef struct {
int const *scan;
int const *scan_8x8;
UINT8 const *ptr_block2leftabove;
@@ -54,9 +49,7 @@ typedef struct {
vp8_prob const *coef_probs[BLOCK_TYPES];
vp8_prob const *coef_probs_8x8[BLOCK_TYPES_8X8];
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
vp8_prob const *coef_probs_16X16[BLOCK_TYPES_16X16];
-#endif
UINT8 eob[25];
@@ -79,8 +72,6 @@ typedef struct VP8Decompressor {
int64_t last_time_stamp;
int ready_for_new_data;
- DATARATE dr[16];
-
DETOK detoken;
#if CONFIG_RUNTIME_CPU_DETECT
@@ -91,8 +82,6 @@ typedef struct VP8Decompressor {
int decoded_key_frame;
- int interleaved_decoding;
-
} VP8D_COMP;
int vp8_decode_frame(VP8D_COMP *cpi);
diff --git a/vp8/decoder/reconintra_mt.c b/vp8/decoder/reconintra_mt.c
index f447cb189..ebd1bb314 100644
--- a/vp8/decoder/reconintra_mt.c
+++ b/vp8/decoder/reconintra_mt.c
@@ -10,7 +10,6 @@
#include "vpx_ports/config.h"
-#include "vp8/common/recon.h"
#include "vp8/common/reconintra.h"
#include "vpx_mem/vpx_mem.h"
#include "onyxd_int.h"
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 4920739bc..cd779ddb6 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -61,7 +61,6 @@ unsigned int hybrid_tree_update_hist_8x8 [BLOCK_TYPES_8X8]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
#endif
-#if CONFIG_TX16X16
unsigned int tree_update_hist_16x16 [BLOCK_TYPES_16X16]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
@@ -72,7 +71,6 @@ unsigned int hybrid_tree_update_hist_16x16 [BLOCK_TYPES_16X16]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
#endif
-#endif
extern unsigned int active_section;
#endif
@@ -241,6 +239,18 @@ static void update_mbintra_mode_probs(VP8_COMP *cpi) {
}
}
+static __inline int get_prob(int num, int den) {
+ int p;
+ if (den <= 0)
+ return 128;
+ p = (num * 255 + (den >> 1)) / den;
+ if (p > 255)
+ return 255;
+ else if (p < 1)
+ return 1;
+ return p;
+}
+
void update_skip_probs(VP8_COMP *cpi) {
VP8_COMMON *const pc = & cpi->common;
int prob_skip_false[3] = {0, 0, 0};
@@ -444,13 +454,15 @@ static int prob_diff_update_savings_search(const unsigned int *ct,
return bestsavings;
}
-static void pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount) {
- const TOKENEXTRA *const stop = p + xcount;
+static void pack_mb_tokens(vp8_writer *w,
+ TOKENEXTRA **tp,
+ const TOKENEXTRA *const stop) {
unsigned int split;
unsigned int shift;
int count = w->count;
unsigned int range = w->range;
unsigned int lowvalue = w->lowvalue;
+ TOKENEXTRA *p = *tp;
while (p < stop) {
const int t = p->Token;
@@ -461,6 +473,12 @@ static void pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount) {
int v = a->value;
int n = a->Len;
+ if (t == EOSB_TOKEN)
+ {
+ ++p;
+ break;
+ }
+
/* skip one or two nodes */
if (p->skip_eob_node) {
n -= p->skip_eob_node;
@@ -594,14 +612,13 @@ static void pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount) {
}
}
-
++p;
}
w->count = count;
w->lowvalue = lowvalue;
w->range = range;
-
+ *tp = p;
}
static void write_partition_size(unsigned char *cx_data, int size) {
@@ -834,7 +851,7 @@ static void update_ref_probs(VP8_COMP *const cpi) {
static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
int i;
VP8_COMMON *const pc = & cpi->common;
- vp8_writer *const w = & cpi->bc;
+ vp8_writer *const w = & cpi->bc2;
#if CONFIG_NEWMVENTROPY
const nmv_context *nmvc = &pc->fc.nmvc;
#else
@@ -845,6 +862,8 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
MACROBLOCKD *xd = &cpi->mb.e_mbd;
MODE_INFO *m;
MODE_INFO *prev_m;
+ TOKENEXTRA *tok = cpi->tok;
+ TOKENEXTRA *tok_end = tok + cpi->tok_count;
const int mis = pc->mode_info_stride;
int mb_row, mb_col;
@@ -861,73 +880,6 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
cpi->mb.partition_info = cpi->mb.pi;
- // Update the probabilities used to encode reference frame data
- update_ref_probs(cpi);
-
-#ifdef ENTROPY_STATS
- active_section = 1;
-#endif
-
- if (pc->mb_no_coeff_skip) {
- int k;
-
- update_skip_probs(cpi);
- for (k = 0; k < MBSKIP_CONTEXTS; ++k)
- vp8_write_literal(w, pc->mbskip_pred_probs[k], 8);
- }
-
-#if CONFIG_PRED_FILTER
- // Write the prediction filter mode used for this frame
- vp8_write_literal(w, pc->pred_filter_mode, 2);
-
- // Write prediction filter on/off probability if signaling at MB level
- if (pc->pred_filter_mode == 2)
- vp8_write_literal(w, pc->prob_pred_filter_off, 8);
-
- // printf("pred_filter_mode:%d prob_pred_filter_off:%d\n",
- // pc->pred_filter_mode, pc->prob_pred_filter_off);
-#endif
-#if CONFIG_SWITCHABLE_INTERP
- if (pc->mcomp_filter_type == SWITCHABLE)
- update_switchable_interp_probs(cpi);
-#endif
-
- vp8_write_literal(w, pc->prob_intra_coded, 8);
- vp8_write_literal(w, pc->prob_last_coded, 8);
- vp8_write_literal(w, pc->prob_gf_coded, 8);
-
- if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
- vp8_write(w, 1, 128);
- vp8_write(w, 1, 128);
- for (i = 0; i < COMP_PRED_CONTEXTS; i++) {
- if (cpi->single_pred_count[i] + cpi->comp_pred_count[i]) {
- pc->prob_comppred[i] = cpi->single_pred_count[i] * 255 /
- (cpi->single_pred_count[i] + cpi->comp_pred_count[i]);
- if (pc->prob_comppred[i] < 1)
- pc->prob_comppred[i] = 1;
- } else {
- pc->prob_comppred[i] = 128;
- }
- vp8_write_literal(w, pc->prob_comppred[i], 8);
- }
- } else if (cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY) {
- vp8_write(w, 0, 128);
- } else { /* compound prediction only */
- vp8_write(w, 1, 128);
- vp8_write(w, 0, 128);
- }
-
- update_mbintra_mode_probs(cpi);
-
-#if CONFIG_NEWMVENTROPY
- vp8_write_nmvprobs(cpi, xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv)
- vp8_write_mvprobs_hp(cpi);
- else
- vp8_write_mvprobs(cpi);
-#endif
-
mb_row = 0;
for (row = 0; row < pc->mb_rows; row += 2) {
m = pc->mi + row * mis;
@@ -1290,6 +1242,27 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
}
}
+#if CONFIG_TX_SELECT
+ if (((rf == INTRA_FRAME && mode <= I8X8_PRED) ||
+ (rf != INTRA_FRAME && mode != SPLITMV)) &&
+ pc->txfm_mode == TX_MODE_SELECT &&
+ !((pc->mb_no_coeff_skip && mi->mb_skip_coeff) ||
+ (segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
+ get_segdata(xd, segment_id, SEG_LVL_EOB) == 0))) {
+ TX_SIZE sz = mi->txfm_size;
+ // FIXME(rbultje) code ternary symbol once all experiments are merged
+ vp8_write(w, sz != TX_4X4, pc->prob_tx[0]);
+ if (sz != TX_4X4 && mode != I8X8_PRED)
+ vp8_write(w, sz != TX_8X8, pc->prob_tx[1]);
+ }
+#endif
+
+#ifdef ENTROPY_STATS
+ active_section = 1;
+#endif
+ assert(tok < tok_end);
+ pack_mb_tokens(w, &tok, tok_end);
+
#if CONFIG_SUPERBLOCKS
if (m->mbmi.encoded_as_sb) {
assert(!i);
@@ -1323,8 +1296,110 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
}
+static void write_mb_modes_kf(const VP8_COMMON *c,
+ const MACROBLOCKD *xd,
+ const MODE_INFO *m,
+ int mode_info_stride,
+ vp8_writer *const bc) {
+ const int mis = mode_info_stride;
+ int ym;
+ int segment_id;
+
+ ym = m->mbmi.mode;
+ segment_id = m->mbmi.segment_id;
+
+ if (xd->update_mb_segmentation_map) {
+ write_mb_segid(bc, &m->mbmi, xd);
+ }
+
+ if (c->mb_no_coeff_skip &&
+ (!segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
+ (get_segdata(xd, segment_id, SEG_LVL_EOB) != 0))) {
+ int skip_coeff = m->mbmi.mb_skip_coeff;
+#if CONFIG_SUPERBLOCKS
+ if (m->mbmi.encoded_as_sb) {
+ skip_coeff &= m[1].mbmi.mb_skip_coeff;
+ skip_coeff &= m[mis].mbmi.mb_skip_coeff;
+ skip_coeff &= m[mis + 1].mbmi.mb_skip_coeff;
+ }
+#endif
+ vp8_encode_bool(bc, skip_coeff,
+ get_pred_prob(c, xd, PRED_MBSKIP));
+ }
+
+#if CONFIG_SUPERBLOCKS
+ if (m->mbmi.encoded_as_sb) {
+ sb_kfwrite_ymode(bc, ym,
+ c->sb_kf_ymode_prob[c->kf_ymode_probs_index]);
+ } else
+#endif
+ {
+ kfwrite_ymode(bc, ym,
+ c->kf_ymode_prob[c->kf_ymode_probs_index]);
+ }
+
+ if (ym == B_PRED) {
+ const int mis = c->mode_info_stride;
+ int i = 0;
+#if CONFIG_COMP_INTRA_PRED
+ int uses_second =
+ m->bmi[0].as_mode.second !=
+ (B_PREDICTION_MODE)(B_DC_PRED - 1);
+ vp8_write(bc, uses_second, 128);
+#endif
+ do {
+ const B_PREDICTION_MODE A = above_block_mode(m, i, mis);
+ const B_PREDICTION_MODE L = left_block_mode(m, i);
+ const int bm = m->bmi[i].as_mode.first;
+#if CONFIG_COMP_INTRA_PRED
+ const int bm2 = m->bmi[i].as_mode.second;
+#endif
+
+#ifdef ENTROPY_STATS
+ ++intra_mode_stats [A] [L] [bm];
+#endif
+
+ write_bmode(bc, bm, c->kf_bmode_prob [A] [L]);
+ // printf(" mode: %d\n", bm);
+#if CONFIG_COMP_INTRA_PRED
+ if (uses_second) {
+ write_bmode(bc, bm2, c->kf_bmode_prob [A] [L]);
+ }
+#endif
+ } while (++i < 16);
+ }
+ if (ym == I8X8_PRED) {
+ write_i8x8_mode(bc, m->bmi[0].as_mode.first,
+ c->fc.i8x8_mode_prob);
+ // printf(" mode: %d\n", m->bmi[0].as_mode.first); fflush(stdout);
+ write_i8x8_mode(bc, m->bmi[2].as_mode.first,
+ c->fc.i8x8_mode_prob);
+ // printf(" mode: %d\n", m->bmi[2].as_mode.first); fflush(stdout);
+ write_i8x8_mode(bc, m->bmi[8].as_mode.first,
+ c->fc.i8x8_mode_prob);
+ // printf(" mode: %d\n", m->bmi[8].as_mode.first); fflush(stdout);
+ write_i8x8_mode(bc, m->bmi[10].as_mode.first,
+ c->fc.i8x8_mode_prob);
+ // printf(" mode: %d\n", m->bmi[10].as_mode.first); fflush(stdout);
+ } else
+ write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
+
+#if CONFIG_TX_SELECT
+ if (ym <= I8X8_PRED && c->txfm_mode == TX_MODE_SELECT &&
+ !((c->mb_no_coeff_skip && m->mbmi.mb_skip_coeff) ||
+ (segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
+ get_segdata(xd, segment_id, SEG_LVL_EOB) == 0))) {
+ TX_SIZE sz = m->mbmi.txfm_size;
+ // FIXME(rbultje) code ternary symbol once all experiments are merged
+ vp8_write(bc, sz != TX_4X4, c->prob_tx[0]);
+ if (sz != TX_4X4 && ym <= TM_PRED)
+ vp8_write(bc, sz != TX_8X8, c->prob_tx[1]);
+ }
+#endif
+}
+
static void write_kfmodes(VP8_COMP *cpi) {
- vp8_writer *const bc = & cpi->bc;
+ vp8_writer *const bc = & cpi->bc2;
VP8_COMMON *const c = & cpi->common;
const int mis = c->mode_info_stride;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
@@ -1334,16 +1409,8 @@ static void write_kfmodes(VP8_COMP *cpi) {
int mb_row, mb_col;
int row_delta[4] = { 0, +1, 0, -1};
int col_delta[4] = { +1, -1, +1, +1};
-
- if (c->mb_no_coeff_skip) {
- update_skip_probs(cpi);
- for (i = 0; i < MBSKIP_CONTEXTS; ++i)
- vp8_write_literal(bc, c->mbskip_pred_probs[i], 8);
- }
-
- if (!c->kf_ymode_probs_update) {
- vp8_write_literal(bc, c->kf_ymode_probs_index, 3);
- }
+ TOKENEXTRA *tok = cpi->tok;
+ TOKENEXTRA *tok_end = tok + cpi->tok_count;
mb_row = 0;
for (row = 0; row < c->mb_rows; row += 2) {
@@ -1357,8 +1424,6 @@ static void write_kfmodes(VP8_COMP *cpi) {
// Process the 4 MBs in the order:
// top-left, top-right, bottom-left, bottom-right
for (i = 0; i < 4; i++) {
- int ym;
- int segment_id;
int dy = row_delta[i];
int dx = col_delta[i];
int offset_extended = dy * mis + dx;
@@ -1374,83 +1439,12 @@ static void write_kfmodes(VP8_COMP *cpi) {
// Make sure the MacroBlockD mode info pointer is set correctly
xd->mode_info_context = m;
- ym = m->mbmi.mode;
- segment_id = m->mbmi.segment_id;
-
- if (cpi->mb.e_mbd.update_mb_segmentation_map) {
- write_mb_segid(bc, &m->mbmi, &cpi->mb.e_mbd);
- }
-
- if (c->mb_no_coeff_skip &&
- (!segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
- (get_segdata(xd, segment_id, SEG_LVL_EOB) != 0))) {
- int skip_coeff = m->mbmi.mb_skip_coeff;
-#if CONFIG_SUPERBLOCKS
- if (m->mbmi.encoded_as_sb) {
- skip_coeff &= m[1].mbmi.mb_skip_coeff;
- skip_coeff &= m[mis].mbmi.mb_skip_coeff;
- skip_coeff &= m[mis + 1].mbmi.mb_skip_coeff;
- }
-#endif
- vp8_encode_bool(bc, skip_coeff,
- get_pred_prob(c, xd, PRED_MBSKIP));
- }
-#if CONFIG_SUPERBLOCKS
- if (m->mbmi.encoded_as_sb) {
- sb_kfwrite_ymode(bc, ym,
- c->sb_kf_ymode_prob[c->kf_ymode_probs_index]);
- } else
-#endif
- {
- kfwrite_ymode(bc, ym,
- c->kf_ymode_prob[c->kf_ymode_probs_index]);
- }
-
- if (ym == B_PRED) {
- const int mis = c->mode_info_stride;
- int i = 0;
-#if CONFIG_COMP_INTRA_PRED
- int uses_second =
- m->bmi[0].as_mode.second !=
- (B_PREDICTION_MODE)(B_DC_PRED - 1);
- vp8_write(bc, uses_second, 128);
-#endif
- do {
- const B_PREDICTION_MODE A = above_block_mode(m, i, mis);
- const B_PREDICTION_MODE L = left_block_mode(m, i);
- const int bm = m->bmi[i].as_mode.first;
-#if CONFIG_COMP_INTRA_PRED
- const int bm2 = m->bmi[i].as_mode.second;
-#endif
-
+ write_mb_modes_kf(c, xd, m, mis, bc);
#ifdef ENTROPY_STATS
- ++intra_mode_stats [A] [L] [bm];
+ active_section = 8;
#endif
-
- write_bmode(bc, bm, c->kf_bmode_prob [A] [L]);
- // printf(" mode: %d\n", bm);
-#if CONFIG_COMP_INTRA_PRED
- if (uses_second) {
- write_bmode(bc, bm2, c->kf_bmode_prob [A] [L]);
- }
-#endif
- } while (++i < 16);
- }
- if (ym == I8X8_PRED) {
- write_i8x8_mode(bc, m->bmi[0].as_mode.first,
- c->fc.i8x8_mode_prob);
- // printf(" mode: %d\n", m->bmi[0].as_mode.first); fflush(stdout);
- write_i8x8_mode(bc, m->bmi[2].as_mode.first,
- c->fc.i8x8_mode_prob);
- // printf(" mode: %d\n", m->bmi[2].as_mode.first); fflush(stdout);
- write_i8x8_mode(bc, m->bmi[8].as_mode.first,
- c->fc.i8x8_mode_prob);
- // printf(" mode: %d\n", m->bmi[8].as_mode.first); fflush(stdout);
- write_i8x8_mode(bc, m->bmi[10].as_mode.first,
- c->fc.i8x8_mode_prob);
- // printf(" mode: %d\n", m->bmi[10].as_mode.first); fflush(stdout);
- } else
- write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
+ assert(tok < tok_end);
+ pack_mb_tokens(bc, &tok, tok_end);
#if CONFIG_SUPERBLOCKS
if (m->mbmi.encoded_as_sb) {
@@ -1548,7 +1542,7 @@ void build_coeff_contexts(VP8_COMP *cpi) {
#endif
- if (cpi->common.txfm_mode == ALLOW_8X8) {
+ if (cpi->common.txfm_mode != ONLY_4X4) {
for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
for (j = 0; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -1601,23 +1595,23 @@ void build_coeff_contexts(VP8_COMP *cpi) {
#endif
}
-#if CONFIG_TX16X16
- //16x16
- for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
- for (j = 0; j < COEF_BANDS; ++j) {
- for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
- if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
- vp8_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
- cpi->frame_coef_probs_16x16[i][j][k],
- cpi->frame_branch_ct_16x16[i][j][k],
- cpi->coef_counts_16x16[i][j][k], 256, 1);
+ if (cpi->common.txfm_mode > ALLOW_8X8) {
+ for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
+ for (j = 0; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
+ vp8_tree_probs_from_distribution(
+ MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ cpi->frame_coef_probs_16x16[i][j][k],
+ cpi->frame_branch_ct_16x16[i][j][k],
+ cpi->coef_counts_16x16[i][j][k], 256, 1);
#ifdef ENTROPY_STATS
- if (!cpi->dummy_packing)
- for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
- context_counters_16x16[i][j][k][t] += cpi->coef_counts_16x16[i][j][k][t];
+ if (!cpi->dummy_packing)
+ for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
+ context_counters_16x16[i][j][k][t] += cpi->coef_counts_16x16[i][j][k][t];
#endif
+ }
}
}
}
@@ -1641,7 +1635,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
#endif
-#endif
}
#if 0
@@ -1730,8 +1723,7 @@ static void update_coef_probs2(VP8_COMP *cpi) {
}
}
- if (cpi->common.txfm_mode != ALLOW_8X8) return;
-
+ if (cpi->common.txfm_mode != ONLY_4X4)
for (t = 0; t < ENTROPY_NODES; ++t) {
/* dry run to see if there is any udpate at all needed */
savings = 0;
@@ -2008,7 +2000,7 @@ static void update_coef_probs(VP8_COMP *cpi) {
#endif
/* do not do this if not even allowed */
- if (cpi->common.txfm_mode == ALLOW_8X8) {
+ if (cpi->common.txfm_mode != ONLY_4X4) {
/* dry run to see if update is necessary */
update[0] = update[1] = 0;
savings = 0;
@@ -2160,8 +2152,7 @@ static void update_coef_probs(VP8_COMP *cpi) {
#endif
}
-#if CONFIG_TX16X16
- // 16x16
+ if (cpi->common.txfm_mode > ALLOW_8X8) {
/* dry run to see if update is necessary */
update[0] = update[1] = 0;
savings = 0;
@@ -2311,7 +2302,7 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
}
#endif
-#endif
+ }
}
#ifdef PACKET_TESTING
@@ -2449,9 +2440,33 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
if (xd->update_mb_segmentation_map) {
// Select the coding strategy (temporal or spatial)
choose_segmap_coding_method(cpi);
+ // Send the tree probabilities used to decode unpredicted
+ // macro-block segments
+ for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) {
+ int data = xd->mb_segment_tree_probs[i];
+
+ if (data != 255) {
+ vp8_write_bit(bc, 1);
+ vp8_write_literal(bc, data, 8);
+ } else {
+ vp8_write_bit(bc, 0);
+ }
+ }
// Write out the chosen coding method.
vp8_write_bit(bc, (pc->temporal_update) ? 1 : 0);
+ if (pc->temporal_update) {
+ for (i = 0; i < PREDICTION_PROBS; i++) {
+ int data = pc->segment_pred_probs[i];
+
+ if (data != 255) {
+ vp8_write_bit(bc, 1);
+ vp8_write_literal(bc, data, 8);
+ } else {
+ vp8_write_bit(bc, 0);
+ }
+ }
+ }
}
vp8_write_bit(bc, (xd->update_mb_segmentation_data) ? 1 : 0);
@@ -2545,33 +2560,6 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
save_segment_info(xd);
#endif
- if (xd->update_mb_segmentation_map) {
- // Send the tree probabilities used to decode unpredicted
- // macro-block segments
- for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) {
- int Data = xd->mb_segment_tree_probs[i];
-
- if (Data != 255) {
- vp8_write_bit(bc, 1);
- vp8_write_literal(bc, Data, 8);
- } else
- vp8_write_bit(bc, 0);
- }
-
- // If predictive coding of segment map is enabled send the
- // prediction probabilities.
- if (pc->temporal_update) {
- for (i = 0; i < PREDICTION_PROBS; i++) {
- int Data = pc->segment_pred_probs[i];
-
- if (Data != 255) {
- vp8_write_bit(bc, 1);
- vp8_write_literal(bc, Data, 8);
- } else
- vp8_write_bit(bc, 0);
- }
- }
- }
}
// Encode the common prediction model status flag probability updates for
@@ -2600,7 +2588,26 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
}
#endif
- vp8_write_bit(bc, pc->txfm_mode);
+#if CONFIG_TX_SELECT
+ {
+ if (pc->txfm_mode == TX_MODE_SELECT) {
+ pc->prob_tx[0] = get_prob(cpi->txfm_count[0] + cpi->txfm_count_8x8p[0],
+ cpi->txfm_count[0] + cpi->txfm_count[1] + cpi->txfm_count[2] +
+ cpi->txfm_count_8x8p[0] + cpi->txfm_count_8x8p[1]);
+ pc->prob_tx[1] = get_prob(cpi->txfm_count[1], cpi->txfm_count[1] + cpi->txfm_count[2]);
+ } else {
+ pc->prob_tx[0] = 128;
+ pc->prob_tx[1] = 128;
+ }
+ vp8_write_literal(bc, pc->txfm_mode, 2);
+ if (pc->txfm_mode == TX_MODE_SELECT) {
+ vp8_write_literal(bc, pc->prob_tx[0], 8);
+ vp8_write_literal(bc, pc->prob_tx[1], 8);
+ }
+ }
+#else
+ vp8_write_bit(bc, !!pc->txfm_mode);
+#endif
// Encode the loop filter level and type
vp8_write_bit(bc, pc->filter_type);
@@ -2751,12 +2758,10 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cpi->common.fc.pre_hybrid_coef_probs_8x8, cpi->common.fc.hybrid_coef_probs_8x8);
#endif
-#if CONFIG_TX16X16
vp8_copy(cpi->common.fc.pre_coef_probs_16x16, cpi->common.fc.coef_probs_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cpi->common.fc.pre_hybrid_coef_probs_16x16, cpi->common.fc.hybrid_coef_probs_16x16);
#endif
-#endif
vp8_copy(cpi->common.fc.pre_ymode_prob, cpi->common.fc.ymode_prob);
vp8_copy(cpi->common.fc.pre_uv_mode_prob, cpi->common.fc.uv_mode_prob);
vp8_copy(cpi->common.fc.pre_bmode_prob, cpi->common.fc.bmode_prob);
@@ -2782,21 +2787,80 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
// Write out the mb_no_coeff_skip flag
vp8_write_bit(bc, pc->mb_no_coeff_skip);
+ if (pc->mb_no_coeff_skip) {
+ int k;
- if (pc->frame_type == KEY_FRAME) {
- decide_kf_ymode_entropy(cpi);
- write_kfmodes(cpi);
+ update_skip_probs(cpi);
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k)
+ vp8_write_literal(bc, pc->mbskip_pred_probs[k], 8);
+ }
-#ifdef ENTROPY_STATS
- active_section = 8;
-#endif
+ if (pc->frame_type == KEY_FRAME) {
+ if (!pc->kf_ymode_probs_update) {
+ vp8_write_literal(bc, pc->kf_ymode_probs_index, 3);
+ }
} else {
- pack_inter_mode_mvs(cpi);
- vp8_update_mode_context(&cpi->common);
+ // Update the probabilities used to encode reference frame data
+ update_ref_probs(cpi);
#ifdef ENTROPY_STATS
active_section = 1;
#endif
+
+#if CONFIG_PRED_FILTER
+ // Write the prediction filter mode used for this frame
+ vp8_write_literal(bc, pc->pred_filter_mode, 2);
+
+ // Write prediction filter on/off probability if signaling at MB level
+ if (pc->pred_filter_mode == 2)
+ vp8_write_literal(bc, pc->prob_pred_filter_off, 8);
+
+#endif
+#if CONFIG_SWITCHABLE_INTERP
+ if (pc->mcomp_filter_type == SWITCHABLE)
+ update_switchable_interp_probs(cpi);
+#endif
+
+ vp8_write_literal(bc, pc->prob_intra_coded, 8);
+ vp8_write_literal(bc, pc->prob_last_coded, 8);
+ vp8_write_literal(bc, pc->prob_gf_coded, 8);
+
+ {
+ const int comp_pred_mode = cpi->common.comp_pred_mode;
+ const int use_compound_pred = (comp_pred_mode != SINGLE_PREDICTION_ONLY);
+ const int use_hybrid_pred = (comp_pred_mode == HYBRID_PREDICTION);
+
+ vp8_write(bc, use_compound_pred, 128);
+ if (use_compound_pred) {
+ vp8_write(bc, use_hybrid_pred, 128);
+ if (use_hybrid_pred) {
+ for (i = 0; i < COMP_PRED_CONTEXTS; i++) {
+ if (cpi->single_pred_count[i] + cpi->comp_pred_count[i]) {
+ pc->prob_comppred[i] = cpi->single_pred_count[i] * 255 /
+ (cpi->single_pred_count[i]
+ + cpi->comp_pred_count[i]);
+ if (pc->prob_comppred[i] < 1)
+ pc->prob_comppred[i] = 1;
+ } else {
+ pc->prob_comppred[i] = 128;
+ }
+ vp8_write_literal(bc, pc->prob_comppred[i], 8);
+ }
+ }
+ }
+ }
+
+ update_mbintra_mode_probs(cpi);
+
+#if CONFIG_NEWMVENTROPY
+ vp8_write_nmvprobs(cpi, xd->allow_high_precision_mv);
+#else
+ if (xd->allow_high_precision_mv) {
+ vp8_write_mvprobs_hp(cpi);
+ } else {
+ vp8_write_mvprobs(cpi);
+ }
+#endif
}
vp8_stop_encode(bc);
@@ -2816,10 +2880,16 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
}
*size = VP8_HEADER_SIZE + extra_bytes_packed + cpi->bc.pos;
-
vp8_start_encode(&cpi->bc2, cx_data + bc->pos);
- pack_tokens(&cpi->bc2, cpi->tok, cpi->tok_count);
+ if (pc->frame_type == KEY_FRAME) {
+ decide_kf_ymode_entropy(cpi);
+ write_kfmodes(cpi);
+ } else {
+ pack_inter_mode_mvs(cpi);
+ vp8_update_mode_context(&cpi->common);
+ }
+
vp8_stop_encode(&cpi->bc2);
@@ -2891,7 +2961,6 @@ void print_tree_update_probs() {
fprintf(f, " },\n");
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
fprintf(f, "const vp8_prob\n"
"vp8_coef_update_probs_16x16[BLOCK_TYPES_16X16]\n"
" [COEF_BANDS]\n"
@@ -2919,15 +2988,12 @@ void print_tree_update_probs() {
}
fprintf(f, " },\n");
}
-#endif
fclose(f);
f = fopen("treeupdate.bin", "wb");
fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f);
fwrite(tree_update_hist_8x8, sizeof(tree_update_hist_8x8), 1, f);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
fwrite(tree_update_hist_16x16, sizeof(tree_update_hist_16x16), 1, f);
-#endif
fclose(f);
}
#endif
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index a204c8b60..861700409 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -35,14 +35,10 @@ typedef struct {
unsigned char *quant_shift;
short *zbin;
short *zbin_8x8;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
short *zbin_16x16;
-#endif
short *zrun_zbin_boost;
short *zrun_zbin_boost_8x8;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
short *zrun_zbin_boost_16x16;
-#endif
short *round;
// Zbin Over Quant value
@@ -55,9 +51,7 @@ typedef struct {
int eob_max_offset;
int eob_max_offset_8x8;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
int eob_max_offset_16x16;
-#endif
} BLOCK;
typedef struct {
@@ -85,6 +79,9 @@ typedef struct {
int hybrid_pred_diff;
int comp_pred_diff;
int single_pred_diff;
+#if CONFIG_TX_SELECT
+ int64_t txfm_rd_diff[NB_TXFM_MODES];
+#endif
} PICK_MODE_CONTEXT;
typedef struct {
@@ -186,16 +183,12 @@ typedef struct {
void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);
void (*vp8_short_fdct8x4)(short *input, short *output, int pitch);
void (*short_walsh4x4)(short *input, short *output, int pitch);
- void (*quantize_b)(BLOCK *b, BLOCKD *d);
- void (*quantize_b_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
+ void (*quantize_b_4x4)(BLOCK *b, BLOCKD *d);
+ void (*quantize_b_4x4_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
void (*vp8_short_fdct8x8)(short *input, short *output, int pitch);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void (*vp8_short_fdct16x16)(short *input, short *output, int pitch);
-#endif
void (*short_fhaar2x2)(short *input, short *output, int pitch);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void (*quantize_b_16x16)(BLOCK *b, BLOCKD *d);
-#endif
void (*quantize_b_8x8)(BLOCK *b, BLOCKD *d);
void (*quantize_b_2x2)(BLOCK *b, BLOCKD *d);
diff --git a/vp8/encoder/dct.c b/vp8/encoder/dct.c
index 81b4e6be2..d81a547d2 100644
--- a/vp8/encoder/dct.c
+++ b/vp8/encoder/dct.c
@@ -693,7 +693,6 @@ void vp8_short_walsh8x4_x8_c(short *input, short *output, int pitch) {
}
#endif
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
static const double C1 = 0.995184726672197;
static const double C2 = 0.98078528040323;
static const double C3 = 0.956940335732209;
@@ -900,4 +899,3 @@ void vp8_short_fdct16x16_c(short *input, short *out, int pitch) {
}
vp8_clear_system_state(); // Make it simd safe : __asm emms;
}
-#endif
diff --git a/vp8/encoder/dct.h b/vp8/encoder/dct.h
index de882b1cf..180192bbb 100644
--- a/vp8/encoder/dct.h
+++ b/vp8/encoder/dct.h
@@ -31,12 +31,10 @@ void vp8_fht_c(short *input, short *output, int pitch,
TX_TYPE tx_type, int tx_dim);
#endif
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
#ifndef vp8_fdct_short16x16
#define vp8_fdct_short16x16 vp8_short_fdct16x16_c
#endif
extern prototype_fdct(vp8_fdct_short16x16);
-#endif
#ifndef vp8_fdct_short8x8
#define vp8_fdct_short8x8 vp8_short_fdct8x8_c
@@ -44,6 +42,7 @@ extern prototype_fdct(vp8_fdct_short16x16);
extern prototype_fdct(vp8_fdct_short8x8);
#ifndef vp8_fhaar_short2x2
+#define vp8_fdct_haar_short2x2 vp8_fhaar_short2x2
#define vp8_fhaar_short2x2 vp8_short_fhaar2x2_c
#endif
extern prototype_fdct(vp8_fhaar_short2x2);
@@ -81,9 +80,7 @@ extern prototype_fdct(vp8_short_walsh4x4_lossless_c);
typedef prototype_fdct(*vp8_fdct_fn_t);
typedef struct {
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
vp8_fdct_fn_t short16x16;
-#endif
vp8_fdct_fn_t short8x8;
vp8_fdct_fn_t haar_short2x2;
vp8_fdct_fn_t short4x4;
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 584570da9..9f7e81d0a 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -26,6 +26,7 @@
#include "vp8/common/findnearmv.h"
#include "vp8/common/reconintra.h"
#include "vp8/common/seg_common.h"
+#include "vpx_rtcd.h"
#include <stdio.h>
#include <math.h>
#include <limits.h>
@@ -52,9 +53,6 @@ int enc_debug = 0;
int mb_row_debug, mb_col_debug;
#endif
-extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd,
- TOKENEXTRA **t, int dry_run);
-
extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
extern void vp8_auto_select_speed(VP8_COMP *cpi);
extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
@@ -81,8 +79,6 @@ void vp8cx_encode_intra_super_block(VP8_COMP *cpi,
MACROBLOCK *x,
TOKENEXTRA **t, int mb_col);
static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x);
-extern void vp8_stuff_mb_8x8(VP8_COMP *cpi,
- MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
#ifdef MODE_STATS
unsigned int inter_y_modes[MB_MODE_COUNT];
@@ -309,9 +305,7 @@ static void build_activity_map(VP8_COMP *cpi) {
recon_yoffset += 16;
#endif
// Copy current mb to a buffer
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
- x->src.y_stride,
- x->thismb, 16);
+ vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
// measure activity
mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);
@@ -417,6 +411,18 @@ static void update_state(VP8_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
mbmi->mv[1].as_int = x->partition_info->bmi[15].second_mv.as_int;
}
+#if CONFIG_TX_SELECT
+ {
+ int segment_id = mbmi->segment_id;
+ if (!segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
+ get_segdata(xd, segment_id, SEG_LVL_EOB)) {
+ for (i = 0; i < NB_TXFM_MODES; i++) {
+ cpi->rd_tx_select_diff[i] += ctx->txfm_rd_diff[i];
+ }
+ }
+ }
+#endif
+
if (cpi->common.frame_type == KEY_FRAME) {
// Restore the coding modes to that held in the coding context
// if (mb_mode == B_PRED)
@@ -579,9 +585,7 @@ static void pick_mb_modes(VP8_COMP *cpi,
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
// Copy current MB to a work buffer
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
- x->src.y_stride,
- x->thismb, 16);
+ vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
@@ -606,9 +610,6 @@ static void pick_mb_modes(VP8_COMP *cpi,
x->active_ptr = cpi->active_map + map_index;
- /* force 4x4 transform for mode selection */
- mbmi->txfm_size = TX_4X4; // TODO IS this right??
-
#if CONFIG_SUPERBLOCKS
xd->mode_info_context->mbmi.encoded_as_sb = 0;
#endif
@@ -767,9 +768,7 @@ static void pick_sb_modes (VP8_COMP *cpi,
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
#if 0 // FIXME
/* Copy current MB to a work buffer */
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
- x->src.y_stride,
- x->thismb, 16);
+ vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
#endif
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
@@ -967,9 +966,7 @@ static void encode_sb(VP8_COMP *cpi,
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
// Copy current MB to a work buffer
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
- x->src.y_stride,
- x->thismb, 16);
+ vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp8_activity_masking(cpi, x);
@@ -1054,9 +1051,6 @@ static void encode_sb(VP8_COMP *cpi,
cpi->inter_zz_count++;
}
- // TODO Partitioning is broken!
- cpi->tplist[mb_row].stop = *tp;
-
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
x->src.y_buffer += 32;
@@ -1068,6 +1062,9 @@ static void encode_sb(VP8_COMP *cpi,
xd->mode_info_context += 2;
xd->prev_mode_info_context += 2;
+ (*tp)->Token = EOSB_TOKEN;
+ (*tp)++;
+ cpi->tplist[mb_row].stop = *tp;
break;
}
#endif
@@ -1089,6 +1086,9 @@ static void encode_sb(VP8_COMP *cpi,
assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
(xd->mode_info_context - cpi->common.mip));
#endif
+ (*tp)->Token = EOSB_TOKEN;
+ (*tp)++;
+ cpi->tplist[mb_row].stop = *tp;
}
// debug output
@@ -1369,12 +1369,10 @@ static void encode_frame_internal(VP8_COMP *cpi) {
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_zero(cpi->hybrid_coef_counts_8x8);
#endif
-#if CONFIG_TX16X16
vp8_zero(cpi->coef_counts_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_zero(cpi->hybrid_coef_counts_16x16);
#endif
-#endif
vp8cx_frame_init_quantizer(cpi);
@@ -1395,7 +1393,11 @@ static void encode_frame_internal(VP8_COMP *cpi) {
vpx_memset(cpi->rd_comp_pred_diff, 0, sizeof(cpi->rd_comp_pred_diff));
vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
vpx_memset(cpi->comp_pred_count, 0, sizeof(cpi->comp_pred_count));
-
+#if CONFIG_TX_SELECT
+ vpx_memset(cpi->txfm_count, 0, sizeof(cpi->txfm_count));
+ vpx_memset(cpi->txfm_count_8x8p, 0, sizeof(cpi->txfm_count_8x8p));
+ vpx_memset(cpi->rd_tx_select_diff, 0, sizeof(cpi->rd_tx_select_diff));
+#endif
{
struct vpx_usec_timer emr_timer;
vpx_usec_timer_start(&emr_timer);
@@ -1455,9 +1457,36 @@ static int check_dual_ref_flags(VP8_COMP *cpi) {
}
}
+#if CONFIG_TX_SELECT
+static void reset_skip_txfm_size(VP8_COMP *cpi, TX_SIZE txfm_max) {
+ VP8_COMMON *cm = &cpi->common;
+ int mb_row, mb_col, mis = cm->mode_info_stride;
+ MODE_INFO *mi, *mi_ptr = cm->mi;
+ MB_MODE_INFO *mbmi;
+ MACROBLOCK *x = &cpi->mb;
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++, mi_ptr += mis) {
+ mi = mi_ptr;
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++, mi++) {
+ mbmi = &mi->mbmi;
+ if (mbmi->txfm_size > txfm_max) {
+ int segment_id = mbmi->segment_id;
+ xd->mode_info_context = mi;
+ assert((segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
+ get_segdata(xd, segment_id, SEG_LVL_EOB) == 0) ||
+ (cm->mb_no_coeff_skip && mbmi->mb_skip_coeff));
+ mbmi->txfm_size = txfm_max;
+ }
+ }
+ }
+}
+#endif
+
void vp8_encode_frame(VP8_COMP *cpi) {
if (cpi->sf.RD) {
int i, frame_type, pred_type;
+ TXFM_MODE txfm_type;
/*
* This code does a single RD pass over the whole frame assuming
@@ -1465,9 +1494,8 @@ void vp8_encode_frame(VP8_COMP *cpi) {
* worked best for that type of frame in the past.
* It also predicts whether another coding mode would have worked
* better that this coding mode. If that is the case, it remembers
- * that for subsequent frames. If the difference is above a certain
- * threshold, it will actually re-encode the current frame using
- * that different coding mode.
+ * that for subsequent frames.
+ * It does the same analysis for transform size selection also.
*/
if (cpi->common.frame_type == KEY_FRAME)
frame_type = 0;
@@ -1478,6 +1506,7 @@ void vp8_encode_frame(VP8_COMP *cpi) {
else
frame_type = 2;
+ /* prediction (compound, single or hybrid) mode selection */
if (frame_type == 3)
pred_type = SINGLE_PREDICTION_ONLY;
else if (cpi->rd_prediction_type_threshes[frame_type][1] >
@@ -1492,15 +1521,91 @@ void vp8_encode_frame(VP8_COMP *cpi) {
else
pred_type = HYBRID_PREDICTION;
+ /* transform size (4x4, 8x8, 16x16 or select-per-mb) selection */
+#if CONFIG_LOSSLESS
+ if (cpi->oxcf.lossless) {
+ txfm_type = ONLY_4X4;
+ } else
+#endif
+#if CONFIG_TX_SELECT
+ /* FIXME (rbultje)
+ * this is a hack (no really), basically to work around the complete
+ * nonsense coefficient cost prediction for keyframes. The probabilities
+ * are reset to defaults, and thus we basically have no idea how expensive
+ * a 4x4 vs. 8x8 will really be. The result is that any estimate at which
+ * of the two is better is utterly bogus.
+ * I'd like to eventually remove this hack, but in order to do that, we
+ * need to move the frame reset code from the frame encode init to the
+ * bitstream write code, or alternatively keep a backup of the previous
+ * keyframe's probabilities as an estimate of what the current keyframe's
+ * coefficient cost distributions may look like. */
+ if (frame_type == 0) {
+ txfm_type = ALLOW_16X16;
+ } else
+#if 0
+ /* FIXME (rbultje)
+ * this code is disabled for a similar reason as the code above; the
+ * problem is that each time we "revert" to 4x4 only (or even 8x8 only),
+ * the coefficient probabilities for 16x16 (and 8x8) start lagging behind,
+ * thus leading to them lagging further behind and not being chosen for
+ * subsequent frames either. This is essentially a local minimum problem
+ * that we can probably fix by estimating real costs more closely within
+ * a frame, perhaps by re-calculating costs on-the-fly as frame encoding
+ * progresses. */
+ if (cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
+ cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] &&
+ cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
+ cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] &&
+ cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
+ cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
+ txfm_type = TX_MODE_SELECT;
+ } else if (cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
+ cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]
+ && cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
+ cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16]
+ ) {
+ txfm_type = ONLY_4X4;
+ } else if (cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
+ cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
+ txfm_type = ALLOW_16X16;
+ } else
+ txfm_type = ALLOW_8X8;
+#else
+ txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
+ cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
+ ALLOW_16X16 : TX_MODE_SELECT;
+#endif
+#else
+ txfm_type = ALLOW_16X16;
+#endif // CONFIG_TX_SELECT
+ cpi->common.txfm_mode = txfm_type;
+#if CONFIG_TX_SELECT
+ if (txfm_type != TX_MODE_SELECT) {
+ cpi->common.prob_tx[0] = 128;
+ cpi->common.prob_tx[1] = 128;
+ }
+#endif
cpi->common.comp_pred_mode = pred_type;
encode_frame_internal(cpi);
for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
- int diff = cpi->rd_comp_pred_diff[i] / cpi->common.MBs;
+ const int diff = cpi->rd_comp_pred_diff[i] / cpi->common.MBs;
cpi->rd_prediction_type_threshes[frame_type][i] += diff;
cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
}
+#if CONFIG_TX_SELECT
+ for (i = 0; i < NB_TXFM_MODES; ++i) {
+ int64_t pd = cpi->rd_tx_select_diff[i];
+ int diff;
+ if (i == TX_MODE_SELECT)
+ pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv, 2048 * (TX_SIZE_MAX - 1), 0);
+ diff = pd / cpi->common.MBs;
+ cpi->rd_tx_select_threshes[frame_type][i] += diff;
+ cpi->rd_tx_select_threshes[frame_type][i] /= 2;
+ }
+#endif
+
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
int single_count_zero = 0;
int comp_count_zero = 0;
@@ -1516,6 +1621,25 @@ void vp8_encode_frame(VP8_COMP *cpi) {
cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY;
}
}
+
+#if CONFIG_TX_SELECT
+ if (cpi->common.txfm_mode == TX_MODE_SELECT) {
+ const int count4x4 = cpi->txfm_count[TX_4X4] + cpi->txfm_count_8x8p[TX_4X4];
+ const int count8x8 = cpi->txfm_count[TX_8X8];
+ const int count8x8_8x8p = cpi->txfm_count_8x8p[TX_8X8];
+ const int count16x16 = cpi->txfm_count[TX_16X16];
+
+ if (count4x4 == 0 && count16x16 == 0) {
+ cpi->common.txfm_mode = ALLOW_8X8;
+ reset_skip_txfm_size(cpi, TX_8X8);
+ } else if (count8x8 == 0 && count16x16 == 0 && count8x8_8x8p == 0) {
+ cpi->common.txfm_mode = ONLY_4X4;
+ reset_skip_txfm_size(cpi, TX_4X4);
+ } else if (count8x8 == 0 && count4x4 == 0) {
+ cpi->common.txfm_mode = ALLOW_16X16;
+ }
+ }
+#endif
} else {
encode_frame_internal(cpi);
}
@@ -1718,7 +1842,7 @@ static void update_sb_skip_coeff_state(VP8_COMP *cpi,
if (skip[n]) {
x->e_mbd.above_context = &ta[n];
x->e_mbd.left_context = &tl[n];
- vp8_stuff_mb_8x8(cpi, &x->e_mbd, tp, 0);
+ vp8_stuff_mb(cpi, &x->e_mbd, tp, 0);
} else {
if (n_tokens[n]) {
memcpy(*tp, tokens[n], sizeof(*t[0]) * n_tokens[n]);
@@ -1755,20 +1879,8 @@ void vp8cx_encode_intra_super_block(VP8_COMP *cpi,
vp8_update_zbin_extra(cpi, x);
}
- /* test code: set transform size based on mode selection */
- if (cpi->common.txfm_mode == ALLOW_8X8) {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
- x->e_mbd.mode_info_context[1].mbmi.txfm_size = TX_8X8;
- x->e_mbd.mode_info_context[cm->mode_info_stride].mbmi.txfm_size = TX_8X8;
- x->e_mbd.mode_info_context[cm->mode_info_stride+1].mbmi.txfm_size = TX_8X8;
- cpi->t8x8_count++;
- } else {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
- cpi->t4x4_count++;
- }
-
- RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_sby_s)(&x->e_mbd);
- RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_sbuv_s)(&x->e_mbd);
+ vp8_build_intra_predictors_sby_s(&x->e_mbd);
+ vp8_build_intra_predictors_sbuv_s(&x->e_mbd);
assert(x->e_mbd.mode_info_context->mbmi.txfm_size == TX_8X8);
for (n = 0; n < 4; n++)
@@ -1790,19 +1902,15 @@ void vp8cx_encode_intra_super_block(VP8_COMP *cpi,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
dst_uv_stride);
- vp8_transform_intra_mby_8x8(x);
- vp8_transform_mbuv_8x8(x);
- vp8_quantize_mby_8x8(x);
- vp8_quantize_mbuv_8x8(x);
+ vp8_transform_mb_8x8(x);
+ vp8_quantize_mb_8x8(x);
if (x->optimize) {
vp8_optimize_mby_8x8(x, rtcd);
vp8_optimize_mbuv_8x8(x, rtcd);
}
- vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- vp8_recon_mby_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
- dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
- vp8_recon_mbuv_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
+ vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ vp8_recon_mby_s_c(&x->e_mbd, dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
+ vp8_recon_mbuv_s_c(&x->e_mbd,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride);
@@ -1835,23 +1943,6 @@ void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
vp8_update_zbin_extra(cpi, x);
}
- /* test code: set transform size based on mode selection */
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (mbmi->mode <= TM_PRED) {
- mbmi->txfm_size = TX_16X16;
- cpi->t16x16_count++;
- }
- else
-#endif
- if (cpi->common.txfm_mode == ALLOW_8X8
- && mbmi->mode != B_PRED) {
- mbmi->txfm_size = TX_8X8;
- cpi->t8x8_count++;
- } else {
- mbmi->txfm_size = TX_4X4;
- cpi->t4x4_count++;
- }
-
if (mbmi->mode == I8X8_PRED) {
vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
@@ -1865,9 +1956,34 @@ void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
}
if (output_enabled) {
+#if CONFIG_TX_SELECT
+ int segment_id = mbmi->segment_id;
+#endif
+
// Tokenize
sum_intra_stats(cpi, x);
vp8_tokenize_mb(cpi, &x->e_mbd, t, 0);
+
+#if CONFIG_TX_SELECT
+ if (cpi->common.txfm_mode == TX_MODE_SELECT &&
+ !((cpi->common.mb_no_coeff_skip && mbmi->mb_skip_coeff) ||
+ (segfeature_active(&x->e_mbd, segment_id, SEG_LVL_EOB) &&
+ get_segdata(&x->e_mbd, segment_id, SEG_LVL_EOB) == 0))) {
+ if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED) {
+ cpi->txfm_count[mbmi->txfm_size]++;
+ } else if (mbmi->mode == I8X8_PRED) {
+ cpi->txfm_count_8x8p[mbmi->txfm_size]++;
+ }
+ } else
+#endif
+ if (cpi->common.txfm_mode >= ALLOW_16X16 && mbmi->mode <= TM_PRED) {
+ mbmi->txfm_size = TX_16X16;
+ } else
+ if (cpi->common.txfm_mode >= ALLOW_8X8 && mbmi->mode != B_PRED) {
+ mbmi->txfm_size = TX_8X8;
+ } else {
+ mbmi->txfm_size = TX_4X4;
+ }
}
#if CONFIG_NEWBESTREFMV
else
@@ -1932,24 +2048,6 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
ref_pred_flag = ((mbmi->ref_frame == get_pred_ref(cm, xd)));
set_pred_flag(xd, PRED_REF, ref_pred_flag);
- /* test code: set transform size based on mode selection */
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (mbmi->mode <= TM_PRED || mbmi->mode == NEWMV || mbmi->mode == ZEROMV ||
- mbmi->mode == NEARMV || mbmi->mode == NEARESTMV) {
- mbmi->txfm_size = TX_16X16;
- cpi->t16x16_count++;
- } else
-#endif
- if (cpi->common.txfm_mode == ALLOW_8X8
- && mbmi->mode != B_PRED
- && mbmi->mode != SPLITMV) {
- mbmi->txfm_size = TX_8X8;
- cpi->t8x8_count++;
- } else {
- mbmi->txfm_size = TX_4X4;
- cpi->t4x4_count++;
- }
-
if (mbmi->ref_frame == INTRA_FRAME) {
if (mbmi->mode == B_PRED) {
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
@@ -2056,6 +2154,32 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
cpi->skip_false_count[mb_skip_context]++;
}
}
+
+ if (output_enabled) {
+#if CONFIG_TX_SELECT
+ int segment_id = mbmi->segment_id;
+ if (cpi->common.txfm_mode == TX_MODE_SELECT &&
+ !((cpi->common.mb_no_coeff_skip && mbmi->mb_skip_coeff) ||
+ (segfeature_active(&x->e_mbd, segment_id, SEG_LVL_EOB) &&
+ get_segdata(&x->e_mbd, segment_id, SEG_LVL_EOB) == 0))) {
+ if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
+ mbmi->mode != SPLITMV) {
+ cpi->txfm_count[mbmi->txfm_size]++;
+ } else if (mbmi->mode == I8X8_PRED) {
+ cpi->txfm_count_8x8p[mbmi->txfm_size]++;
+ }
+ } else
+#endif
+ if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
+ mbmi->mode != SPLITMV && cpi->common.txfm_mode >= ALLOW_16X16) {
+ mbmi->txfm_size = TX_16X16;
+ } else if (mbmi->mode != B_PRED && mbmi->mode != SPLITMV &&
+ cpi->common.txfm_mode >= ALLOW_8X8) {
+ mbmi->txfm_size = TX_8X8;
+ } else {
+ mbmi->txfm_size = TX_4X4;
+ }
+ }
}
#if CONFIG_SUPERBLOCKS
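
The counting blocks added above (intra and inter) gate on the same condition before feeding the txfm_count statistics. A sketch of that guard as a standalone predicate, under a hypothetical name:

/* Hypothetical helper: a macroblock contributes to the txfm_count[]
 * statistics only when it can actually carry coefficients. */
static int mb_counts_for_tx_select(int mb_no_coeff_skip, int mb_skip_coeff,
                                   int seg_eob_active, int seg_eob) {
  if (mb_no_coeff_skip && mb_skip_coeff)
    return 0;                        /* MB signalled as coefficient-skipped */
  if (seg_eob_active && seg_eob == 0)
    return 0;                        /* segment forces a zero EOB */
  return 1;
}

Skipped macroblocks carry no coefficients, so counting them would skew the probabilities that TX_MODE_SELECT later codes per MB.
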
@@ -2119,20 +2243,9 @@ void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
get_pred_ref(cm, xd)));
set_pred_flag(xd, PRED_REF, ref_pred_flag);
- /* test code: set transform size based on mode selection */
- if (cpi->common.txfm_mode == ALLOW_8X8
- && x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
- cpi->t8x8_count++;
- } else {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
- cpi->t4x4_count++;
- }
-
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
- RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_sby_s)(&x->e_mbd);
- RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_sbuv_s)(&x->e_mbd);
+ vp8_build_intra_predictors_sby_s(&x->e_mbd);
+ vp8_build_intra_predictors_sbuv_s(&x->e_mbd);
} else {
int ref_fb_idx;
@@ -2187,23 +2300,16 @@ void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
dst_uv_stride);
- if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
- vp8_transform_intra_mby_8x8(x);
- } else {
- vp8_transform_mby_8x8(x);
- }
- vp8_transform_mbuv_8x8(x);
- vp8_quantize_mby_8x8(x);
- vp8_quantize_mbuv_8x8(x);
+ vp8_transform_mb_8x8(x);
+ vp8_quantize_mb_8x8(x);
if (x->optimize) {
vp8_optimize_mby_8x8(x, rtcd);
vp8_optimize_mbuv_8x8(x, rtcd);
}
- vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- vp8_recon_mby_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
+ vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ vp8_recon_mby_s_c( &x->e_mbd,
dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
- vp8_recon_mbuv_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
+ vp8_recon_mbuv_s_c(&x->e_mbd,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride);
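
Both superblock paths in this file step through four 16x16 quadrants with the same x_idx/y_idx arithmetic; a compilable sketch of that addressing, with illustrative helper names:

/* n = 0..3 walks the four 16x16 macroblocks of a 32x32 superblock in
 * raster order. */
static unsigned char *sb_luma_quadrant(unsigned char *dst, int stride, int n) {
  const int x_idx = n & 1, y_idx = n >> 1;
  return dst + x_idx * 16 + y_idx * 16 * stride;
}

/* Chroma is subsampled 2x in each direction, hence the 8-pixel steps. */
static unsigned char *sb_chroma_quadrant(unsigned char *dst, int stride, int n) {
  const int x_idx = n & 1, y_idx = n >> 1;
  return dst + x_idx * 8 + y_idx * 8 * stride;
}
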
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index d8757c531..703a1015e 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include "vpx_ports/config.h"
#include "vp8/common/idct.h"
#include "quantize.h"
@@ -16,11 +15,10 @@
#include "vp8/common/reconintra4x4.h"
#include "encodemb.h"
#include "vp8/common/invtrans.h"
-#include "vp8/common/recon.h"
#include "dct.h"
#include "vp8/common/g_common.h"
#include "encodeintra.h"
-
+#include "vpx_rtcd.h"
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
@@ -28,10 +26,6 @@
#define IF_RTCD(x) NULL
#endif
-#if CONFIG_HYBRIDTRANSFORM
-extern void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d);
-#endif
-
int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
int i;
int intra_pred_var = 0;
@@ -64,45 +58,34 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
BLOCKD *b = &x->e_mbd.block[ib];
BLOCK *be = &x->block[ib];
-#if CONFIG_HYBRIDTRANSFORM
- int QIndex = x->q_index;
- int active_ht = (QIndex < ACTIVE_HT);
-#endif
-
-
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
- RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
- (b, b->bmi.as_mode.first, b->predictor);
+ vp8_intra4x4_predict(b, b->bmi.as_mode.first, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
- RECON_INVOKE(&rtcd->common->recon, comp_intra4x4_predict)
- (b, b->bmi.as_mode.first, b->bmi.as_mode.second, b->predictor);
+ vp8_comp_intra4x4_predict(b, b->bmi.as_mode.first, b->bmi.as_mode.second,
+ b->predictor);
}
#endif
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
#if CONFIG_HYBRIDTRANSFORM
- if (active_ht) {
- b->bmi.as_mode.test = b->bmi.as_mode.first;
+ if (x->q_index < ACTIVE_HT) {
txfm_map(b, b->bmi.as_mode.first);
vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
- vp8_ht_quantize_b(be, b);
- vp8_inverse_htransform_b(IF_RTCD(&rtcd->common->idct), b, 32) ;
- } else {
+ vp8_ht_quantize_b_4x4(be, b);
+ vp8_ihtllm_c(b->dqcoeff, b->diff, 32, b->bmi.as_mode.tx_type, 4);
+ } else
+#endif
+ {
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ;
- x->quantize_b(be, b) ;
- vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32) ;
+ x->quantize_b_4x4(be, b);
+ vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
}
-#else
- x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b(be, b);
- vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
-#endif
- RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+ vp8_recon_b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) {
@@ -120,139 +103,101 @@ void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) {
}
void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
+ MACROBLOCKD *xd = &x->e_mbd;
BLOCK *b = &x->block[0];
-
- int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
+ TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
#if CONFIG_HYBRIDTRANSFORM16X16
- TX_TYPE txfm_type = x->e_mbd.mode_info_context->bmi[0].as_mode.tx_type;
+ TX_TYPE txfm_type = xd->mode_info_context->bmi[0].as_mode.tx_type;
#endif
#if CONFIG_COMP_INTRA_PRED
- if (x->e_mbd.mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
+ if (xd->mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
#endif
- RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mby)(&x->e_mbd);
+ vp8_build_intra_predictors_mby(xd);
#if CONFIG_COMP_INTRA_PRED
else
- RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mby)(&x->e_mbd);
+ vp8_build_comp_intra_predictors_mby(xd);
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
+ ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src),
+ xd->predictor, b->src_stride);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type == TX_16X16)
+ if (tx_size == TX_16X16) {
#if CONFIG_HYBRIDTRANSFORM16X16
- {
- if ((x->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED) &&
+ if ((xd->mode_info_context->mbmi.mode < I8X8_PRED) &&
(x->q_index < ACTIVE_HT16)) {
- BLOCKD *bd = &x->e_mbd.block[0];
- txfm_map(bd, pred_mode_conv(x->e_mbd.mode_info_context->mbmi.mode));
+ BLOCKD *bd = &xd->block[0];
+ txfm_map(bd, pred_mode_conv(xd->mode_info_context->mbmi.mode));
txfm_type = bd->bmi.as_mode.tx_type;
vp8_fht_c(b->src_diff, b->coeff, 32, txfm_type, 16);
+ vp8_quantize_mby_16x16(x);
+ if (x->optimize)
+ vp8_optimize_mby_16x16(x, rtcd);
+ vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, txfm_type, 16);
} else
- vp8_transform_intra_mby_16x16(x);
- }
-#else
- vp8_transform_intra_mby_16x16(x);
-#endif
- else
-#endif
- if (tx_type == TX_8X8)
- vp8_transform_intra_mby_8x8(x);
- else
- vp8_transform_intra_mby(x);
-
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type == TX_16X16)
- vp8_quantize_mby_16x16(x);
- else
#endif
- if (tx_type == TX_8X8)
+ {
+ vp8_transform_mby_16x16(x);
+ vp8_quantize_mby_16x16(x);
+ if (x->optimize)
+ vp8_optimize_mby_16x16(x, rtcd);
+ vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), xd);
+ }
+ } else if (tx_size == TX_8X8) {
+ vp8_transform_mby_8x8(x);
vp8_quantize_mby_8x8(x);
- else
- vp8_quantize_mby(x);
-
- if (x->optimize) {
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type == TX_16X16)
- vp8_optimize_mby_16x16(x, rtcd);
- else
-#endif
- if (tx_type == TX_8X8)
+ if (x->optimize)
vp8_optimize_mby_8x8(x, rtcd);
- else
- vp8_optimize_mby(x, rtcd);
- }
-
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type == TX_16X16)
-#if CONFIG_HYBRIDTRANSFORM16X16
- {
- if ((x->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED) &&
- (x->q_index < ACTIVE_HT16)) {
- BLOCKD *bd = &x->e_mbd.block[0];
- vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, txfm_type, 16);
- } else
- vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), xd);
+ } else {
+ vp8_transform_mby_4x4(x);
+ vp8_quantize_mby_4x4(x);
+ if (x->optimize)
+ vp8_optimize_mby_4x4(x, rtcd);
+ vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), xd);
}
-#else
- vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-#endif
- else
-#endif
- if (tx_type == TX_8X8)
- vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- else
- vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
- RECON_INVOKE(&rtcd->common->recon, recon_mby)
- (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
+ vp8_recon_mby(xd);
}
void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
- int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type == TX_16X16) tx_type = TX_8X8; // 16x16 for U and V should default to 8x8 behavior.
-#endif
+ MACROBLOCKD *xd = &x->e_mbd;
+ TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
+
#if CONFIG_COMP_INTRA_PRED
- if (x->e_mbd.mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
+ if (xd->mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
- RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mbuv)(&x->e_mbd);
+ vp8_build_intra_predictors_mbuv(xd);
#if CONFIG_COMP_INTRA_PRED
} else {
- RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mbuv)(&x->e_mbd);
+ vp8_build_comp_intra_predictors_mbuv(xd);
}
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
- if (tx_type == TX_8X8)
+ ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff,
+ x->src.u_buffer, x->src.v_buffer,
+ xd->predictor, x->src.uv_stride);
+ if (tx_size == TX_4X4) {
+ vp8_transform_mbuv_4x4(x);
+ vp8_quantize_mbuv_4x4(x);
+ if (x->optimize)
+ vp8_optimize_mbuv_4x4(x, rtcd);
+ vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), xd);
+ } else /* 16x16 or 8x8 */ {
vp8_transform_mbuv_8x8(x);
- else
- vp8_transform_mbuv(x);
-
- if (tx_type == TX_8X8)
vp8_quantize_mbuv_8x8(x);
- else
- vp8_quantize_mbuv(x);
-
- if (x->optimize) {
- if (tx_type == TX_8X8)
+ if (x->optimize)
vp8_optimize_mbuv_8x8(x, rtcd);
- else
- vp8_optimize_mbuv(x, rtcd);
+ vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), xd);
}
- if (tx_type == TX_8X8)
- vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- else
- vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
- vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
+ vp8_recon_intra_mbuv(xd);
}
void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
MACROBLOCK *x, int ib) {
- BLOCKD *b = &x->e_mbd.block[ib];
+ MACROBLOCKD *xd = &x->e_mbd;
+ BLOCKD *b = &xd->block[ib];
BLOCK *be = &x->block[ib];
const int iblock[4] = {0, 1, 4, 5};
int i;
@@ -260,17 +205,15 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
- RECON_INVOKE(&rtcd->common->recon, intra8x8_predict)
- (b, b->bmi.as_mode.first, b->predictor);
+ vp8_intra8x8_predict(b, b->bmi.as_mode.first, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
- RECON_INVOKE(&rtcd->common->recon, comp_intra8x8_predict)
- (b, b->bmi.as_mode.first, b->bmi.as_mode.second, b->predictor);
+ vp8_comp_intra8x8_predict(b, b->bmi.as_mode.first, b->bmi.as_mode.second,
+ b->predictor);
}
#endif
- {
- MACROBLOCKD *xd = &x->e_mbd;
+ if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
int idx = (ib & 0x02) ? (ib + 2) : ib;
// generate residual blocks
@@ -288,14 +231,23 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
x->quantize_b_8x8(x->block + idx, xd->block + idx);
vp8_idct_idct8(xd->block[idx].dqcoeff, xd->block[ib].diff, 32);
#endif
-
- // reconstruct submacroblock
+ } else {
for (i = 0; i < 4; i++) {
b = &xd->block[ib + iblock[i]];
- vp8_recon_b_c(b->predictor, b->diff, *(b->base_dst) + b->dst,
- b->dst_stride);
+ be = &x->block[ib + iblock[i]];
+ ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
+ x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
+ x->quantize_b_4x4(be, b);
+ vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
}
}
+
+ // reconstruct submacroblock
+ for (i = 0; i < 4; i++) {
+ b = &xd->block[ib + iblock[i]];
+ vp8_recon_b_c(b->predictor, b->diff, *(b->base_dst) + b->dst,
+ b->dst_stride);
+ }
}
void vp8_encode_intra8x8mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
@@ -316,32 +268,26 @@ void vp8_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,
#if CONFIG_COMP_INTRA_PRED
if (second == -1) {
#endif
- RECON_INVOKE(&rtcd->common->recon, intra_uv4x4_predict)
- (b, mode, b->predictor);
+ vp8_intra_uv4x4_predict(b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
- RECON_INVOKE(&rtcd->common->recon, comp_intra_uv4x4_predict)
- (b, mode, second, b->predictor);
+ vp8_comp_intra_uv4x4_predict(b, mode, second, b->predictor);
}
#endif
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 8);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 16);
+ x->quantize_b_4x4(be, b);
+ vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 16);
- x->quantize_b(be, b);
-
- vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 16);
-
- RECON_INVOKE(&rtcd->common->recon, recon_uv)(b->predictor,
- b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+ vp8_recon_uv_b_c(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
-
-
void vp8_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
int i, ib, mode, second;
BLOCKD *b;
+
for (i = 0; i < 4; i++) {
ib = vp8_i8x8_block[i];
b = &x->e_mbd.block[ib];
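
Every rewritten path in encodeintra.c now follows one shape: predict, subtract, then a transform/quantize/optimize/inverse sequence chosen once from mbmi.txfm_size. A compilable sketch of that shape; the stages are stubs, with the real 8x8 calls noted in comments:

typedef enum { TX_4X4, TX_8X8, TX_16X16 } TX_SIZE;

static void fwd_txfm(TX_SIZE t) { (void)t; }  /* e.g. vp8_transform_mby_8x8 */
static void quant(TX_SIZE t)    { (void)t; }  /* e.g. vp8_quantize_mby_8x8 */
static void trellis(TX_SIZE t)  { (void)t; }  /* e.g. vp8_optimize_mby_8x8 */
static void inv_txfm(TX_SIZE t) { (void)t; }  /* e.g. vp8_inverse_transform_mby_8x8 */

/* One pipeline per transform size, selected once, replacing the previous
 * per-stage if/else ladders. */
static void encode_y_plane(TX_SIZE tx_size, int optimize) {
  fwd_txfm(tx_size);
  quant(tx_size);
  if (optimize)
    trellis(tx_size);
  inv_txfm(tx_size);
}
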
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index 66b9fb970..50de2f2c0 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -8,14 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include "vpx_ports/config.h"
#include "encodemb.h"
#include "vp8/common/reconinter.h"
#include "quantize.h"
#include "tokenize.h"
#include "vp8/common/invtrans.h"
-#include "vp8/common/recon.h"
#include "vp8/common/reconintra.h"
#include "dct.h"
#include "vpx_mem/vpx_mem.h"
@@ -57,6 +55,7 @@ void vp8_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch) {
unsigned char *pred_ptr = bd->predictor;
int src_stride = be->src_stride;
int r, c;
+
for (r = 0; r < 8; r++) {
for (c = 0; c < 8; c++) {
diff_ptr[c] = src_ptr[c] - pred_ptr[c];
@@ -73,7 +72,6 @@ void vp8_subtract_mbuv_s_c(short *diff, const unsigned char *usrc,
const unsigned char *vpred, int dst_stride) {
short *udiff = diff + 256;
short *vdiff = diff + 320;
-
int r, c;
for (r = 0; r < 8; r++) {
@@ -132,7 +130,7 @@ static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
}
-static void build_dcblock(MACROBLOCK *x) {
+static void build_dcblock_4x4(MACROBLOCK *x) {
short *src_diff_ptr = &x->src_diff[384];
int i;
@@ -140,73 +138,8 @@ static void build_dcblock(MACROBLOCK *x) {
src_diff_ptr[i] = x->coeff[i * 16];
}
}
-void vp8_build_dcblock_8x8(MACROBLOCK *x) {
- short *src_diff_ptr = &x->src_diff[384];
- int i;
- for (i = 0; i < 16; i++) {
- src_diff_ptr[i] = 0;
- }
- src_diff_ptr[0] = x->coeff[0 * 16];
- src_diff_ptr[1] = x->coeff[4 * 16];
- src_diff_ptr[4] = x->coeff[8 * 16];
- src_diff_ptr[8] = x->coeff[12 * 16];
-}
-
-void vp8_transform_mbuv(MACROBLOCK *x) {
- int i;
-
- for (i = 16; i < 24; i += 2) {
- x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 16);
- }
-}
-
-
-void vp8_transform_intra_mby(MACROBLOCK *x) {
- int i;
-
- for (i = 0; i < 16; i += 2) {
- x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 32);
- }
-
- // build dc block from 16 y dc values
- build_dcblock(x);
-
- // do 2nd order transform on the dc block
- x->short_walsh4x4(&x->block[24].src_diff[0],
- &x->block[24].coeff[0], 8);
-
-}
-
-
-static void transform_mb(MACROBLOCK *x) {
- int i;
- MB_PREDICTION_MODE mode = x->e_mbd.mode_info_context->mbmi.mode;
-
- for (i = 0; i < 16; i += 2) {
- x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 32);
- }
-
- // build dc block from 16 y dc values
- if (mode != SPLITMV)
- build_dcblock(x);
-
- for (i = 16; i < 24; i += 2) {
- x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 16);
- }
-
- // do 2nd order transform on the dc block
- if (mode != SPLITMV)
- x->short_walsh4x4(&x->block[24].src_diff[0],
- &x->block[24].coeff[0], 8);
-
-}
-
-static void transform_mby(MACROBLOCK *x) {
+void vp8_transform_mby_4x4(MACROBLOCK *x) {
int i;
for (i = 0; i < 16; i += 2) {
@@ -214,75 +147,46 @@ static void transform_mby(MACROBLOCK *x) {
&x->block[i].coeff[0], 32);
}
- // build dc block from 16 y dc values
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
- build_dcblock(x);
+ // build dc block from 16 y dc values
+ build_dcblock_4x4(x);
+
+ // do 2nd order transform on the dc block
x->short_walsh4x4(&x->block[24].src_diff[0],
&x->block[24].coeff[0], 8);
}
}
-void vp8_transform_mbuv_8x8(MACROBLOCK *x) {
+void vp8_transform_mbuv_4x4(MACROBLOCK *x) {
int i;
- for (i = 16; i < 24; i += 4) {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
+ for (i = 16; i < 24; i += 2) {
+ x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
&x->block[i].coeff[0], 16);
}
}
-
-void vp8_transform_intra_mby_8x8(MACROBLOCK *x) { // changed
- int i;
- for (i = 0; i < 9; i += 8) {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 32);
- }
- for (i = 2; i < 11; i += 8) {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i + 2].coeff[0], 32);
- }
- // build dc block from 16 y dc values
- vp8_build_dcblock_8x8(x);
- // vp8_build_dcblock(x);
-
- // do 2nd order transform on the dc block
- x->short_fhaar2x2(&x->block[24].src_diff[0],
- &x->block[24].coeff[0], 8);
-
+static void transform_mb_4x4(MACROBLOCK *x) {
+ vp8_transform_mby_4x4(x);
+ vp8_transform_mbuv_4x4(x);
}
-
-void vp8_transform_mb_8x8(MACROBLOCK *x) {
+void vp8_build_dcblock_8x8(MACROBLOCK *x) {
+ int16_t *src_diff_ptr = x->block[24].src_diff;
int i;
- MB_PREDICTION_MODE mode = x->e_mbd.mode_info_context->mbmi.mode;
-
- for (i = 0; i < 9; i += 8) {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 32);
- }
- for (i = 2; i < 11; i += 8) {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i + 2].coeff[0], 32);
- }
- // build dc block from 16 y dc values
- if (mode != B_PRED && mode != SPLITMV)
- vp8_build_dcblock_8x8(x);
- // vp8_build_dcblock(x);
- for (i = 16; i < 24; i += 4) {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 16);
+ for (i = 0; i < 16; i++) {
+ src_diff_ptr[i] = 0;
}
-
- // do 2nd order transform on the dc block
- if (mode != B_PRED && mode != SPLITMV)
- x->short_fhaar2x2(&x->block[24].src_diff[0],
- &x->block[24].coeff[0], 8);
+ src_diff_ptr[0] = x->coeff[0 * 16];
+ src_diff_ptr[1] = x->coeff[4 * 16];
+ src_diff_ptr[4] = x->coeff[8 * 16];
+ src_diff_ptr[8] = x->coeff[12 * 16];
}
void vp8_transform_mby_8x8(MACROBLOCK *x) {
int i;
+
for (i = 0; i < 9; i += 8) {
x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
&x->block[i].coeff[0], 32);
@@ -291,51 +195,41 @@ void vp8_transform_mby_8x8(MACROBLOCK *x) {
x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
&x->block[i + 2].coeff[0], 32);
}
- // build dc block from 16 y dc values
+
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
- // vp8_build_dcblock(x);
+ // build dc block from 2x2 y dc values
vp8_build_dcblock_8x8(x);
+
+ // do 2nd order transform on the dc block
x->short_fhaar2x2(&x->block[24].src_diff[0],
&x->block[24].coeff[0], 8);
}
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
-void vp8_transform_mbuv_16x16(MACROBLOCK *x) {
+void vp8_transform_mbuv_8x8(MACROBLOCK *x) {
int i;
- vp8_clear_system_state();
- // Default to the 8x8
- for (i = 16; i < 24; i += 4)
+ for (i = 16; i < 24; i += 4) {
x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 16);
+ &x->block[i].coeff[0], 16);
+ }
}
-
-void vp8_transform_intra_mby_16x16(MACROBLOCK *x) {
- vp8_clear_system_state();
- x->vp8_short_fdct16x16(&x->block[0].src_diff[0],
- &x->block[0].coeff[0], 32);
+void vp8_transform_mb_8x8(MACROBLOCK *x) {
+ vp8_transform_mby_8x8(x);
+ vp8_transform_mbuv_8x8(x);
}
-
-void vp8_transform_mb_16x16(MACROBLOCK *x) {
- int i;
+void vp8_transform_mby_16x16(MACROBLOCK *x) {
vp8_clear_system_state();
x->vp8_short_fdct16x16(&x->block[0].src_diff[0],
- &x->block[0].coeff[0], 32);
-
- for (i = 16; i < 24; i += 4) {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 16);
- }
+ &x->block[0].coeff[0], 32);
}
-void vp8_transform_mby_16x16(MACROBLOCK *x) {
- vp8_clear_system_state();
- x->vp8_short_fdct16x16(&x->block[0].src_diff[0], &x->block[0].coeff[0], 32);
+void vp8_transform_mb_16x16(MACROBLOCK *x) {
+ vp8_transform_mby_16x16(x);
+ vp8_transform_mbuv_8x8(x);
}
-#endif
#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
#define RDTRUNC_8x8(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
@@ -371,7 +265,7 @@ static const int plane_rd_mult[4] = {
}\
}
-void optimize_b(MACROBLOCK *mb, int i, int type,
+void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
const VP8_ENCODER_RTCD *rtcd, int tx_type) {
BLOCK *b;
@@ -448,7 +342,7 @@ void optimize_b(MACROBLOCK *mb, int i, int type,
coeff_ptr = b->coeff;
qcoeff_ptr = d->qcoeff;
dqcoeff_ptr = d->dqcoeff;
- i0 = !type;
+ i0 = (type == PLANE_TYPE_Y_NO_DC);
eob = d->eob;
/* Now set up a Viterbi trellis to evaluate alternative roundings. */
@@ -622,7 +516,7 @@ fall between -65 and +65.
**************************************************************************/
#define SUM_2ND_COEFF_THRESH 65
-static void check_reset_2nd_coeffs(MACROBLOCKD *xd, int type,
+static void check_reset_2nd_coeffs(MACROBLOCKD *xd,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) {
int sum = 0;
int i;
@@ -645,11 +539,12 @@ static void check_reset_2nd_coeffs(MACROBLOCKD *xd, int type,
bd->dqcoeff[rc] = 0;
}
bd->eob = 0;
- *a = *l = (bd->eob != !type);
+ *a = *l = (bd->eob != 0);
}
}
+
#define SUM_2ND_COEFF_THRESH_8X8 32
-static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *xd, int type,
+static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *xd,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) {
int sum = 0;
BLOCKD *bd = &xd->block[24];
@@ -674,62 +569,20 @@ static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *xd, int type,
bd->qcoeff[8] = 0;
bd->dqcoeff[8] = 0;
bd->eob = 0;
- *a = *l = (bd->eob != !type);
- }
-}
-
-static void optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
- int b;
- int type;
- int has_2nd_order;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
- MB_PREDICTION_MODE mode = x->e_mbd.mode_info_context->mbmi.mode;
-
- vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
-
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
-
- has_2nd_order = (mode != B_PRED && mode != I8X8_PRED && mode != SPLITMV);
- type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
-
- for (b = 0; b < 16; b++) {
- optimize_b(x, b, type,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
- }
-
- for (b = 16; b < 24; b++) {
- optimize_b(x, b, PLANE_TYPE_UV,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
- }
-
- if (has_2nd_order) {
- b = 24;
- optimize_b(x, b, PLANE_TYPE_Y2,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
- check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
- ta + vp8_block2above[b], tl + vp8_block2left[b]);
+ *a = *l = (bd->eob != 0);
}
}
-
-void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+void vp8_optimize_mby_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
int b;
- int type;
+ PLANE_TYPE type;
int has_2nd_order;
-
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
MB_PREDICTION_MODE mode = x->e_mbd.mode_info_context->mbmi.mode;
- if (!x->e_mbd.above_context)
- return;
-
- if (!x->e_mbd.left_context)
+ if (!x->e_mbd.above_context || !x->e_mbd.left_context)
return;
vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
@@ -746,26 +599,22 @@ void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
}
-
if (has_2nd_order) {
b = 24;
optimize_b(x, b, PLANE_TYPE_Y2,
ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
- check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
+ check_reset_2nd_coeffs(&x->e_mbd,
ta + vp8_block2above[b], tl + vp8_block2left[b]);
}
}
-void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+void vp8_optimize_mbuv_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
int b;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
- if (!x->e_mbd.above_context)
- return;
-
- if (!x->e_mbd.left_context)
+ if (!x->e_mbd.above_context || !x->e_mbd.left_context)
return;
vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
@@ -780,55 +629,19 @@ void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
}
}
-void optimize_mb_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
- int b;
- int type;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
-
- vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
-
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
-
- type = 0;
- for (b = 0; b < 16; b += 4) {
- optimize_b(x, b, type,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
- rtcd, TX_8X8);
- *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
- }
-
- for (b = 16; b < 24; b += 4) {
- optimize_b(x, b, PLANE_TYPE_UV,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
- rtcd, TX_8X8);
- *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
- }
-
- // 8x8 always have 2nd roder haar block
- check_reset_8x8_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
- ta + vp8_block2above_8x8[24], tl + vp8_block2left_8x8[24]);
-
+static void optimize_mb_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+ vp8_optimize_mby_4x4(x, rtcd);
+ vp8_optimize_mbuv_4x4(x, rtcd);
}
void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
int b;
- int type;
-
+ PLANE_TYPE type;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
-
- if (!x->e_mbd.above_context)
- return;
-
- if (!x->e_mbd.left_context)
+ if (!x->e_mbd.above_context || !x->e_mbd.left_context)
return;
vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
@@ -836,7 +649,7 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
- type = 0;
+ type = PLANE_TYPE_Y_NO_DC;
for (b = 0; b < 16; b += 4) {
optimize_b(x, b, type,
ta + vp8_block2above[b], tl + vp8_block2left[b],
@@ -844,10 +657,10 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
*(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
*(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
}
+
// 8x8 always has a 2nd order haar block
- check_reset_8x8_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
+ check_reset_8x8_2nd_coeffs(&x->e_mbd,
ta + vp8_block2above_8x8[24], tl + vp8_block2left_8x8[24]);
-
}
void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
@@ -856,10 +669,7 @@ void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
- if (!x->e_mbd.above_context)
- return;
-
- if (!x->e_mbd.left_context)
+ if (!x->e_mbd.above_context || !x->e_mbd.left_context)
return;
vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
@@ -875,13 +685,14 @@ void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
*(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
*(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
}
-
}
+void optimize_mb_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+ vp8_optimize_mby_8x8(x, rtcd);
+ vp8_optimize_mbuv_8x8(x, rtcd);
+}
-
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
-void optimize_b_16x16(MACROBLOCK *mb, int i, int type,
+void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
const VP8_ENCODER_RTCD *rtcd) {
BLOCK *b = &mb->block[i];
@@ -1056,186 +867,73 @@ void optimize_b_16x16(MACROBLOCK *mb, int i, int type,
}
void vp8_optimize_mby_16x16(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta, *tl;
-
- if (!x->e_mbd.above_context)
- return;
- if (!x->e_mbd.left_context)
- return;
-
- vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
-
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
- optimize_b_16x16(x, 0, PLANE_TYPE_Y_WITH_DC, ta, tl, rtcd);
- *(ta + 1) = *ta;
- *(tl + 1) = *tl;
-}
-
-void optimize_mb_16x16(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
- int b;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta, *tl;
+ if (!x->e_mbd.above_context || !x->e_mbd.left_context)
+ return;
+
vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
-
optimize_b_16x16(x, 0, PLANE_TYPE_Y_WITH_DC, ta, tl, rtcd);
- *(ta + 1) = *ta;
- *(tl + 1) = *tl;
+}
- for (b = 16; b < 24; b += 4) {
- optimize_b(x, b, PLANE_TYPE_UV,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
- rtcd, TX_8X8);
- *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
- }
+static void optimize_mb_16x16(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+ vp8_optimize_mby_16x16(x, rtcd);
+ vp8_optimize_mbuv_8x8(x, rtcd);
}
-#endif
void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
- int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
- vp8_build_inter_predictors_mb(&x->e_mbd);
+ MACROBLOCKD *xd = &x->e_mbd;
+ TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
+ vp8_build_inter_predictors_mb(xd);
vp8_subtract_mb(rtcd, x);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type == TX_16X16)
+ if (tx_size == TX_16X16) {
vp8_transform_mb_16x16(x);
- else
-#endif
- if (tx_type == TX_8X8)
- vp8_transform_mb_8x8(x);
- else
- transform_mb(x);
-
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type == TX_16X16)
vp8_quantize_mb_16x16(x);
- else
-#endif
- if (tx_type == TX_8X8)
- vp8_quantize_mb_8x8(x);
- else
- vp8_quantize_mb(x);
-
- if (x->optimize) {
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type == TX_16X16)
+ if (x->optimize)
optimize_mb_16x16(x, rtcd);
- else
-#endif
- if (tx_type == TX_8X8)
+ vp8_inverse_transform_mb_16x16(IF_RTCD(&rtcd->common->idct), xd);
+ } else if (tx_size == TX_8X8) {
+ vp8_transform_mb_8x8(x);
+ vp8_quantize_mb_8x8(x);
+ if (x->optimize)
optimize_mb_8x8(x, rtcd);
- else
- optimize_mb(x, rtcd);
+ vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), xd);
+ } else {
+ transform_mb_4x4(x);
+ vp8_quantize_mb_4x4(x);
+ if (x->optimize)
+ optimize_mb_4x4(x, rtcd);
+ vp8_inverse_transform_mb_4x4(IF_RTCD(&rtcd->common->idct), xd);
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type == TX_16X16)
- vp8_inverse_transform_mb_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- else
-#endif
- if (tx_type == TX_8X8)
- vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- else
- vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
- if (tx_type == TX_8X8) {
-#ifdef ENC_DEBUG
- if (enc_debug) {
- int i;
- printf("qcoeff:\n");
- printf("%d %d:\n", x->e_mbd.mb_to_left_edge, x->e_mbd.mb_to_top_edge);
- for (i = 0; i < 400; i++) {
- printf("%3d ", x->e_mbd.qcoeff[i]);
- if (i % 16 == 15) printf("\n");
- }
- printf("dqcoeff:\n");
- for (i = 0; i < 400; i++) {
- printf("%3d ", x->e_mbd.dqcoeff[i]);
- if (i % 16 == 15) printf("\n");
- }
- printf("diff:\n");
- for (i = 0; i < 400; i++) {
- printf("%3d ", x->e_mbd.diff[i]);
- if (i % 16 == 15) printf("\n");
- }
- printf("predictor:\n");
- for (i = 0; i < 400; i++) {
- printf("%3d ", x->e_mbd.predictor[i]);
- if (i % 16 == 15) printf("\n");
- }
- printf("\n");
- }
-#endif
- }
-
- RECON_INVOKE(&rtcd->common->recon, recon_mb)
- (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
-#ifdef ENC_DEBUG
- if (enc_debug) {
- int i, j, k;
- printf("Final Reconstruction\n");
- for (i = 0; i < 16; i += 4) {
- BLOCKD *b = &x->e_mbd.block[i];
- unsigned char *d = *(b->base_dst) + b->dst;
- for (k = 0; k < 4; k++) {
- for (j = 0; j < 16; j++)
- printf("%3d ", d[j]);
- printf("\n");
- d += b->dst_stride;
- }
- }
- }
-#endif
+ vp8_recon_mb(xd);
}
-
/* this function is used by first pass only */
void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
- int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
-
+ MACROBLOCKD *xd = &x->e_mbd;
BLOCK *b = &x->block[0];
#if CONFIG_PRED_FILTER
// Disable the prediction filter for firstpass
- x->e_mbd.mode_info_context->mbmi.pred_filter_enabled = 0;
+ xd->mode_info_context->mbmi.pred_filter_enabled = 0;
#endif
- vp8_build_1st_inter16x16_predictors_mby(&x->e_mbd, x->e_mbd.predictor,
- 16, 0);
+ vp8_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
+ ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src),
+ xd->predictor, b->src_stride);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type == TX_16X16)
- vp8_transform_mby_16x16(x);
- else
-#endif
- if (tx_type == TX_8X8)
- vp8_transform_mby_8x8(x);
- else
- transform_mby(x);
-
- vp8_quantize_mby(x);
-
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type == TX_16X16)
- vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- else
-#endif
- if (tx_type == TX_8X8)
- vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- else
- vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ vp8_transform_mby_4x4(x);
+ vp8_quantize_mby_4x4(x);
+ vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), xd);
- RECON_INVOKE(&rtcd->common->recon, recon_mby)
- (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
+ vp8_recon_mby(xd);
}
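
A note on the vp8_optimize_* routines consolidated above: each copies the above/left entropy contexts into locals before running the trellis, so per-block decisions can advance context inside the macroblock without disturbing the real state. A sketch, with the struct layout approximated:

typedef char ENTROPY_CONTEXT;
typedef struct {
  ENTROPY_CONTEXT y1[4];
  ENTROPY_CONTEXT u[2];
  ENTROPY_CONTEXT v[2];
  ENTROPY_CONTEXT y2[1];
} ENTROPY_CONTEXT_PLANES;

static void optimize_on_local_ctx(const ENTROPY_CONTEXT_PLANES *above,
                                  const ENTROPY_CONTEXT_PLANES *left) {
  ENTROPY_CONTEXT_PLANES t_above = *above;   /* work on copies */
  ENTROPY_CONTEXT_PLANES t_left = *left;
  ENTROPY_CONTEXT *ta = (ENTROPY_CONTEXT *)&t_above;
  ENTROPY_CONTEXT *tl = (ENTROPY_CONTEXT *)&t_left;
  /* The trellis updates ta[]/tl[] block by block; an 8x8 block spans two
   * 4x4 context slots, which is why the callers mirror each entry:
   *   ta[i + 1] = ta[i];  tl[i + 1] = tl[i];                           */
  (void)ta;
  (void)tl;
}
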
diff --git a/vp8/encoder/encodemb.h b/vp8/encoder/encodemb.h
index 6a0f223c9..6c28ea025 100644
--- a/vp8/encoder/encodemb.h
+++ b/vp8/encoder/encodemb.h
@@ -105,30 +105,24 @@ struct VP8_ENCODER_RTCD;
void vp8_encode_inter16x16(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp8_build_dcblock(MACROBLOCK *b);
-void vp8_transform_mb(MACROBLOCK *mb);
-void vp8_transform_mbuv(MACROBLOCK *x);
-void vp8_transform_intra_mby(MACROBLOCK *x);
+void vp8_transform_mb_4x4(MACROBLOCK *mb);
+void vp8_transform_mbuv_4x4(MACROBLOCK *x);
+void vp8_transform_mby_4x4(MACROBLOCK *x);
-void vp8_optimize_mby(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
-void vp8_optimize_mbuv(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
+void vp8_optimize_mby_4x4(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
+void vp8_optimize_mbuv_4x4(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_encode_inter16x16y(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp8_transform_mb_8x8(MACROBLOCK *mb);
void vp8_transform_mby_8x8(MACROBLOCK *x);
void vp8_transform_mbuv_8x8(MACROBLOCK *x);
-void vp8_transform_intra_mby_8x8(MACROBLOCK *x);
void vp8_build_dcblock_8x8(MACROBLOCK *b);
void vp8_optimize_mby_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void vp8_transform_mb_16x16(MACROBLOCK *mb);
void vp8_transform_mby_16x16(MACROBLOCK *x);
-void vp8_transform_mbuv_16x16(MACROBLOCK *x);
-void vp8_transform_intra_mby_16x16(MACROBLOCK *x);
-void vp8_build_dcblock_16x16(MACROBLOCK *b);
void vp8_optimize_mby_16x16(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
-#endif
void vp8_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch);
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 1e54371f7..bad50b606 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -528,7 +528,7 @@ void vp8_first_pass(VP8_COMP *cpi) {
xd->left_available = (mb_col != 0);
// Copy current mb to a buffer
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
+ vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
// do intra 16x16 prediction
this_error = vp8_encode_intra(cpi, x, use_dc_pred);
@@ -623,6 +623,7 @@ void vp8_first_pass(VP8_COMP *cpi) {
mv.as_mv.col <<= 3;
this_error = motion_error;
vp8_set_mbmode_and_mvs(x, NEWMV, &mv);
+ xd->mode_info_context->mbmi.txfm_size = TX_4X4;
vp8_encode_inter16x16y(IF_RTCD(&cpi->rtcd), x);
sum_mvr += mv.as_mv.row;
sum_mvr_abs += abs(mv.as_mv.row);
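
One worked detail from the first-pass hunk: the motion search returns full-pel vectors, and the mv.as_mv.row/col <<= 3 above scales them by eight into the encoder's finer internal MV units (the unit name is inferred from the shift, not stated in the tree). The same arithmetic, written as a multiply so it stays well defined for negative values:

#include <stdio.h>

int main(void) {
  short row_fullpel = -5;                       /* full-pel search result */
  short row_scaled = (short)(row_fullpel * 8);  /* same scaling as <<= 3 */
  printf("%d full-pel -> %d internal units\n", row_fullpel, row_scaled);
  return 0;
}
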
diff --git a/vp8/encoder/generic/csystemdependent.c b/vp8/encoder/generic/csystemdependent.c
index c4e2617d8..356e32c3f 100644
--- a/vp8/encoder/generic/csystemdependent.c
+++ b/vp8/encoder/generic/csystemdependent.c
@@ -99,9 +99,7 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi) {
cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;
cpi->rtcd.fdct.short8x8 = vp8_short_fdct8x8_c;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->rtcd.fdct.short16x16 = vp8_short_fdct16x16_c;
-#endif
cpi->rtcd.fdct.haar_short2x2 = vp8_short_fhaar2x2_c;
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
@@ -120,13 +118,18 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi) {
cpi->rtcd.search.refining_search = vp8_refining_search_sad;
cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
+ cpi->rtcd.variance.satd16x16 = vp8_satd16x16_c;
+ cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
+ cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
+ cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
+ cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
+ cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
#if CONFIG_INTERNAL_STATS
cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_c;
cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_c;
#endif
#endif
- cpi->rtcd.variance.satd16x16 = vp8_satd16x16_c;
vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
#if ARCH_X86 || ARCH_X86_64
@@ -137,10 +140,5 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi) {
vp8_arch_arm_encoder_init(cpi);
#endif
- cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
- cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
- cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
- cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
- cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
}
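
The reshuffling above stays within the runtime-CPU-detect (RTCD) pattern: generic C routines fill the dispatch table first, then the arch init hooks (vp8_arch_x86_encoder_init, vp8_arch_arm_encoder_init) may overwrite individual slots. A self-contained sketch with hypothetical names:

typedef void (*fdct_fn)(short *input, short *output, int pitch);

static void fdct4x4_generic(short *input, short *output, int pitch) {
  (void)input; (void)output; (void)pitch;  /* plain C fallback */
}
static void fdct4x4_simd(short *input, short *output, int pitch) {
  (void)input; (void)output; (void)pitch;  /* optimized variant */
}

struct fdct_rtcd { fdct_fn short4x4; };

static void machine_specific_config(struct fdct_rtcd *rtcd, int have_simd) {
  rtcd->short4x4 = fdct4x4_generic;  /* default, as in the function above */
  if (have_simd)
    rtcd->short4x4 = fdct4x4_simd;   /* arch-specific init overrides */
}

This is also why the hunk above moves the fdct pointer defaults ahead of the ARCH_X86/ARM init calls: assigning them afterwards would clobber any optimized versions those hooks installed.
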
diff --git a/vp8/encoder/mbgraph.c b/vp8/encoder/mbgraph.c
index d512ae4bc..180ee5870 100644
--- a/vp8/encoder/mbgraph.c
+++ b/vp8/encoder/mbgraph.c
@@ -220,7 +220,7 @@ static int find_best_16x16_intra
unsigned int err;
xd->mode_info_context->mbmi.mode = mode;
- RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mby)(xd);
+ vp8_build_intra_predictors_mby(xd);
// VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
(xd->predictor, 16,
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 9a88eddb9..717fe96ee 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -1203,16 +1203,12 @@ void vp8_set_speed_features(VP8_COMP *cpi) {
}
if (cpi->sf.improved_dct) {
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->mb.vp8_short_fdct16x16 = FDCT_INVOKE(&cpi->rtcd.fdct, short16x16);
-#endif
cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x4);
cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short4x4);
} else {
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->mb.vp8_short_fdct16x16 = FDCT_INVOKE(&cpi->rtcd.fdct, short16x16);
-#endif
cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast8x4);
cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast4x4);
@@ -1222,13 +1218,11 @@ void vp8_set_speed_features(VP8_COMP *cpi) {
cpi->mb.short_fhaar2x2 = FDCT_INVOKE(&cpi->rtcd.fdct, haar_short2x2);
- cpi->mb.quantize_b = vp8_regular_quantize_b;
- cpi->mb.quantize_b_pair = vp8_regular_quantize_b_pair;
- cpi->mb.quantize_b_8x8 = vp8_regular_quantize_b_8x8;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- cpi->mb.quantize_b_16x16= vp8_regular_quantize_b_16x16;
-#endif
- cpi->mb.quantize_b_2x2 = vp8_regular_quantize_b_2x2;
+ cpi->mb.quantize_b_4x4 = vp8_regular_quantize_b_4x4;
+ cpi->mb.quantize_b_4x4_pair = vp8_regular_quantize_b_4x4_pair;
+ cpi->mb.quantize_b_8x8 = vp8_regular_quantize_b_8x8;
+ cpi->mb.quantize_b_16x16 = vp8_regular_quantize_b_16x16;
+ cpi->mb.quantize_b_2x2 = vp8_regular_quantize_b_2x2;
vp8cx_init_quantizer(cpi);
@@ -1829,6 +1823,10 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
#endif
for (i = 0; i < COMP_PRED_CONTEXTS; i++)
cm->prob_comppred[i] = 128;
+#if CONFIG_TX_SELECT
+ for (i = 0; i < TX_SIZE_MAX - 1; i++)
+ cm->prob_tx[i] = 128;
+#endif
// Prime the recent reference frame useage counters.
// Hereafter they will be maintained as a sort of moving average
@@ -3766,13 +3764,11 @@ static void encode_frame_to_data_rate
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cpi->common.fc.hybrid_coef_counts_8x8, cpi->hybrid_coef_counts_8x8);
#endif
-#if CONFIG_TX16X16
vp8_copy(cpi->common.fc.coef_counts_16x16, cpi->coef_counts_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cpi->common.fc.hybrid_coef_counts_16x16,
cpi->hybrid_coef_counts_16x16);
#endif
-#endif
vp8_adapt_coef_probs(&cpi->common);
if (cpi->common.frame_type != KEY_FRAME) {
vp8_copy(cpi->common.fc.ymode_counts, cpi->ymode_count);
@@ -4416,8 +4412,9 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
{
double frame_psnr2, frame_ssim2 = 0;
double weight = 0;
-
+#if CONFIG_POSTPROC
vp8_deblock(cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0, IF_RTCD(&cm->rtcd.postproc));
+#endif
vp8_clear_system_state();
ye = calc_plane_error(orig->y_buffer, orig->y_stride,
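
On the new prob_tx priming in vp8_create_compressor: a vp8_prob is an 8-bit bool-coder probability, so 128 starts each transform-size decision near even odds. A sketch, assuming the usual p = prob/256 convention:

#include <stdio.h>

int main(void) {
  unsigned char prob_tx = 128;             /* the primed value above */
  printf("p ~= %.3f\n", prob_tx / 256.0);  /* prints p ~= 0.500 */
  return 0;
}
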
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index 5cc87d7a9..1e7494039 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -109,14 +109,12 @@ typedef struct {
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
#endif
-#if CONFIG_TX16X16
vp8_prob coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_prob hybrid_coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
#endif
-#endif
vp8_prob ymode_prob [VP8_YMODES - 1]; /* interframe intra mode probs */
vp8_prob uv_mode_prob [VP8_YMODES][VP8_UV_MODES - 1];
@@ -413,14 +411,12 @@ typedef struct VP8_COMP {
DECLARE_ALIGNED(64, short, zrun_zbin_boost_y2_8x8[QINDEX_RANGE][64]);
DECLARE_ALIGNED(64, short, zrun_zbin_boost_uv_8x8[QINDEX_RANGE][64]);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
DECLARE_ALIGNED(16, short, Y1zbin_16x16[QINDEX_RANGE][256]);
DECLARE_ALIGNED(16, short, Y2zbin_16x16[QINDEX_RANGE][256]);
DECLARE_ALIGNED(16, short, UVzbin_16x16[QINDEX_RANGE][256]);
DECLARE_ALIGNED(16, short, zrun_zbin_boost_y1_16x16[QINDEX_RANGE][256]);
DECLARE_ALIGNED(16, short, zrun_zbin_boost_y2_16x16[QINDEX_RANGE][256]);
DECLARE_ALIGNED(16, short, zrun_zbin_boost_uv_16x16[QINDEX_RANGE][256]);
-#endif
MACROBLOCK mb;
VP8_COMMON common;
@@ -472,6 +468,13 @@ typedef struct VP8_COMP {
int rd_prediction_type_threshes[4][NB_PREDICTION_TYPES];
int comp_pred_count[COMP_PRED_CONTEXTS];
int single_pred_count[COMP_PRED_CONTEXTS];
+#if CONFIG_TX_SELECT
+ // FIXME contextualize
+ int txfm_count[TX_SIZE_MAX];
+ int txfm_count_8x8p[TX_SIZE_MAX - 1];
+ int64_t rd_tx_select_diff[NB_TXFM_MODES];
+ int rd_tx_select_threshes[4][NB_TXFM_MODES];
+#endif
int RDMULT;
int RDDIV;
@@ -588,7 +591,6 @@ typedef struct VP8_COMP {
unsigned int frame_hybrid_branch_ct_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
#endif
-#if CONFIG_TX16X16
unsigned int coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
@@ -597,7 +599,6 @@ typedef struct VP8_COMP {
vp8_prob frame_hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_hybrid_branch_ct_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
#endif
-#endif
int gfu_boost;
int last_boost;
@@ -654,11 +655,6 @@ typedef struct VP8_COMP {
int gf_update_recommended;
int skip_true_count[3];
int skip_false_count[3];
- int t4x4_count;
- int t8x8_count;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- int t16x16_count;
-#endif
unsigned char *segmentation_map;
@@ -809,6 +805,7 @@ void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x);
int rd_cost_intra_mb(MACROBLOCKD *x);
void vp8_tokenize_mb(VP8_COMP *, MACROBLOCKD *, TOKENEXTRA **, int dry_run);
+void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp8_set_speed_features(VP8_COMP *cpi);
diff --git a/vp8/encoder/picklpf.c b/vp8/encoder/picklpf.c
index d17dd9219..954997889 100644
--- a/vp8/encoder/picklpf.c
+++ b/vp8/encoder/picklpf.c
@@ -337,7 +337,7 @@ void vp8cx_pick_filter_level_sg(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi, int segme
Bias = Bias * cpi->twopass.section_intra_rating / 20;
// yx, bias less for large block size
- if (cpi->common.txfm_mode == ALLOW_8X8)
+ if (cpi->common.txfm_mode != ONLY_4X4)
Bias >>= 1;
filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
@@ -546,7 +546,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
Bias = Bias * cpi->twopass.section_intra_rating / 20;
// yx, bias less for large block size
- if (cpi->common.txfm_mode == ALLOW_8X8)
+ if (cpi->common.txfm_mode != ONLY_4X4)
Bias >>= 1;
filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
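
The picklpf change widens an existing bias: the halving now applies whenever any transform larger than 4x4 is allowed, not only under ALLOW_8X8. Bias >>= 1 divides by two, rounding toward zero for the non-negative Bias used here; a sketch:

#include <stdio.h>

int main(void) {
  int Bias = 25;
  Bias >>= 1;  /* 25 -> 12 */
  printf("Bias = %d\n", Bias);
  return 0;
}
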
diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c
index f55018b76..8ae3029ee 100644
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include <math.h>
#include "vpx_mem/vpx_mem.h"
@@ -23,7 +22,7 @@ extern int enc_debug;
#endif
#if CONFIG_HYBRIDTRANSFORM
-void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d) {
+void vp8_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
@@ -88,7 +87,7 @@ void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d) {
}
#endif
-void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d) {
+void vp8_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
@@ -137,39 +136,28 @@ void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d) {
d->eob = eob + 1;
}
-void vp8_quantize_mby_c(MACROBLOCK *x) {
+void vp8_quantize_mby_4x4_c(MACROBLOCK *x) {
int i;
- int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+ int has_2nd_order = x->e_mbd.mode_info_context->mbmi.mode != SPLITMV;
for (i = 0; i < 16; i++)
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+ x->quantize_b_4x4(&x->block[i], &x->e_mbd.block[i]);
if (has_2nd_order)
- x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
-}
-
-void vp8_quantize_mb_c(MACROBLOCK *x) {
- int i;
- int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
-
- for (i = 0; i < 24 + has_2nd_order; i++)
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+ x->quantize_b_4x4(&x->block[24], &x->e_mbd.block[24]);
}
-
-void vp8_quantize_mbuv_c(MACROBLOCK *x) {
+void vp8_quantize_mbuv_4x4_c(MACROBLOCK *x) {
int i;
for (i = 16; i < 24; i++)
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+ x->quantize_b_4x4(&x->block[i], &x->e_mbd.block[i]);
}
-
-
+void vp8_quantize_mb_4x4_c(MACROBLOCK *x) {
+ vp8_quantize_mby_4x4_c(x);
+ vp8_quantize_mbuv_4x4_c(x);
+}
void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
@@ -271,8 +259,8 @@ void vp8_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d) {
void vp8_quantize_mby_8x8(MACROBLOCK *x) {
int i;
- int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+ int has_2nd_order = x->e_mbd.mode_info_context->mbmi.mode != SPLITMV;
+
for (i = 0; i < 16; i ++) {
x->e_mbd.block[i].eob = 0;
}
@@ -282,21 +270,6 @@ void vp8_quantize_mby_8x8(MACROBLOCK *x) {
if (has_2nd_order)
x->quantize_b_2x2(&x->block[24], &x->e_mbd.block[24]);
-
-}
-
-void vp8_quantize_mb_8x8(MACROBLOCK *x) {
- int i;
- int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
- for (i = 0; i < 25; i ++) {
- x->e_mbd.block[i].eob = 0;
- }
- for (i = 0; i < 24; i += 4)
- x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
-
- if (has_2nd_order)
- x->quantize_b_2x2(&x->block[24], &x->e_mbd.block[24]);
}
void vp8_quantize_mbuv_8x8(MACROBLOCK *x) {
@@ -308,12 +281,14 @@ void vp8_quantize_mbuv_8x8(MACROBLOCK *x) {
x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
}
+void vp8_quantize_mb_8x8(MACROBLOCK *x) {
+ vp8_quantize_mby_8x8(x);
+ vp8_quantize_mbuv_8x8(x);
+}
-
-
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void vp8_quantize_mby_16x16(MACROBLOCK *x) {
int i;
+
for (i = 0; i < 16; i++)
x->e_mbd.block[i].eob = 0;
x->e_mbd.block[24].eob = 0;
@@ -321,22 +296,8 @@ void vp8_quantize_mby_16x16(MACROBLOCK *x) {
}
void vp8_quantize_mb_16x16(MACROBLOCK *x) {
- int i;
- for(i = 0; i < 25; i++)
- x->e_mbd.block[i].eob = 0;
- x->quantize_b_16x16(&x->block[0], &x->e_mbd.block[0]);
- for (i = 16; i < 24; i += 4)
- x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
-}
-
-// U and V should use 8x8
-void vp8_quantize_mbuv_16x16(MACROBLOCK *x) {
- int i;
-
- for(i = 16; i < 24; i++)
- x->e_mbd.block[i].eob = 0;
- for (i = 16; i < 24; i += 4)
- x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
+ vp8_quantize_mby_16x16(x);
+ vp8_quantize_mbuv_8x8(x);
}
void vp8_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
@@ -385,17 +346,14 @@ void vp8_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
d->eob = eob + 1;
}
-#endif
-
-
/* quantize_b_pair function pointer in MACROBLOCK structure is set to one of
* these two C functions if corresponding optimized routine is not available.
* The NEON optimized version currently implements the fast quantization for a
* pair of blocks. */
-void vp8_regular_quantize_b_pair(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2) {
- vp8_regular_quantize_b(b1, d1);
- vp8_regular_quantize_b(b2, d2);
+void vp8_regular_quantize_b_4x4_pair(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2) {
+ vp8_regular_quantize_b_4x4(b1, d1);
+ vp8_regular_quantize_b_4x4(b2, d2);
}
static void invert_quant(short *quant,
@@ -410,7 +368,6 @@ static void invert_quant(short *quant,
*shift = l;
}
-
void vp8cx_init_quantizer(VP8_COMP *cpi) {
int i;
int quant_val;
@@ -428,7 +385,6 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48
};
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
static const int zbin_boost_16x16[256] = {
0, 0, 0, 8, 8, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28,
30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 48, 48, 48, 48, 48, 48,
@@ -447,7 +403,6 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
};
-#endif
int qrounding_factor = 48;
@@ -469,17 +424,13 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
cpi->Y1quant_shift[Q] + 0, quant_val);
cpi->Y1zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->Y1zbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->Y1zbin_16x16[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
-#endif
cpi->Y1round[Q][0] = (qrounding_factor * quant_val) >> 7;
cpi->common.Y1dequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
cpi->zrun_zbin_boost_y1_8x8[Q][0] =
((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->zrun_zbin_boost_y1_16x16[Q][0] = ((quant_val * zbin_boost_16x16[0]) + 64) >> 7;
-#endif
quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
@@ -487,34 +438,26 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
cpi->Y2quant_shift[Q] + 0, quant_val);
cpi->Y2zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->Y2zbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->Y2zbin_16x16[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
-#endif
cpi->Y2round[Q][0] = (qrounding_factor * quant_val) >> 7;
cpi->common.Y2dequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
cpi->zrun_zbin_boost_y2_8x8[Q][0] =
((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->zrun_zbin_boost_y2_16x16[Q][0] = ((quant_val * zbin_boost_16x16[0]) + 64) >> 7;
-#endif
quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
invert_quant(cpi->UVquant[Q] + 0,
cpi->UVquant_shift[Q] + 0, quant_val);
cpi->UVzbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->UVzbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->UVzbin_16x16[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
-#endif
cpi->UVround[Q][0] = (qrounding_factor * quant_val) >> 7;
cpi->common.UVdequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
cpi->zrun_zbin_boost_uv_8x8[Q][0] =
((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->zrun_zbin_boost_uv_16x16[Q][0] = ((quant_val * zbin_boost_16x16[0]) + 64) >> 7;
-#endif
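
(Every zbin/round assignment in this hunk uses the same Q7 fixed-point pattern: a factor times the dequant value, divided by 128, with +64 rounding for the zero bin and truncation for the rounding term. A worked example follows; quant_val and the zbin factor 84 are illustrative, while qrounding_factor = 48 matches the constant above.)

  int quant_val = 80;                        /* hypothetical dequant value */
  int zbin  = ((84 * quant_val) + 64) >> 7;  /* (6720 + 64) / 128 = 53     */
  int round = (48 * quant_val) >> 7;         /*  3840 / 128       = 30     */
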
// all the 4x4 ac values
for (i = 1; i < 16; i++) {
@@ -570,7 +513,6 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
((quant_val * zbin_boost_8x8[i]) + 64) >> 7;
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
// 16x16 structures. The same comment as above applies.
for (i = 1; i < 256; i++) {
int rc = vp8_default_zig_zag1d_16x16[i];
@@ -587,7 +529,6 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
cpi->UVzbin_16x16[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->zrun_zbin_boost_uv_16x16[Q][i] = ((quant_val * zbin_boost_16x16[i]) + 64) >> 7;
}
-#endif
}
}
@@ -626,16 +567,12 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
x->block[i].zbin = cpi->Y1zbin[QIndex];
x->block[i].zbin_8x8 = cpi->Y1zbin_8x8[QIndex];
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[i].zbin_16x16 = cpi->Y1zbin_16x16[QIndex];
-#endif
x->block[i].round = cpi->Y1round[QIndex];
x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
x->block[i].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_y1_8x8[QIndex];
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[i].zrun_zbin_boost_16x16 = cpi->zrun_zbin_boost_y1_16x16[QIndex];
-#endif
x->block[i].zbin_extra = (short)zbin_extra;
// Segment max eob offset feature.
@@ -644,16 +581,12 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
get_segdata(xd, segment_id, SEG_LVL_EOB);
x->block[i].eob_max_offset_8x8 =
get_segdata(xd, segment_id, SEG_LVL_EOB);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[i].eob_max_offset_16x16 =
get_segdata(xd, segment_id, SEG_LVL_EOB);
-#endif
} else {
x->block[i].eob_max_offset = 16;
x->block[i].eob_max_offset_8x8 = 64;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[i].eob_max_offset_16x16 = 256;
-#endif
}
}
@@ -668,16 +601,12 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
x->block[i].zbin = cpi->UVzbin[QIndex];
x->block[i].zbin_8x8 = cpi->UVzbin_8x8[QIndex];
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[i].zbin_16x16 = cpi->UVzbin_16x16[QIndex];
-#endif
x->block[i].round = cpi->UVround[QIndex];
x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
x->block[i].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_uv_8x8[QIndex];
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[i].zrun_zbin_boost_16x16 = cpi->zrun_zbin_boost_uv_16x16[QIndex];
-#endif
x->block[i].zbin_extra = (short)zbin_extra;
@@ -703,16 +632,12 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
x->block[24].zbin = cpi->Y2zbin[QIndex];
x->block[24].zbin_8x8 = cpi->Y2zbin_8x8[QIndex];
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[24].zbin_16x16 = cpi->Y2zbin_16x16[QIndex];
-#endif
x->block[24].round = cpi->Y2round[QIndex];
x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
x->block[24].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_y2_8x8[QIndex];
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[24].zrun_zbin_boost_16x16 = cpi->zrun_zbin_boost_y2_16x16[QIndex];
-#endif
x->block[24].zbin_extra = (short)zbin_extra;
// TBD: perhaps not used for Y2
@@ -731,7 +656,6 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
x->q_index = QIndex;
}
-
void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
int i;
int QIndex = x->q_index;
@@ -765,7 +689,6 @@ void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
x->block[24].zbin_extra = (short)zbin_extra;
}
-
void vp8cx_frame_init_quantizer(VP8_COMP *cpi) {
// Clear Zbin mode boost for default case
cpi->zbin_mode_boost = 0;
@@ -774,7 +697,6 @@ void vp8cx_frame_init_quantizer(VP8_COMP *cpi) {
vp8cx_mb_init_quantizer(cpi, &cpi->mb);
}
-
void vp8_set_quantizer(struct VP8_COMP *cpi, int Q) {
VP8_COMMON *cm = &cpi->common;
diff --git a/vp8/encoder/quantize.h b/vp8/encoder/quantize.h
index 9bd87fa77..ad3a3fc0e 100644
--- a/vp8/encoder/quantize.h
+++ b/vp8/encoder/quantize.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef __INC_QUANTIZE_H
#define __INC_QUANTIZE_H
@@ -31,58 +30,57 @@
#include "arm/quantize_arm.h"
#endif
-#ifndef vp8_quantize_quantb
-#define vp8_quantize_quantb vp8_regular_quantize_b
+#if CONFIG_HYBRIDTRANSFORM
+extern prototype_quantize_block(vp8_ht_quantize_b_4x4);
+#endif
+
+#ifndef vp8_quantize_quantb_4x4
+#define vp8_quantize_quantb_4x4 vp8_regular_quantize_b_4x4
#endif
-extern prototype_quantize_block(vp8_quantize_quantb);
+extern prototype_quantize_block(vp8_quantize_quantb_4x4);
-#ifndef vp8_quantize_quantb_pair
-#define vp8_quantize_quantb_pair vp8_regular_quantize_b_pair
+#ifndef vp8_quantize_quantb_4x4_pair
+#define vp8_quantize_quantb_4x4_pair vp8_regular_quantize_b_4x4_pair
#endif
-extern prototype_quantize_block_pair(vp8_quantize_quantb_pair);
+extern prototype_quantize_block_pair(vp8_quantize_quantb_4x4_pair);
#ifndef vp8_quantize_quantb_8x8
#define vp8_quantize_quantb_8x8 vp8_regular_quantize_b_8x8
#endif
extern prototype_quantize_block(vp8_quantize_quantb_8x8);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
#ifndef vp8_quantize_quantb_16x16
#define vp8_quantize_quantb_16x16 vp8_regular_quantize_b_16x16
#endif
extern prototype_quantize_block(vp8_quantize_quantb_16x16);
-#endif
#ifndef vp8_quantize_quantb_2x2
#define vp8_quantize_quantb_2x2 vp8_regular_quantize_b_2x2
#endif
extern prototype_quantize_block(vp8_quantize_quantb_2x2);
-#ifndef vp8_quantize_mb
-#define vp8_quantize_mb vp8_quantize_mb_c
+#ifndef vp8_quantize_mb_4x4
+#define vp8_quantize_mb_4x4 vp8_quantize_mb_4x4_c
#endif
-extern prototype_quantize_mb(vp8_quantize_mb);
+extern prototype_quantize_mb(vp8_quantize_mb_4x4);
void vp8_quantize_mb_8x8(MACROBLOCK *x);
-#ifndef vp8_quantize_mbuv
-#define vp8_quantize_mbuv vp8_quantize_mbuv_c
+#ifndef vp8_quantize_mbuv_4x4
+#define vp8_quantize_mbuv_4x4 vp8_quantize_mbuv_4x4_c
#endif
-extern prototype_quantize_mb(vp8_quantize_mbuv);
+extern prototype_quantize_mb(vp8_quantize_mbuv_4x4);
-#ifndef vp8_quantize_mby
-#define vp8_quantize_mby vp8_quantize_mby_c
+#ifndef vp8_quantize_mby_4x4
+#define vp8_quantize_mby_4x4 vp8_quantize_mby_4x4_c
#endif
-extern prototype_quantize_mb(vp8_quantize_mby);
+extern prototype_quantize_mb(vp8_quantize_mby_4x4);
extern prototype_quantize_mb(vp8_quantize_mby_8x8);
extern prototype_quantize_mb(vp8_quantize_mbuv_8x8);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void vp8_quantize_mb_16x16(MACROBLOCK *x);
extern prototype_quantize_block(vp8_quantize_quantb_16x16);
extern prototype_quantize_mb(vp8_quantize_mby_16x16);
-extern prototype_quantize_mb(vp8_quantize_mbuv_16x16);
-#endif
struct VP8_COMP;
extern void vp8_set_quantizer(struct VP8_COMP *cpi, int Q);
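
(The #ifndef/#define pairs above follow this header's usual override convention: an arch header included earlier may claim a symbol, and the generic name only falls back to the C routine when nothing did. A minimal sketch of the pattern; the SSE2 symbol below is hypothetical.)

  /* in an arch header included before the block above: */
  #define vp8_quantize_quantb_4x4 vp8_regular_quantize_b_4x4_sse2

  /* quantize.h then skips its own fallback #define, so call sites written
     as vp8_quantize_quantb_4x4(b, d) transparently pick up the optimized
     routine; without the arch header they resolve to the C version. */
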
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index e059a10e2..2b5f699b6 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -185,12 +185,10 @@ void vp8_save_coding_context(VP8_COMP *cpi) {
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cc->hybrid_coef_probs_8x8, cm->fc.hybrid_coef_probs_8x8);
#endif
-#if CONFIG_TX16X16
vp8_copy(cc->coef_probs_16x16, cm->fc.coef_probs_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cc->hybrid_coef_probs_16x16, cm->fc.hybrid_coef_probs_16x16);
#endif
-#endif
#if CONFIG_SWITCHABLE_INTERP
vp8_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
#endif
@@ -258,12 +256,10 @@ void vp8_restore_coding_context(VP8_COMP *cpi) {
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cm->fc.hybrid_coef_probs_8x8, cc->hybrid_coef_probs_8x8);
#endif
-#if CONFIG_TX16X16
vp8_copy(cm->fc.coef_probs_16x16, cc->coef_probs_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cm->fc.hybrid_coef_probs_16x16, cc->hybrid_coef_probs_16x16);
#endif
-#endif
#if CONFIG_SWITCHABLE_INTERP
vp8_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob);
#endif
@@ -271,10 +267,12 @@ void vp8_restore_coding_context(VP8_COMP *cpi) {
void vp8_setup_key_frame(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
// Setup for Key frame:
vp8_default_coef_probs(& cpi->common);
vp8_kf_default_bmode_probs(cpi->common.kf_bmode_prob);
vp8_init_mbmode_probs(& cpi->common);
+ vp8_default_bmode_probs(cm->fc.bmode_prob);
vp8_init_mv_probs(& cpi->common);
#if CONFIG_NEWMVENTROPY == 0
@@ -288,12 +286,6 @@ void vp8_setup_key_frame(VP8_COMP *cpi) {
}
#endif
- cpi->common.txfm_mode = ALLOW_8X8;
-
-#if CONFIG_LOSSLESS
- if (cpi->oxcf.lossless)
- cpi->common.txfm_mode = ONLY_4X4;
-#endif
// cpi->common.filter_level = 0; // Reset every key frame.
cpi->common.filter_level = cpi->common.base_qindex * 3 / 8;
@@ -307,17 +299,18 @@ void vp8_setup_key_frame(VP8_COMP *cpi) {
vpx_memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc));
vpx_memcpy(&cpi->common.lfc_a, &cpi->common.fc, sizeof(cpi->common.fc));
-}
+ vpx_memset(cm->prev_mip, 0,
+ (cm->mb_cols + 1) * (cm->mb_rows + 1)* sizeof(MODE_INFO));
+ vpx_memset(cm->mip, 0,
+ (cm->mb_cols + 1) * (cm->mb_rows + 1)* sizeof(MODE_INFO));
-void vp8_setup_inter_frame(VP8_COMP *cpi) {
+ update_mode_info_border(cm, cm->mip);
+ update_mode_info_in_image(cm, cm->mi);
- cpi->common.txfm_mode = ALLOW_8X8;
-#if CONFIG_LOSSLESS
- if (cpi->oxcf.lossless)
- cpi->common.txfm_mode = ONLY_4X4;
-#endif
+}
+void vp8_setup_inter_frame(VP8_COMP *cpi) {
if (cpi->common.refresh_alt_ref_frame) {
vpx_memcpy(&cpi->common.fc,
&cpi->common.lfc_a,
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 0613355fc..bb4818eeb 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -56,10 +56,6 @@
extern void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x);
extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
-#if CONFIG_HYBRIDTRANSFORM
-extern void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d);
-#endif
-
#define MAXF(a,b) (((a) > (b)) ? (a) : (b))
#define INVALID_MV 0x80008000
@@ -379,7 +375,6 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
BLOCK_TYPES_8X8);
#endif
-#if CONFIG_TX16X16
fill_token_costs(
cpi->mb.token_costs[TX_16X16],
(const vp8_prob(*)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs_16x16,
@@ -391,7 +386,6 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
cpi->common.fc.hybrid_coef_probs_16x16,
BLOCK_TYPES_16X16);
#endif
-#endif
/*rough estimate for costing*/
cpi->common.kf_ymode_probs_index = cpi->common.base_qindex >> 4;
@@ -574,9 +568,9 @@ int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd) {
}
static int cost_coeffs_2x2(MACROBLOCK *mb,
- BLOCKD *b, int type,
+ BLOCKD *b, PLANE_TYPE type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) {
- int c = !type; /* start at coef 0, unless Y with Y2 */
+ int c = (type == PLANE_TYPE_Y_NO_DC); /* start at coef 0, unless Y with Y2 */
int eob = b->eob;
int pt; /* surrounding block/prev coef predictor */
int cost = 0;
@@ -602,11 +596,11 @@ static int cost_coeffs_2x2(MACROBLOCK *mb,
return cost;
}
-static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type,
+static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
int tx_size) {
const int eob = b->eob;
- int c = !type; /* start at coef 0, unless Y with Y2 */
+ int c = (type == PLANE_TYPE_Y_NO_DC); /* start at coef 0, unless Y with Y2 */
int cost = 0, default_eob, seg_eob;
int pt; /* surrounding block/prev coef predictor */
int const *scan, *band;
@@ -626,7 +620,7 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type,
#if CONFIG_HYBRIDTRANSFORM
if (type == PLANE_TYPE_Y_WITH_DC &&
mb->q_index < ACTIVE_HT &&
- mbmi->mode_rdopt == B_PRED) {
+ mbmi->mode == B_PRED) {
tx_type = b->bmi.as_mode.tx_type;
switch (tx_type) {
case ADST_DCT:
@@ -653,27 +647,26 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type,
{
BLOCKD *bb;
int ib = (b - xd->block);
- if (ib >= 16) tx_type = DCT_DCT;
- ib = (ib & 8) + ((ib & 4) >> 1);
- bb = xd->block + ib;
- if (mbmi->mode_rdopt == I8X8_PRED)
- tx_type = bb->bmi.as_mode.tx_type;
+ if (ib < 16) {
+ ib = (ib & 8) + ((ib & 4) >> 1);
+ bb = xd->block + ib;
+ if (mbmi->mode == I8X8_PRED)
+ tx_type = bb->bmi.as_mode.tx_type;
+ }
}
#endif
break;
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
case TX_16X16:
scan = vp8_default_zig_zag1d_16x16;
band = vp8_coef_bands_16x16;
default_eob = 256;
#if CONFIG_HYBRIDTRANSFORM16X16
if (type == PLANE_TYPE_Y_WITH_DC &&
- mbmi->mode_rdopt < I8X8_PRED &&
+ mbmi->mode < I8X8_PRED &&
mb->q_index < ACTIVE_HT16)
tx_type = b->bmi.as_mode.tx_type;
#endif
break;
-#endif
default:
break;
}
@@ -744,10 +737,11 @@ static int vp8_rdcost_mby(MACROBLOCK *mb) {
return cost;
}
-static void macro_block_yrd(MACROBLOCK *mb,
- int *Rate,
- int *Distortion,
- const VP8_ENCODER_RTCD *rtcd) {
+static void macro_block_yrd_4x4(MACROBLOCK *mb,
+ int *Rate,
+ int *Distortion,
+ const VP8_ENCODER_RTCD *rtcd,
+ int *skippable) {
int b;
MACROBLOCKD *const xd = &mb->e_mbd;
BLOCK *const mb_y2 = mb->block + 24;
@@ -774,11 +768,11 @@ static void macro_block_yrd(MACROBLOCK *mb,
// Quantization
for (b = 0; b < 16; b++) {
- mb->quantize_b(&mb->block[b], &xd->block[b]);
+ mb->quantize_b_4x4(&mb->block[b], &xd->block[b]);
}
// DC prediction and quantization of 2nd order block
- mb->quantize_b(mb_y2, x_y2);
+ mb->quantize_b_4x4(mb_y2, x_y2);
// Distortion
d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 1);
@@ -788,6 +782,7 @@ static void macro_block_yrd(MACROBLOCK *mb,
*Distortion = (d >> 2);
// rate
*Rate = vp8_rdcost_mby(mb);
+ *skippable = mby_is_skippable_4x4(&mb->e_mbd, 1);
}
static int vp8_rdcost_mby_8x8(MACROBLOCK *mb, int backup) {
@@ -822,7 +817,8 @@ static int vp8_rdcost_mby_8x8(MACROBLOCK *mb, int backup) {
static void macro_block_yrd_8x8(MACROBLOCK *mb,
int *Rate,
int *Distortion,
- const VP8_ENCODER_RTCD *rtcd) {
+ const VP8_ENCODER_RTCD *rtcd,
+ int *skippable) {
MACROBLOCKD *const xd = &mb->e_mbd;
BLOCK *const mb_y2 = mb->block + 24;
BLOCKD *const x_y2 = xd->block + 24;
@@ -853,9 +849,9 @@ static void macro_block_yrd_8x8(MACROBLOCK *mb,
*Distortion = (d >> 2);
// rate
*Rate = vp8_rdcost_mby_8x8(mb, 1);
+ *skippable = mby_is_skippable_8x8(&mb->e_mbd, 1);
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
static int vp8_rdcost_mby_16x16(MACROBLOCK *mb) {
int cost;
MACROBLOCKD *xd = &mb->e_mbd;
@@ -873,7 +869,7 @@ static int vp8_rdcost_mby_16x16(MACROBLOCK *mb) {
}
static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
- const VP8_ENCODER_RTCD *rtcd) {
+ const VP8_ENCODER_RTCD *rtcd, int *skippable) {
int d;
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
@@ -883,11 +879,11 @@ static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
mb->block[0].src_stride);
#if CONFIG_HYBRIDTRANSFORM16X16
- if ((mb->e_mbd.mode_info_context->mbmi.mode_rdopt < I8X8_PRED) &&
+ if ((mb->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED) &&
(mb->q_index < ACTIVE_HT16)) {
BLOCKD *b = &mb->e_mbd.block[0];
BLOCK *be = &mb->block[0];
- txfm_map(b, pred_mode_conv(mb->e_mbd.mode_info_context->mbmi.mode_rdopt));
+ txfm_map(b, pred_mode_conv(mb->e_mbd.mode_info_context->mbmi.mode));
vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 16);
} else
vp8_transform_mby_16x16(mb);
@@ -900,7 +896,7 @@ static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
// TODO(jingning) is it possible to quickly determine whether to force
// trailing coefficients to be zero, instead of running trellis
// optimization in the rate-distortion optimization loop?
- if (mb->e_mbd.mode_info_context->mbmi.mode_rdopt < I8X8_PRED)
+ if (mb->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED)
vp8_optimize_mby_16x16(mb, rtcd);
#endif
@@ -909,8 +905,144 @@ static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
*Distortion = (d >> 2);
// rate
*Rate = vp8_rdcost_mby_16x16(mb);
+ *skippable = mby_is_skippable_16x16(&mb->e_mbd);
+}
+
+static void macro_block_yrd(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
+ int *distortion, int *skippable,
+ int64_t txfm_cache[NB_TXFM_MODES]) {
+ VP8_COMMON *cm = &cpi->common;
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
+
+#if CONFIG_TX_SELECT
+
+ MACROBLOCKD *xd = &x->e_mbd;
+ int can_skip = cm->mb_no_coeff_skip;
+ vp8_prob skip_prob = can_skip ? get_pred_prob(cm, xd, PRED_MBSKIP) : 128;
+ int s0, s1;
+ int r4x4, r4x4s, r8x8, r8x8s, d4x4, d8x8, s4x4, s8x8;
+ int64_t rd4x4, rd8x8, rd4x4s, rd8x8s;
+ int d16x16, r16x16, r16x16s, s16x16;
+ int64_t rd16x16, rd16x16s;
+
+ // FIXME don't do sub x3
+ if (skip_prob == 0)
+ skip_prob = 1;
+ s0 = vp8_cost_bit(skip_prob, 0);
+ s1 = vp8_cost_bit(skip_prob, 1);
+ macro_block_yrd_16x16(x, &r16x16, &d16x16, IF_RTCD(&cpi->rtcd), &s16x16);
+ if (can_skip) {
+ if (s16x16) {
+ rd16x16 = RDCOST(x->rdmult, x->rddiv, s1, d16x16);
+ } else {
+ rd16x16 = RDCOST(x->rdmult, x->rddiv, r16x16 + s0, d16x16);
+ }
+ } else {
+ rd16x16 = RDCOST(x->rdmult, x->rddiv, r16x16, d16x16);
+ }
+ r16x16s = r16x16 + vp8_cost_one(cm->prob_tx[0]) + vp8_cost_one(cm->prob_tx[1]);
+ if (can_skip) {
+ if (s16x16) {
+ rd16x16s = RDCOST(x->rdmult, x->rddiv, s1, d16x16);
+ } else {
+ rd16x16s = RDCOST(x->rdmult, x->rddiv, r16x16s + s0, d16x16);
+ }
+ } else {
+ rd16x16s = RDCOST(x->rdmult, x->rddiv, r16x16s, d16x16);
+ }
+ macro_block_yrd_8x8(x, &r8x8, &d8x8, IF_RTCD(&cpi->rtcd), &s8x8);
+ if (can_skip) {
+ if (s8x8) {
+ rd8x8 = RDCOST(x->rdmult, x->rddiv, s1, d8x8);
+ } else {
+ rd8x8 = RDCOST(x->rdmult, x->rddiv, r8x8 + s0, d8x8);
+ }
+ } else {
+ rd8x8 = RDCOST(x->rdmult, x->rddiv, r8x8, d8x8);
+ }
+ r8x8s = r8x8 + vp8_cost_one(cm->prob_tx[0]);
+ r8x8s += vp8_cost_zero(cm->prob_tx[1]);
+ if (can_skip) {
+ if (s8x8) {
+ rd8x8s = RDCOST(x->rdmult, x->rddiv, s1, d8x8);
+ } else {
+ rd8x8s = RDCOST(x->rdmult, x->rddiv, r8x8s + s0, d8x8);
+ }
+ } else {
+ rd8x8s = RDCOST(x->rdmult, x->rddiv, r8x8s, d8x8);
+ }
+ macro_block_yrd_4x4(x, &r4x4, &d4x4, IF_RTCD(&cpi->rtcd), &s4x4);
+ if (can_skip) {
+ if (s4x4) {
+ rd4x4 = RDCOST(x->rdmult, x->rddiv, s1, d4x4);
+ } else {
+ rd4x4 = RDCOST(x->rdmult, x->rddiv, r4x4 + s0, d4x4);
+ }
+ } else {
+ rd4x4 = RDCOST(x->rdmult, x->rddiv, r4x4, d4x4);
+ }
+ r4x4s = r4x4 + vp8_cost_zero(cm->prob_tx[0]);
+ if (can_skip) {
+ if (s4x4) {
+ rd4x4s = RDCOST(x->rdmult, x->rddiv, s1, d4x4);
+ } else {
+ rd4x4s = RDCOST(x->rdmult, x->rddiv, r4x4s + s0, d4x4);
+ }
+ } else {
+ rd4x4s = RDCOST(x->rdmult, x->rddiv, r4x4s, d4x4);
+ }
+
+ if ( cpi->common.txfm_mode == ALLOW_16X16 ||
+ (cpi->common.txfm_mode == TX_MODE_SELECT &&
+ rd16x16s < rd8x8s && rd16x16s < rd4x4s)) {
+ mbmi->txfm_size = TX_16X16;
+ *skippable = s16x16;
+ *distortion = d16x16;
+ *rate = (cpi->common.txfm_mode == ALLOW_16X16) ? r16x16 : r16x16s;
+ } else
+ if ( cpi->common.txfm_mode == ALLOW_8X8 ||
+ (cpi->common.txfm_mode == TX_MODE_SELECT && rd8x8s < rd4x4s)) {
+ mbmi->txfm_size = TX_8X8;
+ *skippable = s8x8;
+ *distortion = d8x8;
+ *rate = (cpi->common.txfm_mode == ALLOW_8X8) ? r8x8 : r8x8s;
+ } else {
+ assert(cpi->common.txfm_mode == ONLY_4X4 ||
+ (cpi->common.txfm_mode == TX_MODE_SELECT && rd4x4s <= rd8x8s));
+ mbmi->txfm_size = TX_4X4;
+ *skippable = s4x4;
+ *distortion = d4x4;
+ *rate = (cpi->common.txfm_mode == ONLY_4X4) ? r4x4 : r4x4s;
+ }
+
+ txfm_cache[ONLY_4X4] = rd4x4;
+ txfm_cache[ALLOW_8X8] = rd8x8;
+ txfm_cache[ALLOW_16X16] = rd16x16;
+ if (rd16x16s < rd8x8s && rd16x16s < rd4x4s)
+ txfm_cache[TX_MODE_SELECT] = rd16x16s;
+ else
+ txfm_cache[TX_MODE_SELECT] = rd4x4s < rd8x8s ? rd4x4s : rd8x8s;
+
+#else /* CONFIG_TX_SELECT */
+
+ switch (cpi->common.txfm_mode) {
+ case ALLOW_16X16:
+ macro_block_yrd_16x16(x, rate, distortion, IF_RTCD(&cpi->rtcd), skippable);
+ mbmi->txfm_size = TX_16X16;
+ break;
+ case ALLOW_8X8:
+ macro_block_yrd_8x8(x, rate, distortion, IF_RTCD(&cpi->rtcd), skippable);
+ mbmi->txfm_size = TX_8X8;
+ break;
+ default:
+ case ONLY_4X4:
+ macro_block_yrd_4x4(x, rate, distortion, IF_RTCD(&cpi->rtcd), skippable);
+ mbmi->txfm_size = TX_4X4;
+ break;
+ }
+
+#endif /* CONFIG_TX_SELECT */
}
-#endif
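
(Once each size's rate carries its transform-signalling bits, the TX_MODE_SELECT branch above reduces to a three-way comparison. A compact restatement of the decision rule, sketch only; ties between 4x4 and 8x8 go to 4x4, matching the assert above.)

  static TX_SIZE pick_txfm_size(int64_t rd4x4s, int64_t rd8x8s,
                                int64_t rd16x16s) {
    if (rd16x16s < rd8x8s && rd16x16s < rd4x4s)
      return TX_16X16;                  /* 16x16 strictly beats both    */
    return (rd8x8s < rd4x4s) ? TX_8X8   /* otherwise the cheaper of the */
                             : TX_4X4;  /* two, ties resolved to 4x4    */
  }
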
static void copy_predictor(unsigned char *dst, const unsigned char *predictor) {
const unsigned int *p = (const unsigned int *)predictor;
@@ -1050,17 +1182,16 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
// Do we need to do this for mode2 also?
if (mode == B_LD_PRED || mode == B_VL_PRED)
continue;
+ b->bmi.as_mode.first = mode;
rate = bmode_costs[mode];
#if CONFIG_COMP_INTRA_PRED
if (mode2 == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
- RECON_INVOKE(&cpi->rtcd.common->recon, intra4x4_predict)
- (b, mode, b->predictor);
+ vp8_intra4x4_predict(b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
- RECON_INVOKE(&cpi->rtcd.common->recon, comp_intra4x4_predict)
- (b, mode, mode2, b->predictor);
+ vp8_comp_intra4x4_predict(b, mode, mode2, b->predictor);
rate += bmode_costs[mode2];
}
#endif
@@ -1068,17 +1199,16 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
#if CONFIG_HYBRIDTRANSFORM
if (active_ht) {
- b->bmi.as_mode.test = mode;
txfm_map(b, mode);
vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
- vp8_ht_quantize_b(be, b);
+ vp8_ht_quantize_b_4x4(be, b);
} else {
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b(be, b);
+ x->quantize_b_4x4(be, b);
}
#else
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b(be, b);
+ x->quantize_b_4x4(be, b);
#endif
tempa = ta;
@@ -1131,7 +1261,7 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(best_dqcoeff, b->diff, 32);
#endif
- RECON_INVOKE(IF_RTCD(&cpi->rtcd.common->recon), recon)(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+ vp8_recon_b(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
return best_rd;
}
@@ -1168,6 +1298,7 @@ static int64_t rd_pick_intra4x4mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, int *Rat
// TODO(agrange)
// vp8_intra_prediction_down_copy(xd);
+ xd->mode_info_context->mbmi.mode = B_PRED;
bmode_costs = mb->inter_bmode_costs;
for (i = 0; i < 16; i++) {
@@ -1236,8 +1367,7 @@ static int64_t rd_pick_intra_sby_mode(VP8_COMP *cpi,
/* Y Search for 32x32 intra prediction mode */
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
x->e_mbd.mode_info_context->mbmi.mode = mode;
- RECON_INVOKE(&cpi->common.rtcd.recon,
- build_intra_predictors_sby_s)(&x->e_mbd);
+ vp8_build_intra_predictors_sby_s(&x->e_mbd);
super_block_yrd_8x8(x, &this_rate_tokenonly,
&this_distortion, IF_RTCD(&cpi->rtcd), &s);
@@ -1267,8 +1397,10 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
int *Rate,
int *rate_y,
int *Distortion,
- int *skippable) {
+ int *skippable,
+ int64_t txfm_cache[NB_TXFM_MODES]) {
MB_PREDICTION_MODE mode;
+ TX_SIZE txfm_size;
MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
#if CONFIG_COMP_INTRA_PRED
MB_PREDICTION_MODE mode2;
@@ -1276,43 +1408,41 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
#endif
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
int rate, ratey;
- int distortion;
+ int distortion, skip;
int64_t best_rd = INT64_MAX;
int64_t this_rd;
- int UNINITIALIZED_IS_SAFE(skip);
MACROBLOCKD *xd = &x->e_mbd;
#if CONFIG_HYBRIDTRANSFORM16X16
int best_txtype, rd_txtype;
#endif
+#if CONFIG_TX_SELECT
+ int i;
+ for (i = 0; i < NB_TXFM_MODES; i++)
+ txfm_cache[i] = INT64_MAX;
+#endif
// Y Search for 16x16 intra prediction mode
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
+ int64_t local_txfm_cache[NB_TXFM_MODES];
+
mbmi->mode = mode;
-#if CONFIG_HYBRIDTRANSFORM16X16
- mbmi->mode_rdopt = mode;
-#endif
#if CONFIG_COMP_INTRA_PRED
for (mode2 = DC_PRED - 1; mode2 != TM_PRED + 1; mode2++) {
mbmi->second_mode = mode2;
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
- RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
- (&x->e_mbd);
+ vp8_build_intra_predictors_mby(&x->e_mbd);
#if CONFIG_COMP_INTRA_PRED
} else {
continue; // i.e. disable for now
- RECON_INVOKE(&cpi->common.rtcd.recon, build_comp_intra_predictors_mby)
- (&x->e_mbd);
+ vp8_build_comp_intra_predictors_mby(&x->e_mbd);
}
#endif
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- macro_block_yrd_16x16(x, &ratey, &distortion, IF_RTCD(&cpi->rtcd));
-#else
- macro_block_yrd_8x8(x, &ratey, &distortion, IF_RTCD(&cpi->rtcd));
-#endif
+ macro_block_yrd(cpi, x, &ratey, &distortion, &skip, local_txfm_cache);
+
// FIXME add compoundmode cost
// FIXME add rate for mode2
rate = ratey + x->mbmode_cost[x->e_mbd.frame_type][mbmi->mode];
@@ -1324,12 +1454,8 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
#endif
if (this_rd < best_rd) {
-#if CONFIG_TX16X16
- skip = mby_is_skippable_16x16(xd);
-#else
- skip = mby_is_skippable_8x8(xd, 1);
-#endif
mode_selected = mode;
+ txfm_size = mbmi->txfm_size;
#if CONFIG_COMP_INTRA_PRED
mode2_selected = mode2;
#endif
@@ -1340,13 +1466,25 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
#if CONFIG_HYBRIDTRANSFORM16X16
best_txtype = rd_txtype;
#endif
+ *skippable = skip;
+ }
+
+#if CONFIG_TX_SELECT
+ for (i = 0; i < NB_TXFM_MODES; i++) {
+ int64_t adj_rd = this_rd + local_txfm_cache[i] -
+ local_txfm_cache[cpi->common.txfm_mode];
+ if (adj_rd < txfm_cache[i]) {
+ txfm_cache[i] = adj_rd;
+ }
}
+#endif
+
#if CONFIG_COMP_INTRA_PRED
}
#endif
}
- *skippable = skip;
+ mbmi->txfm_size = txfm_size;
mbmi->mode = mode_selected;
#if CONFIG_HYBRIDTRANSFORM16X16
x->e_mbd.block[0].bmi.as_mode.tx_type = best_txtype;
@@ -1405,42 +1543,73 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
#if CONFIG_COMP_INTRA_PRED
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
- RECON_INVOKE(&cpi->rtcd.common->recon, intra8x8_predict)
- (b, mode, b->predictor);
+ vp8_intra8x8_predict(b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
continue; // i.e. disable for now
- RECON_INVOKE(&cpi->rtcd.common->recon, comp_intra8x8_predict)
- (b, mode, mode2, b->predictor);
+ vp8_comp_intra8x8_predict(b, mode, mode2, b->predictor);
}
#endif
vp8_subtract_4b_c(be, b, 16);
+ if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
#if CONFIG_HYBRIDTRANSFORM8X8
- txfm_map(b, pred_mode_conv(mode));
- vp8_fht_c(be->src_diff, (x->block + idx)->coeff, 32,
- b->bmi.as_mode.tx_type, 8);
+ txfm_map(b, pred_mode_conv(mode));
+ vp8_fht_c(be->src_diff, (x->block + idx)->coeff, 32,
+ b->bmi.as_mode.tx_type, 8);
#else
- x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
+ x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
#endif
+ x->quantize_b_8x8(x->block + idx, xd->block + idx);
- x->quantize_b_8x8(x->block + idx, xd->block + idx);
+ // compute quantization mse of 8x8 block
+ distortion = vp8_block_error_c((x->block + idx)->coeff,
+ (xd->block + idx)->dqcoeff, 64);
+ ta0 = *(a + vp8_block2above_8x8[idx]);
+ tl0 = *(l + vp8_block2left_8x8 [idx]);
- // compute quantization mse of 8x8 block
- distortion = vp8_block_error_c((x->block + idx)->coeff,
- (xd->block + idx)->dqcoeff, 64)>>2;
- ta0 = *(a + vp8_block2above_8x8[idx]);
- tl0 = *(l + vp8_block2left_8x8 [idx]);
+ rate_t = cost_coeffs(x, xd->block + idx, PLANE_TYPE_Y_WITH_DC,
+ &ta0, &tl0, TX_8X8);
- rate_t = cost_coeffs(x, xd->block + idx, PLANE_TYPE_Y_WITH_DC,
- &ta0, &tl0, TX_8X8);
-
- rate += rate_t;
- ta1 = ta0;
- tl1 = tl0;
+ rate += rate_t;
+ ta1 = ta0;
+ tl1 = tl0;
+ } else {
+ x->vp8_short_fdct8x4(be->src_diff, be->coeff, 32);
+ x->vp8_short_fdct8x4((be + 4)->src_diff, (be + 4)->coeff, 32);
+
+ x->quantize_b_4x4_pair(x->block + ib, x->block + ib + 1,
+ xd->block + ib, xd->block + ib + 1);
+ x->quantize_b_4x4_pair(x->block + ib + 4, x->block + ib + 5,
+ xd->block + ib + 4, xd->block + ib + 5);
+
+ distortion = vp8_block_error_c((x->block + ib)->coeff,
+ (xd->block + ib)->dqcoeff, 16);
+ distortion += vp8_block_error_c((x->block + ib + 1)->coeff,
+ (xd->block + ib + 1)->dqcoeff, 16);
+ distortion += vp8_block_error_c((x->block + ib + 4)->coeff,
+ (xd->block + ib + 4)->dqcoeff, 16);
+ distortion += vp8_block_error_c((x->block + ib + 5)->coeff,
+ (xd->block + ib + 5)->dqcoeff, 16);
+
+ ta0 = *(a + vp8_block2above[ib]);
+ ta1 = *(a + vp8_block2above[ib + 1]);
+ tl0 = *(l + vp8_block2above[ib]);
+ tl1 = *(l + vp8_block2above[ib + 4]);
+ rate_t = cost_coeffs(x, xd->block + ib, PLANE_TYPE_Y_WITH_DC,
+ &ta0, &tl0, TX_4X4);
+ rate_t += cost_coeffs(x, xd->block + ib + 1, PLANE_TYPE_Y_WITH_DC,
+ &ta1, &tl0, TX_4X4);
+ rate_t += cost_coeffs(x, xd->block + ib + 4, PLANE_TYPE_Y_WITH_DC,
+ &ta0, &tl1, TX_4X4);
+ rate_t += cost_coeffs(x, xd->block + ib + 5, PLANE_TYPE_Y_WITH_DC,
+ &ta1, &tl1, TX_4X4);
+ rate += rate_t;
+ }
+ distortion >>= 2;
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
if (this_rd < best_rd) {
*bestrate = rate;
@@ -1469,17 +1638,18 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
#endif
vp8_encode_intra8x8(IF_RTCD(&cpi->rtcd), x, ib);
-#if CONFIG_HYBRIDTRANSFORM8X8
- *(a + vp8_block2above_8x8[idx]) = besta0;
- *(a + vp8_block2above_8x8[idx] + 1) = besta1;
- *(l + vp8_block2left_8x8 [idx]) = bestl0;
- *(l + vp8_block2left_8x8 [idx] + 1) = bestl1;
-#else
- *(a + vp8_block2above[ib]) = besta0;
- *(a + vp8_block2above[ib + 1]) = besta1;
- *(l + vp8_block2above[ib]) = bestl0;
- *(l + vp8_block2above[ib + 4]) = bestl1;
-#endif
+ if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
+ *(a + vp8_block2above_8x8[idx]) = besta0;
+ *(a + vp8_block2above_8x8[idx] + 1) = besta1;
+ *(l + vp8_block2left_8x8 [idx]) = bestl0;
+ *(l + vp8_block2left_8x8 [idx] + 1) = bestl1;
+ } else {
+ *(a + vp8_block2above[ib]) = besta0;
+ *(a + vp8_block2above[ib + 1]) = besta1;
+ *(l + vp8_block2above[ib]) = bestl0;
+ *(l + vp8_block2above[ib + 4]) = bestl1;
+ }
+
return best_rd;
}
@@ -1502,6 +1672,7 @@ int64_t rd_pick_intra8x8mby_modes(VP8_COMP *cpi, MACROBLOCK *mb,
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
+ xd->mode_info_context->mbmi.mode = I8X8_PRED;
i8x8mode_costs = mb->i8x8_mode_costs;
for (i = 0; i < 4; i++) {
@@ -1556,15 +1727,19 @@ static int rd_cost_mbuv(MACROBLOCK *mb) {
static int64_t rd_inter16x16_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
- int *distortion, int fullpixel) {
+ int *distortion, int fullpixel, int *skip) {
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
+ x->src.u_buffer,
+ x->src.v_buffer,
+ x->e_mbd.predictor,
+ x->src.uv_stride);
- vp8_transform_mbuv(x);
- vp8_quantize_mbuv(x);
+ vp8_transform_mbuv_4x4(x);
+ vp8_quantize_mbuv_4x4(x);
*rate = rd_cost_mbuv(x);
*distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ *skip = mbuv_is_skippable_4x4(&x->e_mbd);
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
@@ -1645,16 +1820,19 @@ static int64_t rd_inter32x32_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
#endif
static int64_t rd_inter16x16_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
- int *distortion, int fullpixel) {
+ int *distortion, int fullpixel, int *skip) {
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
+ x->src.u_buffer,
+ x->src.v_buffer,
+ x->e_mbd.predictor,
+ x->src.uv_stride);
vp8_transform_mbuv_8x8(x);
-
vp8_quantize_mbuv_8x8(x);
*rate = rd_cost_mbuv_8x8(x, 1);
*distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ *skip = mbuv_is_skippable_8x8(&x->e_mbd);
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
@@ -1666,8 +1844,8 @@ static int64_t rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
- vp8_transform_mbuv(x);
- vp8_quantize_mbuv(x);
+ vp8_transform_mbuv_4x4(x);
+ vp8_quantize_mbuv_4x4(x);
*rate = rd_cost_mbuv(x);
*distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
@@ -1706,21 +1884,19 @@ static void rd_pick_intra_mbuv_mode(VP8_COMP *cpi,
mbmi->second_uv_mode = mode2;
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
- RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mbuv)
- (&x->e_mbd);
+ vp8_build_intra_predictors_mbuv(&x->e_mbd);
#if CONFIG_COMP_INTRA_PRED
} else {
continue;
- RECON_INVOKE(&cpi->rtcd.common->recon, build_comp_intra_predictors_mbuv)
- (&x->e_mbd);
+ vp8_build_comp_intra_predictors_mbuv(&x->e_mbd);
}
#endif
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
x->src.uv_stride);
- vp8_transform_mbuv(x);
- vp8_quantize_mbuv(x);
+ vp8_transform_mbuv_4x4(x);
+ vp8_quantize_mbuv_4x4(x);
rate_to = rd_cost_mbuv(x);
rate = rate_to
@@ -1731,7 +1907,7 @@ static void rd_pick_intra_mbuv_mode(VP8_COMP *cpi,
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
if (this_rd < best_rd) {
- skip = mbuv_is_skippable(xd);
+ skip = mbuv_is_skippable_4x4(xd);
best_rd = this_rd;
d = distortion;
r = rate;
@@ -1774,8 +1950,7 @@ static void rd_pick_intra_mbuv_mode_8x8(VP8_COMP *cpi,
int64_t this_rd;
mbmi->uv_mode = mode;
- RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mbuv)
- (&x->e_mbd);
+ vp8_build_intra_predictors_mbuv(&x->e_mbd);
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
x->src.uv_stride);
@@ -1868,8 +2043,7 @@ static int64_t rd_pick_intra_sbuv_mode(VP8_COMP *cpi,
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
- RECON_INVOKE(&cpi->rtcd.common->recon,
- build_intra_predictors_sbuv_s)(&x->e_mbd);
+ vp8_build_intra_predictors_sbuv_s(&x->e_mbd);
super_block_uvrd_8x8(x, &this_rate_tokenonly,
&this_distortion, IF_RTCD(&cpi->rtcd), &s);
@@ -2025,30 +2199,19 @@ static int labels2mode(
return cost;
}
-static int rdcost_mbsegment_y(MACROBLOCK *mb, const int *labels,
- int which_label, ENTROPY_CONTEXT *ta,
- ENTROPY_CONTEXT *tl) {
- int b, cost = 0;
- MACROBLOCKD *xd = &mb->e_mbd;
-
- for (b = 0; b < 16; b++)
- if (labels[ b] == which_label)
- cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_Y_WITH_DC,
- ta + vp8_block2above[b],
- tl + vp8_block2left[b], TX_4X4);
-
- return cost;
-
-}
-
-static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x,
- int const *labels,
- int which_label,
- const VP8_ENCODER_RTCD *rtcd) {
+static int64_t encode_inter_mb_segment(MACROBLOCK *x,
+ int const *labels,
+ int which_label,
+ int *labelyrate,
+ int *distortion,
+ ENTROPY_CONTEXT *ta,
+ ENTROPY_CONTEXT *tl,
+ const VP8_ENCODER_RTCD *rtcd) {
int i;
- unsigned int distortion = 0;
MACROBLOCKD *xd = &x->e_mbd;
+ *labelyrate = 0;
+ *distortion = 0;
for (i = 0; i < 16; i++) {
if (labels[i] == which_label) {
BLOCKD *bd = &x->e_mbd.block[i];
@@ -2060,18 +2223,65 @@ static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x,
vp8_build_2nd_inter_predictors_b(bd, 16, xd->subpixel_predict_avg);
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, bd, 16);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
-
- // set to 0 no way to account for 2nd order DC so discount
- // be->coeff[0] = 0;
- x->quantize_b(be, bd);
- thisdistortion = ENCODEMB_INVOKE(&rtcd->encodemb, berr)(
- be->coeff, bd->dqcoeff, 16) / 4;
- distortion += thisdistortion;
+ x->quantize_b_4x4(be, bd);
+ thisdistortion = vp8_block_error_c(be->coeff, bd->dqcoeff, 16);
+ *distortion += thisdistortion;
+ *labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
+ ta + vp8_block2above[i],
+ tl + vp8_block2left[i], TX_4X4);
}
}
- return distortion;
+ *distortion >>= 2;
+ return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
}
+static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
+ int const *labels,
+ int which_label,
+ int *labelyrate,
+ int *distortion,
+ ENTROPY_CONTEXT *ta,
+ ENTROPY_CONTEXT *tl,
+ const VP8_ENCODER_RTCD *rtcd) {
+ int i, j;
+ MACROBLOCKD *xd = &x->e_mbd;
+ const int iblock[4] = { 0, 1, 4, 5 };
+
+ *distortion = 0;
+ *labelyrate = 0;
+ for (i = 0; i < 4; i++) {
+ int ib = vp8_i8x8_block[i];
+
+ if (labels[ib] == which_label) {
+ BLOCKD *bd = &xd->block[ib];
+ BLOCK *be = &x->block[ib];
+ int thisdistortion;
+
+ vp8_build_inter_predictors4b(xd, bd, 16);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ vp8_build_2nd_inter_predictors4b(xd, bd, 16);
+ vp8_subtract_4b_c(be, bd, 16);
+
+ for (j = 0; j < 4; j += 2) {
+ bd = &xd->block[ib + iblock[j]];
+ be = &x->block[ib + iblock[j]];
+ x->vp8_short_fdct8x4(be->src_diff, be->coeff, 32);
+ x->quantize_b_4x4_pair(be, be + 1, bd, bd + 1);
+ thisdistortion = vp8_block_error_c(be->coeff, bd->dqcoeff, 32);
+ *distortion += thisdistortion;
+ *labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
+ ta + vp8_block2above[ib + iblock[j]],
+ tl + vp8_block2left[ib + iblock[j]], TX_4X4);
+ *labelyrate += cost_coeffs(x, bd + 1, PLANE_TYPE_Y_WITH_DC,
+ ta + vp8_block2above[ib + iblock[j] + 1],
+ tl + vp8_block2left[ib + iblock[j]],
+ TX_4X4);
+ }
+ }
+ }
+ *distortion >>= 2;
+ return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
+}
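
(Both segment encoders above share one contract: accumulate token cost and raw SSE over the blocks carrying which_label, scale the SSE by 1/4 to match the rest of the RD code, and return the combined cost. As a sketch, using the RDCOST macro from this file:)

  static int64_t label_rd(MACROBLOCK *x, int labelyrate, int raw_sse) {
    int distortion = raw_sse >> 2;   /* same scaling as macro_block_yrd_* */
    return RDCOST(x->rdmult, x->rddiv, labelyrate, distortion);
  }
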
static const unsigned int segmentation_to_sseshift[4] = {3, 3, 2, 0};
@@ -2280,15 +2490,18 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
mv_check_bounds(x, &second_mode_mv[this_mode]))
continue;
- distortion = vp8_encode_inter_mb_segment(
- x, labels, i,
- IF_RTCD(&cpi->rtcd));
-
- labelyrate = rdcost_mbsegment_y(x, labels, i, ta_s, tl_s);
+ if (segmentation == BLOCK_4X4) {
+ this_rd = encode_inter_mb_segment(x, labels, i, &labelyrate,
+ &distortion,
+ ta_s, tl_s, IF_RTCD(&cpi->rtcd));
+ } else {
+ this_rd = encode_inter_mb_segment_8x8(x, labels, i, &labelyrate,
+ &distortion, ta_s, tl_s,
+ IF_RTCD(&cpi->rtcd));
+ }
+ this_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
rate += labelyrate;
- this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
-
if (this_rd < best_label_rd) {
sbr = rate;
sbd = distortion;
@@ -2981,8 +3194,12 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
int_mv *second_ref_mv,
int single_pred_diff,
int comp_pred_diff,
- int hybrid_pred_diff) {
+ int hybrid_pred_diff,
+ int64_t txfm_size_diff[NB_TXFM_MODES]) {
MACROBLOCKD *xd = &x->e_mbd;
+#if CONFIG_TX_SELECT
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+#endif
// Take a snapshot of the coding context so it can be
// restored if we decide to encode this way
@@ -3001,46 +3218,34 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
ctx->single_pred_diff = single_pred_diff;
ctx->comp_pred_diff = comp_pred_diff;
ctx->hybrid_pred_diff = hybrid_pred_diff;
+
+#if CONFIG_TX_SELECT
+ memcpy(ctx->txfm_rd_diff, txfm_size_diff, sizeof(ctx->txfm_rd_diff));
+#endif
}
static void inter_mode_cost(VP8_COMP *cpi, MACROBLOCK *x, int this_mode,
int *rate2, int *distortion2, int *rate_y,
- int *distortion, int* rate_uv, int *distortion_uv) {
+ int *distortion, int* rate_uv, int *distortion_uv,
+ int *skippable, int64_t txfm_cache[NB_TXFM_MODES]) {
+ int y_skippable, uv_skippable;
+
// Y cost and distortion
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (this_mode == ZEROMV ||
- this_mode == NEARESTMV ||
- this_mode == NEARMV ||
- this_mode == NEWMV)
- macro_block_yrd_16x16(x, rate_y, distortion, IF_RTCD(&cpi->rtcd));
- else {
-#endif
- if (cpi->common.txfm_mode == ALLOW_8X8)
- macro_block_yrd_8x8(x, rate_y, distortion, IF_RTCD(&cpi->rtcd));
- else
- macro_block_yrd(x, rate_y, distortion, IF_RTCD(&cpi->rtcd));
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- }
-#endif
+ macro_block_yrd(cpi, x, rate_y, distortion, &y_skippable, txfm_cache);
*rate2 += *rate_y;
*distortion2 += *distortion;
// UV cost and distortion
- if (cpi->common.txfm_mode == ALLOW_8X8
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- || this_mode == ZEROMV ||
- this_mode == NEARESTMV ||
- this_mode == NEARMV ||
- this_mode == NEWMV
-#endif
- )
+ if (x->e_mbd.mode_info_context->mbmi.txfm_size != TX_4X4)
rd_inter16x16_uv_8x8(cpi, x, rate_uv, distortion_uv,
- cpi->common.full_pixel);
+ cpi->common.full_pixel, &uv_skippable);
else
- rd_inter16x16_uv(cpi, x, rate_uv, distortion_uv, cpi->common.full_pixel);
+ rd_inter16x16_uv(cpi, x, rate_uv, distortion_uv, cpi->common.full_pixel,
+ &uv_skippable);
*rate2 += *rate_uv;
*distortion2 += *distortion_uv;
+ *skippable = y_skippable && uv_skippable;
}
#define MIN(x,y) (((x)<(y))?(x):(y))
@@ -3111,6 +3316,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
int mdcounts[4];
int rate, distortion;
int rate2, distortion2;
+ int64_t best_txfm_rd[NB_TXFM_MODES];
+ int64_t best_txfm_diff[NB_TXFM_MODES];
int64_t best_pred_diff[NB_PREDICTION_TYPES];
int64_t best_pred_rd[NB_PREDICTION_TYPES];
int64_t best_rd = INT64_MAX, best_intra_rd = INT64_MAX;
@@ -3165,6 +3372,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
frame_mv[NEWMV][i].as_int = INVALID_MV;
for (i = 0; i < NB_PREDICTION_TYPES; ++i)
best_pred_rd[i] = INT64_MAX;
+ for (i = 0; i < NB_TXFM_MODES; i++)
+ best_txfm_rd[i] = INT64_MAX;
for (i = 0; i < BLOCK_MAX_SEGMENTS - 1; i++) {
int j, k;
@@ -3220,7 +3429,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
uv_intra_mode = mbmi->uv_mode;
/* rough estimate for now */
- if (cpi->common.txfm_mode == ALLOW_8X8) {
+ if (cpi->common.txfm_mode != ONLY_4X4) {
rd_pick_intra_mbuv_mode_8x8(cpi, x, &uv_intra_rate_8x8,
&uv_intra_rate_tokenonly_8x8,
&uv_intra_distortion_8x8,
@@ -3240,10 +3449,11 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
#endif
int64_t this_rd = INT64_MAX;
int is_comp_pred;
- int disable_skip = 0;
+ int disable_skip = 0, skippable = 0;
int other_cost = 0;
int compmode_cost = 0;
int mode_excluded = 0;
+ int64_t txfm_cache[NB_TXFM_MODES] = { 0 };
// These variables hold the rolling total cost and distortion for this mode
rate2 = 0;
@@ -3283,11 +3493,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
continue;
// current coding mode under rate-distortion optimization test loop
-#if CONFIG_HYBRIDTRANSFORM
- mbmi->mode_rdopt = this_mode;
-#endif
-
-
#if CONFIG_COMP_INTRA_PRED
mbmi->second_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
mbmi->second_uv_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
@@ -3378,52 +3583,34 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
case D63_PRED:
mbmi->ref_frame = INTRA_FRAME;
// FIXME compound intra prediction
- RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
- (&x->e_mbd);
-
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- // FIXME: breaks lossless since 4x4 isn't allowed
- macro_block_yrd_16x16(x, &rate_y, &distortion,
- IF_RTCD(&cpi->rtcd));
+ vp8_build_intra_predictors_mby(&x->e_mbd);
+ macro_block_yrd(cpi, x, &rate_y, &distortion, &skippable, txfm_cache);
#if CONFIG_HYBRIDTRANSFORM16X16
rd_txtype = x->e_mbd.block[0].bmi.as_mode.tx_type;
#endif
rate2 += rate_y;
distortion2 += distortion;
rate2 += x->mbmode_cost[x->e_mbd.frame_type][mbmi->mode];
- rate2 += uv_intra_rate_8x8;
- rate_uv = uv_intra_rate_tokenonly_8x8;
- distortion2 += uv_intra_distortion_8x8;
- distortion_uv = uv_intra_distortion_8x8;
- break;
-#else
- if (cpi->common.txfm_mode == ALLOW_8X8)
- macro_block_yrd_8x8(x, &rate_y, &distortion,
- IF_RTCD(&cpi->rtcd));
- else
- macro_block_yrd(x, &rate_y, &distortion,
- IF_RTCD(&cpi->rtcd));
- rate2 += rate_y;
- distortion2 += distortion;
- rate2 += x->mbmode_cost[x->e_mbd.frame_type][mbmi->mode];
- if (cpi->common.txfm_mode == ALLOW_8X8) {
+ if (mbmi->txfm_size != TX_4X4) {
rate2 += uv_intra_rate_8x8;
rate_uv = uv_intra_rate_tokenonly_8x8;
distortion2 += uv_intra_distortion_8x8;
distortion_uv = uv_intra_distortion_8x8;
+ skippable = skippable && uv_intra_skippable_8x8;
} else {
rate2 += uv_intra_rate;
rate_uv = uv_intra_rate_tokenonly;
distortion2 += uv_intra_distortion;
distortion_uv = uv_intra_distortion;
+ skippable = skippable && uv_intra_skippable;
}
break;
-#endif
case B_PRED: {
int64_t tmp_rd;
// Note the rate value returned here includes the cost of coding
// the BPRED mode : x->mbmode_cost[x->e_mbd.frame_type][BPRED];
+ mbmi->txfm_size = TX_4X4;
tmp_rd = rd_pick_intra4x4mby_modes(cpi, x, &rate, &rate_y, &distortion, best_yrd,
#if CONFIG_COMP_INTRA_PRED
0,
@@ -3444,12 +3631,16 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
}
break;
case I8X8_PRED: {
- int64_t tmp_rd;
- tmp_rd = rd_pick_intra8x8mby_modes(cpi, x, &rate, &rate_y,
- &distortion, best_yrd);
- rate2 += rate;
- distortion2 += distortion;
-
+#if CONFIG_TX_SELECT
+ int cost0 = vp8_cost_bit(cm->prob_tx[0], 0);
+ int cost1 = vp8_cost_bit(cm->prob_tx[0], 1);
+ int64_t tmp_rd_4x4s, tmp_rd_8x8s;
+#endif
+ int64_t tmp_rd_4x4, tmp_rd_8x8, tmp_rd;
+ int r4x4, tok4x4, d4x4, r8x8, tok8x8, d8x8;
+ mbmi->txfm_size = TX_4X4;
+ tmp_rd_4x4 = rd_pick_intra8x8mby_modes(cpi, x, &r4x4, &tok4x4,
+ &d4x4, best_yrd);
mode8x8[0][0] = x->e_mbd.mode_info_context->bmi[0].as_mode.first;
mode8x8[0][1] = x->e_mbd.mode_info_context->bmi[2].as_mode.first;
mode8x8[0][2] = x->e_mbd.mode_info_context->bmi[8].as_mode.first;
@@ -3460,6 +3651,70 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mode8x8[1][2] = x->e_mbd.mode_info_context->bmi[8].as_mode.second;
mode8x8[1][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.second;
#endif
+ mbmi->txfm_size = TX_8X8;
+ tmp_rd_8x8 = rd_pick_intra8x8mby_modes(cpi, x, &r8x8, &tok8x8,
+ &d8x8, best_yrd);
+ txfm_cache[ONLY_4X4] = tmp_rd_4x4;
+ txfm_cache[ALLOW_8X8] = tmp_rd_8x8;
+ txfm_cache[ALLOW_16X16] = tmp_rd_8x8;
+#if CONFIG_TX_SELECT
+ tmp_rd_4x4s = tmp_rd_4x4 + RDCOST(x->rdmult, x->rddiv, cost0, 0);
+ tmp_rd_8x8s = tmp_rd_8x8 + RDCOST(x->rdmult, x->rddiv, cost1, 0);
+ txfm_cache[TX_MODE_SELECT] = tmp_rd_4x4s < tmp_rd_8x8s ? tmp_rd_4x4s : tmp_rd_8x8s;
+ if (cm->txfm_mode == TX_MODE_SELECT) {
+ if (tmp_rd_4x4s < tmp_rd_8x8s) {
+ rate = r4x4 + cost0;
+ rate_y = tok4x4 + cost0;
+ distortion = d4x4;
+ mbmi->txfm_size = TX_4X4;
+ tmp_rd = tmp_rd_4x4s;
+ } else {
+ rate = r8x8 + cost1;
+ rate_y = tok8x8 + cost1;
+ distortion = d8x8;
+ mbmi->txfm_size = TX_8X8;
+ tmp_rd = tmp_rd_8x8s;
+
+ mode8x8[0][0] = x->e_mbd.mode_info_context->bmi[0].as_mode.first;
+ mode8x8[0][1] = x->e_mbd.mode_info_context->bmi[2].as_mode.first;
+ mode8x8[0][2] = x->e_mbd.mode_info_context->bmi[8].as_mode.first;
+ mode8x8[0][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.first;
+#if CONFIG_COMP_INTRA_PRED
+ mode8x8[1][0] = x->e_mbd.mode_info_context->bmi[0].as_mode.second;
+ mode8x8[1][1] = x->e_mbd.mode_info_context->bmi[2].as_mode.second;
+ mode8x8[1][2] = x->e_mbd.mode_info_context->bmi[8].as_mode.second;
+ mode8x8[1][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.second;
+#endif
+ }
+ } else
+#endif
+ if (cm->txfm_mode == ONLY_4X4) {
+ rate = r4x4;
+ rate_y = tok4x4;
+ distortion = d4x4;
+ mbmi->txfm_size = TX_4X4;
+ tmp_rd = tmp_rd_4x4;
+ } else {
+ rate = r8x8;
+ rate_y = tok8x8;
+ distortion = d8x8;
+ mbmi->txfm_size = TX_8X8;
+ tmp_rd = tmp_rd_8x8;
+
+ mode8x8[0][0] = x->e_mbd.mode_info_context->bmi[0].as_mode.first;
+ mode8x8[0][1] = x->e_mbd.mode_info_context->bmi[2].as_mode.first;
+ mode8x8[0][2] = x->e_mbd.mode_info_context->bmi[8].as_mode.first;
+ mode8x8[0][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.first;
+#if CONFIG_COMP_INTRA_PRED
+ mode8x8[1][0] = x->e_mbd.mode_info_context->bmi[0].as_mode.second;
+ mode8x8[1][1] = x->e_mbd.mode_info_context->bmi[2].as_mode.second;
+ mode8x8[1][2] = x->e_mbd.mode_info_context->bmi[8].as_mode.second;
+ mode8x8[1][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.second;
+#endif
+ }
+
+ rate2 += rate;
+ distortion2 += distortion;
/* TODO: uv rate may be over-estimated here since there is a UV intra
mode coded in I8X8_PRED prediction */
@@ -3489,6 +3744,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
(mbmi->ref_frame == GOLDEN_FRAME) ?
cpi->rd_threshes[THR_NEWG] : this_rd_thresh;
+ mbmi->txfm_size = TX_4X4; // FIXME use 8x8 in case of 8x8/8x16/16x8
tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv,
second_ref, best_yrd, mdcounts,
&rate, &rate_y, &distortion,
@@ -3524,7 +3780,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
}
else {
const int num_refs = is_comp_pred ? 2 : 1;
- int flag;
+ int flag, skip;
int refs[2] = {x->e_mbd.mode_info_context->mbmi.ref_frame,
x->e_mbd.mode_info_context->mbmi.second_ref_frame};
int_mv cur_mv[2];
@@ -3703,7 +3959,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
&xd->predictor[256],
&xd->predictor[320], 8);
inter_mode_cost(cpi, x, this_mode, &rate2, &distortion2,
- &rate_y, &distortion, &rate_uv, &distortion_uv);
+ &rate_y, &distortion, &rate_uv, &distortion_uv,
+ &skippable, txfm_cache);
if (is_comp_pred)
mode_excluded = cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY;
else
@@ -3723,51 +3980,19 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// necessary adjustment for rate. Ignore if skip is coded at
// segment level as the cost won't have been added in.
if (cpi->common.mb_no_coeff_skip) {
- int mb_skippable;
int mb_skip_allowed;
int has_y2 = (this_mode != SPLITMV
&& this_mode != B_PRED
&& this_mode != I8X8_PRED);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (this_mode <= TM_PRED ||
- this_mode == NEWMV ||
- this_mode == ZEROMV ||
- this_mode == NEARESTMV ||
- this_mode == NEARMV)
- mb_skippable = mb_is_skippable_16x16(&x->e_mbd);
- else
-#endif
- if ((cpi->common.txfm_mode == ALLOW_8X8) && has_y2) {
- if (mbmi->ref_frame != INTRA_FRAME) {
-#if CONFIG_TX16X16
- mb_skippable = mb_is_skippable_16x16(&x->e_mbd);
-#else
- mb_skippable = mb_is_skippable_8x8(&x->e_mbd, has_y2);
-#endif
- } else {
-#if CONFIG_TX16X16
- mb_skippable = uv_intra_skippable_8x8
- & mby_is_skippable_16x16(&x->e_mbd);
-#else
- mb_skippable = uv_intra_skippable_8x8
- & mby_is_skippable_8x8(&x->e_mbd, has_y2);
-#endif
- }
- } else {
- if (mbmi->ref_frame != INTRA_FRAME)
- mb_skippable = mb_is_skippable(&x->e_mbd, has_y2);
- else
- mb_skippable = uv_intra_skippable
- & mby_is_skippable(&x->e_mbd, has_y2);
- }
-
// Is Mb level skip allowed for this mb.
mb_skip_allowed =
!segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
get_segdata(xd, segment_id, SEG_LVL_EOB);
- if (mb_skippable) {
+ if (skippable) {
+ mbmi->mb_skip_coeff = 1;
+
// Back out the coefficient coding costs
rate2 -= (rate_y + rate_uv);
// for best_yrd calculation
@@ -3788,11 +4013,14 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
}
}
// Add in the cost of the no skip flag.
- else if (mb_skip_allowed) {
- int prob_skip_cost = vp8_cost_bit(
- get_pred_prob(cm, &x->e_mbd, PRED_MBSKIP), 0);
- rate2 += prob_skip_cost;
- other_cost += prob_skip_cost;
+ else {
+ mbmi->mb_skip_coeff = 0;
+ if (mb_skip_allowed) {
+ int prob_skip_cost = vp8_cost_bit(
+ get_pred_prob(cm, &x->e_mbd, PRED_MBSKIP), 0);
+ rate2 += prob_skip_cost;
+ other_cost += prob_skip_cost;
+ }
}
}
@@ -3835,7 +4063,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
#endif
if (this_mode <= B_PRED) {
- if (cpi->common.txfm_mode == ALLOW_8X8
+ if (mbmi->txfm_size != TX_4X4
&& this_mode != B_PRED
&& this_mode != I8X8_PRED)
mbmi->uv_mode = uv_intra_mode_8x8;
@@ -3912,6 +4140,20 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
if (hybrid_rd < best_pred_rd[HYBRID_PREDICTION])
best_pred_rd[HYBRID_PREDICTION] = hybrid_rd;
}
+
+ /* keep record of best txfm size */
+ if (!mode_excluded && this_rd != INT64_MAX) {
+ for (i = 0; i < NB_TXFM_MODES; i++) {
+ int64_t adj_rd;
+ if (this_mode != B_PRED && this_mode != SPLITMV) {
+ adj_rd = this_rd + txfm_cache[i] - txfm_cache[cm->txfm_mode];
+ } else {
+ adj_rd = this_rd;
+ }
+ if (adj_rd < best_txfm_rd[i])
+ best_txfm_rd[i] = adj_rd;
+ }
+ }
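
(The bookkeeping above records, per transform mode, the best RD this macroblock could have achieved under that mode: the cached cost of the active txfm_mode is swapped out for the cached cost of mode i. In isolation, the adjustment is just:)

  /* this_rd was measured under cm->txfm_mode; substituting txfm_cache[i]
     for that term estimates the same prediction under transform mode i */
  int64_t adj_rd = this_rd - txfm_cache[cm->txfm_mode] + txfm_cache[i];
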
#if CONFIG_PRED_FILTER
}
#endif
@@ -3961,6 +4203,12 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
(cpi->oxcf.arnr_max_frames == 0) &&
(best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) {
mbmi->mode = ZEROMV;
+#if CONFIG_TX_SELECT
+ if (cm->txfm_mode != TX_MODE_SELECT)
+ mbmi->txfm_size = cm->txfm_mode;
+ else
+#endif
+ mbmi->txfm_size = TX_16X16;
mbmi->ref_frame = ALTREF_FRAME;
mbmi->mv[0].as_int = 0;
mbmi->uv_mode = DC_PRED;
@@ -3969,6 +4217,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mbmi->partitioning = 0;
vpx_memset(best_pred_diff, 0, sizeof(best_pred_diff));
+ vpx_memset(best_txfm_diff, 0, sizeof(best_txfm_diff));
goto end;
}
@@ -4013,11 +4262,25 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
best_pred_diff[i] = best_rd - best_pred_rd[i];
}
+#if CONFIG_TX_SELECT
+ if (!x->skip) {
+ for (i = 0; i < NB_TXFM_MODES; i++) {
+ if (best_txfm_rd[i] == INT64_MAX)
+ best_txfm_diff[i] = INT_MIN;
+ else
+ best_txfm_diff[i] = best_rd - best_txfm_rd[i];
+ }
+ } else {
+ vpx_memset(best_txfm_diff, 0, sizeof(best_txfm_diff));
+ }
+#endif
+
end:
store_coding_context(x, &x->mb_context[xd->mb_index], best_mode_index, &best_partition,
&frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame],
&frame_best_ref_mv[xd->mode_info_context->mbmi.second_ref_frame],
- best_pred_diff[0], best_pred_diff[1], best_pred_diff[2]);
+ best_pred_diff[0], best_pred_diff[1], best_pred_diff[2],
+ best_txfm_diff);
}
#if CONFIG_SUPERBLOCKS
@@ -4076,6 +4339,9 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
int dist;
int modeuv, modeuv8x8, uv_intra_skippable, uv_intra_skippable_8x8;
int y_intra16x16_skippable;
+ int64_t txfm_cache[NB_TXFM_MODES];
+ TX_SIZE txfm_size_16x16;
+ int i;
#if CONFIG_HYBRIDTRANSFORM16X16
int best_txtype;
@@ -4085,7 +4351,7 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv,
&uv_intra_skippable);
modeuv = mbmi->uv_mode;
- if (cpi->common.txfm_mode == ALLOW_8X8) {
+ if (cpi->common.txfm_mode != ONLY_4X4) {
rd_pick_intra_mbuv_mode_8x8(cpi, x, &rateuv8x8, &rateuv8x8_tokenonly,
&distuv8x8, &uv_intra_skippable_8x8);
modeuv8x8 = mbmi->uv_mode;
@@ -4098,23 +4364,18 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
}
// current macroblock under rate-distortion optimization test loop
-#if CONFIG_HYBRIDTRANSFORM
- mbmi->mode_rdopt = DC_PRED;
-#endif
-
error16x16 = rd_pick_intra16x16mby_mode(cpi, x, &rate16x16,
&rate16x16_tokenonly, &dist16x16,
- &y_intra16x16_skippable);
+ &y_intra16x16_skippable, txfm_cache);
mode16x16 = mbmi->mode;
#if CONFIG_HYBRIDTRANSFORM16X16
best_txtype = xd->block[0].bmi.as_mode.tx_type;
xd->mode_info_context->bmi[0].as_mode.tx_type = best_txtype;
#endif
+ txfm_size_16x16 = mbmi->txfm_size;
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8
- mbmi->mode_rdopt = I8X8_PRED;
-#endif
-
+ // FIXME(rbultje) support transform-size selection
+ mbmi->txfm_size = (cm->txfm_mode == ONLY_4X4) ? TX_4X4 : TX_8X8;
error8x8 = rd_pick_intra8x8mby_modes(cpi, x, &rate8x8, &rate8x8_tokenonly,
&dist8x8, error16x16);
mode8x8[0][0]= xd->mode_info_context->bmi[0].as_mode.first;
@@ -4128,10 +4389,6 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
mode8x8[1][3] = xd->mode_info_context->bmi[10].as_mode.second;
#endif
-#if CONFIG_HYBRIDTRANSFORM
- mbmi->mode_rdopt = B_PRED;
-#endif
-
error4x4 = rd_pick_intra4x4mby_modes(cpi, x,
&rate4x4, &rate4x4_tokenonly,
&dist4x4, error16x16,
@@ -4145,12 +4402,20 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
&dist4x4d, error16x16, 1, 0);
#endif
+ mbmi->mb_skip_coeff = 0;
if (cpi->common.mb_no_coeff_skip &&
y_intra16x16_skippable && uv_intra_skippable_8x8) {
+ mbmi->mb_skip_coeff = 1;
+ mbmi->mode = mode16x16;
mbmi->uv_mode = modeuv;
rate = rateuv8x8 + rate16x16 - rateuv8x8_tokenonly - rate16x16_tokenonly +
vp8_cost_bit(get_pred_prob(cm, xd, PRED_MBSKIP), 1);
dist = dist16x16 + (distuv8x8 >> 2);
+ mbmi->txfm_size = txfm_size_16x16;
+#if CONFIG_TX_SELECT
+ memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0,
+ sizeof(x->mb_context[xd->mb_index].txfm_rd_diff));
+#endif
} else if (error8x8 > error16x16) {
if (error4x4 < error16x16) {
rate = rateuv;
@@ -4165,8 +4430,14 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
rate += rate4x4;
#endif
mbmi->mode = B_PRED;
+ mbmi->txfm_size = TX_4X4;
dist = dist4x4 + (distuv >> 2);
+#if CONFIG_TX_SELECT
+ memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0,
+ sizeof(x->mb_context[xd->mb_index].txfm_rd_diff));
+#endif
} else {
+ mbmi->txfm_size = txfm_size_16x16;
mbmi->mode = mode16x16;
rate = rate16x16 + rateuv8x8;
dist = dist16x16 + (distuv8x8 >> 2);
@@ -4174,6 +4445,11 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
// save this into supermacroblock coding decision buffer
xd->mode_info_context->bmi[0].as_mode.tx_type = best_txtype;
#endif
+#if CONFIG_TX_SELECT
+ for (i = 0; i < NB_TXFM_MODES; i++) {
+ x->mb_context[xd->mb_index].txfm_rd_diff[i] = error16x16 - txfm_cache[i];
+ }
+#endif
}
if (cpi->common.mb_no_coeff_skip)
rate += vp8_cost_bit(get_pred_prob(cm, xd, PRED_MBSKIP), 0);
@@ -4191,12 +4467,23 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
rate += rate4x4;
#endif
mbmi->mode = B_PRED;
+ mbmi->txfm_size = TX_4X4;
dist = dist4x4 + (distuv >> 2);
+#if CONFIG_TX_SELECT
+ memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0,
+ sizeof(x->mb_context[xd->mb_index].txfm_rd_diff));
+#endif
} else {
+ // FIXME(rbultje) support transform-size selection
mbmi->mode = I8X8_PRED;
+ mbmi->txfm_size = (cm->txfm_mode == ONLY_4X4) ? TX_4X4 : TX_8X8;
set_i8x8_block_modes(x, mode8x8);
rate = rate8x8 + rateuv;
dist = dist8x8 + (distuv >> 2);
+#if CONFIG_TX_SELECT
+ memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0,
+ sizeof(x->mb_context[xd->mb_index].txfm_rd_diff));
+#endif
}
if (cpi->common.mb_no_coeff_skip)
rate += vp8_cost_bit(get_pred_prob(cm, xd, PRED_MBSKIP), 0);
@@ -4883,7 +5170,7 @@ int64_t vp8_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
store_coding_context(x, &x->sb_context[0], mode_index, NULL,
&frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame],
&frame_best_ref_mv[xd->mode_info_context->mbmi.second_ref_frame],
- 0, 0, 0);
+ 0, 0, 0, NULL);
return best_rd;
}
@@ -4898,7 +5185,8 @@ int64_t vp8_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
&frame_best_ref_mv[xd->mode_info_context->mbmi.second_ref_frame],
(best_single_rd == INT64_MAX) ? INT_MIN : (best_rd - best_single_rd),
(best_comp_rd == INT64_MAX) ? INT_MIN : (best_rd - best_comp_rd),
- (best_hybrid_rd == INT64_MAX) ? INT_MIN : (best_rd - best_hybrid_rd));
+ (best_hybrid_rd == INT64_MAX) ? INT_MIN : (best_rd - best_hybrid_rd),
+ NULL);
return best_rd;
}
diff --git a/vp8/encoder/temporal_filter.c b/vp8/encoder/temporal_filter.c
index d57613bd5..e59aa14fa 100644
--- a/vp8/encoder/temporal_filter.c
+++ b/vp8/encoder/temporal_filter.c
@@ -59,7 +59,7 @@ static void vp8_temporal_filter_predictors_mb_c
xd->subpixel_predict16x16(yptr, stride,
(mv_col & 7) << 1, (mv_row & 7) << 1, &pred[0], 16);
} else {
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(yptr, stride, &pred[0], 16);
+ vp8_copy_mem16x16(yptr, stride, &pred[0], 16);
}
// U & V
@@ -79,8 +79,8 @@ static void vp8_temporal_filter_predictors_mb_c
(omv_col & 15), (omv_row & 15), &pred[320], 8);
}
else {
- RECON_INVOKE(&xd->rtcd->recon, copy8x8)(uptr, stride, &pred[256], 8);
- RECON_INVOKE(&xd->rtcd->recon, copy8x8)(vptr, stride, &pred[320], 8);
+ vp8_copy_mem8x8(uptr, stride, &pred[256], 8);
+ vp8_copy_mem8x8(vptr, stride, &pred[320], 8);
}
}
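
The RECON_INVOKE macro removed here resolved the call through the runtime-CPU-detection table (or collapsed to a direct call when that is compiled out); the call sites now name the copy routines directly. What a 16x16 copy with independent strides does, as a plain-C sketch of the vp8_copy_mem16x16(src, src_stride, dst, dst_stride) signature used above (the library ships optimized variants):

    #include <string.h>

    /* Illustrative C version of a 16x16 pixel block copy. */
    static void copy_mem16x16_sketch(const unsigned char *src, int src_stride,
                                     unsigned char *dst, int dst_stride) {
      int r;
      for (r = 0; r < 16; r++) {
        memcpy(dst, src, 16);  /* one 16-pixel row per iteration */
        src += src_stride;
        dst += dst_stride;
      }
    }
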
void vp8_temporal_filter_apply_c
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index 4f4bd5010..c72c1e7e7 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -35,12 +35,10 @@ INT64 context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [M
INT64 hybrid_context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
-#if CONFIG_TX16X16
INT64 context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#if CONFIG_HYBRIDTRANSFORM16X16
INT64 hybrid_context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
-#endif
extern unsigned int tree_update_hist[BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES][2];
@@ -54,25 +52,15 @@ extern unsigned int tree_update_hist_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
extern unsigned int hybrid_tree_update_hist_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
#endif
-#if CONFIG_TX16X16
extern unsigned int tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
#if CONFIG_HYBRIDTRANSFORM16X16
extern unsigned int hybrid_tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
#endif
-#endif
-#endif
-void vp8_stuff_mb(VP8_COMP *cpi,
- MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
-void vp8_stuff_mb_8x8(VP8_COMP *cpi,
- MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
-void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
- MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
-#if CONFIG_TX16X16
-void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
- TOKENEXTRA **t, int dry_run);
-#endif
+#endif /* ENTROPY_STATS */
+
+void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp8_fix_contexts(MACROBLOCKD *xd);
static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2];
@@ -133,31 +121,25 @@ static void fill_value_tokens() {
vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
}
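
The + DCT_MAX_VALUE offset above is the usual midpoint-pointer trick: the token and cost tables span the full signed coefficient range, and pointing at the middle lets a signed coefficient value index them directly. A self-contained sketch, where the element type and the 2048 bound are stand-ins for the real definitions:

    #define MAXV 2048  /* stand-in for DCT_MAX_VALUE */

    typedef struct { short Token; short Extra; } tokenvalue_sketch;

    static tokenvalue_sketch dct_tokens_sketch[MAXV * 2];
    static const tokenvalue_sketch *tokens_mid = dct_tokens_sketch + MAXV;

    /* For any coefficient v with -MAXV <= v < MAXV, tokens_mid[v] is a
     * valid element: negative and positive values index symmetrically
     * around the table midpoint. */
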
-#if CONFIG_TX16X16
static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
const BLOCKD *const b,
TOKENEXTRA **tp,
- const int type,
- const FRAME_TYPE frametype,
+ PLANE_TYPE type,
ENTROPY_CONTEXT *a,
ENTROPY_CONTEXT *l,
VP8_COMP *cpi,
int dry_run) {
int pt; /* near block/prev token context index */
- int c = 0; /* start at DC unless type 0 */
+ int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
const int eob = b->eob; /* one beyond last nonzero coeff */
TOKENEXTRA *t = *tp; /* store tokens starting here */
- int x;
const short *qcoeff_ptr = b->qcoeff;
#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type = get_tx_type(xd, b);
#endif
-
int seg_eob = 256;
int segment_id = xd->mode_info_context->mbmi.segment_id;
- //if (!dry_run) printf("16: %d\n", tx_type);
-
if (segfeature_active(xd, segment_id, SEG_LVL_EOB))
seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
@@ -165,17 +147,18 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
do {
const int band = vp8_coef_bands_16x16[c];
- int v;
+ int x;
- x = DCT_EOB_TOKEN;
if (c < eob) {
- int rc = vp8_default_zig_zag1d_16x16[c];
- v = qcoeff_ptr[rc];
+ const int rc = vp8_default_zig_zag1d_16x16[c];
+ const int v = qcoeff_ptr[rc];
assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
x = vp8_dct_value_tokens_ptr[v].Token;
+ } else {
+ x = DCT_EOB_TOKEN;
}
t->Token = x;
@@ -186,7 +169,8 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
#endif
t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
- t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
+ t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
+ (band > 1 && type == PLANE_TYPE_Y_NO_DC));
if (!dry_run) {
#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
@@ -195,32 +179,27 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
#endif
++cpi->coef_counts_16x16[type][band][pt][x];
}
- } while (pt = vp8_prev_token_class[x], ++t, c < eob && ++c < seg_eob);
+ pt = vp8_prev_token_class[x];
+ ++t;
+ } while (c < eob && ++c < seg_eob);
*tp = t;
pt = (c != !type); /* 0 <-> all coeff data is zero */
*a = *l = pt;
}
-#endif
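
Every tokenizer rewritten in this change shares the same loop shape: walk the scan order, emit a value token while c < eob, then emit exactly one DCT_EOB_TOKEN unless the segment-level EOB limit already covers it. Stripped of the context and counting bookkeeping, the skeleton looks like this (the stub helpers and the token value are hypothetical stand-ins):

    #define DCT_EOB_TOKEN_SKETCH 11  /* stand-in for the entropy enum value */

    static int value_token_sketch(int v) { (void)v; return 0; }  /* stub */
    static void emit_token_sketch(int token) { (void)token; }    /* stub */

    static void tokenize_block_sketch(const short *qcoeff, const int *scan,
                                      int eob, int seg_eob, int c) {
      /* c starts at 1 for a luma plane whose DC lives in the Y2 block. */
      do {
        const int token = (c < eob) ? value_token_sketch(qcoeff[scan[c]])
                                    : DCT_EOB_TOKEN_SKETCH;
        emit_token_sketch(token);
      } while (c < eob && ++c < seg_eob);  /* EOB written at most once */
    }
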
-static void tokenize2nd_order_b_8x8
-(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+static void tokenize2nd_order_b_8x8(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
int c = 0; /* start at DC */
const int eob = b->eob; /* one beyond last nonzero coeff */
TOKENEXTRA *t = *tp; /* store tokens starting here */
- int x;
const short *qcoeff_ptr = b->qcoeff;
-
int seg_eob = 4;
int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -234,125 +213,107 @@ static void tokenize2nd_order_b_8x8
do {
const int band = vp8_coef_bands[c];
- int v = 0;
+ int x;
if (c < eob) {
- int rc = vp8_default_zig_zag1d[c];
- v = qcoeff_ptr[rc];
+ const int rc = vp8_default_zig_zag1d[c];
+ const int v = qcoeff_ptr[rc];
assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
x = vp8_dct_value_tokens_ptr[v].Token;
- } else
+ } else {
x = DCT_EOB_TOKEN;
+ }
t->Token = x;
- // printf("Token : %d\n", x);
- t->context_tree = cpi->common.fc.coef_probs_8x8 [type] [band] [pt];
+ t->context_tree = cpi->common.fc.coef_probs_8x8[PLANE_TYPE_Y2][band][pt];
- t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
+ t->skip_eob_node = ((pt == 0) && (band > 0));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run)
- ++cpi->coef_counts_8x8 [type] [band] [pt] [x];
- } while (pt = vp8_prev_token_class[x], ++t, c < eob && ++c < seg_eob);
-
+ ++cpi->coef_counts_8x8[PLANE_TYPE_Y2][band][pt][x];
+ pt = vp8_prev_token_class[x];
+ ++t;
+ } while (c < eob && ++c < seg_eob);
*tp = t;
- pt = (c != !type); /* 0 <-> all coeff data is zero */
+ pt = (c != 0); /* 0 <-> all coeff data is zero */
*a = *l = pt;
-
}
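
The simplified skip_eob_node expression here is the Y2 special case of the rule used throughout this file: the EOB branch of the token tree may be skipped only when the previous-token context pt is 0, and only past the first band the plane actually codes. Spelled out as a sketch:

    /* General skip_eob_node rule (PLANE_TYPE semantics as in this patch):
     * Y-without-DC starts tokenizing at c = 1, which already sits in
     * band 1, so its threshold is one band higher. */
    static int skip_eob_node_sketch(int pt, int band, int is_y_no_dc) {
      if (pt != 0)
        return 0;
      return is_y_no_dc ? (band > 1) : (band > 0);
    }
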
-static void tokenize2nd_order_b(MACROBLOCKD *xd,
- TOKENEXTRA **tp,
- VP8_COMP *cpi,
- int dry_run) {
+static void tokenize2nd_order_b_4x4(MACROBLOCKD *xd,
+ TOKENEXTRA **tp,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
- int c; /* start at DC */
+ int c = 0; /* start at DC */
TOKENEXTRA *t = *tp;/* store tokens starting here */
- const BLOCKD *b;
- const short *qcoeff_ptr;
+ const BLOCKD *b = xd->block + 24;
+ const short *qcoeff_ptr = b->qcoeff;
ENTROPY_CONTEXT *a;
ENTROPY_CONTEXT *l;
- int band, rc, v, token;
-
+ const int eob = b->eob;
int seg_eob = 16;
int segment_id = xd->mode_info_context->mbmi.segment_id;
if (segfeature_active(xd, segment_id, SEG_LVL_EOB))
seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
- b = xd->block + 24;
- qcoeff_ptr = b->qcoeff;
a = (ENTROPY_CONTEXT *)xd->above_context + 8;
l = (ENTROPY_CONTEXT *)xd->left_context + 8;
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- for (c = 0; c < b->eob; c++) {
- rc = vp8_default_zig_zag1d[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
+ do {
+ const int band = vp8_coef_bands[c];
+ int token;
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
+ if (c < eob) {
+ const int rc = vp8_default_zig_zag1d[c];
+ const int v = qcoeff_ptr[rc];
+
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+ } else
+ token = DCT_EOB_TOKEN;
t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs [1] [band] [pt];
+ t->context_tree = cpi->common.fc.coef_probs[PLANE_TYPE_Y2][band][pt];
t->skip_eob_node = ((pt == 0) && (band > 0));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run)
- ++cpi->coef_counts [1] [band] [pt] [token];
-
+ ++cpi->coef_counts[PLANE_TYPE_Y2][band][pt][token];
pt = vp8_prev_token_class[token];
- t++;
- }
-
- if (c < seg_eob) {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [1] [band] [pt];
-
- t->skip_eob_node = ((pt == 0) && (band > 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
- if (!dry_run)
- ++cpi->coef_counts [1] [band] [pt] [DCT_EOB_TOKEN];
-
- t++;
- }
-
+ ++t;
+ } while (c < eob && ++c < seg_eob);
*tp = t;
pt = (c != 0); /* 0 <-> all coeff data is zero */
*a = *l = pt;
-
}
-static void tokenize1st_order_b_8x8
-(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+static void tokenize1st_order_b_8x8(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ PLANE_TYPE type,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
- int c = type ? 0 : 1; /* start at DC unless type 0 */
+  int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0; /* skip DC when Y has no DC */
TOKENEXTRA *t = *tp; /* store tokens starting here */
const short *qcoeff_ptr = b->qcoeff;
#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type = xd->mode_info_context->mbmi.mode == I8X8_PRED ?
get_tx_type(xd, b) : DCT_DCT;
#endif
-
+ const int eob = b->eob;
int seg_eob = 64;
int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -361,16 +322,20 @@ static void tokenize1st_order_b_8x8
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- //if (!dry_run) printf("8: %d\n", tx_type);
- for (; c < b->eob; ++c) {
+ do {
const int band = vp8_coef_bands_8x8[c];
- int rc = vp8_default_zig_zag1d_8x8[c];
- int v = qcoeff_ptr[rc], x;
+ int x;
+
+ if (c < eob) {
+ const int rc = vp8_default_zig_zag1d_8x8[c];
+ const int v = qcoeff_ptr[rc];
- assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
+ assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- x = vp8_dct_value_tokens_ptr[v].Token;
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ x = vp8_dct_value_tokens_ptr[v].Token;
+ } else
+ x = DCT_EOB_TOKEN;
t->Token = x;
#if CONFIG_HYBRIDTRANSFORM8X8
@@ -380,7 +345,8 @@ static void tokenize1st_order_b_8x8
#endif
t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
- t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
+ t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
+ (band > 1 && type == PLANE_TYPE_Y_NO_DC));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run) {
@@ -391,65 +357,89 @@ static void tokenize1st_order_b_8x8
#endif
++cpi->coef_counts_8x8[type][band][pt][x];
}
-
pt = vp8_prev_token_class[x];
++t;
+ } while (c < eob && ++c < seg_eob);
+
+ *tp = t;
+ pt = (c != !type); /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
+}
+
+static void tokenize1st_order_chroma_4x4(MACROBLOCKD *xd,
+ TOKENEXTRA **tp,
+ VP8_COMP *cpi,
+ int dry_run) {
+ unsigned int block;
+ const BLOCKD *b = xd->block + 16;
+ int pt; /* near block/prev token context index */
+ TOKENEXTRA *t = *tp;/* store tokens starting here */
+ ENTROPY_CONTEXT *a;
+ ENTROPY_CONTEXT *l;
+ int seg_eob = 16;
+ int segment_id = xd->mode_info_context->mbmi.segment_id;
+
+ if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
+ seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
}
- if (c < seg_eob) {
- const int band = vp8_coef_bands_8x8[c];
- t->Token = DCT_EOB_TOKEN;
+ /* Chroma */
+ for (block = 16; block < 24; block++, b++) {
+ const int eob = b->eob;
+ const int tmp1 = vp8_block2above[block];
+ const int tmp2 = vp8_block2left[block];
+ const int16_t *qcoeff_ptr = b->qcoeff;
+ int c = 0;
-#if CONFIG_HYBRIDTRANSFORM8X8
- if (tx_type != DCT_DCT)
- t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8 [type] [band] [pt];
- else
-#endif
- t->context_tree = cpi->common.fc.coef_probs_8x8 [type] [band] [pt];
+ a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
+ l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
- t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
- if (vp8_coef_encodings[t->Token].Len - t->skip_eob_node <= 0) {
- printf("type %d, seg-eob %d, eob %d, pt %d, c %d band %d\n", type, seg_eob, b->eob, pt, c, band);
- fflush(stdout);
- }
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM8X8
- if (tx_type != DCT_DCT)
- ++cpi->hybrid_coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
- else
-#endif
- ++cpi->coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
- }
- ++t;
- }
+ do {
+ const int band = vp8_coef_bands[c];
+ int token;
- *tp = t;
- pt = (c != !type); /* 0 <-> all coeff data is zero */
- *a = *l = pt;
+ if (c < eob) {
+ const int rc = vp8_default_zig_zag1d[c];
+ const int v = qcoeff_ptr[rc];
+
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+ } else
+ token = DCT_EOB_TOKEN;
+
+ t->Token = token;
+ t->context_tree = cpi->common.fc.coef_probs[PLANE_TYPE_UV][band][pt];
+
+ t->skip_eob_node = ((pt == 0) && (band > 0));
+ assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
+
+ if (!dry_run)
+ ++cpi->coef_counts[PLANE_TYPE_UV][band][pt][token];
+ pt = vp8_prev_token_class[token];
+ ++t;
+ } while (c < eob && ++c < seg_eob);
+
+ *tp = t;
+ pt = (c != 0); /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
+ }
}
#if CONFIG_HYBRIDTRANSFORM
-static void tokenize1st_order_ht( MACROBLOCKD *xd,
- TOKENEXTRA **tp,
- int type,
- VP8_COMP *cpi,
- int dry_run) {
+static void tokenize1st_order_ht_4x4(MACROBLOCKD *xd,
+ TOKENEXTRA **tp,
+ PLANE_TYPE type,
+ VP8_COMP *cpi,
+ int dry_run) {
unsigned int block;
- const BLOCKD *b;
+ const BLOCKD *b = xd->block;
int pt; /* near block/prev token context index */
- int c;
- int token;
TOKENEXTRA *t = *tp;/* store tokens starting here */
- const short *qcoeff_ptr;
ENTROPY_CONTEXT * a;
ENTROPY_CONTEXT * l;
- int band, rc, v;
- int tmp1, tmp2;
-
int const *pt_scan ;
-
int seg_eob = 16;
int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -457,33 +447,33 @@ static void tokenize1st_order_ht( MACROBLOCKD *xd,
seg_eob = get_segdata( xd, segment_id, SEG_LVL_EOB );
}
- b = xd->block;
-
/* Luma */
for (block = 0; block < 16; block++, b++) {
- B_PREDICTION_MODE b_mode;
+ const int eob = b->eob;
TX_TYPE tx_type = DCT_DCT;
+ const int tmp1 = vp8_block2above[block];
+ const int tmp2 = vp8_block2left[block];
+ const int16_t *qcoeff_ptr = b->qcoeff;
+ int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
+
+ a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
+ l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
if( xd->mode_info_context->mbmi.mode == B_PRED ) {
- b_mode = b->bmi.as_mode.first;
tx_type = get_tx_type(xd, b);
}
// assign scanning order for luma components coded in intra4x4 mode
- if( (xd->mode_info_context->mbmi.mode == B_PRED) &&
- (type == PLANE_TYPE_Y_WITH_DC) ) {
- switch(b_mode) {
- case B_VE_PRED :
- case B_VR_PRED :
+ if ((xd->mode_info_context->mbmi.mode == B_PRED) &&
+ (type == PLANE_TYPE_Y_WITH_DC)) {
+ switch (tx_type) {
+ case ADST_DCT:
pt_scan = vp8_row_scan;
break;
-
- case B_HE_PRED :
- case B_HD_PRED :
- case B_HU_PRED :
+ case DCT_ADST:
pt_scan = vp8_col_scan;
break;
-
default :
pt_scan = vp8_default_zig_zag1d;
break;
@@ -492,218 +482,58 @@ static void tokenize1st_order_ht( MACROBLOCKD *xd,
pt_scan = vp8_default_zig_zag1d;
}
- tmp1 = vp8_block2above[block];
- tmp2 = vp8_block2left[block];
- qcoeff_ptr = b->qcoeff;
- a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
- l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- c = type ? 0 : 1;
+ do {
+ const int band = vp8_coef_bands[c];
+ int token;
- for (; c < b->eob; c++) {
- rc = pt_scan[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
+ if (c < eob) {
+ const int rc = pt_scan[c];
+ const int v = qcoeff_ptr[rc];
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+ } else
+ token = DCT_EOB_TOKEN;
t->Token = token;
if (tx_type != DCT_DCT)
- t->context_tree = cpi->common.fc.hybrid_coef_probs [type] [band] [pt];
+ t->context_tree = cpi->common.fc.hybrid_coef_probs[type][band][pt];
else
- t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
+ t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
- t->skip_eob_node = pt == 0 &&
- ((band > 0 && type > 0) || (band > 1 && type == 0));
+ t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
+ (band > 1 && type == PLANE_TYPE_Y_NO_DC));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run) {
if (tx_type != DCT_DCT)
- ++cpi->hybrid_coef_counts[type] [band] [pt] [token];
+ ++cpi->hybrid_coef_counts[type][band][pt][token];
else
- ++cpi->coef_counts [type] [band] [pt] [token];
+        ++cpi->coef_counts[type][band][pt][token];
}
-
pt = vp8_prev_token_class[token];
- t++;
- }
-
- if (c < seg_eob) {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- if (tx_type != DCT_DCT)
- t->context_tree = cpi->common.fc.hybrid_coef_probs [type] [band] [pt];
- else
- t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
-
- t->skip_eob_node = pt == 0 &&
- ((band > 0 && type > 0) || (band > 1 && type == 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
- if (!dry_run) {
- if (tx_type != DCT_DCT)
- ++cpi->hybrid_coef_counts[type] [band] [pt] [DCT_EOB_TOKEN];
- else
- ++cpi->coef_counts [type] [band] [pt] [DCT_EOB_TOKEN];
- }
-
- t++;
- }
+ ++t;
+ } while (c < eob && ++c < seg_eob);
*tp = t;
pt = (c != !type); /* 0 <-> all coeff data is zero */
*a = *l = pt;
}
- // reset scanning order for chroma components
- pt_scan = vp8_default_zig_zag1d ;
-
- /* Chroma */
- for (block = 16; block < 24; block++, b++) {
- tmp1 = vp8_block2above[block];
- tmp2 = vp8_block2left[block];
- qcoeff_ptr = b->qcoeff;
- a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
- l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
-
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- for (c = 0; c < b->eob; c++) {
- rc = pt_scan[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
-
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
-
- t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
-
- t->skip_eob_node = ((pt == 0) && (band > 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
- if (!dry_run)
- ++cpi->coef_counts [2] [band] [pt] [token];
-
- pt = vp8_prev_token_class[token];
- t++;
- }
-
- if (c < seg_eob) {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
-
- t->skip_eob_node = ((pt == 0) && (band > 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
- if (!dry_run)
- ++cpi->coef_counts [2] [band] [pt] [DCT_EOB_TOKEN];
- t++;
- }
-
- *tp = t;
- pt = (c != 0); /* 0 <-> all coeff data is zero */
- *a = *l = pt;
- }
+ tokenize1st_order_chroma_4x4(xd, tp, cpi, dry_run);
}
#endif
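
Selecting the scan by transform type rather than by intra prediction mode keeps the two in lockstep: get_tx_type() already derives the hybrid transform from the prediction mode, so the old per-mode B_VE_PRED/B_HE_PRED switches collapse into one place. A sketch of the selection, with the enum values and scan tables assumed from the surrounding code:

    /* Assumed declarations, matching names used in the patch: */
    enum tx_type_sketch { DCT_DCT_S, ADST_DCT_S, DCT_ADST_S, ADST_ADST_S };
    extern const int vp8_row_scan[16], vp8_col_scan[16],
                     vp8_default_zig_zag1d[16];

    static const int *select_scan_sketch(enum tx_type_sketch tx_type) {
      switch (tx_type) {
        case ADST_DCT_S: return vp8_row_scan;          /* as in the patch */
        case DCT_ADST_S: return vp8_col_scan;
        default:         return vp8_default_zig_zag1d; /* plain 2-D scan */
      }
    }
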
-
-static void tokenize1st_order_chroma
-(
- MACROBLOCKD *xd,
- TOKENEXTRA **tp,
- int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- VP8_COMP *cpi,
- int dry_run) {
- unsigned int block;
- const BLOCKD *b;
- int pt; /* near block/prev token context index */
- int c;
- int token;
- TOKENEXTRA *t = *tp;/* store tokens starting here */
- const short *qcoeff_ptr;
- ENTROPY_CONTEXT *a;
- ENTROPY_CONTEXT *l;
- int band, rc, v;
- int tmp1, tmp2;
-
- int seg_eob = 16;
- int segment_id = xd->mode_info_context->mbmi.segment_id;
-
- if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
- seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
- }
-
- b = xd->block;
- b += 16;
-
- /* Chroma */
- for (block = 16; block < 24; block++, b++) {
- tmp1 = vp8_block2above[block];
- tmp2 = vp8_block2left[block];
- qcoeff_ptr = b->qcoeff;
- a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
- l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
-
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- for (c = 0; c < b->eob; c++) {
- rc = vp8_default_zig_zag1d[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
-
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
-
- t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
-
- t->skip_eob_node = ((pt == 0) && (band > 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
- if (!dry_run)
- ++cpi->coef_counts [2] [band] [pt] [token];
-
- pt = vp8_prev_token_class[token];
- t++;
- }
-
- if (c < seg_eob) {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
-
- t->skip_eob_node = ((pt == 0) && (band > 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
- if (!dry_run)
- ++cpi->coef_counts [2] [band] [pt] [DCT_EOB_TOKEN];
-
- t++;
- }
- *tp = t;
- pt = (c != 0); /* 0 <-> all coeff data is zero */
- *a = *l = pt;
- }
-}
-
-static void tokenize1st_order_b
-(
- MACROBLOCKD *xd,
- TOKENEXTRA **tp,
- int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- VP8_COMP *cpi,
- int dry_run) {
+static void tokenize1st_order_b_4x4(MACROBLOCKD *xd,
+ TOKENEXTRA **tp,
+ PLANE_TYPE type,
+ VP8_COMP *cpi,
+ int dry_run) {
unsigned int block;
- const BLOCKD *b;
+ const BLOCKD *b = xd->block;
int pt; /* near block/prev token context index */
- int band, rc, v, c, token;
TOKENEXTRA *t = *tp;/* store tokens starting here */
- const short *qcoeff_ptr;
ENTROPY_CONTEXT *a, *l;
-
int seg_eob = 16;
int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -711,108 +541,52 @@ static void tokenize1st_order_b
seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
}
- b = xd->block;
/* Luma */
for (block = 0; block < 16; block++, b++) {
- qcoeff_ptr = b->qcoeff;
+ const int eob = b->eob;
+ const int16_t *qcoeff_ptr = b->qcoeff;
+ int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
+
a = (ENTROPY_CONTEXT *)xd->above_context + vp8_block2above[block];
l = (ENTROPY_CONTEXT *)xd->left_context + vp8_block2left[block];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- c = type ? 0 : 1;
-
assert(b->eob <= 16);
- for (; c < b->eob; c++) {
- rc = vp8_default_zig_zag1d[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
+ do {
+ const int band = vp8_coef_bands[c];
+ int token;
+
+ if (c < eob) {
+ const int rc = vp8_default_zig_zag1d[c];
+ const int v = qcoeff_ptr[rc];
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+ } else
+ token = DCT_EOB_TOKEN;
t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
+ t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
- t->skip_eob_node = pt == 0 &&
- ((band > 0 && type > 0) || (band > 1 && type == 0));
+ t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
+ (band > 1 && type == PLANE_TYPE_Y_NO_DC));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run)
- ++cpi->coef_counts [type] [band] [pt] [token];
-
+ ++cpi->coef_counts[type][band][pt][token];
pt = vp8_prev_token_class[token];
- t++;
- }
+ ++t;
+ } while (c < eob && ++c < seg_eob);
- if (c < seg_eob) {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
-
- t->skip_eob_node = pt == 0 &&
- ((band > 0 && type > 0) || (band > 1 && type == 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
- if (!dry_run)
- ++cpi->coef_counts [type] [band] [pt] [DCT_EOB_TOKEN];
-
- t++;
- }
*tp = t;
pt = (c != !type); /* 0 <-> all coeff data is zero */
*a = *l = pt;
-
}
- /* Chroma */
- for (block = 16; block < 24; block++, b++) {
- qcoeff_ptr = b->qcoeff;
- a = (ENTROPY_CONTEXT *)xd->above_context + vp8_block2above[block];
- l = (ENTROPY_CONTEXT *)xd->left_context + vp8_block2left[block];
-
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- assert(b->eob <= 16);
- for (c = 0; c < b->eob; c++) {
- rc = vp8_default_zig_zag1d[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
-
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
-
- t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
-
- t->skip_eob_node = ((pt == 0) && (band > 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
- if (!dry_run)
- ++cpi->coef_counts [2] [band] [pt] [token];
-
- pt = vp8_prev_token_class[token];
- t++;
- }
-
- if (c < seg_eob) {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
-
- t->skip_eob_node = ((pt == 0) && (band > 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
- if (!dry_run)
- ++cpi->coef_counts [2] [band] [pt] [DCT_EOB_TOKEN];
-
- t++;
- }
- *tp = t;
- pt = (c != 0); /* 0 <-> all coeff data is zero */
- *a = *l = pt;
- }
+ tokenize1st_order_chroma_4x4(xd, tp, cpi, dry_run);
}
-
-int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block) {
+int mby_is_skippable_4x4(MACROBLOCKD *xd, int has_y2_block) {
int skip = 1;
int i = 0;
@@ -827,7 +601,7 @@ int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block) {
return skip;
}
-int mbuv_is_skippable(MACROBLOCKD *xd) {
+int mbuv_is_skippable_4x4(MACROBLOCKD *xd) {
int skip = 1;
int i;
@@ -836,9 +610,9 @@ int mbuv_is_skippable(MACROBLOCKD *xd) {
return skip;
}
-int mb_is_skippable(MACROBLOCKD *xd, int has_y2_block) {
- return (mby_is_skippable(xd, has_y2_block) &
- mbuv_is_skippable(xd));
+int mb_is_skippable_4x4(MACROBLOCKD *xd, int has_y2_block) {
+ return (mby_is_skippable_4x4(xd, has_y2_block) &
+ mbuv_is_skippable_4x4(xd));
}
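
These renamed predicates all implement one idea: a macroblock is skippable when no block carries coefficients the bitstream would have to code. With a Y2 block the luma DC is carried by Y2, so a luma eob of 1 (DC only) is still skippable. A flattened sketch over a plain eob array (the real code walks xd->block[]):

    /* Hypothetical condensed version of mby_is_skippable_4x4(). */
    static int mby_is_skippable_4x4_sketch(const int eob[25], int has_y2_block) {
      int skip = 1, i;
      for (i = 0; i < 16; i++)
        skip &= (eob[i] <= has_y2_block);  /* DC-only ok when Y2 holds the DC */
      if (has_y2_block)
        skip &= (eob[24] == 0);            /* the Y2 block itself is empty */
      return skip;
    }
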
int mby_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block) {
@@ -867,10 +641,9 @@ int mb_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block) {
int mb_is_skippable_8x8_4x4uv(MACROBLOCKD *xd, int has_y2_block) {
return (mby_is_skippable_8x8(xd, has_y2_block) &
- mbuv_is_skippable(xd));
+ mbuv_is_skippable_4x4(xd));
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
int mby_is_skippable_16x16(MACROBLOCKD *xd) {
int skip = 1;
  //skip &= (xd->block[0].eob < 2);  // deliberately left disabled: with no second-order block, the DC coefficient must still be coded
@@ -883,13 +656,12 @@ int mby_is_skippable_16x16(MACROBLOCKD *xd) {
int mb_is_skippable_16x16(MACROBLOCKD *xd) {
return (mby_is_skippable_16x16(xd) & mbuv_is_skippable_8x8(xd));
}
-#endif
void vp8_tokenize_mb(VP8_COMP *cpi,
MACROBLOCKD *xd,
TOKENEXTRA **t,
int dry_run) {
- int plane_type;
+ PLANE_TYPE plane_type;
int has_y2_block;
int b;
int tx_size = xd->mode_info_context->mbmi.txfm_size;
@@ -914,19 +686,15 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
} else
skip_inc = 0;
- has_y2_block = (xd->mode_info_context->mbmi.mode != B_PRED
+ has_y2_block = (tx_size != TX_16X16
+ && xd->mode_info_context->mbmi.mode != B_PRED
&& xd->mode_info_context->mbmi.mode != I8X8_PRED
&& xd->mode_info_context->mbmi.mode != SPLITMV);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_size == TX_16X16) has_y2_block = 0; // Because of inter frames
-#endif
switch (tx_size) {
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
case TX_16X16:
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_16x16(xd);
break;
-#endif
case TX_8X8:
if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8_4x4uv(xd, 0);
@@ -935,7 +703,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
break;
default:
- xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(xd, has_y2_block);
+ xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_4x4(xd, has_y2_block);
break;
}
@@ -943,18 +711,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
if (!dry_run)
cpi->skip_true_count[mb_skip_context] += skip_inc;
if (!cpi->common.mb_no_coeff_skip) {
-#if CONFIG_TX16X16 && CONFIG_HYBRIDTRANSFORM16X16
- if (tx_size == TX_16X16)
- vp8_stuff_mb_16x16(cpi, xd, t, dry_run);
- else
-#endif
- if (tx_size == TX_8X8) {
- if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
- vp8_stuff_mb_8x8_4x4uv(cpi, xd, t, dry_run);
- else
- vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
- } else
- vp8_stuff_mb(cpi, xd, t, dry_run);
+ vp8_stuff_mb(cpi, xd, t, dry_run);
} else {
vp8_fix_contexts(xd);
}
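
The skip path above, in isolation: when the stream cannot signal mb_skip_coeff, a skipped macroblock must still be "stuffed" with one explicit EOB token per coded block; when it can, only the entropy contexts need resetting. A sketch using the types and functions from this file:

    /* Sketch of the skip handling; vp8_stuff_mb() dispatches on txfm_size
     * as defined later in this file. */
    static void handle_skipped_mb_sketch(VP8_COMP *cpi, MACROBLOCKD *xd,
                                         TOKENEXTRA **t, int dry_run,
                                         int mb_no_coeff_skip) {
      if (!mb_no_coeff_skip)
        vp8_stuff_mb(cpi, xd, t, dry_run);  /* emit explicit EOB tokens */
      else
        vp8_fix_contexts(xd);               /* reset contexts, emit nothing */
    }
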
@@ -966,55 +723,49 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
if (!dry_run)
cpi->skip_false_count[mb_skip_context] += skip_inc;
- plane_type = 3;
if (has_y2_block) {
if (tx_size == TX_8X8) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
tokenize2nd_order_b_8x8(xd,
- xd->block + 24, t, 1, xd->frame_type,
+ xd->block + 24, t,
A + vp8_block2above_8x8[24],
L + vp8_block2left_8x8[24],
cpi, dry_run);
} else
- tokenize2nd_order_b(xd, t, cpi, dry_run);
+ tokenize2nd_order_b_4x4(xd, t, cpi, dry_run);
- plane_type = 0;
- }
+ plane_type = PLANE_TYPE_Y_NO_DC;
+ } else
+ plane_type = PLANE_TYPE_Y_WITH_DC;
-#if CONFIG_TX16X16
if (tx_size == TX_16X16) {
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)xd->left_context;
- tokenize1st_order_b_16x16(xd, xd->block, t, 3,
- xd->frame_type, A, L, cpi, dry_run);
+ tokenize1st_order_b_16x16(xd, xd->block, t, PLANE_TYPE_Y_WITH_DC,
+ A, L, cpi, dry_run);
for (b = 1; b < 16; b++) {
*(A + vp8_block2above[b]) = *(A);
*(L + vp8_block2left[b] ) = *(L);
}
for (b = 16; b < 24; b += 4) {
- tokenize1st_order_b_8x8(xd, xd->block + b, t, 2, xd->frame_type,
- A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b], cpi, dry_run);
+ tokenize1st_order_b_8x8(xd, xd->block + b, t, PLANE_TYPE_UV,
+ A + vp8_block2above_8x8[b],
+ L + vp8_block2left_8x8[b], cpi, dry_run);
*(A + vp8_block2above_8x8[b]+1) = *(A + vp8_block2above_8x8[b]);
*(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]);
}
vpx_memset(&A[8], 0, sizeof(A[8]));
vpx_memset(&L[8], 0, sizeof(L[8]));
}
- else
-#endif
- if (tx_size == TX_8X8) {
+ else if (tx_size == TX_8X8) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
- if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
- plane_type = PLANE_TYPE_Y_WITH_DC;
- }
for (b = 0; b < 16; b += 4) {
tokenize1st_order_b_8x8(xd,
- xd->block + b,
- t, plane_type, xd->frame_type,
+ xd->block + b, t, plane_type,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
@@ -1022,14 +773,12 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
*(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
}
if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
- tokenize1st_order_chroma(xd, t, PLANE_TYPE_UV, cpi, dry_run);
+ tokenize1st_order_chroma_4x4(xd, t, cpi, dry_run);
} else {
for (b = 16; b < 24; b += 4) {
- tokenize1st_order_b_8x8(xd,
- xd->block + b, t, 2, xd->frame_type,
+ tokenize1st_order_b_8x8(xd, xd->block + b, t, PLANE_TYPE_UV,
A + vp8_block2above_8x8[b],
- L + vp8_block2left_8x8[b],
- cpi, dry_run);
+ L + vp8_block2left_8x8[b], cpi, dry_run);
*(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
*(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
}
@@ -1037,10 +786,10 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
} else {
#if CONFIG_HYBRIDTRANSFORM
if (active_ht)
- tokenize1st_order_ht(xd, t, plane_type, cpi, dry_run);
+ tokenize1st_order_ht_4x4(xd, t, plane_type, cpi, dry_run);
else
#endif
- tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
+ tokenize1st_order_b_4x4(xd, t, plane_type, cpi, dry_run);
}
if (dry_run)
*t = t_backup;
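
The t_backup/dry_run pattern visible above is how the encoder measures token cost without committing anything: tokens are written into the caller's buffer so their cost can be inspected, the counters are guarded by if (!dry_run), and the write pointer is rewound at the end. In miniature, with hypothetical names:

    typedef struct TOKENEXTRA_SKETCH { int Token; } TOKENEXTRA_SKETCH;

    /* Run a tokenizer in dry-run mode and report how many tokens it
     * would have produced; the buffer content is treated as scratch. */
    static int count_tokens_dry_sketch(TOKENEXTRA_SKETCH **t,
                                       void (*tokenize)(TOKENEXTRA_SKETCH **,
                                                        int dry_run)) {
      TOKENEXTRA_SKETCH *const t_backup = *t;
      int n;
      tokenize(t, 1);            /* dry run: no counts are updated */
      n = (int)(*t - t_backup);  /* number of tokens produced */
      *t = t_backup;             /* rewind: nothing is kept */
      return n;
    }
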
@@ -1053,15 +802,11 @@ void init_context_counters(void) {
if (!f) {
vpx_memset(context_counters, 0, sizeof(context_counters));
vpx_memset(context_counters_8x8, 0, sizeof(context_counters_8x8));
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
vpx_memset(context_counters_16x16, 0, sizeof(context_counters_16x16));
-#endif
} else {
fread(context_counters, sizeof(context_counters), 1, f);
fread(context_counters_8x8, sizeof(context_counters_8x8), 1, f);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
fread(context_counters_16x16, sizeof(context_counters_16x16), 1, f);
-#endif
fclose(f);
}
@@ -1069,15 +814,11 @@ void init_context_counters(void) {
if (!f) {
vpx_memset(tree_update_hist, 0, sizeof(tree_update_hist));
vpx_memset(tree_update_hist_8x8, 0, sizeof(tree_update_hist_8x8));
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
vpx_memset(tree_update_hist_16x16, 0, sizeof(tree_update_hist_16x16));
-#endif
} else {
fread(tree_update_hist, sizeof(tree_update_hist), 1, f);
fread(tree_update_hist_8x8, sizeof(tree_update_hist_8x8), 1, f);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
fread(tree_update_hist_16x16, sizeof(tree_update_hist_16x16), 1, f);
-#endif
fclose(f);
}
}
@@ -1153,7 +894,6 @@ void print_context_counters() {
} while (++type < BLOCK_TYPES_8X8);
fprintf(f, "\n};\n");
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
fprintf(f, "static const unsigned int\nvp8_default_coef_counts_16x16"
"[BLOCK_TYPES_16X16] [COEF_BANDS]"
"[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS] = {");
@@ -1186,7 +926,6 @@ void print_context_counters() {
fprintf(f, "\n }");
} while (++type < BLOCK_TYPES_16X16);
fprintf(f, "\n};\n");
-#endif
fprintf(f, "static const vp8_prob\n"
"vp8_default_coef_probs[BLOCK_TYPES] [COEF_BANDS] \n"
@@ -1256,7 +995,6 @@ void print_context_counters() {
} while (++type < BLOCK_TYPES_8X8);
fprintf(f, "\n};\n");
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
fprintf(f, "static const vp8_prob\n"
"vp8_default_coef_probs_16x16[BLOCK_TYPES_16X16] [COEF_BANDS]\n"
"[PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {");
@@ -1289,87 +1027,71 @@ void print_context_counters() {
fprintf(f, "\n }");
} while (++type < BLOCK_TYPES_16X16);
fprintf(f, "\n};\n");
-#endif
fclose(f);
f = fopen("context.bin", "wb");
fwrite(context_counters, sizeof(context_counters), 1, f);
fwrite(context_counters_8x8, sizeof(context_counters_8x8), 1, f);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
fwrite(context_counters_16x16, sizeof(context_counters_16x16), 1, f);
-#endif
fclose(f);
}
#endif
-
void vp8_tokenize_initialize() {
fill_value_tokens();
}
-
-static __inline void stuff2nd_order_b_8x8(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+static __inline void stuff2nd_order_b_8x8(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- (void) frametype;
- (void) type;
(void) b;
t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs_8x8 [1] [0] [pt];
+ t->context_tree = cpi->common.fc.coef_probs_8x8[PLANE_TYPE_Y2][0][pt];
// t->section = 11;
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run)
- ++cpi->coef_counts_8x8 [1] [0] [pt] [DCT_EOB_TOKEN];
+ ++cpi->coef_counts_8x8[PLANE_TYPE_Y2][0][pt][DCT_EOB_TOKEN];
pt = 0;
*a = *l = pt;
-
}
-static __inline void stuff1st_order_b_8x8
-(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+static __inline void stuff1st_order_b_8x8(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ PLANE_TYPE type,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type = xd->mode_info_context->mbmi.mode == I8X8_PRED ?
get_tx_type(xd, b) : DCT_DCT;
#endif
-
+ const int band = vp8_coef_bands_8x8[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- (void) frametype;
- (void) type;
(void) b;
t->Token = DCT_EOB_TOKEN;
#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
- t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8 [0] [1] [pt];
+ t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8[type][band][pt];
else
#endif
- t->context_tree = cpi->common.fc.coef_probs_8x8 [0] [1] [pt];
+ t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
// t->section = 8;
t->skip_eob_node = 0;
++t;
@@ -1377,66 +1099,61 @@ static __inline void stuff1st_order_b_8x8
if (!dry_run) {
#if CONFIG_HYBRIDTRANSFORM8X8
    if (tx_type != DCT_DCT)
- ++cpi->hybrid_coef_counts_8x8[0] [1] [pt] [DCT_EOB_TOKEN];
+ ++cpi->hybrid_coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
else
#endif
- ++cpi->coef_counts_8x8[0] [1] [pt] [DCT_EOB_TOKEN];
+ ++cpi->coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
}
pt = 0; /* 0 <-> all coeff data is zero */
*a = *l = pt;
-
-
}
-static __inline
-void stuff1st_order_buv_8x8
-(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+static __inline void stuff1st_order_buv_8x8(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- (void) frametype;
- (void) type;
(void) b;
t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs_8x8 [2] [0] [pt];
+ t->context_tree = cpi->common.fc.coef_probs_8x8[PLANE_TYPE_UV][0][pt];
// t->section = 13;
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run)
- ++cpi->coef_counts_8x8[2] [0] [pt] [DCT_EOB_TOKEN];
+ ++cpi->coef_counts_8x8[PLANE_TYPE_UV][0][pt][DCT_EOB_TOKEN];
pt = 0; /* 0 <-> all coeff data is zero */
*a = *l = pt;
-
}
-void vp8_stuff_mb_8x8(VP8_COMP *cpi,
- MACROBLOCKD *xd,
- TOKENEXTRA **t,
- int dry_run) {
+static void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *xd,
+ TOKENEXTRA **t, int dry_run) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
- int plane_type;
+ PLANE_TYPE plane_type;
int b;
TOKENEXTRA *t_backup = *t;
+ const int has_y2_block = (xd->mode_info_context->mbmi.mode != B_PRED
+ && xd->mode_info_context->mbmi.mode != I8X8_PRED
+ && xd->mode_info_context->mbmi.mode != SPLITMV);
- stuff2nd_order_b_8x8(xd, xd->block + 24, t, 1, xd->frame_type,
- A + vp8_block2above_8x8[24],
- L + vp8_block2left_8x8[24], cpi, dry_run);
- plane_type = 0;
+ if (has_y2_block) {
+ stuff2nd_order_b_8x8(xd, xd->block + 24, t,
+ A + vp8_block2above_8x8[24],
+ L + vp8_block2left_8x8[24], cpi, dry_run);
+ plane_type = PLANE_TYPE_Y_NO_DC;
+ } else {
+ plane_type = PLANE_TYPE_Y_WITH_DC;
+ }
for (b = 0; b < 16; b += 4) {
- stuff1st_order_b_8x8(xd, xd->block + b, t, plane_type, xd->frame_type,
+ stuff1st_order_b_8x8(xd, xd->block + b, t, plane_type,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
@@ -1445,7 +1162,7 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi,
}
for (b = 16; b < 24; b += 4) {
- stuff1st_order_buv_8x8(xd, xd->block + b, t, 2, xd->frame_type,
+ stuff1st_order_buv_8x8(xd, xd->block + b, t,
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);
@@ -1456,63 +1173,60 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi,
*t = t_backup;
}
-#if CONFIG_TX16X16
-static __inline
-void stuff1st_order_b_16x16(MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run){
- int pt; /* near block/prev token context index */
- TOKENEXTRA *t = *tp; /* store tokens starting here */
+static __inline void stuff1st_order_b_16x16(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ PLANE_TYPE type,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
+ int pt; /* near block/prev token context index */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
#if CONFIG_HYBRIDTRANSFORM16X16
- TX_TYPE tx_type = get_tx_type(xd, b);
+ TX_TYPE tx_type = get_tx_type(xd, b);
#endif
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- (void) frametype;
- (void) b;
+ const int band = vp8_coef_bands_16x16[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+ (void) b;
- t->Token = DCT_EOB_TOKEN;
+ t->Token = DCT_EOB_TOKEN;
#if CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type != DCT_DCT)
- t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[3][1][pt];
- else
+ if (tx_type != DCT_DCT)
+ t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[type][band][pt];
+ else
#endif
- t->context_tree = cpi->common.fc.coef_probs_16x16[3][1][pt];
- t->skip_eob_node = 0;
- ++t;
- *tp = t;
- if (!dry_run) {
+ t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
+ t->skip_eob_node = 0;
+ ++t;
+ *tp = t;
+ if (!dry_run) {
#if CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type != DCT_DCT)
- ++cpi->hybrid_coef_counts_16x16[3][1][pt][DCT_EOB_TOKEN];
- else
+ if (tx_type != DCT_DCT)
+ ++cpi->hybrid_coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN];
+ else
#endif
- ++cpi->coef_counts_16x16[3][1][pt][DCT_EOB_TOKEN];
- }
- pt = 0; /* 0 <-> all coeff data is zero */
- *a = *l = pt;
+ ++cpi->coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN];
+ }
+ pt = 0; /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
}
-void vp8_stuff_mb_16x16(VP8_COMP *cpi,
- MACROBLOCKD *xd,
- TOKENEXTRA **t,
- int dry_run) {
+static void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
+ TOKENEXTRA **t, int dry_run) {
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)xd->left_context;
int b, i;
TOKENEXTRA *t_backup = *t;
- stuff1st_order_b_16x16(xd, xd->block, t, xd->frame_type, A, L, cpi, dry_run);
+ stuff1st_order_b_16x16(xd, xd->block, t, PLANE_TYPE_Y_WITH_DC,
+ A, L, cpi, dry_run);
for (i = 1; i < 16; i++) {
*(A + vp8_block2above[i]) = *(A);
*(L + vp8_block2left[i]) = *(L);
}
for (b = 16; b < 24; b += 4) {
- stuff1st_order_buv_8x8(xd, xd->block + b, t, 2, xd->frame_type,
+ stuff1st_order_buv_8x8(xd, xd->block + b, t,
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);
@@ -1524,140 +1238,136 @@ void vp8_stuff_mb_16x16(VP8_COMP *cpi,
if (dry_run)
*t = t_backup;
}
-#endif
-static __inline void stuff2nd_order_b
-(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+static __inline void stuff2nd_order_b_4x4(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [1] [0] [pt];
+ t->context_tree = cpi->common.fc.coef_probs[PLANE_TYPE_Y2][0][pt];
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run)
- ++cpi->coef_counts [1] [0] [pt] [DCT_EOB_TOKEN];
+    ++cpi->coef_counts[PLANE_TYPE_Y2][0][pt][DCT_EOB_TOKEN];
pt = 0;
*a = *l = pt;
-
}
-static __inline void stuff1st_order_b(MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+static __inline void stuff1st_order_b_4x4(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ PLANE_TYPE type,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
#if CONFIG_HYBRIDTRANSFORM
TX_TYPE tx_type = get_tx_type(xd, b);
#endif
+ const int band = vp8_coef_bands[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
t->Token = DCT_EOB_TOKEN;
#if CONFIG_HYBRIDTRANSFORM
if (tx_type != DCT_DCT)
- t->context_tree = cpi->common.fc.hybrid_coef_probs [0] [1] [pt];
+ t->context_tree = cpi->common.fc.hybrid_coef_probs[type][band][pt];
else
#endif
- t->context_tree = cpi->common.fc.coef_probs [0] [1] [pt];
+ t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run) {
#if CONFIG_HYBRIDTRANSFORM
if (tx_type != DCT_DCT)
- ++cpi->hybrid_coef_counts[0] [1] [pt] [DCT_EOB_TOKEN];
+ ++cpi->hybrid_coef_counts[type][band][pt][DCT_EOB_TOKEN];
else
#endif
- ++cpi->coef_counts[0] [1] [pt] [DCT_EOB_TOKEN];
+ ++cpi->coef_counts[type][band][pt][DCT_EOB_TOKEN];
}
pt = 0; /* 0 <-> all coeff data is zero */
*a = *l = pt;
-
}
-static __inline
-void stuff1st_order_buv
-(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+
+static __inline void stuff1st_order_buv_4x4(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [2] [0] [pt];
+ t->context_tree = cpi->common.fc.coef_probs[PLANE_TYPE_UV][0][pt];
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run)
- ++cpi->coef_counts[2] [0] [pt] [DCT_EOB_TOKEN];
+ ++cpi->coef_counts[PLANE_TYPE_UV][0][pt][DCT_EOB_TOKEN];
pt = 0; /* 0 <-> all coeff data is zero */
*a = *l = pt;
}
-void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd,
- TOKENEXTRA **t, int dry_run) {
+static void vp8_stuff_mb_4x4(VP8_COMP *cpi, MACROBLOCKD *xd,
+ TOKENEXTRA **t, int dry_run) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
int b;
TOKENEXTRA *t_backup = *t;
+ PLANE_TYPE plane_type;
+ const int has_y2_block = (xd->mode_info_context->mbmi.mode != B_PRED
+ && xd->mode_info_context->mbmi.mode != I8X8_PRED
+ && xd->mode_info_context->mbmi.mode != SPLITMV);
- stuff2nd_order_b(xd, xd->block + 24, t,
- A + vp8_block2above[24],
- L + vp8_block2left[24],
- cpi, dry_run);
+ if (has_y2_block) {
+ stuff2nd_order_b_4x4(xd, xd->block + 24, t,
+ A + vp8_block2above[24],
+ L + vp8_block2left[24],
+ cpi, dry_run);
+ plane_type = PLANE_TYPE_Y_NO_DC;
+ } else {
+ plane_type = PLANE_TYPE_Y_WITH_DC;
+ }
for (b = 0; b < 16; b++)
- stuff1st_order_b(xd, xd->block + b, t,
- A + vp8_block2above[b],
- L + vp8_block2left[b],
- cpi, dry_run);
+ stuff1st_order_b_4x4(xd, xd->block + b, t, plane_type,
+ A + vp8_block2above[b],
+ L + vp8_block2left[b],
+ cpi, dry_run);
for (b = 16; b < 24; b++)
- stuff1st_order_buv(xd, xd->block + b, t,
- A + vp8_block2above[b],
- L + vp8_block2left[b],
- cpi, dry_run);
+ stuff1st_order_buv_4x4(xd, xd->block + b, t,
+ A + vp8_block2above[b],
+ L + vp8_block2left[b],
+ cpi, dry_run);
if (dry_run)
*t = t_backup;
}
-void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
- MACROBLOCKD *xd,
- TOKENEXTRA **t,
- int dry_run) {
+static void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi, MACROBLOCKD *xd,
+ TOKENEXTRA **t, int dry_run) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
- int plane_type;
int b;
TOKENEXTRA *t_backup = *t;
- stuff2nd_order_b_8x8(xd, xd->block + 24, t, 1, xd->frame_type,
- A + vp8_block2above_8x8[24],
- L + vp8_block2left_8x8[24], cpi, dry_run);
- plane_type = 3;
-
for (b = 0; b < 16; b += 4) {
- stuff1st_order_b_8x8(xd, xd->block + b, t, plane_type, xd->frame_type,
+ stuff1st_order_b_8x8(xd, xd->block + b, t, PLANE_TYPE_Y_WITH_DC,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
@@ -1666,23 +1376,37 @@ void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
}
for (b = 16; b < 24; b++)
- stuff1st_order_buv(xd, xd->block + b, t,
- A + vp8_block2above[b],
- L + vp8_block2left[b],
- cpi, dry_run);
+ stuff1st_order_buv_4x4(xd, xd->block + b, t,
+ A + vp8_block2above[b],
+ L + vp8_block2left[b],
+ cpi, dry_run);
if (dry_run)
*t = t_backup;
}
+void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) {
+ TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
+
+ if (tx_size == TX_16X16) {
+ vp8_stuff_mb_16x16(cpi, xd, t, dry_run);
+ } else if (tx_size == TX_8X8) {
+ if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
+ vp8_stuff_mb_8x8_4x4uv(cpi, xd, t, dry_run);
+ } else {
+ vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
+ }
+ } else {
+ vp8_stuff_mb_4x4(cpi, xd, t, dry_run);
+ }
+}
+
void vp8_fix_contexts(MACROBLOCKD *xd) {
/* Clear entropy contexts for Y2 blocks */
if ((xd->mode_info_context->mbmi.mode != B_PRED
&& xd->mode_info_context->mbmi.mode != I8X8_PRED
&& xd->mode_info_context->mbmi.mode != SPLITMV)
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
|| xd->mode_info_context->mbmi.txfm_size == TX_16X16
-#endif
) {
vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
diff --git a/vp8/encoder/tokenize.h b/vp8/encoder/tokenize.h
index 199997b42..87cb44bdc 100644
--- a/vp8/encoder/tokenize.h
+++ b/vp8/encoder/tokenize.h
@@ -31,9 +31,9 @@ typedef struct {
int rd_cost_mby(MACROBLOCKD *);
-extern int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block);
-extern int mbuv_is_skippable(MACROBLOCKD *xd);
-extern int mb_is_skippable(MACROBLOCKD *xd, int has_y2_block);
+extern int mby_is_skippable_4x4(MACROBLOCKD *xd, int has_y2_block);
+extern int mbuv_is_skippable_4x4(MACROBLOCKD *xd);
+extern int mb_is_skippable_4x4(MACROBLOCKD *xd, int has_y2_block);
extern int mby_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block);
extern int mbuv_is_skippable_8x8(MACROBLOCKD *xd);
extern int mb_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block);
@@ -49,11 +49,9 @@ extern INT64 context_counters[BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
extern INT64 context_counters_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
extern INT64 context_counters_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
#endif
-#endif
extern const int *vp8_dct_value_cost_ptr;
/* TODO: The Token field should be broken out into a separate char array to
diff --git a/vp8/encoder/x86/x86_csystemdependent.c b/vp8/encoder/x86/x86_csystemdependent.c
index 146888a1f..71c51c14f 100644
--- a/vp8/encoder/x86/x86_csystemdependent.c
+++ b/vp8/encoder/x86/x86_csystemdependent.c
@@ -115,13 +115,6 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi) {
cpi->rtcd.variance.mse16x16 = vp8_mse16x16_mmx;
cpi->rtcd.variance.getmbss = vp8_get_mb_ss_mmx;
- cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_mmx;
- cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_mmx;
- cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_mmx;
- cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_mmx;
-
- cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
-
cpi->rtcd.encodemb.berr = vp8_block_error_mmx;
cpi->rtcd.encodemb.mberr = vp8_mbblock_error_mmx;
cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_mmx;
@@ -159,13 +152,6 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi) {
cpi->rtcd.variance.mse16x16 = vp8_mse16x16_wmt;
cpi->rtcd.variance.getmbss = vp8_get_mb_ss_sse2;
- cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_sse2;
- cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_sse2;
- cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_sse2;
- cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_sse2;
-
- cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_sse2;
-
cpi->rtcd.encodemb.berr = vp8_block_error_xmm;
cpi->rtcd.encodemb.mberr = vp8_mbblock_error_xmm;
cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_xmm;
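
The deleted assignments belonged to the classic run-time CPU-detection pattern: a per-codec table of function pointers filled once at startup and called through on the hot path. The forward-DCT entries simply leave this hand-written init; the dispatch itself lives elsewhere after this change. The pattern, sketched with hypothetical names (vp8_short_fdct4x4 takes (short *input, short *output, int pitch) in this codebase):

    typedef void (*fdct_fn_sketch)(short *input, short *output, int pitch);

    struct fdct_rtcd_sketch { fdct_fn_sketch short4x4; };

    static void init_fdct_sketch(struct fdct_rtcd_sketch *rtcd, int has_sse2,
                                 fdct_fn_sketch c_ver, fdct_fn_sketch sse2_ver) {
      rtcd->short4x4 = c_ver;        /* portable default first */
      if (has_sse2)
        rtcd->short4x4 = sse2_ver;   /* then the CPU-specific override */
    }
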
diff --git a/vp8/vp8_common.mk b/vp8/vp8_common.mk
index f04bc3497..25c4fe210 100644
--- a/vp8/vp8_common.mk
+++ b/vp8/vp8_common.mk
@@ -48,7 +48,6 @@ VP8_COMMON_SRCS-yes += common/onyxc_int.h
VP8_COMMON_SRCS-yes += common/pred_common.h
VP8_COMMON_SRCS-yes += common/pred_common.c
VP8_COMMON_SRCS-yes += common/quant_common.h
-VP8_COMMON_SRCS-yes += common/recon.h
VP8_COMMON_SRCS-yes += common/reconinter.h
VP8_COMMON_SRCS-yes += common/reconintra.h
VP8_COMMON_SRCS-yes += common/reconintra4x4.h
@@ -82,7 +81,6 @@ VP8_COMMON_SRCS-yes += common/implicit_segmentation.c
VP8_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/idct_x86.h
VP8_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/subpixel_x86.h
-VP8_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/recon_x86.h
VP8_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/loopfilter_x86.h
VP8_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/postproc_x86.h
VP8_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/x86_systemdependent.c
diff --git a/vp8/vp8cx.mk b/vp8/vp8cx.mk
index 3b804b7b1..7058e316b 100644
--- a/vp8/vp8cx.mk
+++ b/vp8/vp8cx.mk
@@ -79,8 +79,10 @@ VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/ssim.c
VP8_CX_SRCS-yes += encoder/tokenize.c
VP8_CX_SRCS-yes += encoder/treewriter.c
VP8_CX_SRCS-yes += encoder/variance_c.c
+ifeq ($(CONFIG_POSTPROC),yes)
VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.h
VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.c
+endif
VP8_CX_SRCS-yes += encoder/temporal_filter.c
VP8_CX_SRCS-yes += encoder/temporal_filter.h
VP8_CX_SRCS-yes += encoder/mbgraph.c