author    Ronald S. Bultje <rbultje@google.com>                 2012-08-20 17:04:23 -0700
committer Gerrit Code Review <gerrit@gerrit.golo.chromium.org>  2012-08-20 17:04:23 -0700
commit    4396441b8b6a97aae50694d49971cdc062c469e0 (patch)
tree      c7e53cb1e8fe71b42bfff31d63554929bde20b63 /vp8/common
parent    fc892cc54d6c443f2e29e270c7ad0dc5e0f137a2 (diff)
parent    5d4cffb35f4bc23462eedc95a4802c65e32d7d5a (diff)
Merge "Superblock coding." into experimental
Diffstat (limited to 'vp8/common')
-rw-r--r--  vp8/common/blockd.h                   |   6
-rw-r--r--  vp8/common/entropymode.c              |  32
-rw-r--r--  vp8/common/entropymode.h              |   6
-rw-r--r--  vp8/common/generic/systemdependent.c  |   6
-rw-r--r--  vp8/common/loopfilter.c               |  32
-rw-r--r--  vp8/common/onyxc_int.h                |   8
-rw-r--r--  vp8/common/pred_common.c              |  15
-rw-r--r--  vp8/common/recon.c                    |  46
-rw-r--r--  vp8/common/recon.h                    |  16
-rw-r--r--  vp8/common/reconinter.c               |  50
-rw-r--r--  vp8/common/reconintra.c               | 285
11 files changed, 290 insertions(+), 212 deletions(-)
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index 1926f20bd..1cba5d35a 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -148,6 +148,7 @@ typedef enum {
#define VP8_YMODES (B_PRED + 1)
#define VP8_UV_MODES (TM_PRED + 1)
#define VP8_I8X8_MODES (TM_PRED + 1)
+#define VP8_I32X32_MODES (TM_PRED + 1)
#define VP8_MVREFS (1 + SPLITMV - NEARESTMV)
@@ -293,6 +294,11 @@ typedef struct {
INTERPOLATIONFILTERTYPE interp_filter;
#endif
+#if CONFIG_SUPERBLOCKS
+ // FIXME need a SB array of 4 MB_MODE_INFOs that
+ // only needs one encoded_as_sb.
+ unsigned char encoded_as_sb;
+#endif
} MB_MODE_INFO;
typedef struct {
diff --git a/vp8/common/entropymode.c b/vp8/common/entropymode.c
index 8d43ce827..5627aa43a 100644
--- a/vp8/common/entropymode.c
+++ b/vp8/common/entropymode.c
@@ -227,6 +227,14 @@ const vp8_tree_index vp8_mv_ref_tree[8] = {
-NEWMV, -SPLITMV
};
+#if CONFIG_SUPERBLOCKS
+const vp8_tree_index vp8_sb_mv_ref_tree[6] = {
+ -ZEROMV, 2,
+ -NEARESTMV, 4,
+ -NEARMV, -NEWMV
+};
+#endif
+
const vp8_tree_index vp8_sub_mv_ref_tree[6] = {
-LEFT4X4, 2,
-ABOVE4X4, 4,
@@ -236,12 +244,18 @@ const vp8_tree_index vp8_sub_mv_ref_tree[6] = {
struct vp8_token_struct vp8_bmode_encodings [VP8_BINTRAMODES];
struct vp8_token_struct vp8_ymode_encodings [VP8_YMODES];
+#if CONFIG_SUPERBLOCKS
+struct vp8_token_struct vp8_sb_kf_ymode_encodings [VP8_I32X32_MODES];
+#endif
struct vp8_token_struct vp8_kf_ymode_encodings [VP8_YMODES];
struct vp8_token_struct vp8_uv_mode_encodings [VP8_UV_MODES];
-struct vp8_token_struct vp8_i8x8_mode_encodings [VP8_UV_MODES];
+struct vp8_token_struct vp8_i8x8_mode_encodings [VP8_I8X8_MODES];
struct vp8_token_struct vp8_mbsplit_encodings [VP8_NUMMBSPLITS];
struct vp8_token_struct vp8_mv_ref_encoding_array [VP8_MVREFS];
+#if CONFIG_SUPERBLOCKS
+struct vp8_token_struct vp8_sb_mv_ref_encoding_array [VP8_MVREFS];
+#endif
struct vp8_token_struct vp8_sub_mv_ref_encoding_array [VP8_SUBMVREFS];
@@ -253,11 +267,18 @@ void vp8_init_mbmode_probs(VP8_COMMON *x) {
vp8_ymode_tree, x->fc.ymode_prob, bct, y_mode_cts, 256, 1);
{
int i;
- for (i = 0; i < 8; i++)
+ for (i = 0; i < 8; i++) {
vp8_tree_probs_from_distribution(
VP8_YMODES, vp8_kf_ymode_encodings, vp8_kf_ymode_tree,
x->kf_ymode_prob[i], bct, kf_y_mode_cts[i],
256, 1);
+#if CONFIG_SUPERBLOCKS
+ vp8_tree_probs_from_distribution(
+ VP8_I32X32_MODES, vp8_sb_kf_ymode_encodings, vp8_sb_ymode_tree,
+ x->sb_kf_ymode_prob[i], bct, kf_y_mode_cts[i],
+ 256, 1);
+#endif
+ }
}
{
int i;
@@ -360,6 +381,9 @@ void vp8_entropy_mode_init() {
vp8_tokens_from_tree(vp8_bmode_encodings, vp8_bmode_tree);
vp8_tokens_from_tree(vp8_ymode_encodings, vp8_ymode_tree);
vp8_tokens_from_tree(vp8_kf_ymode_encodings, vp8_kf_ymode_tree);
+#if CONFIG_SUPERBLOCKS
+ vp8_tokens_from_tree(vp8_sb_kf_ymode_encodings, vp8_sb_ymode_tree);
+#endif
vp8_tokens_from_tree(vp8_uv_mode_encodings, vp8_uv_mode_tree);
vp8_tokens_from_tree(vp8_i8x8_mode_encodings, vp8_i8x8_mode_tree);
vp8_tokens_from_tree(vp8_mbsplit_encodings, vp8_mbsplit_tree);
@@ -370,6 +394,10 @@ void vp8_entropy_mode_init() {
vp8_tokens_from_tree_offset(vp8_mv_ref_encoding_array,
vp8_mv_ref_tree, NEARESTMV);
+#if CONFIG_SUPERBLOCKS
+ vp8_tokens_from_tree_offset(vp8_sb_mv_ref_encoding_array,
+ vp8_sb_mv_ref_tree, NEARESTMV);
+#endif
vp8_tokens_from_tree_offset(vp8_sub_mv_ref_encoding_array,
vp8_sub_mv_ref_tree, LEFT4X4);
}
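
The new vp8_sb_mv_ref_tree above follows the usual vp8_tree_index convention: non-negative entries index the next node pair, negative entries hold the coded token (stored as -token), and SPLITMV is simply absent because a 32x32 superblock is never split. A minimal standalone sketch of walking such a tree, with a caller-supplied bit source standing in for the boolean decoder and its per-node probabilities (the enum values and helper names here are illustrative, not the libvpx ones):

#include <stdio.h>

typedef signed char vp8_tree_index;

enum { ZEROMV = 0, NEARESTMV, NEARMV, NEWMV };   /* illustrative values only */

static const vp8_tree_index sb_mv_ref_tree[6] = {
  -ZEROMV, 2,
  -NEARESTMV, 4,
  -NEARMV, -NEWMV
};

/* Descend the tree: each bit picks the left (0) or right (1) child until a
 * leaf (non-positive entry) is reached; negating it recovers the token. */
static int read_tree_symbol(const vp8_tree_index *tree,
                            int (*next_bit)(void *ctx), void *ctx) {
  vp8_tree_index i = 0;
  while ((i = tree[i + next_bit(ctx)]) > 0)
    ;
  return -i;
}

/* Tiny driver: pull bits from a fixed array. */
static int bit_from_array(void *ctx) {
  int **p = (int **)ctx;
  return *(*p)++;
}

int main(void) {
  int bits[] = { 1, 1, 0 };            /* right, right, left -> NEARMV */
  int *cursor = bits;
  printf("decoded mv ref token: %d\n",
         read_tree_symbol(sb_mv_ref_tree, bit_from_array, &cursor));
  return 0;
}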
diff --git a/vp8/common/entropymode.h b/vp8/common/entropymode.h
index f9cc263b9..430c949a6 100644
--- a/vp8/common/entropymode.h
+++ b/vp8/common/entropymode.h
@@ -40,21 +40,25 @@ extern const vp8_tree_index vp8_bmode_tree[];
extern const vp8_tree_index vp8_ymode_tree[];
extern const vp8_tree_index vp8_kf_ymode_tree[];
extern const vp8_tree_index vp8_uv_mode_tree[];
+#define vp8_sb_ymode_tree vp8_uv_mode_tree
extern const vp8_tree_index vp8_i8x8_mode_tree[];
extern const vp8_tree_index vp8_mbsplit_tree[];
extern const vp8_tree_index vp8_mv_ref_tree[];
+extern const vp8_tree_index vp8_sb_mv_ref_tree[];
extern const vp8_tree_index vp8_sub_mv_ref_tree[];
extern struct vp8_token_struct vp8_bmode_encodings [VP8_BINTRAMODES];
extern struct vp8_token_struct vp8_ymode_encodings [VP8_YMODES];
+extern struct vp8_token_struct vp8_sb_kf_ymode_encodings [VP8_I32X32_MODES];
extern struct vp8_token_struct vp8_kf_ymode_encodings [VP8_YMODES];
-extern struct vp8_token_struct vp8_i8x8_mode_encodings [VP8_UV_MODES];
+extern struct vp8_token_struct vp8_i8x8_mode_encodings [VP8_I8X8_MODES];
extern struct vp8_token_struct vp8_uv_mode_encodings [VP8_UV_MODES];
extern struct vp8_token_struct vp8_mbsplit_encodings [VP8_NUMMBSPLITS];
/* Inter mode values do not start at zero */
extern struct vp8_token_struct vp8_mv_ref_encoding_array [VP8_MVREFS];
+extern struct vp8_token_struct vp8_sb_mv_ref_encoding_array [VP8_MVREFS];
extern struct vp8_token_struct vp8_sub_mv_ref_encoding_array [VP8_SUBMVREFS];
void vp8_entropy_mode_init(void);
diff --git a/vp8/common/generic/systemdependent.c b/vp8/common/generic/systemdependent.c
index b71ef750d..d28024cda 100644
--- a/vp8/common/generic/systemdependent.c
+++ b/vp8/common/generic/systemdependent.c
@@ -47,6 +47,12 @@ void vp8_machine_specific_config(VP8_COMMON *ctx) {
rtcd->recon.recon4 = vp8_recon4b_c;
rtcd->recon.recon_mb = vp8_recon_mb_c;
rtcd->recon.recon_mby = vp8_recon_mby_c;
+#if CONFIG_SUPERBLOCKS
+ rtcd->recon.build_intra_predictors_sby_s =
+ vp8_build_intra_predictors_sby_s;
+ rtcd->recon.build_intra_predictors_sbuv_s =
+ vp8_build_intra_predictors_sbuv_s;
+#endif
rtcd->recon.build_intra_predictors_mby =
vp8_build_intra_predictors_mby;
#if CONFIG_COMP_INTRA_PRED
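
The two assignments above just register the generic C superblock predictors in the recon function table; an optimized build can later point the same slots at SIMD versions. A generic sketch of that dispatch pattern, using stand-in types and names rather than the actual libvpx ones:

#include <stdio.h>

/* Stand-ins for MACROBLOCKD and the intra-predictor function signature. */
typedef struct macroblockd_stub MACROBLOCKD_STUB;
typedef void (*build_intra_pred_fn)(MACROBLOCKD_STUB *xd);

struct recon_vtable {
  build_intra_pred_fn build_intra_predictors_sby_s;
  build_intra_pred_fn build_intra_predictors_sbuv_s;
};

static void sby_s_c(MACROBLOCKD_STUB *xd)  { (void)xd; puts("generic C sby");  }
static void sbuv_s_c(MACROBLOCKD_STUB *xd) { (void)xd; puts("generic C sbuv"); }

int main(void) {
  struct recon_vtable recon;

  /* machine-specific config: install the portable C versions by default;
   * a SIMD-capable target could overwrite these slots afterwards. */
  recon.build_intra_predictors_sby_s  = sby_s_c;
  recon.build_intra_predictors_sbuv_s = sbuv_s_c;

  /* Callers dispatch through the table rather than calling the C symbol. */
  recon.build_intra_predictors_sby_s(NULL);
  recon.build_intra_predictors_sbuv_s(NULL);
  return 0;
}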
diff --git a/vp8/common/loopfilter.c b/vp8/common/loopfilter.c
index 05c00ef4e..d9c4b54be 100644
--- a/vp8/common/loopfilter.c
+++ b/vp8/common/loopfilter.c
@@ -325,7 +325,13 @@ void vp8_loop_filter_frame
lfi.lim = lfi_n->lim[filter_level];
lfi.hev_thr = lfi_n->hev_thr[hev_index];
- if (mb_col > 0)
+ if (mb_col > 0
+#if CONFIG_SUPERBLOCKS
+ && !((mb_col & 1) && mode_info_context->mbmi.encoded_as_sb &&
+ mode_info_context[0].mbmi.mb_skip_coeff &&
+ mode_info_context[-1].mbmi.mb_skip_coeff)
+#endif
+ )
vp8_loop_filter_mbv_c
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
@@ -344,7 +350,13 @@ void vp8_loop_filter_frame
}
/* don't apply across umv border */
- if (mb_row > 0)
+ if (mb_row > 0
+#if CONFIG_SUPERBLOCKS
+ && !((mb_row & 1) && mode_info_context->mbmi.encoded_as_sb &&
+ mode_info_context[0].mbmi.mb_skip_coeff &&
+ mode_info_context[-cm->mode_info_stride].mbmi.mb_skip_coeff)
+#endif
+ )
vp8_loop_filter_mbh_c
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
@@ -362,7 +374,13 @@ void vp8_loop_filter_frame
}
} else {
// FIXME: Not 8x8 aware
- if (mb_col > 0)
+ if (mb_col > 0
+#if CONFIG_SUPERBLOCKS
+ && !((mb_col & 1) && mode_info_context->mbmi.encoded_as_sb &&
+ mode_info_context[0].mbmi.mb_skip_coeff &&
+ mode_info_context[-1].mbmi.mb_skip_coeff)
+#endif
+ )
LF_INVOKE(&cm->rtcd.loopfilter, simple_mb_v)
(y_ptr, post->y_stride, lfi_n->mblim[filter_level]);
@@ -371,7 +389,13 @@ void vp8_loop_filter_frame
(y_ptr, post->y_stride, lfi_n->blim[filter_level]);
/* don't apply across umv border */
- if (mb_row > 0)
+ if (mb_row > 0
+#if CONFIG_SUPERBLOCKS
+ && !((mb_row & 1) && mode_info_context->mbmi.encoded_as_sb &&
+ mode_info_context[0].mbmi.mb_skip_coeff &&
+ mode_info_context[-cm->mode_info_stride].mbmi.mb_skip_coeff)
+#endif
+ )
LF_INVOKE(&cm->rtcd.loopfilter, simple_mb_h)
(y_ptr, post->y_stride, lfi_n->mblim[filter_level]);
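
Each of the four guards added above encodes the same rule: the edge between the two halves of a 32x32 superblock is left unfiltered when the superblock flag is set and both macroblocks on either side of that edge coded no coefficients. Restated as a standalone predicate for the vertical-edge case (the struct names are stand-ins for the real MODE_INFO types):

/* Stand-ins for MB_MODE_INFO / MODE_INFO. */
typedef struct { unsigned char encoded_as_sb, mb_skip_coeff; } LF_MBMI_STUB;
typedef struct { LF_MBMI_STUB mbmi; } LF_MI_STUB;

/* Nonzero when the left vertical MB edge at mb_col is the internal edge of a
 * skipped superblock: odd column, SB flag set, both MBs fully skipped. */
static int sb_internal_v_edge_skipped(const LF_MI_STUB *mi, int mb_col) {
  return (mb_col & 1) &&
         mi[0].mbmi.encoded_as_sb &&
         mi[0].mbmi.mb_skip_coeff &&
         mi[-1].mbmi.mb_skip_coeff;
}

The horizontal-edge variant is identical except that the neighbour is mi[-mode_info_stride] and the test uses the row parity instead of the column parity.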
diff --git a/vp8/common/onyxc_int.h b/vp8/common/onyxc_int.h
index a36347dca..b7a543220 100644
--- a/vp8/common/onyxc_int.h
+++ b/vp8/common/onyxc_int.h
@@ -226,12 +226,15 @@ typedef struct VP8Common {
/* Y,U,V,Y2 */
ENTROPY_CONTEXT_PLANES *above_context; /* row of context for each plane */
- ENTROPY_CONTEXT_PLANES left_context; /* (up to) 4 contexts "" */
+ ENTROPY_CONTEXT_PLANES left_context[2]; /* (up to) 4 contexts "" */
/* keyframe block modes are predicted by their above, left neighbors */
vp8_prob kf_bmode_prob [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES - 1];
vp8_prob kf_ymode_prob[8][VP8_YMODES - 1]; /* keyframe "" */
+#if CONFIG_SUPERBLOCKS
+ vp8_prob sb_kf_ymode_prob[8][VP8_I32X32_MODES - 1];
+#endif
int kf_ymode_probs_index;
int kf_ymode_probs_update;
vp8_prob kf_uv_mode_prob[VP8_YMODES] [VP8_UV_MODES - 1];
@@ -239,6 +242,9 @@ typedef struct VP8Common {
vp8_prob prob_intra_coded;
vp8_prob prob_last_coded;
vp8_prob prob_gf_coded;
+#if CONFIG_SUPERBLOCKS
+ vp8_prob sb_coded;
+#endif
// Context probabilities when using predictive coding of segment id
vp8_prob segment_pred_probs[PREDICTION_PROBS];
diff --git a/vp8/common/pred_common.c b/vp8/common/pred_common.c
index ac5d86009..cb80a0f7e 100644
--- a/vp8/common/pred_common.c
+++ b/vp8/common/pred_common.c
@@ -1,3 +1,4 @@
+
/*
* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
*
@@ -224,10 +225,24 @@ void set_pred_flag(MACROBLOCKD *const xd,
switch (pred_id) {
case PRED_SEG_ID:
xd->mode_info_context->mbmi.seg_id_predicted = pred_flag;
+#if CONFIG_SUPERBLOCKS
+ if (xd->mode_info_context->mbmi.encoded_as_sb) {
+ xd->mode_info_context[1].mbmi.seg_id_predicted = pred_flag;
+ xd->mode_info_context[xd->mode_info_stride].mbmi.seg_id_predicted = pred_flag;
+ xd->mode_info_context[xd->mode_info_stride+1].mbmi.seg_id_predicted = pred_flag;
+ }
+#endif
break;
case PRED_REF:
xd->mode_info_context->mbmi.ref_predicted = pred_flag;
+#if CONFIG_SUPERBLOCKS
+ if (xd->mode_info_context->mbmi.encoded_as_sb) {
+ xd->mode_info_context[1].mbmi.ref_predicted = pred_flag;
+ xd->mode_info_context[xd->mode_info_stride].mbmi.ref_predicted = pred_flag;
+ xd->mode_info_context[xd->mode_info_stride+1].mbmi.ref_predicted = pred_flag;
+ }
+#endif
break;
case PRED_MBSKIP:
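
The superblock branches added to set_pred_flag() mirror one per-MB flag into the other three MODE_INFO entries of the 2x2 macroblock group, which is exactly the duplication the FIXME in blockd.h complains about. A minimal sketch of that propagation pattern in isolation (struct layout and names are stand-ins):

/* Stand-ins for the relevant MODE_INFO fields. */
typedef struct { unsigned char encoded_as_sb, seg_id_predicted; } PC_MBMI_STUB;
typedef struct { PC_MBMI_STUB mbmi; } PC_MI_STUB;

/* Write the flag to the current MB and, when it heads a 32x32 superblock,
 * to the right, below and below-right neighbours as well. */
static void set_seg_id_predicted_sb(PC_MI_STUB *mi, int mi_stride,
                                    unsigned char pred_flag) {
  mi[0].mbmi.seg_id_predicted = pred_flag;
  if (mi[0].mbmi.encoded_as_sb) {
    mi[1].mbmi.seg_id_predicted = pred_flag;              /* right       */
    mi[mi_stride].mbmi.seg_id_predicted = pred_flag;      /* below       */
    mi[mi_stride + 1].mbmi.seg_id_predicted = pred_flag;  /* below-right */
  }
}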
diff --git a/vp8/common/recon.c b/vp8/common/recon.c
index 8fc320863..cf2d2fb85 100644
--- a/vp8/common/recon.c
+++ b/vp8/common/recon.c
@@ -124,6 +124,52 @@ void vp8_recon2b_c
}
}
+#if CONFIG_SUPERBLOCKS
+void vp8_recon_mby_s_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd, uint8_t *dst) {
+ int x, y;
+ BLOCKD *b = &xd->block[0];
+ int stride = b->dst_stride;
+ short *diff = b->diff;
+
+ for (y = 0; y < 16; y++) {
+ for (x = 0; x < 16; x++) {
+ int a = dst[x] + diff[x];
+ if (a < 0)
+ a = 0;
+ else if (a > 255)
+ a = 255;
+ dst[x] = a;
+ }
+ dst += stride;
+ diff += 16;
+ }
+}
+
+void vp8_recon_mbuv_s_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
+ int x, y, i;
+ uint8_t *dst = udst;
+
+ for (i = 0; i < 2; i++, dst = vdst) {
+ BLOCKD *b = &xd->block[16 + 4 * i];
+ int stride = b->dst_stride;
+ short *diff = b->diff;
+
+ for (y = 0; y < 8; y++) {
+ for (x = 0; x < 8; x++) {
+ int a = dst[x] + diff[x];
+ if (a < 0)
+ a = 0;
+ else if (a > 255)
+ a = 255;
+ dst[x] = a;
+ }
+ dst += stride;
+ diff += 8;
+ }
+ }
+}
+#endif
+
void vp8_recon_mby_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd) {
#if ARCH_ARM
BLOCKD *b = &xd->block[0];
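
vp8_recon_mby_s_c() and vp8_recon_mbuv_s_c() above are the in-place superblock variants: the residual is added straight onto the frame buffer at the frame stride, rather than onto a packed predictor block, then clipped to 8-bit range. The core loop in isolation, with the clip pulled into a helper (a sketch, not the libvpx code):

#include <stdint.h>

static uint8_t clip_uint8(int v) {
  return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

/* Add a size x size residual (packed at a `size`-sample pitch, like the
 * BLOCKD diff buffer) onto a destination plane with an arbitrary stride. */
static void recon_inplace(uint8_t *dst, int dst_stride,
                          const int16_t *diff, int size) {
  int r, c;
  for (r = 0; r < size; r++) {
    for (c = 0; c < size; c++)
      dst[c] = clip_uint8(dst[c] + diff[c]);
    dst += dst_stride;
    diff += size;
  }
}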
diff --git a/vp8/common/recon.h b/vp8/common/recon.h
index 2626a218d..3527fc14d 100644
--- a/vp8/common/recon.h
+++ b/vp8/common/recon.h
@@ -100,6 +100,11 @@ extern prototype_recon_macroblock(vp8_recon_recon_mb);
#endif
extern prototype_recon_macroblock(vp8_recon_recon_mby);
+#ifndef vp8_recon_build_intra_predictors_sby_s
+#define vp8_recon_build_intra_predictors_sby_s vp8_build_intra_predictors_sby_s
+#endif
+extern prototype_build_intra_predictors(vp8_recon_build_intra_predictors_sby_s);
+
#ifndef vp8_recon_build_intra_predictors_mby
#define vp8_recon_build_intra_predictors_mby vp8_build_intra_predictors_mby
#endif
@@ -126,6 +131,11 @@ extern prototype_build_intra_predictors\
extern prototype_build_intra_predictors\
(vp8_recon_build_intra_predictors_mby_s);
+#ifndef vp8_recon_build_intra_predictors_sbuv_s
+#define vp8_recon_build_intra_predictors_sbuv_s vp8_build_intra_predictors_sbuv_s
+#endif
+extern prototype_build_intra_predictors(vp8_recon_build_intra_predictors_sbuv_s);
+
#ifndef vp8_recon_build_intra_predictors_mbuv
#define vp8_recon_build_intra_predictors_mbuv vp8_build_intra_predictors_mbuv
#endif
@@ -214,11 +224,17 @@ typedef struct vp8_recon_rtcd_vtable {
vp8_recon_fn_t recon4;
vp8_recon_mb_fn_t recon_mb;
vp8_recon_mb_fn_t recon_mby;
+#if CONFIG_SUPERBLOCKS
+ vp8_build_intra_pred_fn_t build_intra_predictors_sby_s;
+#endif
vp8_build_intra_pred_fn_t build_intra_predictors_mby_s;
vp8_build_intra_pred_fn_t build_intra_predictors_mby;
#if CONFIG_COMP_INTRA_PRED
vp8_build_intra_pred_fn_t build_comp_intra_predictors_mby;
#endif
+#if CONFIG_SUPERBLOCKS
+ vp8_build_intra_pred_fn_t build_intra_predictors_sbuv_s;
+#endif
vp8_build_intra_pred_fn_t build_intra_predictors_mbuv_s;
vp8_build_intra_pred_fn_t build_intra_predictors_mbuv;
#if CONFIG_COMP_INTRA_PRED
diff --git a/vp8/common/reconinter.c b/vp8/common/reconinter.c
index d73d8fd1c..0212c92c7 100644
--- a/vp8/common/reconinter.c
+++ b/vp8/common/reconinter.c
@@ -761,6 +761,56 @@ void vp8_build_1st_inter16x16_predictors_mb(MACROBLOCKD *xd,
vp8_build_1st_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride);
}
+#if CONFIG_SUPERBLOCKS
+void vp8_build_inter32x32_predictors_sb(MACROBLOCKD *x,
+ unsigned char *dst_y,
+ unsigned char *dst_u,
+ unsigned char *dst_v,
+ int dst_ystride,
+ int dst_uvstride) {
+ uint8_t *y1 = x->pre.y_buffer, *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
+ uint8_t *y2 = x->second_pre.y_buffer, *u2 = x->second_pre.u_buffer,
+ *v2 = x->second_pre.v_buffer;
+ int n;
+
+ for (n = 0; n < 4; n++)
+ {
+ const int x_idx = n & 1, y_idx = n >> 1;
+
+ x->pre.y_buffer = y1 + y_idx * 16 * x->pre.y_stride + x_idx * 16;
+ x->pre.u_buffer = u1 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
+ x->pre.v_buffer = v1 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
+
+ vp8_build_1st_inter16x16_predictors_mb(x,
+ dst_y + y_idx * 16 * dst_ystride + x_idx * 16,
+ dst_u + y_idx * 8 * dst_uvstride + x_idx * 8,
+ dst_v + y_idx * 8 * dst_uvstride + x_idx * 8,
+ dst_ystride, dst_uvstride);
+ if (x->mode_info_context->mbmi.second_ref_frame) {
+ x->second_pre.y_buffer = y2 + y_idx * 16 * x->pre.y_stride + x_idx * 16;
+ x->second_pre.u_buffer = u2 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
+ x->second_pre.v_buffer = v2 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
+
+ vp8_build_2nd_inter16x16_predictors_mb(x,
+ dst_y + y_idx * 16 * dst_ystride + x_idx * 16,
+ dst_u + y_idx * 8 * dst_uvstride + x_idx * 8,
+ dst_v + y_idx * 8 * dst_uvstride + x_idx * 8,
+ dst_ystride, dst_uvstride);
+ }
+ }
+
+ x->pre.y_buffer = y1;
+ x->pre.u_buffer = u1;
+ x->pre.v_buffer = v1;
+
+ if (x->mode_info_context->mbmi.second_ref_frame) {
+ x->second_pre.y_buffer = y2;
+ x->second_pre.u_buffer = u2;
+ x->second_pre.v_buffer = v2;
+ }
+}
+#endif
+
/*
* The following functions should be called after an initial
* call to vp8_build_inter16x16_predictors_mb() or _mby()/_mbuv().
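
vp8_build_inter32x32_predictors_sb() above reuses the existing 16x16 builders four times, offsetting both the reference (pre) and destination pointers per quadrant and restoring the saved base pointers afterwards. The quadrant offset arithmetic in isolation (the strides below are made-up values, just to show the pattern):

#include <stdio.h>

int main(void) {
  const int y_stride = 64, uv_stride = 32;  /* hypothetical frame strides */
  int n;
  for (n = 0; n < 4; n++) {
    const int x_idx = n & 1, y_idx = n >> 1;  /* raster order: 0 1 / 2 3 */
    printf("quadrant %d: luma offset %d, chroma offset %d\n", n,
           y_idx * 16 * y_stride + x_idx * 16,
           y_idx * 8 * uv_stride + x_idx * 8);
  }
  return 0;
}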
diff --git a/vp8/common/reconintra.c b/vp8/common/reconintra.c
index 96bebc5be..d858cd153 100644
--- a/vp8/common/reconintra.c
+++ b/vp8/common/reconintra.c
@@ -207,17 +207,18 @@ void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd,
}
}
-void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *xd,
- unsigned char *ypred_ptr,
- int y_stride, int mode) {
+void vp8_build_intra_predictors_internal(MACROBLOCKD *xd,
+ unsigned char *src, int src_stride,
+ unsigned char *ypred_ptr,
+ int y_stride, int mode, int bsize) {
- unsigned char *yabove_row = xd->dst.y_buffer - xd->dst.y_stride;
- unsigned char yleft_col[16];
+ unsigned char *yabove_row = src - src_stride;
+ unsigned char yleft_col[32];
unsigned char ytop_left = yabove_row[-1];
int r, c, i;
- for (i = 0; i < 16; i++) {
- yleft_col[i] = xd->dst.y_buffer [i * xd->dst.y_stride - 1];
+ for (i = 0; i < bsize; i++) {
+    yleft_col[i] = src[i * src_stride - 1];
}
/* for Y */
@@ -227,58 +228,58 @@ void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *xd,
int i;
int shift;
int average = 0;
-
+ int log2_bsize_minus_1;
+
+ assert(bsize == 8 || bsize == 16 || bsize == 32);
+ if (bsize == 8) {
+ log2_bsize_minus_1 = 2;
+ } else if (bsize == 16) {
+ log2_bsize_minus_1 = 3;
+ } else /* bsize == 32 */ {
+ log2_bsize_minus_1 = 4;
+ }
if (xd->up_available || xd->left_available) {
if (xd->up_available) {
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < bsize; i++) {
average += yabove_row[i];
}
}
if (xd->left_available) {
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < bsize; i++) {
average += yleft_col[i];
}
}
- shift = 3 + xd->up_available + xd->left_available;
+ shift = log2_bsize_minus_1 + xd->up_available + xd->left_available;
expected_dc = (average + (1 << (shift - 1))) >> shift;
} else {
expected_dc = 128;
}
- for (r = 0; r < 16; r++) {
- vpx_memset(ypred_ptr, expected_dc, 16);
- ypred_ptr += y_stride; /*16;*/
+ for (r = 0; r < bsize; r++) {
+ vpx_memset(ypred_ptr, expected_dc, bsize);
+ ypred_ptr += y_stride;
}
}
break;
case V_PRED: {
-
- for (r = 0; r < 16; r++) {
-
- ((int *)ypred_ptr)[0] = ((int *)yabove_row)[0];
- ((int *)ypred_ptr)[1] = ((int *)yabove_row)[1];
- ((int *)ypred_ptr)[2] = ((int *)yabove_row)[2];
- ((int *)ypred_ptr)[3] = ((int *)yabove_row)[3];
+ for (r = 0; r < bsize; r++) {
+ memcpy(ypred_ptr, yabove_row, bsize);
ypred_ptr += y_stride;
}
}
break;
case H_PRED: {
-
- for (r = 0; r < 16; r++) {
-
- vpx_memset(ypred_ptr, yleft_col[r], 16);
+ for (r = 0; r < bsize; r++) {
+ vpx_memset(ypred_ptr, yleft_col[r], bsize);
ypred_ptr += y_stride;
}
-
}
break;
case TM_PRED: {
-
- for (r = 0; r < 16; r++) {
- for (c = 0; c < 16; c++) {
+ for (r = 0; r < bsize; r++) {
+ for (c = 0; c < bsize; c++) {
int pred = yleft_col[r] + yabove_row[ c] - ytop_left;
if (pred < 0)
@@ -292,31 +293,30 @@ void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *xd,
ypred_ptr += y_stride;
}
-
}
break;
case D45_PRED: {
- d45_predictor(ypred_ptr, y_stride, 16, yabove_row, yleft_col);
+ d45_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col);
}
break;
case D135_PRED: {
- d135_predictor(ypred_ptr, y_stride, 16, yabove_row, yleft_col);
+ d135_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col);
}
break;
case D117_PRED: {
- d117_predictor(ypred_ptr, y_stride, 16, yabove_row, yleft_col);
+ d117_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col);
}
break;
case D153_PRED: {
- d153_predictor(ypred_ptr, y_stride, 16, yabove_row, yleft_col);
+ d153_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col);
}
break;
case D27_PRED: {
- d27_predictor(ypred_ptr, y_stride, 16, yabove_row, yleft_col);
+ d27_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col);
}
break;
case D63_PRED: {
- d63_predictor(ypred_ptr, y_stride, 16, yabove_row, yleft_col);
+ d63_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col);
}
break;
case I8X8_PRED:
@@ -332,25 +332,36 @@ void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *xd,
}
void vp8_build_intra_predictors_mby(MACROBLOCKD *xd) {
- vp8_build_intra_predictors_mby_internal(xd, xd->predictor, 16,
- xd->mode_info_context->mbmi.mode);
+ vp8_build_intra_predictors_internal(xd, xd->dst.y_buffer, xd->dst.y_stride,
+ xd->predictor, 16,
+ xd->mode_info_context->mbmi.mode, 16);
}
void vp8_build_intra_predictors_mby_s(MACROBLOCKD *xd) {
- vp8_build_intra_predictors_mby_internal(xd, xd->dst.y_buffer,
- xd->dst.y_stride,
- xd->mode_info_context->mbmi.mode);
+ vp8_build_intra_predictors_internal(xd, xd->dst.y_buffer, xd->dst.y_stride,
+ xd->dst.y_buffer, xd->dst.y_stride,
+ xd->mode_info_context->mbmi.mode, 16);
}
+#if CONFIG_SUPERBLOCKS
+void vp8_build_intra_predictors_sby_s(MACROBLOCKD *x) {
+ vp8_build_intra_predictors_internal(x, x->dst.y_buffer, x->dst.y_stride,
+ x->dst.y_buffer, x->dst.y_stride,
+ x->mode_info_context->mbmi.mode, 32);
+}
+#endif
+
#if CONFIG_COMP_INTRA_PRED
void vp8_build_comp_intra_predictors_mby(MACROBLOCKD *xd) {
unsigned char predictor[2][256];
int i;
- vp8_build_intra_predictors_mby_internal(
- xd, predictor[0], 16, xd->mode_info_context->mbmi.mode);
- vp8_build_intra_predictors_mby_internal(
- xd, predictor[1], 16, xd->mode_info_context->mbmi.second_mode);
+  vp8_build_intra_predictors_internal(xd, xd->dst.y_buffer, xd->dst.y_stride,
+                                      predictor[0], 16,
+                                      xd->mode_info_context->mbmi.mode, 16);
+  vp8_build_intra_predictors_internal(xd, xd->dst.y_buffer, xd->dst.y_stride,
+                                      predictor[1], 16,
+                                      xd->mode_info_context->mbmi.second_mode,
+                                      16);
for (i = 0; i < 256; i++) {
xd->predictor[i] = (predictor[0][i] + predictor[1][i] + 1) >> 1;
@@ -362,172 +373,37 @@ void vp8_build_intra_predictors_mbuv_internal(MACROBLOCKD *xd,
unsigned char *upred_ptr,
unsigned char *vpred_ptr,
int uv_stride,
- int mode) {
- YV12_BUFFER_CONFIG * dst = &xd->dst;
- unsigned char *uabove_row = dst->u_buffer - dst->uv_stride;
- unsigned char uleft_col[16];
- unsigned char utop_left = uabove_row[-1];
- unsigned char *vabove_row = dst->v_buffer - dst->uv_stride;
- unsigned char vleft_col[20];
- unsigned char vtop_left = vabove_row[-1];
-
- int i, j;
-
- for (i = 0; i < 8; i++) {
- uleft_col[i] = dst->u_buffer [i * dst->uv_stride - 1];
- vleft_col[i] = dst->v_buffer [i * dst->uv_stride - 1];
- }
-
- switch (mode) {
- case DC_PRED: {
- int expected_udc;
- int expected_vdc;
- int i;
- int shift;
- int Uaverage = 0;
- int Vaverage = 0;
-
- if (xd->up_available) {
- for (i = 0; i < 8; i++) {
- Uaverage += uabove_row[i];
- Vaverage += vabove_row[i];
- }
- }
-
- if (xd->left_available) {
- for (i = 0; i < 8; i++) {
- Uaverage += uleft_col[i];
- Vaverage += vleft_col[i];
- }
- }
-
- if (!xd->up_available && !xd->left_available) {
- expected_udc = 128;
- expected_vdc = 128;
- } else {
- shift = 2 + xd->up_available + xd->left_available;
- expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
- expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
- }
-
-
- /*vpx_memset(upred_ptr,expected_udc,64);*/
- /*vpx_memset(vpred_ptr,expected_vdc,64);*/
- for (i = 0; i < 8; i++) {
- vpx_memset(upred_ptr, expected_udc, 8);
- vpx_memset(vpred_ptr, expected_vdc, 8);
- upred_ptr += uv_stride; /*8;*/
- vpred_ptr += uv_stride; /*8;*/
- }
- }
- break;
- case V_PRED: {
- int i;
-
- for (i = 0; i < 8; i++) {
- vpx_memcpy(upred_ptr, uabove_row, 8);
- vpx_memcpy(vpred_ptr, vabove_row, 8);
- upred_ptr += uv_stride; /*8;*/
- vpred_ptr += uv_stride; /*8;*/
- }
-
- }
- break;
- case H_PRED: {
- int i;
-
- for (i = 0; i < 8; i++) {
- vpx_memset(upred_ptr, uleft_col[i], 8);
- vpx_memset(vpred_ptr, vleft_col[i], 8);
- upred_ptr += uv_stride; /*8;*/
- vpred_ptr += uv_stride; /*8;*/
- }
- }
-
- break;
- case TM_PRED: {
- int i;
-
- for (i = 0; i < 8; i++) {
- for (j = 0; j < 8; j++) {
- int predu = uleft_col[i] + uabove_row[j] - utop_left;
- int predv = vleft_col[i] + vabove_row[j] - vtop_left;
-
- if (predu < 0)
- predu = 0;
-
- if (predu > 255)
- predu = 255;
-
- if (predv < 0)
- predv = 0;
-
- if (predv > 255)
- predv = 255;
-
- upred_ptr[j] = predu;
- vpred_ptr[j] = predv;
- }
-
- upred_ptr += uv_stride; /*8;*/
- vpred_ptr += uv_stride; /*8;*/
- }
-
- }
- break;
- case D45_PRED: {
- d45_predictor(upred_ptr, uv_stride, 8, uabove_row, uleft_col);
- d45_predictor(vpred_ptr, uv_stride, 8, vabove_row, vleft_col);
- }
- break;
- case D135_PRED: {
- d135_predictor(upred_ptr, uv_stride, 8, uabove_row, uleft_col);
- d135_predictor(vpred_ptr, uv_stride, 8, vabove_row, vleft_col);
- }
- break;
- case D117_PRED: {
- d117_predictor(upred_ptr, uv_stride, 8, uabove_row, uleft_col);
- d117_predictor(vpred_ptr, uv_stride, 8, vabove_row, vleft_col);
- }
- break;
- case D153_PRED: {
- d153_predictor(upred_ptr, uv_stride, 8, uabove_row, uleft_col);
- d153_predictor(vpred_ptr, uv_stride, 8, vabove_row, vleft_col);
- }
- break;
- case D27_PRED: {
- d27_predictor(upred_ptr, uv_stride, 8, uabove_row, uleft_col);
- d27_predictor(vpred_ptr, uv_stride, 8, vabove_row, vleft_col);
- }
- break;
- case D63_PRED: {
- d63_predictor(upred_ptr, uv_stride, 8, uabove_row, uleft_col);
- d63_predictor(vpred_ptr, uv_stride, 8, vabove_row, vleft_col);
- }
- break;
- case B_PRED:
- case NEARESTMV:
- case NEARMV:
- case ZEROMV:
- case NEWMV:
- case SPLITMV:
- case MB_MODE_COUNT:
- break;
- }
+ int mode, int bsize) {
+ vp8_build_intra_predictors_internal(xd, xd->dst.u_buffer, xd->dst.uv_stride,
+ upred_ptr, uv_stride, mode, bsize);
+ vp8_build_intra_predictors_internal(xd, xd->dst.v_buffer, xd->dst.uv_stride,
+ vpred_ptr, uv_stride, mode, bsize);
}
void vp8_build_intra_predictors_mbuv(MACROBLOCKD *xd) {
- vp8_build_intra_predictors_mbuv_internal(
- xd, &xd->predictor[256], &xd->predictor[320],
- 8, xd->mode_info_context->mbmi.uv_mode);
+ vp8_build_intra_predictors_mbuv_internal(xd, &xd->predictor[256],
+ &xd->predictor[320], 8,
+ xd->mode_info_context->mbmi.uv_mode,
+ 8);
}
void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *xd) {
- vp8_build_intra_predictors_mbuv_internal(
- xd, xd->dst.u_buffer, xd->dst.v_buffer,
- xd->dst.uv_stride, xd->mode_info_context->mbmi.uv_mode);
+ vp8_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer,
+ xd->dst.v_buffer,
+ xd->dst.uv_stride,
+ xd->mode_info_context->mbmi.uv_mode,
+ 8);
}
+#if CONFIG_SUPERBLOCKS
+void vp8_build_intra_predictors_sbuv_s(MACROBLOCKD *xd) {
+ vp8_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer,
+ xd->dst.v_buffer, xd->dst.uv_stride,
+ xd->mode_info_context->mbmi.uv_mode,
+ 16);
+}
+#endif
+
#if CONFIG_COMP_INTRA_PRED
void vp8_build_comp_intra_predictors_mbuv(MACROBLOCKD *xd) {
unsigned char predictor[2][2][64];
@@ -541,7 +417,8 @@ void vp8_build_comp_intra_predictors_mbuv(MACROBLOCKD *xd) {
xd->mode_info_context->mbmi.second_uv_mode);
for (i = 0; i < 64; i++) {
xd->predictor[256 + i] = (predictor[0][0][i] + predictor[0][1][i] + 1) >> 1;
- xd->predictor[256 + 64 + i] = (predictor[1][0][i] + predictor[1][1][i] + 1) >> 1;
+ xd->predictor[256 + 64 + i] = (predictor[1][0][i] +
+ predictor[1][1][i] + 1) >> 1;
}
}
#endif
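
Finally, the generalized DC predictor above computes shift = log2(bsize) - 1 + up_available + left_available, so with both borders present the sum of the 2*bsize border samples is divided by 2*bsize with rounding to nearest. A small self-checking restatement of that arithmetic (values are made up):

#include <assert.h>

static int dc_value(int sum, int bsize, int up, int left) {
  int log2_bsize_minus_1 = (bsize == 8) ? 2 : (bsize == 16) ? 3 : 4;
  if (!up && !left)
    return 128;                              /* no border: flat mid-grey */
  {
    int shift = log2_bsize_minus_1 + up + left;
    return (sum + (1 << (shift - 1))) >> shift;
  }
}

int main(void) {
  /* 32x32, both borders available: 64 samples, all equal to 100. */
  assert(dc_value(64 * 100, 32, 1, 1) == 100);
  /* 16x16, only the top row available: 16 samples summing to 1608 -> 101. */
  assert(dc_value(1608, 16, 1, 0) == 101);
  return 0;
}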