Diffstat (limited to 'vp8')
-rw-r--r--  vp8/common/blockd.h                         4
-rw-r--r--  vp8/common/loopfilter.h                    10
-rw-r--r--  vp8/common/mvref_common.c                  15
-rw-r--r--  vp8/common/mvref_common.h                   4
-rw-r--r--  vp8/common/pred_common.c                   50
-rw-r--r--  vp8/common/pred_common.h                   35
-rw-r--r--  vp8/common/reconintra4x4.c                  3
-rw-r--r--  vp8/common/reconintra4x4.h                  2
-rw-r--r--  vp8/common/rtcd_defs.sh                    95
-rw-r--r--  vp8/decoder/decodemv.c                     41
-rw-r--r--  vp8/decoder/decodframe.c                    3
-rw-r--r--  vp8/encoder/arm/dct_arm.c                   2
-rw-r--r--  vp8/encoder/bitstream.c                    22
-rw-r--r--  vp8/encoder/block.h                         4
-rw-r--r--  vp8/encoder/dct.h                          97
-rw-r--r--  vp8/encoder/encodeframe.c                  22
-rw-r--r--  vp8/encoder/encodeintra.c                  18
-rw-r--r--  vp8/encoder/encodemb.c                     11
-rw-r--r--  vp8/encoder/encodemb.h                     67
-rw-r--r--  vp8/encoder/generic/csystemdependent.c     21
-rw-r--r--  vp8/encoder/onyx_if.c                      35
-rw-r--r--  vp8/encoder/onyx_int.h                      3
-rw-r--r--  vp8/encoder/rdopt.c                       243
-rw-r--r--  vp8/encoder/satd_c.c                        3
-rw-r--r--  vp8/encoder/segmentation.c                  6
-rw-r--r--  vp8/encoder/tokenize.c                      2
-rw-r--r--  vp8/encoder/x86/dct_x86.h                  73
-rw-r--r--  vp8/encoder/x86/encodemb_x86.h             85
-rw-r--r--  vp8/encoder/x86/x86_csystemdependent.c     17
-rw-r--r--  vp8/vp8cx.mk                                3
30 files changed, 327 insertions, 669 deletions
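
Most of this change is mechanical: the BLOCKD/MACROBLOCKD typedefs gain lowercase struct tags so that other headers (and the generated rtcd prototypes) can forward-declare them instead of including blockd.h, several prediction-context and MV-reference helpers pick up a vp9_ prefix, and the hand-rolled fdct/encodemb RTCD vtables are deleted in favour of entries in rtcd_defs.sh. A minimal sketch of the forward-declaration pattern the tag rename enables follows; the member shown is a stand-in, not the real struct layout.

/* Sketch only: why "typedef struct macroblockd { ... } MACROBLOCKD;" matters.
 * A header that only passes pointers can rely on an incomplete type,
 * so it no longer needs to include blockd.h. */
struct macroblockd;                                  /* forward declaration */
void example_filter_frame(struct macroblockd *mbd);  /* pointer parameter: complete type not required */

/* Only translation units that touch members need the full definition: */
typedef struct macroblockd {
  int placeholder_member;  /* stand-in; the real members live in blockd.h */
} MACROBLOCKD;
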
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index 5d919f1a9..3a3efabed 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -253,7 +253,7 @@ typedef struct {
union b_mode_info bmi[16];
} MODE_INFO;
-typedef struct {
+typedef struct blockd {
short *qcoeff;
short *dqcoeff;
unsigned char *predictor;
@@ -275,7 +275,7 @@ typedef struct {
union b_mode_info bmi;
} BLOCKD;
-typedef struct MacroBlockD {
+typedef struct macroblockd {
DECLARE_ALIGNED(16, short, diff[400]); /* from idct diff */
DECLARE_ALIGNED(16, unsigned char, predictor[384]);
DECLARE_ALIGNED(16, short, qcoeff[400]);
diff --git a/vp8/common/loopfilter.h b/vp8/common/loopfilter.h
index df78b8c12..c9f4fb5a4 100644
--- a/vp8/common/loopfilter.h
+++ b/vp8/common/loopfilter.h
@@ -81,22 +81,22 @@ typedef void loop_filter_uvfunction
/* assorted loopfilter functions which get used elsewhere */
struct VP8Common;
-struct MacroBlockD;
+struct macroblockd;
void vp8_loop_filter_init(struct VP8Common *cm);
void vp8_loop_filter_frame_init(struct VP8Common *cm,
- struct MacroBlockD *mbd,
+ struct macroblockd *mbd,
int default_filt_lvl);
-void vp8_loop_filter_frame(struct VP8Common *cm, struct MacroBlockD *mbd);
+void vp8_loop_filter_frame(struct VP8Common *cm, struct macroblockd *mbd);
void vp8_loop_filter_partial_frame(struct VP8Common *cm,
- struct MacroBlockD *mbd,
+ struct macroblockd *mbd,
int default_filt_lvl);
void vp8_loop_filter_frame_yonly(struct VP8Common *cm,
- struct MacroBlockD *mbd,
+ struct macroblockd *mbd,
int default_filt_lvl);
void vp8_loop_filter_update_sharpness(loop_filter_info_n *lfi,
diff --git a/vp8/common/mvref_common.c b/vp8/common/mvref_common.c
index b6040cd59..7fd48b938 100644
--- a/vp8/common/mvref_common.c
+++ b/vp8/common/mvref_common.c
@@ -33,17 +33,10 @@ static void clamp_mv(const MACROBLOCKD *xd, int_mv *mv) {
mv->as_mv.row = xd->mb_to_bottom_edge + MV_BORDER;
}
-// Code for selecting / building and entropy coding a motion vector reference
-// Returns a seperation value for two vectors.
-// This is taken as the sum of the abs x and y difference.
-unsigned int mv_distance(int_mv *mv1, int_mv *mv2) {
- return (abs(mv1->as_mv.row - mv2->as_mv.row) +
- abs(mv1->as_mv.col - mv2->as_mv.col));
-}
// Gets a best matching candidate refenence motion vector
// from the given mode info structure (if available)
-int get_candidate_mvref(
+static int get_candidate_mvref(
const MODE_INFO *candidate_mi,
MV_REFERENCE_FRAME ref_frame,
MV_REFERENCE_FRAME *c_ref_frame,
@@ -111,7 +104,7 @@ int get_candidate_mvref(
// Performs mv adjustment based on reference frame and clamps the MV
// if it goes off the edge of the buffer.
-void scale_mv(
+static void scale_mv(
MACROBLOCKD *xd,
MV_REFERENCE_FRAME this_ref_frame,
MV_REFERENCE_FRAME candidate_ref_frame,
@@ -162,7 +155,7 @@ void scale_mv(
// Adds a new candidate reference vector to the list if indeed it is new.
// If it is not new then the score of the existing candidate that it matches
// is increased and the list is resorted.
-void addmv_and_shuffle(
+static void addmv_and_shuffle(
int_mv *mv_list,
int *mv_scores,
int *index,
@@ -212,7 +205,7 @@ void addmv_and_shuffle(
// This function searches the neighbourhood of a given MB/SB and populates a
// list of candidate reference vectors.
//
-void find_mv_refs(
+void vp9_find_mv_refs(
MACROBLOCKD *xd,
MODE_INFO *here,
MODE_INFO *lf_here,
diff --git a/vp8/common/mvref_common.h b/vp8/common/mvref_common.h
index 3f19ddbdb..7f396e4d7 100644
--- a/vp8/common/mvref_common.h
+++ b/vp8/common/mvref_common.h
@@ -17,9 +17,7 @@
#ifndef __INC_MVREF_COMMON_H
#define __INC_MVREF_COMMON_H
-unsigned int mv_distance(int_mv *mv1, int_mv *mv2);
-
-void find_mv_refs(
+void vp9_find_mv_refs(
MACROBLOCKD *xd,
MODE_INFO *here,
MODE_INFO *lf_here,
diff --git a/vp8/common/pred_common.c b/vp8/common/pred_common.c
index a97eed8e4..6fe12d46d 100644
--- a/vp8/common/pred_common.c
+++ b/vp8/common/pred_common.c
@@ -15,9 +15,9 @@
// TBD prediction functions for various bitstream signals
// Returns a context number for the given MB prediction signal
-unsigned char get_pred_context(const VP8_COMMON *const cm,
- const MACROBLOCKD *const xd,
- PRED_ID pred_id) {
+unsigned char vp9_get_pred_context(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd,
+ PRED_ID pred_id) {
int pred_context;
MODE_INFO *m = xd->mode_info_context;
@@ -104,14 +104,14 @@ unsigned char get_pred_context(const VP8_COMMON *const cm,
// This function returns a context probability for coding a given
// prediction signal
-vp8_prob get_pred_prob(const VP8_COMMON *const cm,
- const MACROBLOCKD *const xd,
- PRED_ID pred_id) {
+vp8_prob vp9_get_pred_prob(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd,
+ PRED_ID pred_id) {
vp8_prob pred_probability;
int pred_context;
// Get the appropriate prediction context
- pred_context = get_pred_context(cm, xd, pred_id);
+ pred_context = vp9_get_pred_context(cm, xd, pred_id);
switch (pred_id) {
case PRED_SEG_ID:
@@ -144,14 +144,14 @@ vp8_prob get_pred_prob(const VP8_COMMON *const cm,
// This function returns a context probability ptr for coding a given
// prediction signal
-const vp8_prob *get_pred_probs(const VP8_COMMON *const cm,
- const MACROBLOCKD *const xd,
- PRED_ID pred_id) {
+const vp8_prob *vp9_get_pred_probs(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd,
+ PRED_ID pred_id) {
const vp8_prob *pred_probability;
int pred_context;
// Get the appropriate prediction context
- pred_context = get_pred_context(cm, xd, pred_id);
+ pred_context = vp9_get_pred_context(cm, xd, pred_id);
switch (pred_id) {
case PRED_SEG_ID:
@@ -188,8 +188,8 @@ const vp8_prob *get_pred_probs(const VP8_COMMON *const cm,
// This function returns the status of the given prediction signal.
// I.e. is the predicted value for the given signal correct.
-unsigned char get_pred_flag(const MACROBLOCKD *const xd,
- PRED_ID pred_id) {
+unsigned char vp9_get_pred_flag(const MACROBLOCKD *const xd,
+ PRED_ID pred_id) {
unsigned char pred_flag = 0;
switch (pred_id) {
@@ -216,9 +216,9 @@ unsigned char get_pred_flag(const MACROBLOCKD *const xd,
// This function sets the status of the given prediction signal.
// I.e. is the predicted value for the given signal correct.
-void set_pred_flag(MACROBLOCKD *const xd,
- PRED_ID pred_id,
- unsigned char pred_flag) {
+void vp9_set_pred_flag(MACROBLOCKD *const xd,
+ PRED_ID pred_id,
+ unsigned char pred_flag) {
switch (pred_id) {
case PRED_SEG_ID:
xd->mode_info_context->mbmi.seg_id_predicted = pred_flag;
@@ -257,14 +257,14 @@ void set_pred_flag(MACROBLOCKD *const xd,
// peredict various bitstream signals.
// Macroblock segment id prediction function
-unsigned char get_pred_mb_segid(const VP8_COMMON *const cm, int MbIndex) {
+unsigned char vp9_get_pred_mb_segid(const VP8_COMMON *const cm, int MbIndex) {
// Currently the prediction for the macroblock segment ID is
// the value stored for this macroblock in the previous frame.
return cm->last_frame_seg_map[MbIndex];
}
-MV_REFERENCE_FRAME get_pred_ref(const VP8_COMMON *const cm,
- const MACROBLOCKD *const xd) {
+MV_REFERENCE_FRAME vp9_get_pred_ref(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd) {
MODE_INFO *m = xd->mode_info_context;
MV_REFERENCE_FRAME left;
@@ -335,7 +335,7 @@ MV_REFERENCE_FRAME get_pred_ref(const VP8_COMMON *const cm,
// Functions to computes a set of modified reference frame probabilities
// to use when the prediction of the reference frame value fails
-void calc_ref_probs(int *count, vp8_prob *probs) {
+void vp9_calc_ref_probs(int *count, vp8_prob *probs) {
int tot_count;
tot_count = count[0] + count[1] + count[2] + count[3];
@@ -365,7 +365,7 @@ void calc_ref_probs(int *count, vp8_prob *probs) {
// Values willbe set to 0 for reference frame options that are not possible
// because wither they were predicted and prediction has failed or because
// they are not allowed for a given segment.
-void compute_mod_refprobs(VP8_COMMON *const cm) {
+void vp9_compute_mod_refprobs(VP8_COMMON *const cm) {
int norm_cnt[MAX_REF_FRAMES];
int intra_count;
int inter_count;
@@ -387,28 +387,28 @@ void compute_mod_refprobs(VP8_COMMON *const cm) {
norm_cnt[1] = last_count;
norm_cnt[2] = gf_count;
norm_cnt[3] = arf_count;
- calc_ref_probs(norm_cnt, cm->mod_refprobs[INTRA_FRAME]);
+ vp9_calc_ref_probs(norm_cnt, cm->mod_refprobs[INTRA_FRAME]);
cm->mod_refprobs[INTRA_FRAME][0] = 0; // This branch implicit
norm_cnt[0] = intra_count;
norm_cnt[1] = 0;
norm_cnt[2] = gf_count;
norm_cnt[3] = arf_count;
- calc_ref_probs(norm_cnt, cm->mod_refprobs[LAST_FRAME]);
+ vp9_calc_ref_probs(norm_cnt, cm->mod_refprobs[LAST_FRAME]);
cm->mod_refprobs[LAST_FRAME][1] = 0; // This branch implicit
norm_cnt[0] = intra_count;
norm_cnt[1] = last_count;
norm_cnt[2] = 0;
norm_cnt[3] = arf_count;
- calc_ref_probs(norm_cnt, cm->mod_refprobs[GOLDEN_FRAME]);
+ vp9_calc_ref_probs(norm_cnt, cm->mod_refprobs[GOLDEN_FRAME]);
cm->mod_refprobs[GOLDEN_FRAME][2] = 0; // This branch implicit
norm_cnt[0] = intra_count;
norm_cnt[1] = last_count;
norm_cnt[2] = gf_count;
norm_cnt[3] = 0;
- calc_ref_probs(norm_cnt, cm->mod_refprobs[ALTREF_FRAME]);
+ vp9_calc_ref_probs(norm_cnt, cm->mod_refprobs[ALTREF_FRAME]);
cm->mod_refprobs[ALTREF_FRAME][2] = 0; // This branch implicit
// Score the reference frames based on overal frequency.
diff --git a/vp8/common/pred_common.h b/vp8/common/pred_common.h
index 2a9875dfe..0096e3d79 100644
--- a/vp8/common/pred_common.h
+++ b/vp8/common/pred_common.h
@@ -25,30 +25,31 @@ typedef enum {
PRED_SWITCHABLE_INTERP = 4
} PRED_ID;
-extern unsigned char get_pred_context(const VP8_COMMON *const cm,
- const MACROBLOCKD *const xd,
- PRED_ID pred_id);
+extern unsigned char vp9_get_pred_context(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd,
+ PRED_ID pred_id);
-extern vp8_prob get_pred_prob(const VP8_COMMON *const cm,
- const MACROBLOCKD *const xd,
- PRED_ID pred_id);
+extern vp8_prob vp9_get_pred_prob(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd,
+ PRED_ID pred_id);
-extern const vp8_prob *get_pred_probs(const VP8_COMMON *const cm,
- const MACROBLOCKD *const xd,
- PRED_ID pred_id);
+extern const vp8_prob *vp9_get_pred_probs(const VP8_COMMON *const cm,
+ const MACROBLOCKD *const xd,
+ PRED_ID pred_id);
-extern unsigned char get_pred_flag(const MACROBLOCKD *const xd,
- PRED_ID pred_id);
+extern unsigned char vp9_get_pred_flag(const MACROBLOCKD *const xd,
+ PRED_ID pred_id);
-extern void set_pred_flag(MACROBLOCKD *const xd,
- PRED_ID pred_id,
- unsigned char pred_flag);
+extern void vp9_set_pred_flag(MACROBLOCKD *const xd,
+ PRED_ID pred_id,
+ unsigned char pred_flag);
-extern unsigned char get_pred_mb_segid(const VP8_COMMON *const cm, int MbIndex);
+extern unsigned char vp9_get_pred_mb_segid(const VP8_COMMON *const cm,
+ int MbIndex);
-extern MV_REFERENCE_FRAME get_pred_ref(const VP8_COMMON *const cm,
+extern MV_REFERENCE_FRAME vp9_get_pred_ref(const VP8_COMMON *const cm,
const MACROBLOCKD *const xd);
-extern void compute_mod_refprobs(VP8_COMMON *const cm);
+extern void vp9_compute_mod_refprobs(VP8_COMMON *const cm);
#endif /* __INC_PRED_COMMON_H__ */
diff --git a/vp8/common/reconintra4x4.c b/vp8/common/reconintra4x4.c
index 1e40168ec..741fd7d31 100644
--- a/vp8/common/reconintra4x4.c
+++ b/vp8/common/reconintra4x4.c
@@ -295,7 +295,8 @@ void vp8_comp_intra4x4_predict_c(BLOCKD *x,
/* copy 4 bytes from the above right down so that the 4x4 prediction modes using pixels above and
* to the right prediction have filled in pixels to use.
*/
-void vp8_intra_prediction_down_copy(MACROBLOCKD *xd, int extend_edge) {
+void vp8_intra_prediction_down_copy(MACROBLOCKD *xd) {
+ int extend_edge = (xd->mb_to_right_edge == 0 && xd->mb_index < 2);
unsigned char *above_right = *(xd->block[0].base_dst) + xd->block[0].dst -
xd->block[0].dst_stride + 16;
unsigned int *src_ptr = (unsigned int *)
diff --git a/vp8/common/reconintra4x4.h b/vp8/common/reconintra4x4.h
index 771e0b2eb..a8cdea47c 100644
--- a/vp8/common/reconintra4x4.h
+++ b/vp8/common/reconintra4x4.h
@@ -12,6 +12,6 @@
#ifndef __INC_RECONINTRA4x4_H
#define __INC_RECONINTRA4x4_H
-extern void vp8_intra_prediction_down_copy(MACROBLOCKD *xd, int extend_edge);
+extern void vp8_intra_prediction_down_copy(MACROBLOCKD *xd);
#endif
diff --git a/vp8/common/rtcd_defs.sh b/vp8/common/rtcd_defs.sh
index e45028284..0feb441f0 100644
--- a/vp8/common/rtcd_defs.sh
+++ b/vp8/common/rtcd_defs.sh
@@ -1,8 +1,15 @@
common_forward_decls() {
cat <<EOF
-#include "vp8/common/blockd.h"
struct loop_filter_info;
+struct blockd;
+struct macroblockd;
+struct loop_filter_info;
+
+/* Encoder forward decls */
+struct block;
+struct macroblock;
+struct variance_vtable;
/* Encoder forward decls */
struct variance_vtable;
@@ -70,55 +77,55 @@ specialize vp8_recon2b sse2
prototype void vp8_recon4b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
specialize vp8_recon4b sse2
-prototype void vp8_recon_mb "MACROBLOCKD *x"
+prototype void vp8_recon_mb "struct macroblockd *x"
specialize vp8_recon_mb
-prototype void vp8_recon_mby "MACROBLOCKD *x"
+prototype void vp8_recon_mby "struct macroblockd *x"
specialize vp8_recon_mby
-prototype void vp8_build_intra_predictors_mby_s "MACROBLOCKD *x"
+prototype void vp8_build_intra_predictors_mby_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_mby_s
-prototype void vp8_build_intra_predictors_sby_s "MACROBLOCKD *x"
+prototype void vp8_build_intra_predictors_sby_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_sby_s;
-prototype void vp8_build_intra_predictors_sbuv_s "MACROBLOCKD *x"
+prototype void vp8_build_intra_predictors_sbuv_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_sbuv_s;
-prototype void vp8_build_intra_predictors_mby "MACROBLOCKD *x"
+prototype void vp8_build_intra_predictors_mby "struct macroblockd *x"
specialize vp8_build_intra_predictors_mby;
-prototype void vp8_build_comp_intra_predictors_mby "MACROBLOCKD *x"
+prototype void vp8_build_comp_intra_predictors_mby "struct macroblockd *x"
specialize vp8_build_comp_intra_predictors_mby;
-prototype void vp8_build_intra_predictors_mby_s "MACROBLOCKD *x"
+prototype void vp8_build_intra_predictors_mby_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_mby_s;
-prototype void vp8_build_intra_predictors_mbuv "MACROBLOCKD *x"
+prototype void vp8_build_intra_predictors_mbuv "struct macroblockd *x"
specialize vp8_build_intra_predictors_mbuv;
-prototype void vp8_build_intra_predictors_mbuv_s "MACROBLOCKD *x"
+prototype void vp8_build_intra_predictors_mbuv_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_mbuv_s;
-prototype void vp8_build_comp_intra_predictors_mbuv "MACROBLOCKD *x"
+prototype void vp8_build_comp_intra_predictors_mbuv "struct macroblockd *x"
specialize vp8_build_comp_intra_predictors_mbuv;
-prototype void vp8_intra4x4_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
+prototype void vp8_intra4x4_predict "struct blockd *x, int b_mode, unsigned char *predictor"
specialize vp8_intra4x4_predict;
-prototype void vp8_comp_intra4x4_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
+prototype void vp8_comp_intra4x4_predict "struct blockd *x, int b_mode, int second_mode, unsigned char *predictor"
specialize vp8_comp_intra4x4_predict;
-prototype void vp8_intra8x8_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
+prototype void vp8_intra8x8_predict "struct blockd *x, int b_mode, unsigned char *predictor"
specialize vp8_intra8x8_predict;
-prototype void vp8_comp_intra8x8_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
+prototype void vp8_comp_intra8x8_predict "struct blockd *x, int b_mode, int second_mode, unsigned char *predictor"
specialize vp8_comp_intra8x8_predict;
-prototype void vp8_intra_uv4x4_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
+prototype void vp8_intra_uv4x4_predict "struct blockd *x, int b_mode, unsigned char *predictor"
specialize vp8_intra_uv4x4_predict;
-prototype void vp8_comp_intra_uv4x4_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
+prototype void vp8_comp_intra_uv4x4_predict "struct blockd *x, int b_mode, int second_mode, unsigned char *predictor"
specialize vp8_comp_intra_uv4x4_predict;
#
@@ -367,6 +374,30 @@ specialize vp8_sub_pixel_mse32x32
prototype unsigned int vp8_get_mb_ss "const short *"
specialize vp8_get_mb_ss mmx sse2
+# ENCODEMB INVOKE
+prototype int vp8_mbblock_error "struct macroblock *mb, int dc"
+specialize vp8_mbblock_error mmx sse2
+vp8_mbblock_error_sse2=vp8_mbblock_error_xmm
+
+prototype int vp8_block_error "short *coeff, short *dqcoeff, int block_size"
+specialize vp8_block_error mmx sse2
+vp8_block_error_sse2=vp8_block_error_xmm
+
+prototype void vp8_subtract_b "struct block *be, struct blockd *bd, int pitch"
+specialize vp8_subtract_b mmx sse2
+
+prototype int vp8_mbuverror "struct macroblock *mb"
+specialize vp8_mbuverror mmx sse2
+vp8_mbuverror_sse2=vp8_mbuverror_xmm
+
+prototype void vp8_subtract_b "struct block *be, struct blockd *bd, int pitch"
+specialize vp8_subtract_b mmx sse2
+
+prototype void vp8_subtract_mby "short *diff, unsigned char *src, unsigned char *pred, int stride"
+specialize vp8_subtract_mby mmx sse2
+
+prototype void vp8_subtract_mbuv "short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride"
+specialize vp8_subtract_mbuv mmx sse2
#
# Structured Similarity (SSIM)
@@ -381,5 +412,33 @@ if [ "$CONFIG_INTERNAL_STATS" = "yes" ]; then
specialize vp8_ssim_parms_16x16 $sse2_on_x86_64
fi
+# fdct functions
+prototype void vp8_short_fdct8x8 "short *InputData, short *OutputData, int pitch"
+specialize vp8_short_fdct8x8
+
+prototype void vp8_short_fhaar2x2 "short *InputData, short *OutputData, int pitch"
+specialize vp8_short_fhaar2x2
+
+prototype void vp8_short_fdct4x4 "short *InputData, short *OutputData, int pitch"
+specialize vp8_short_fdct4x4
+
+prototype void vp8_short_fdct8x4 "short *InputData, short *OutputData, int pitch"
+specialize vp8_short_fdct8x4
+
+prototype void vp8_short_walsh4x4 "short *InputData, short *OutputData, int pitch"
+specialize vp8_short_walsh4x4
+
+prototype void vp8_short_fdct16x16 "short *InputData, short *OutputData, int pitch"
+specialize vp8_short_fdct16x16
+
+prototype void vp8_short_walsh4x4_lossless "short *InputData, short *OutputData, int pitch"
+specialize vp8_short_walsh4x4_lossless
+
+prototype void vp8_short_walsh4x4_x8 "short *InputData, short *OutputData, int pitch"
+specialize vp8_short_walsh4x4_x8
+
+prototype void vp8_short_walsh8x4_x8 "short *InputData, short *OutputData, int pitch"
+specialize vp8_short_walsh8x4_x8
+
fi
# end encoder functions
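
The new rtcd_defs.sh entries above replace the encoder's per-module vtables deleted later in this diff. Each prototype/specialize pair drives the vpx_rtcd.h generator; roughly, it declares every specialized variant and either binds the bare name at build time or routes it through a pointer filled in by vpx_rtcd(). The sketch below approximates that output for vp8_subtract_b; it is illustrative, not the literal generated header.

/* Approximate shape of the generated dispatch for:
 *   prototype void vp8_subtract_b "struct block *be, struct blockd *bd, int pitch"
 *   specialize vp8_subtract_b mmx sse2
 */
struct block;
struct blockd;

void vp8_subtract_b_c(struct block *be, struct blockd *bd, int pitch);
void vp8_subtract_b_mmx(struct block *be, struct blockd *bd, int pitch);
void vp8_subtract_b_sse2(struct block *be, struct blockd *bd, int pitch);

#if CONFIG_RUNTIME_CPU_DETECT
/* runtime dispatch: vpx_rtcd() installs the best variant for the detected CPU */
extern void (*vp8_subtract_b)(struct block *be, struct blockd *bd, int pitch);
#else
/* static dispatch: the best enabled variant is chosen at build time */
#define vp8_subtract_b vp8_subtract_b_sse2
#endif
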
diff --git a/vp8/decoder/decodemv.c b/vp8/decoder/decodemv.c
index 991fb9103..2187e020f 100644
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -115,7 +115,8 @@ static void kfread_modes(VP8D_COMP *pbi,
(get_segdata(&pbi->mb,
m->mbmi.segment_id, SEG_LVL_EOB) != 0))) {
MACROBLOCKD *const xd = &pbi->mb;
- m->mbmi.mb_skip_coeff = vp8_read(bc, get_pred_prob(cm, xd, PRED_MBSKIP));
+ m->mbmi.mb_skip_coeff =
+ vp8_read(bc, vp9_get_pred_prob(cm, xd, PRED_MBSKIP));
} else {
if (segfeature_active(&pbi->mb,
m->mbmi.segment_id, SEG_LVL_EOB) &&
@@ -379,16 +380,16 @@ static MV_REFERENCE_FRAME read_ref_frame(VP8D_COMP *pbi,
MV_REFERENCE_FRAME pred_ref;
// Get the context probability the prediction flag
- pred_prob = get_pred_prob(cm, xd, PRED_REF);
+ pred_prob = vp9_get_pred_prob(cm, xd, PRED_REF);
// Read the prediction status flag
prediction_flag = (unsigned char)vp8_read(bc, pred_prob);
// Store the prediction flag.
- set_pred_flag(xd, PRED_REF, prediction_flag);
+ vp9_set_pred_flag(xd, PRED_REF, prediction_flag);
// Get the predicted reference frame.
- pred_ref = get_pred_ref(cm, xd);
+ pred_ref = vp9_get_pred_ref(cm, xd);
// If correctly predicted then use the predicted value
if (prediction_flag) {
@@ -453,8 +454,8 @@ static MV_REFERENCE_FRAME read_ref_frame(VP8D_COMP *pbi,
// The reference frame for the mb is considered as correclty predicted
// if it is signaled at the segment level for the purposes of the
// common prediction model
- set_pred_flag(xd, PRED_REF, 1);
- ref_frame = get_pred_ref(cm, xd);
+ vp9_set_pred_flag(xd, PRED_REF, 1);
+ ref_frame = vp9_get_pred_ref(cm, xd);
}
return (MV_REFERENCE_FRAME)ref_frame;
@@ -529,7 +530,7 @@ static void mb_mode_mv_init(VP8D_COMP *pbi, vp8_reader *bc) {
// Computes a modified set of probabilities for use when reference
// frame prediction fails.
- compute_mod_refprobs(cm);
+ vp9_compute_mod_refprobs(cm);
pbi->common.comp_pred_mode = vp8_read(bc, 128);
if (cm->comp_pred_mode)
@@ -576,19 +577,19 @@ static void read_mb_segment_id(VP8D_COMP *pbi,
// Get the context based probability for reading the
// prediction status flag
vp8_prob pred_prob =
- get_pred_prob(cm, xd, PRED_SEG_ID);
+ vp9_get_pred_prob(cm, xd, PRED_SEG_ID);
// Read the prediction status flag
unsigned char seg_pred_flag =
(unsigned char)vp8_read(bc, pred_prob);
// Store the prediction flag.
- set_pred_flag(xd, PRED_SEG_ID, seg_pred_flag);
+ vp9_set_pred_flag(xd, PRED_SEG_ID, seg_pred_flag);
// If the value is flagged as correctly predicted
// then use the predicted value
if (seg_pred_flag) {
- mbmi->segment_id = get_pred_mb_segid(cm, index);
+ mbmi->segment_id = vp9_get_pred_mb_segid(cm, index);
}
// Else .... decode it explicitly
else {
@@ -678,7 +679,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
(get_segdata(xd, mbmi->segment_id, SEG_LVL_EOB) != 0))) {
// Read the macroblock coeff skip flag if this feature is in use,
// else default to 0
- mbmi->mb_skip_coeff = vp8_read(bc, get_pred_prob(cm, xd, PRED_MBSKIP));
+ mbmi->mb_skip_coeff = vp8_read(bc, vp9_get_pred_prob(cm, xd, PRED_MBSKIP));
} else {
if (segfeature_active(xd,
mbmi->segment_id, SEG_LVL_EOB) &&
@@ -731,9 +732,9 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
xd->pre.u_buffer = cm->yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cm->yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
- find_mv_refs(xd, mi, prev_mi,
- ref_frame, mbmi->ref_mvs[ref_frame],
- cm->ref_frame_sign_bias);
+ vp9_find_mv_refs(xd, mi, prev_mi,
+ ref_frame, mbmi->ref_mvs[ref_frame],
+ cm->ref_frame_sign_bias);
vp8_find_best_ref_mvs(xd,
xd->pre.y_buffer,
@@ -775,7 +776,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
if (cm->mcomp_filter_type == SWITCHABLE) {
mbmi->interp_filter = vp8_switchable_interp[
vp8_treed_read(bc, vp8_switchable_interp_tree,
- get_pred_probs(cm, xd, PRED_SWITCHABLE_INTERP))];
+ vp9_get_pred_probs(cm, xd, PRED_SWITCHABLE_INTERP))];
} else {
mbmi->interp_filter = cm->mcomp_filter_type;
}
@@ -783,7 +784,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
if (cm->comp_pred_mode == COMP_PREDICTION_ONLY ||
(cm->comp_pred_mode == HYBRID_PREDICTION &&
- vp8_read(bc, get_pred_prob(cm, xd, PRED_COMP)))) {
+ vp8_read(bc, vp9_get_pred_prob(cm, xd, PRED_COMP)))) {
/* Since we have 3 reference frames, we can only have 3 unique
* combinations of combinations of 2 different reference frames
* (A-G, G-L or A-L). In the bitstream, we use this to simply
@@ -818,10 +819,10 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mbmi->second_ref_frame,
cm->ref_frame_sign_bias);
- find_mv_refs(xd, mi, prev_mi,
- mbmi->second_ref_frame,
- mbmi->ref_mvs[mbmi->second_ref_frame],
- cm->ref_frame_sign_bias);
+ vp9_find_mv_refs(xd, mi, prev_mi,
+ mbmi->second_ref_frame,
+ mbmi->ref_mvs[mbmi->second_ref_frame],
+ cm->ref_frame_sign_bias);
vp8_find_best_ref_mvs(xd,
xd->second_pre.y_buffer,
diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c
index 78ba590e0..c53560dbb 100644
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -658,8 +658,7 @@ decode_sb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mbrow, MACROBLOCKD *xd,
mi[pc->mode_info_stride + 1] = mi[0];
}
#endif
- vp8_intra_prediction_down_copy(xd, mb_col == pc->mb_cols - 1 &&
- !(mb_row & 1));
+ vp8_intra_prediction_down_copy(xd);
decode_macroblock(pbi, xd, mb_col, bc);
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
diff --git a/vp8/encoder/arm/dct_arm.c b/vp8/encoder/arm/dct_arm.c
index 913d5c0f0..97376de32 100644
--- a/vp8/encoder/arm/dct_arm.c
+++ b/vp8/encoder/arm/dct_arm.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vp8/encoder/dct.h"
+#include "./vpx_rtcd.h"
#if HAVE_ARMV6
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 3c7e37762..4c1d61ed8 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -800,16 +800,16 @@ static void encode_ref_frame(vp8_writer *const bc,
MV_REFERENCE_FRAME pred_rf;
// Get the context probability the prediction flag
- pred_prob = get_pred_prob(cm, xd, PRED_REF);
+ pred_prob = vp9_get_pred_prob(cm, xd, PRED_REF);
// Get the predicted value.
- pred_rf = get_pred_ref(cm, xd);
+ pred_rf = vp9_get_pred_ref(cm, xd);
// Did the chosen reference frame match its predicted value.
prediction_flag =
(xd->mode_info_context->mbmi.ref_frame == pred_rf);
- set_pred_flag(xd, PRED_REF, prediction_flag);
+ vp9_set_pred_flag(xd, PRED_REF, prediction_flag);
vp8_write(bc, prediction_flag, pred_prob);
// If not predicted correctly then code value explicitly
@@ -869,7 +869,7 @@ static void update_ref_probs(VP8_COMP *const cpi) {
// Compute a modified set of probabilities to use when prediction of the
// reference frame fails
- compute_mod_refprobs(cm);
+ vp9_compute_mod_refprobs(cm);
}
static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
@@ -956,8 +956,8 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
if (cpi->mb.e_mbd.update_mb_segmentation_map) {
// Is temporal coding of the segment map enabled
if (pc->temporal_update) {
- prediction_flag = get_pred_flag(xd, PRED_SEG_ID);
- pred_prob = get_pred_prob(pc, xd, PRED_SEG_ID);
+ prediction_flag = vp9_get_pred_flag(xd, PRED_SEG_ID);
+ pred_prob = vp9_get_pred_prob(pc, xd, PRED_SEG_ID);
// Code the segment id prediction flag for this mb
vp8_write(bc, prediction_flag, pred_prob);
@@ -983,7 +983,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
}
#endif
vp8_encode_bool(bc, skip_coeff,
- get_pred_prob(pc, xd, PRED_MBSKIP));
+ vp9_get_pred_prob(pc, xd, PRED_MBSKIP));
}
// Encode the reference frame.
@@ -1097,8 +1097,8 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
{
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
vp8_write_token(bc, vp8_switchable_interp_tree,
- get_pred_probs(&cpi->common, xd,
- PRED_SWITCHABLE_INTERP),
+ vp9_get_pred_probs(&cpi->common, xd,
+ PRED_SWITCHABLE_INTERP),
vp8_switchable_interp_encodings +
vp8_switchable_interp_map[mi->interp_filter]);
} else {
@@ -1126,7 +1126,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
// (if not specified at the frame/segment level)
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
vp8_write(bc, mi->second_ref_frame != INTRA_FRAME,
- get_pred_prob(pc, xd, PRED_COMP));
+ vp9_get_pred_prob(pc, xd, PRED_COMP));
}
{
@@ -1326,7 +1326,7 @@ static void write_mb_modes_kf(const VP8_COMMON *c,
}
#endif
vp8_encode_bool(bc, skip_coeff,
- get_pred_prob(c, xd, PRED_MBSKIP));
+ vp9_get_pred_prob(c, xd, PRED_MBSKIP));
}
#if CONFIG_SUPERBLOCKS
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index df74a3fcf..9a6483adb 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -24,7 +24,7 @@ typedef struct {
int offset;
} search_site;
-typedef struct {
+typedef struct block {
// 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries
short *src_diff;
short *coeff;
@@ -85,7 +85,7 @@ typedef struct {
int64_t txfm_rd_diff[NB_TXFM_MODES];
} PICK_MODE_CONTEXT;
-typedef struct {
+typedef struct macroblock {
DECLARE_ALIGNED(16, short, src_diff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
DECLARE_ALIGNED(16, short, coeff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
DECLARE_ALIGNED(16, unsigned char, thismb[256]); // 16x16 Y
diff --git a/vp8/encoder/dct.h b/vp8/encoder/dct.h
deleted file mode 100644
index 4ad1fe85d..000000000
--- a/vp8/encoder/dct.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef __INC_DCT_H
-#define __INC_DCT_H
-
-#include "vpx_config.h"
-#include "vp8/common/blockd.h"
-
-#define prototype_fdct(sym) void (sym)(short *input, short *output, int pitch)
-
-#if ARCH_X86 || ARCH_X86_64
-#include "x86/dct_x86.h"
-#endif
-
-#if ARCH_ARM
-#include "arm/dct_arm.h"
-#endif
-
-
-void vp8_fht_c(short *input, short *output, int pitch,
- TX_TYPE tx_type, int tx_dim);
-
-#ifndef vp8_fdct_short16x16
-#define vp8_fdct_short16x16 vp8_short_fdct16x16_c
-#endif
-extern prototype_fdct(vp8_fdct_short16x16);
-
-#ifndef vp8_fdct_short8x8
-#define vp8_fdct_short8x8 vp8_short_fdct8x8_c
-#endif
-extern prototype_fdct(vp8_fdct_short8x8);
-
-#ifndef vp8_fhaar_short2x2
-#define vp8_fdct_haar_short2x2 vp8_fhaar_short2x2
-#define vp8_fhaar_short2x2 vp8_short_fhaar2x2_c
-#endif
-extern prototype_fdct(vp8_fhaar_short2x2);
-
-
-#ifndef vp8_fdct_short4x4
-#define vp8_fdct_short4x4 vp8_short_fdct4x4_c
-#endif
-extern prototype_fdct(vp8_fdct_short4x4);
-
-#ifndef vp8_fdct_short8x4
-#define vp8_fdct_short8x4 vp8_short_fdct8x4_c
-#endif
-extern prototype_fdct(vp8_fdct_short8x4);
-
-// There is no fast4x4 (for now)
-#ifndef vp8_fdct_fast4x4
-#define vp8_fdct_fast4x4 vp8_short_fdct4x4_c
-#endif
-
-#ifndef vp8_fdct_fast8x4
-#define vp8_fdct_fast8x4 vp8_short_fdct8x4_c
-#endif
-
-#ifndef vp8_fdct_walsh_short4x4
-#define vp8_fdct_walsh_short4x4 vp8_short_walsh4x4_c
-#endif
-extern prototype_fdct(vp8_fdct_walsh_short4x4);
-
-#if CONFIG_LOSSLESS
-extern prototype_fdct(vp8_short_walsh4x4_x8_c);
-extern prototype_fdct(vp8_short_walsh8x4_x8_c);
-extern prototype_fdct(vp8_short_walsh4x4_lossless_c);
-#endif
-
-typedef prototype_fdct(*vp8_fdct_fn_t);
-typedef struct {
- vp8_fdct_fn_t short16x16;
- vp8_fdct_fn_t short8x8;
- vp8_fdct_fn_t haar_short2x2;
- vp8_fdct_fn_t short4x4;
- vp8_fdct_fn_t short8x4;
- vp8_fdct_fn_t fast4x4;
- vp8_fdct_fn_t fast8x4;
- vp8_fdct_fn_t walsh_short4x4;
-} vp8_fdct_rtcd_vtable_t;
-
-#if CONFIG_RUNTIME_CPU_DETECT
-#define FDCT_INVOKE(ctx,fn) (ctx)->fn
-#else
-#define FDCT_INVOKE(ctx,fn) vp8_fdct_##fn
-#endif
-
-#endif
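
With dct.h gone, the FDCT_INVOKE indirection shown in the deleted file (a vtable lookup under CONFIG_RUNTIME_CPU_DETECT, a token-pasted default otherwise) has no users left; the MACROBLOCK function pointers are bound directly to the rtcd-generated names, as the onyx_if.c hunk below shows. A brief comment sketch of the before/after:

/* Before: two-level indirection through the encoder's fdct vtable. */
#if CONFIG_RUNTIME_CPU_DETECT
#define FDCT_INVOKE(ctx, fn) (ctx)->fn        /* e.g. (&cpi->rtcd.fdct)->short4x4 */
#else
#define FDCT_INVOKE(ctx, fn) vp8_fdct_##fn    /* build-time default via token pasting */
#endif
/* old:  cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short4x4); */
/* new:  cpi->mb.vp8_short_fdct4x4 = vp8_short_fdct4x4;   (resolved through vpx_rtcd.h) */
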
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index a71715ea5..e464fde38 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -613,8 +613,7 @@ static void pick_mb_modes(VP8_COMP *cpi,
cpi->update_context = 0; // TODO Do we need this now??
- vp8_intra_prediction_down_copy(xd, mb_col == cm->mb_cols - 1 &&
- (mb_row & 1) == 0);
+ vp8_intra_prediction_down_copy(xd);
// Find best coding mode & reconstruct the MB so it is available
// as a predictor for MBs that follow in the SB
@@ -666,8 +665,8 @@ static void pick_mb_modes(VP8_COMP *cpi,
check_segref(xd, seg_id, GOLDEN_FRAME) +
check_segref(xd, seg_id, ALTREF_FRAME) > 1) {
// Get the prediction context and status
- int pred_flag = get_pred_flag(xd, PRED_REF);
- int pred_context = get_pred_context(cm, xd, PRED_REF);
+ int pred_flag = vp9_get_pred_flag(xd, PRED_REF);
+ int pred_context = vp9_get_pred_context(cm, xd, PRED_REF);
// Count prediction success
cpi->ref_pred_count[pred_context][pred_flag]++;
@@ -983,8 +982,7 @@ static void encode_sb(VP8_COMP *cpi,
#if CONFIG_SUPERBLOCKS
if (!xd->mode_info_context->mbmi.encoded_as_sb)
#endif
- vp8_intra_prediction_down_copy(xd, mb_col == cm->mb_cols - 1 &&
- (mb_row & 1) == 0);
+ vp8_intra_prediction_down_copy(xd);
if (cm->frame_type == KEY_FRAME) {
#if CONFIG_SUPERBLOCKS
@@ -1005,7 +1003,7 @@ static void encode_sb(VP8_COMP *cpi,
if (xd->mode_info_context->mbmi.ref_frame) {
unsigned char pred_context;
- pred_context = get_pred_context(cm, xd, PRED_COMP);
+ pred_context = vp9_get_pred_context(cm, xd, PRED_COMP);
if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME)
cpi->single_pred_count[pred_context]++;
@@ -1308,7 +1306,7 @@ static void encode_frame_internal(VP8_COMP *cpi) {
// Compute a modified set of reference frame probabilities to use when
// prediction fails. These are based on the current general estimates for
// this frame which may be updated with each iteration of the recode loop.
- compute_mod_refprobs(cm);
+ vp9_compute_mod_refprobs(cm);
#if CONFIG_NEW_MVREF
// temp stats reset
@@ -2011,8 +2009,8 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
// SET VARIOUS PREDICTION FLAGS
// Did the chosen reference frame match its predicted value.
- ref_pred_flag = ((mbmi->ref_frame == get_pred_ref(cm, xd)));
- set_pred_flag(xd, PRED_REF, ref_pred_flag);
+ ref_pred_flag = ((mbmi->ref_frame == vp9_get_pred_ref(cm, xd)));
+ vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);
if (mbmi->ref_frame == INTRA_FRAME) {
if (mbmi->mode == B_PRED) {
@@ -2207,8 +2205,8 @@ void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
// Did the chosen reference frame match its predicted value.
ref_pred_flag = ((xd->mode_info_context->mbmi.ref_frame ==
- get_pred_ref(cm, xd)));
- set_pred_flag(xd, PRED_REF, ref_pred_flag);
+ vp9_get_pred_ref(cm, xd)));
+ vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
vp8_build_intra_predictors_sby_s(&x->e_mbd);
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index f44df22ea..508e5aaa5 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -9,16 +9,15 @@
*/
#include "vpx_ports/config.h"
+#include "vpx_rtcd.h"
#include "vp8/common/idct.h"
#include "quantize.h"
#include "vp8/common/reconintra.h"
#include "vp8/common/reconintra4x4.h"
#include "encodemb.h"
#include "vp8/common/invtrans.h"
-#include "dct.h"
#include "vp8/common/g_common.h"
#include "encodeintra.h"
-#include "vpx_rtcd.h"
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
@@ -70,7 +69,7 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
}
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
+ vp8_subtract_b(be, b, 16);
tx_type = get_tx_type(&x->e_mbd, b);
if (tx_type != DCT_DCT) {
@@ -109,8 +108,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
vp8_build_comp_intra_predictors_mby(xd);
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src),
- xd->predictor, b->src_stride);
+ vp8_subtract_mby(x->src_diff, *(b->base_src), xd->predictor, b->src_stride);
if (tx_size == TX_16X16) {
BLOCKD *bd = &xd->block[0];
@@ -159,9 +157,9 @@ void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
}
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer,
- xd->predictor, x->src.uv_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ xd->predictor, x->src.uv_stride);
+
if (tx_size == TX_4X4) {
vp8_transform_mbuv_4x4(x);
vp8_quantize_mbuv_4x4(x);
@@ -221,7 +219,7 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
for (i = 0; i < 4; i++) {
b = &xd->block[ib + iblock[i]];
be = &x->block[ib + iblock[i]];
- ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
+ vp8_subtract_b(be, b, 16);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4(be, b);
vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
@@ -261,7 +259,7 @@ void vp8_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,
}
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 8);
+ vp8_subtract_b(be, b, 8);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 16);
x->quantize_b_4x4(be, b);
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index d3bd0f1dd..ee9c56ae5 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -15,10 +15,10 @@
#include "tokenize.h"
#include "vp8/common/invtrans.h"
#include "vp8/common/reconintra.h"
-#include "dct.h"
#include "vpx_mem/vpx_mem.h"
#include "rdopt.h"
#include "vp8/common/systemdependent.h"
+#include "vpx_rtcd.h"
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
@@ -126,8 +126,10 @@ void vp8_subtract_mby_c(short *diff, unsigned char *src,
static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
BLOCK *b = &x->block[0];
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
- ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
+ vp8_subtract_mby(x->src_diff, *(b->base_src), x->e_mbd.predictor,
+ b->src_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->e_mbd.predictor, x->src.uv_stride);
}
static void build_dcblock_4x4(MACROBLOCK *x) {
@@ -942,8 +944,7 @@ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
vp8_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src),
- xd->predictor, b->src_stride);
+ vp8_subtract_mby(x->src_diff, *(b->base_src), xd->predictor, b->src_stride);
vp8_transform_mby_4x4(x);
vp8_quantize_mby_4x4(x);
diff --git a/vp8/encoder/encodemb.h b/vp8/encoder/encodemb.h
index 6c28ea025..6c2300e34 100644
--- a/vp8/encoder/encodemb.h
+++ b/vp8/encoder/encodemb.h
@@ -15,73 +15,6 @@
#include "vpx_ports/config.h"
#include "block.h"
-#define prototype_mberr(sym) \
- int (sym)(MACROBLOCK *mb, int dc)
-
-#define prototype_berr(sym) \
- int (sym)(short *coeff, short *dqcoeff, int block_size)
-
-#define prototype_mbuverr(sym) \
- int (sym)(MACROBLOCK *mb)
-
-#define prototype_subb(sym) \
- void (sym)(BLOCK *be,BLOCKD *bd, int pitch)
-
-#define prototype_submby(sym) \
- void (sym)(short *diff, unsigned char *src, unsigned char *pred, int stride)
-
-#define prototype_submbuv(sym) \
- void (sym)(short *diff, unsigned char *usrc, unsigned char *vsrc,\
- unsigned char *pred, int stride)
-
-#if ARCH_X86 || ARCH_X86_64
-#include "x86/encodemb_x86.h"
-#endif
-
-#if ARCH_ARM
-#include "arm/encodemb_arm.h"
-#endif
-
-#ifndef vp8_encodemb_berr
-#define vp8_encodemb_berr vp8_block_error_c
-#endif
-extern prototype_berr(vp8_encodemb_berr);
-
-#ifndef vp8_encodemb_mberr
-#define vp8_encodemb_mberr vp8_mbblock_error_c
-#endif
-extern prototype_mberr(vp8_encodemb_mberr);
-
-#ifndef vp8_encodemb_mbuverr
-#define vp8_encodemb_mbuverr vp8_mbuverror_c
-#endif
-extern prototype_mbuverr(vp8_encodemb_mbuverr);
-
-#ifndef vp8_encodemb_subb
-#define vp8_encodemb_subb vp8_subtract_b_c
-#endif
-extern prototype_subb(vp8_encodemb_subb);
-
-#ifndef vp8_encodemb_submby
-#define vp8_encodemb_submby vp8_subtract_mby_c
-#endif
-extern prototype_submby(vp8_encodemb_submby);
-
-#ifndef vp8_encodemb_submbuv
-#define vp8_encodemb_submbuv vp8_subtract_mbuv_c
-#endif
-extern prototype_submbuv(vp8_encodemb_submbuv);
-
-
-typedef struct {
- prototype_berr(*berr);
- prototype_mberr(*mberr);
- prototype_mbuverr(*mbuverr);
- prototype_subb(*subb);
- prototype_submby(*submby);
- prototype_submbuv(*submbuv);
-} vp8_encodemb_rtcd_vtable_t;
-
typedef struct {
MB_PREDICTION_MODE mode;
MV_REFERENCE_FRAME ref_frame;
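
The encodemb vtable removed here (berr/mberr/mbuverr/subb/submby/submbuv) is superseded by the vp8_block_error, vp8_mbblock_error, vp8_mbuverror and vp8_subtract_* entries added to rtcd_defs.sh above, which is why the rdopt.c and encodeintra.c hunks later in this diff call the bare names. For orientation, the C fallback behind the block-error hook is just a sum of squared coefficient differences; the sketch below is written from memory of the reference behaviour, not a verbatim copy of vp8_block_error_c.

/* Sketch of the block-error reference used for distortion in rdopt.c:
 * accumulate the squared difference between the original transform
 * coefficients and their dequantized counterparts. */
int example_block_error(short *coeff, short *dqcoeff, int block_size) {
  int i, error = 0;
  for (i = 0; i < block_size; i++) {
    int diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
  }
  return error;
}
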
diff --git a/vp8/encoder/generic/csystemdependent.c b/vp8/encoder/generic/csystemdependent.c
index 44e83fdc7..dbe36249d 100644
--- a/vp8/encoder/generic/csystemdependent.c
+++ b/vp8/encoder/generic/csystemdependent.c
@@ -24,31 +24,10 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi) {
#if CONFIG_RUNTIME_CPU_DETECT
cpi->rtcd.common = &cpi->common.rtcd;
- cpi->rtcd.fdct.short8x8 = vp8_short_fdct8x8_c;
- cpi->rtcd.fdct.short16x16 = vp8_short_fdct16x16_c;
- cpi->rtcd.fdct.haar_short2x2 = vp8_short_fhaar2x2_c;
- cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
- cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
- cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
- cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
- cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
-
- cpi->rtcd.encodemb.berr = vp8_block_error_c;
- cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
- cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_c;
- cpi->rtcd.encodemb.subb = vp8_subtract_b_c;
- cpi->rtcd.encodemb.submby = vp8_subtract_mby_c;
- cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_c;
-
cpi->rtcd.search.full_search = vp8_full_search_sad;
cpi->rtcd.search.refining_search = vp8_refining_search_sad;
cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
- cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
- cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
- cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
- cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
- cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
#endif
vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 25d865484..1c37eb133 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -167,9 +167,6 @@ extern unsigned int inter_uv_modes[VP8_UV_MODES];
extern unsigned int inter_b_modes[B_MODE_COUNT];
#endif
-extern void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);
-extern void (*vp8_short_fdct8x4)(short *input, short *output, int pitch);
-
extern void vp8cx_init_quantizer(VP8_COMP *cpi);
int vp8cx_base_skip_false_prob[QINDEX_RANGE][3];
@@ -1190,20 +1187,23 @@ void vp8_set_speed_features(VP8_COMP *cpi) {
vp8_init_dsmotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
}
- if (cpi->sf.improved_dct) {
- cpi->mb.vp8_short_fdct16x16 = FDCT_INVOKE(&cpi->rtcd.fdct, short16x16);
- cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
- cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x4);
- cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short4x4);
- } else {
- cpi->mb.vp8_short_fdct16x16 = FDCT_INVOKE(&cpi->rtcd.fdct, short16x16);
- cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
- cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast8x4);
- cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast4x4);
+ cpi->mb.vp8_short_fdct16x16 = vp8_short_fdct16x16;
+ cpi->mb.vp8_short_fdct8x8 = vp8_short_fdct8x8;
+ cpi->mb.vp8_short_fdct8x4 = vp8_short_fdct8x4;
+ cpi->mb.vp8_short_fdct4x4 = vp8_short_fdct4x4;
+ cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
+ cpi->mb.short_fhaar2x2 = vp8_short_fhaar2x2;
+
+#if CONFIG_LOSSLESS
+ if (cpi->oxcf.lossless) {
+ cpi->mb.vp8_short_fdct8x4 = vp8_short_walsh8x4_x8;
+ cpi->mb.vp8_short_fdct4x4 = vp8_short_walsh4x4_x8;
+ cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
+ cpi->mb.short_fhaar2x2 = vp8_short_fhaar2x2;
+ cpi->mb.short_walsh4x4 = vp8_short_walsh4x4_lossless;
}
+#endif
- cpi->mb.short_walsh4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, walsh_short4x4);
- cpi->mb.short_fhaar2x2 = FDCT_INVOKE(&cpi->rtcd.fdct, haar_short2x2);
cpi->mb.quantize_b_4x4 = vp8_regular_quantize_b_4x4;
@@ -1524,11 +1524,6 @@ void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
#if CONFIG_LOSSLESS
cpi->oxcf.lossless = oxcf->lossless;
if (cpi->oxcf.lossless) {
- cpi->rtcd.fdct.short4x4 = vp8_short_walsh4x4_x8_c;
- cpi->rtcd.fdct.fast4x4 = vp8_short_walsh4x4_x8_c;
- cpi->rtcd.fdct.short8x4 = vp8_short_walsh8x4_x8_c;
- cpi->rtcd.fdct.fast8x4 = vp8_short_walsh8x4_x8_c;
- cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_lossless_c;
cpi->common.rtcd.idct.idct1 = vp8_short_inv_walsh4x4_1_x8_c;
cpi->common.rtcd.idct.idct16 = vp8_short_inv_walsh4x4_x8_c;
cpi->common.rtcd.idct.idct1_scalar_add = vp8_dc_only_inv_walsh_add_c;
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index e78339c42..b94b736bc 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -19,7 +19,6 @@
#include "tokenize.h"
#include "vp8/common/onyxc_int.h"
#include "variance.h"
-#include "dct.h"
#include "encodemb.h"
#include "quantize.h"
#include "vp8/common/entropy.h"
@@ -350,8 +349,6 @@ typedef struct {
typedef struct VP8_ENCODER_RTCD {
VP8_COMMON_RTCD *common;
- vp8_fdct_rtcd_vtable_t fdct;
- vp8_encodemb_rtcd_vtable_t encodemb;
vp8_search_rtcd_vtable_t search;
vp8_temporal_rtcd_vtable_t temporal;
} VP8_ENCODER_RTCD;
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index f72697728..12ff9c3ec 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -35,14 +35,13 @@
#include "rdopt.h"
#include "ratectrl.h"
#include "vpx_mem/vpx_mem.h"
-#include "dct.h"
#include "vp8/common/systemdependent.h"
#include "vp8/encoder/encodemv.h"
#include "vp8/common/seg_common.h"
#include "vp8/common/pred_common.h"
#include "vp8/common/entropy.h"
-
+#include "vpx_rtcd.h"
#if CONFIG_NEWBESTREFMV
#include "vp8/common/mvref_common.h"
#endif
@@ -710,11 +709,8 @@ static void macro_block_yrd_4x4(MACROBLOCK *mb,
BLOCK *beptr;
int d;
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
- mb->src_diff,
- *(mb->block[0].base_src),
- xd->predictor,
- mb->block[0].src_stride);
+ vp8_subtract_mby(mb->src_diff, *(mb->block[0].base_src), xd->predictor,
+ mb->block[0].src_stride);
// Fdct and building the 2nd order block
for (beptr = mb->block; beptr < mb->block + 16; beptr += 2) {
@@ -735,9 +731,9 @@ static void macro_block_yrd_4x4(MACROBLOCK *mb,
mb->quantize_b_4x4(mb_y2, x_y2);
// Distortion
- d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 1);
+ d = vp8_mbblock_error(mb, 1);
- d += ENCODEMB_INVOKE(&rtcd->encodemb, berr)(mb_y2->coeff, x_y2->dqcoeff, 16);
+ d += vp8_block_error(mb_y2->coeff, x_y2->dqcoeff, 16);
*Distortion = (d >> 2);
// rate
@@ -784,11 +780,8 @@ static void macro_block_yrd_8x8(MACROBLOCK *mb,
BLOCKD *const x_y2 = xd->block + 24;
int d;
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
- mb->src_diff,
- *(mb->block[0].base_src),
- xd->predictor,
- mb->block[0].src_stride);
+ vp8_subtract_mby(mb->src_diff, *(mb->block[0].base_src), xd->predictor,
+ mb->block[0].src_stride);
vp8_transform_mby_8x8(mb);
vp8_quantize_mby_8x8(mb);
@@ -803,8 +796,8 @@ static void macro_block_yrd_8x8(MACROBLOCK *mb,
xd->dqcoeff[128] = 0;
xd->dqcoeff[192] = 0;
- d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 0);
- d += ENCODEMB_INVOKE(&rtcd->encodemb, berr)(mb_y2->coeff, x_y2->dqcoeff, 16);
+ d = vp8_mbblock_error(mb, 0);
+ d += vp8_block_error(mb_y2->coeff, x_y2->dqcoeff, 16);
*Distortion = (d >> 2);
// rate
@@ -836,11 +829,8 @@ static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
BLOCK *be = &mb->block[0];
TX_TYPE tx_type;
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
- mb->src_diff,
- *(mb->block[0].base_src),
- mb->e_mbd.predictor,
- mb->block[0].src_stride);
+ vp8_subtract_mby(mb->src_diff, *(mb->block[0].base_src), mb->e_mbd.predictor,
+ mb->block[0].src_stride);
tx_type = get_tx_type_16x16(xd, b);
if (tx_type != DCT_DCT) {
@@ -855,7 +845,7 @@ static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
if (mb->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED)
vp8_optimize_mby_16x16(mb, rtcd);
- d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 0);
+ d = vp8_mbblock_error(mb, 0);
*Distortion = (d >> 2);
// rate
@@ -871,7 +861,7 @@ static void macro_block_yrd(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
MACROBLOCKD *xd = &x->e_mbd;
int can_skip = cm->mb_no_coeff_skip;
- vp8_prob skip_prob = can_skip ? get_pred_prob(cm, xd, PRED_MBSKIP) : 128;
+ vp8_prob skip_prob = can_skip ? vp9_get_pred_prob(cm, xd, PRED_MBSKIP) : 128;
int s0, s1;
int r4x4, r4x4s, r8x8, r8x8s, d4x4, d8x8, s4x4, s8x8;
int64_t rd4x4, rd8x8, rd4x4s, rd8x8s;
@@ -1029,8 +1019,8 @@ static void super_block_yrd_8x8(MACROBLOCK *x,
xd->dqcoeff[128] = 0;
xd->dqcoeff[192] = 0;
- d += ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(x, 0);
- d += ENCODEMB_INVOKE(&rtcd->encodemb, berr)(by2->coeff, bdy2->dqcoeff, 16);
+ d += vp8_mbblock_error(x, 0);
+ d += vp8_block_error(by2->coeff, bdy2->dqcoeff, 16);
xd->above_context = ta + x_idx;
xd->left_context = tl + y_idx;
r += vp8_rdcost_mby_8x8(x, 0);
@@ -1121,7 +1111,7 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
rate += bmode_costs[mode2];
}
#endif
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), subb)(be, b, 16);
+ vp8_subtract_b(be, b, 16);
b->bmi.as_mode.first = mode;
tx_type = get_tx_type_4x4(xd, b);
@@ -1138,8 +1128,7 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
ratey = cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC, &tempa, &templ, TX_4X4);
rate += ratey;
- distortion = ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)(
- be->coeff, b->dqcoeff, 16) >> 2;
+ distortion = vp8_block_error(be->coeff, b->dqcoeff, 16) >> 2;
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
@@ -1621,17 +1610,14 @@ static int rd_cost_mbuv(MACROBLOCK *mb) {
static int64_t rd_inter16x16_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int fullpixel, int *skip) {
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer,
- x->src.v_buffer,
- x->e_mbd.predictor,
- x->src.uv_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_4x4(x);
vp8_quantize_mbuv_4x4(x);
*rate = rd_cost_mbuv(x);
- *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ *distortion = vp8_mbuverror(x) / 4;
*skip = mbuv_is_skippable_4x4(&x->e_mbd);
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
@@ -1696,7 +1682,7 @@ static int64_t rd_inter32x32_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
xd->above_context = ta + x_idx;
xd->left_context = tl + y_idx;
r += rd_cost_mbuv_8x8(x, 0);
- d += ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ d += vp8_mbuverror(x) / 4;
skippable = skippable && mbuv_is_skippable_8x8(xd);
}
@@ -1714,17 +1700,14 @@ static int64_t rd_inter32x32_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
static int64_t rd_inter16x16_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int fullpixel, int *skip) {
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer,
- x->src.v_buffer,
- x->e_mbd.predictor,
- x->src.uv_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_8x8(x);
vp8_quantize_mbuv_8x8(x);
*rate = rd_cost_mbuv_8x8(x, 1);
- *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ *distortion = vp8_mbuverror(x) / 4;
*skip = mbuv_is_skippable_8x8(&x->e_mbd);
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
@@ -1734,14 +1717,14 @@ static int64_t rd_inter16x16_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
static int64_t rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int *skippable, int fullpixel) {
vp8_build_inter4x4_predictors_mbuv(&x->e_mbd);
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_4x4(x);
vp8_quantize_mbuv_4x4(x);
*rate = rd_cost_mbuv(x);
- *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ *distortion = vp8_mbuverror(x) / 4;
*skippable = mbuv_is_skippable_4x4(&x->e_mbd);
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
@@ -1786,9 +1769,8 @@ static void rd_pick_intra_mbuv_mode(VP8_COMP *cpi,
}
#endif
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
- x->src.uv_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_4x4(x);
vp8_quantize_mbuv_4x4(x);
@@ -1796,7 +1778,7 @@ static void rd_pick_intra_mbuv_mode(VP8_COMP *cpi,
rate = rate_to
+ x->intra_uv_mode_cost[x->e_mbd.frame_type][mbmi->uv_mode];
- distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ distortion = vp8_mbuverror(x) / 4;
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
@@ -1845,9 +1827,8 @@ static void rd_pick_intra_mbuv_mode_8x8(VP8_COMP *cpi,
mbmi->uv_mode = mode;
vp8_build_intra_predictors_mbuv(&x->e_mbd);
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
- x->src.uv_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_8x8(x);
vp8_quantize_mbuv_8x8(x);
@@ -1855,7 +1836,7 @@ static void rd_pick_intra_mbuv_mode_8x8(VP8_COMP *cpi,
rate_to = rd_cost_mbuv_8x8(x, 1);
rate = rate_to + x->intra_uv_mode_cost[x->e_mbd.frame_type][mbmi->uv_mode];
- distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ distortion = vp8_mbuverror(x) / 4;
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
if (this_rd < best_rd) {
@@ -1905,7 +1886,7 @@ static void super_block_uvrd_8x8(MACROBLOCK *x,
vp8_quantize_mbuv_8x8(x);
s &= mbuv_is_skippable_8x8(xd);
- d += ENCODEMB_INVOKE(&rtcd->encodemb, mbuverr)(x) >> 2;
+ d += vp8_mbuverror(x) >> 2;
xd->above_context = ta + x_idx;
xd->left_context = tl + y_idx;
r += rd_cost_mbuv_8x8(x, 0);
@@ -2115,10 +2096,10 @@ static int64_t encode_inter_mb_segment(MACROBLOCK *x,
vp8_build_inter_predictors_b(bd, 16, xd->subpixel_predict);
if (xd->mode_info_context->mbmi.second_ref_frame)
vp8_build_2nd_inter_predictors_b(bd, 16, xd->subpixel_predict_avg);
- ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, bd, 16);
+ vp8_subtract_b(be, bd, 16);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4(be, bd);
- thisdistortion = vp8_block_error_c(be->coeff, bd->dqcoeff, 16);
+ thisdistortion = vp8_block_error(be->coeff, bd->dqcoeff, 16);
*distortion += thisdistortion;
*labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
ta + vp8_block2above[i],
@@ -3036,7 +3017,7 @@ static void set_i8x8_block_modes(MACROBLOCK *x, int modes[2][4]) {
}
}
-extern void calc_ref_probs(int *count, vp8_prob *probs);
+extern void vp9_calc_ref_probs(int *count, vp8_prob *probs);
static void estimate_curframe_refprobs(VP8_COMP *cpi, vp8_prob mod_refprobs[3], int pred_ref) {
int norm_cnt[MAX_REF_FRAMES];
const int *const rfct = cpi->count_mb_ref_frame_usage;
@@ -3052,28 +3033,28 @@ static void estimate_curframe_refprobs(VP8_COMP *cpi, vp8_prob mod_refprobs[3],
norm_cnt[1] = last_count;
norm_cnt[2] = gf_count;
norm_cnt[3] = arf_count;
- calc_ref_probs(norm_cnt, mod_refprobs);
+ vp9_calc_ref_probs(norm_cnt, mod_refprobs);
mod_refprobs[0] = 0; // This branch implicit
} else if (pred_ref == LAST_FRAME) {
norm_cnt[0] = intra_count;
norm_cnt[1] = 0;
norm_cnt[2] = gf_count;
norm_cnt[3] = arf_count;
- calc_ref_probs(norm_cnt, mod_refprobs);
+ vp9_calc_ref_probs(norm_cnt, mod_refprobs);
mod_refprobs[1] = 0; // This branch implicit
} else if (pred_ref == GOLDEN_FRAME) {
norm_cnt[0] = intra_count;
norm_cnt[1] = last_count;
norm_cnt[2] = 0;
norm_cnt[3] = arf_count;
- calc_ref_probs(norm_cnt, mod_refprobs);
+ vp9_calc_ref_probs(norm_cnt, mod_refprobs);
mod_refprobs[2] = 0; // This branch implicit
} else {
norm_cnt[0] = intra_count;
norm_cnt[1] = last_count;
norm_cnt[2] = gf_count;
norm_cnt[3] = 0;
- calc_ref_probs(norm_cnt, mod_refprobs);
+ vp9_calc_ref_probs(norm_cnt, mod_refprobs);
mod_refprobs[2] = 0; // This branch implicit
}
}
@@ -3113,13 +3094,13 @@ static void vp8_estimate_ref_frame_costs(VP8_COMP *cpi, int segment_id, unsigned
}
// Get the predicted reference for this mb
- pred_ref = get_pred_ref(cm, xd);
+ pred_ref = vp9_get_pred_ref(cm, xd);
// Get the context probability for the prediction flag (based on last frame)
- pred_prob = get_pred_prob(cm, xd, PRED_REF);
+ pred_prob = vp9_get_pred_prob(cm, xd, PRED_REF);
// Predict probability for current frame based on stats so far
- pred_ctx = get_pred_context(cm, xd, PRED_REF);
+ pred_ctx = vp9_get_pred_context(cm, xd, PRED_REF);
tot_count = cpi->ref_pred_count[pred_ctx][0] + cpi->ref_pred_count[pred_ctx][1];
if (tot_count) {
new_pred_prob =
@@ -3252,11 +3233,11 @@ void setup_buffer_inter(VP8_COMP *cpi, MACROBLOCK *x, int idx, int frame_type,
v_buffer[frame_type] = yv12->v_buffer + recon_uvoffset;
#if CONFIG_NEWBESTREFMV
- find_mv_refs(xd, xd->mode_info_context,
- xd->prev_mode_info_context,
- frame_type,
- mbmi->ref_mvs[frame_type],
- cpi->common.ref_frame_sign_bias);
+ vp9_find_mv_refs(xd, xd->mode_info_context,
+ xd->prev_mode_info_context,
+ frame_type,
+ mbmi->ref_mvs[frame_type],
+ cpi->common.ref_frame_sign_bias);
vp8_find_best_ref_mvs(xd, y_buffer[frame_type],
yv12->y_stride,
@@ -3278,7 +3259,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
PARTITION_INFO best_partition;
int_mv best_ref_mv, second_best_ref_mv;
MB_PREDICTION_MODE this_mode;
- MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
+ MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
int i, best_mode_index = 0;
int mode8x8[2][4];
unsigned char segment_id = mbmi->segment_id;
@@ -3414,7 +3395,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mbmi->uv_mode = DC_PRED;
mbmi->ref_frame = vp8_mode_order[mode_index].ref_frame;
mbmi->second_ref_frame = vp8_mode_order[mode_index].second_ref_frame;
- is_comp_pred = x->e_mbd.mode_info_context->mbmi.second_ref_frame != 0;
+ is_comp_pred = xd->mode_info_context->mbmi.second_ref_frame != 0;
#if CONFIG_PRED_FILTER
mbmi->pred_filter_enabled = 0;
#endif
@@ -3472,9 +3453,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
if (mbmi->ref_frame) {
int ref = mbmi->ref_frame;
- x->e_mbd.pre.y_buffer = y_buffer[ref];
- x->e_mbd.pre.u_buffer = u_buffer[ref];
- x->e_mbd.pre.v_buffer = v_buffer[ref];
+ xd->pre.y_buffer = y_buffer[ref];
+ xd->pre.u_buffer = u_buffer[ref];
+ xd->pre.v_buffer = v_buffer[ref];
best_ref_mv = frame_best_ref_mv[ref];
vpx_memcpy(mdcounts, frame_mdcounts[ref], sizeof(mdcounts));
}
@@ -3482,9 +3463,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
if (mbmi->second_ref_frame) {
int ref = mbmi->second_ref_frame;
- x->e_mbd.second_pre.y_buffer = y_buffer[ref];
- x->e_mbd.second_pre.u_buffer = u_buffer[ref];
- x->e_mbd.second_pre.v_buffer = v_buffer[ref];
+ xd->second_pre.y_buffer = y_buffer[ref];
+ xd->second_pre.u_buffer = u_buffer[ref];
+ xd->second_pre.v_buffer = v_buffer[ref];
second_best_ref_mv = frame_best_ref_mv[ref];
}
@@ -3528,7 +3509,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
macro_block_yrd(cpi, x, &rate_y, &distortion, &skippable, txfm_cache);
rate2 += rate_y;
distortion2 += distortion;
- rate2 += x->mbmode_cost[x->e_mbd.frame_type][mbmi->mode];
+ rate2 += x->mbmode_cost[xd->frame_type][mbmi->mode];
if (mbmi->txfm_size != TX_4X4) {
rate2 += uv_intra_rate_8x8;
rate_uv = uv_intra_rate_tokenonly_8x8;
@@ -3547,7 +3528,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
int64_t tmp_rd;
// Note the rate value returned here includes the cost of coding
- // the BPRED mode : x->mbmode_cost[x->e_mbd.frame_type][BPRED];
+ // the BPRED mode : x->mbmode_cost[xd->frame_type][BPRED];
mbmi->txfm_size = TX_4X4;
tmp_rd = rd_pick_intra4x4mby_modes(cpi, x, &rate, &rate_y, &distortion, best_yrd,
#if CONFIG_COMP_INTRA_PRED
@@ -3577,15 +3558,15 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mbmi->txfm_size = TX_4X4;
tmp_rd_4x4 = rd_pick_intra8x8mby_modes(cpi, x, &r4x4, &tok4x4,
&d4x4, best_yrd);
- mode8x8[0][0] = x->e_mbd.mode_info_context->bmi[0].as_mode.first;
- mode8x8[0][1] = x->e_mbd.mode_info_context->bmi[2].as_mode.first;
- mode8x8[0][2] = x->e_mbd.mode_info_context->bmi[8].as_mode.first;
- mode8x8[0][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.first;
+ mode8x8[0][0] = xd->mode_info_context->bmi[0].as_mode.first;
+ mode8x8[0][1] = xd->mode_info_context->bmi[2].as_mode.first;
+ mode8x8[0][2] = xd->mode_info_context->bmi[8].as_mode.first;
+ mode8x8[0][3] = xd->mode_info_context->bmi[10].as_mode.first;
#if CONFIG_COMP_INTRA_PRED
- mode8x8[1][0] = x->e_mbd.mode_info_context->bmi[0].as_mode.second;
- mode8x8[1][1] = x->e_mbd.mode_info_context->bmi[2].as_mode.second;
- mode8x8[1][2] = x->e_mbd.mode_info_context->bmi[8].as_mode.second;
- mode8x8[1][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.second;
+ mode8x8[1][0] = xd->mode_info_context->bmi[0].as_mode.second;
+ mode8x8[1][1] = xd->mode_info_context->bmi[2].as_mode.second;
+ mode8x8[1][2] = xd->mode_info_context->bmi[8].as_mode.second;
+ mode8x8[1][3] = xd->mode_info_context->bmi[10].as_mode.second;
#endif
mbmi->txfm_size = TX_8X8;
tmp_rd_8x8 = rd_pick_intra8x8mby_modes(cpi, x, &r8x8, &tok8x8,
@@ -3610,15 +3591,15 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mbmi->txfm_size = TX_8X8;
tmp_rd = tmp_rd_8x8s;
- mode8x8[0][0] = x->e_mbd.mode_info_context->bmi[0].as_mode.first;
- mode8x8[0][1] = x->e_mbd.mode_info_context->bmi[2].as_mode.first;
- mode8x8[0][2] = x->e_mbd.mode_info_context->bmi[8].as_mode.first;
- mode8x8[0][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.first;
+ mode8x8[0][0] = xd->mode_info_context->bmi[0].as_mode.first;
+ mode8x8[0][1] = xd->mode_info_context->bmi[2].as_mode.first;
+ mode8x8[0][2] = xd->mode_info_context->bmi[8].as_mode.first;
+ mode8x8[0][3] = xd->mode_info_context->bmi[10].as_mode.first;
#if CONFIG_COMP_INTRA_PRED
- mode8x8[1][0] = x->e_mbd.mode_info_context->bmi[0].as_mode.second;
- mode8x8[1][1] = x->e_mbd.mode_info_context->bmi[2].as_mode.second;
- mode8x8[1][2] = x->e_mbd.mode_info_context->bmi[8].as_mode.second;
- mode8x8[1][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.second;
+ mode8x8[1][0] = xd->mode_info_context->bmi[0].as_mode.second;
+ mode8x8[1][1] = xd->mode_info_context->bmi[2].as_mode.second;
+ mode8x8[1][2] = xd->mode_info_context->bmi[8].as_mode.second;
+ mode8x8[1][3] = xd->mode_info_context->bmi[10].as_mode.second;
#endif
}
} else if (cm->txfm_mode == ONLY_4X4) {
@@ -3634,15 +3615,15 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mbmi->txfm_size = TX_8X8;
tmp_rd = tmp_rd_8x8;
- mode8x8[0][0] = x->e_mbd.mode_info_context->bmi[0].as_mode.first;
- mode8x8[0][1] = x->e_mbd.mode_info_context->bmi[2].as_mode.first;
- mode8x8[0][2] = x->e_mbd.mode_info_context->bmi[8].as_mode.first;
- mode8x8[0][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.first;
+ mode8x8[0][0] = xd->mode_info_context->bmi[0].as_mode.first;
+ mode8x8[0][1] = xd->mode_info_context->bmi[2].as_mode.first;
+ mode8x8[0][2] = xd->mode_info_context->bmi[8].as_mode.first;
+ mode8x8[0][3] = xd->mode_info_context->bmi[10].as_mode.first;
#if CONFIG_COMP_INTRA_PRED
- mode8x8[1][0] = x->e_mbd.mode_info_context->bmi[0].as_mode.second;
- mode8x8[1][1] = x->e_mbd.mode_info_context->bmi[2].as_mode.second;
- mode8x8[1][2] = x->e_mbd.mode_info_context->bmi[8].as_mode.second;
- mode8x8[1][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.second;
+ mode8x8[1][0] = xd->mode_info_context->bmi[0].as_mode.second;
+ mode8x8[1][1] = xd->mode_info_context->bmi[2].as_mode.second;
+ mode8x8[1][2] = xd->mode_info_context->bmi[8].as_mode.second;
+ mode8x8[1][3] = xd->mode_info_context->bmi[10].as_mode.second;
#endif
}
@@ -3688,7 +3669,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
if (cpi->common.mcomp_filter_type == SWITCHABLE)
rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs
- [get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
+ [vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
[vp8_switchable_interp_map[mbmi->interp_filter]];
// If even the 'Y' rd value of split is higher than best so far
// then don't bother looking at UV
@@ -3711,14 +3692,14 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mode_excluded = cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY;
compmode_cost =
- vp8_cost_bit(get_pred_prob(cm, xd, PRED_COMP), is_comp_pred);
+ vp8_cost_bit(vp9_get_pred_prob(cm, xd, PRED_COMP), is_comp_pred);
mbmi->mode = this_mode;
}
else {
const int num_refs = is_comp_pred ? 2 : 1;
int flag, skip;
- int refs[2] = {x->e_mbd.mode_info_context->mbmi.ref_frame,
- x->e_mbd.mode_info_context->mbmi.second_ref_frame};
+ int refs[2] = {xd->mode_info_context->mbmi.ref_frame,
+ xd->mode_info_context->mbmi.second_ref_frame};
int_mv cur_mv[2];
switch (this_mode) {
case NEWMV:
@@ -3729,11 +3710,11 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
rate2 += vp8_mv_bit_cost(&frame_mv[NEWMV][refs[0]],
&frame_best_ref_mv[refs[0]],
XMVCOST, 96,
- x->e_mbd.allow_high_precision_mv);
+ xd->allow_high_precision_mv);
rate2 += vp8_mv_bit_cost(&frame_mv[NEWMV][refs[1]],
&frame_best_ref_mv[refs[1]],
XMVCOST, 96,
- x->e_mbd.allow_high_precision_mv);
+ xd->allow_high_precision_mv);
} else {
int bestsme = INT_MAX;
int further_steps, step_param = cpi->sf.first_step;
@@ -3752,7 +3733,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
saddone = 1;
}
- vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
+ vp8_mv_pred(cpi, &x->e_mbd, xd->mode_info_context, &mvp,
mbmi->ref_frame, cpi->common.ref_frame_sign_bias,
&sr, &near_sadidx[0]);
@@ -3789,7 +3770,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// Add the new motion vector cost to our rolling cost variable
rate2 += vp8_mv_bit_cost(&tmp_mv, &best_ref_mv,
XMVCOST, 96,
- x->e_mbd.allow_high_precision_mv);
+ xd->allow_high_precision_mv);
}
break;
case NEARESTMV:
@@ -3815,7 +3796,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
vp8_clamp_mv2(&cur_mv[i], xd);
if (mv_check_bounds(x, &cur_mv[i]))
flag = 1;
- x->e_mbd.mode_info_context->mbmi.mv[i].as_int = cur_mv[i].as_int;
+ xd->mode_info_context->mbmi.mv[i].as_int = cur_mv[i].as_int;
}
if (flag)
continue;
@@ -3829,21 +3810,21 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
#endif
if (cpi->common.mcomp_filter_type == SWITCHABLE)
rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs
- [get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
+ [vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
[vp8_switchable_interp_map[
- x->e_mbd.mode_info_context->mbmi.interp_filter]];
+ xd->mode_info_context->mbmi.interp_filter]];
/* We don't include the cost of the second reference here, because there are only
* three options: Last/Golden, ARF/Last or Golden/ARF, or in other words if you
* present them in that order, the second one is always known if the first is known */
- compmode_cost = vp8_cost_bit(get_pred_prob(cm, xd, PRED_COMP),
+ compmode_cost = vp8_cost_bit(vp9_get_pred_prob(cm, xd, PRED_COMP),
is_comp_pred);
rate2 += vp8_cost_mv_ref(cpi, this_mode, mdcounts);
- vp8_build_1st_inter16x16_predictors_mby(&x->e_mbd, x->e_mbd.predictor,
+ vp8_build_1st_inter16x16_predictors_mby(&x->e_mbd, xd->predictor,
16, 0);
if (is_comp_pred)
- vp8_build_2nd_inter16x16_predictors_mby(&x->e_mbd, x->e_mbd.predictor,
+ vp8_build_2nd_inter16x16_predictors_mby(&x->e_mbd, xd->predictor,
16);
if (cpi->active_map_enabled && x->active_ptr[0] == 0)
@@ -3857,7 +3838,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
threshold = x->encode_breakout;
var = vp8_variance16x16(*(b->base_src), b->src_stride,
- x->e_mbd.predictor, 16, &sse);
+ xd->predictor, 16, &sse);
if (sse < threshold) {
unsigned int q2dc = xd->block[24].dequant[0];
@@ -3936,7 +3917,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// Cost the skip mb case
vp8_prob skip_prob =
- get_pred_prob(cm, &x->e_mbd, PRED_MBSKIP);
+ vp9_get_pred_prob(cm, &x->e_mbd, PRED_MBSKIP);
if (skip_prob) {
prob_skip_cost = vp8_cost_bit(skip_prob, 1);
@@ -3950,7 +3931,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mbmi->mb_skip_coeff = 0;
if (mb_skip_allowed) {
int prob_skip_cost = vp8_cost_bit(
- get_pred_prob(cm, &x->e_mbd, PRED_MBSKIP), 0);
+ vp9_get_pred_prob(cm, &x->e_mbd, PRED_MBSKIP), 0);
rate2 += prob_skip_cost;
other_cost += prob_skip_cost;
}
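These hunks price the macroblock skip flag with vp8_cost_bit() against the MBSKIP context probability. The bit cost is a table lookup of -log2(p) in 1/256-bit units; a sketch of that lookup, where the extern table and the 255-complement convention are assumptions about the vp8 tree writer, is:

    /* Assumed cost table: vp8_prob_cost[p] ~ -256 * log2(p / 256). */
    extern const unsigned int vp8_prob_cost[256];

    /* Cost of coding 'bit' when P(bit == 0) is prob/256. */
    static unsigned int cost_bit_sketch(unsigned char prob, int bit) {
      return vp8_prob_cost[bit ? 255 - prob : prob];
    }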
@@ -4018,7 +3999,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
|| (this_mode == I8X8_PRED)
|| (this_mode == SPLITMV))
for (i = 0; i < 16; i++) {
- best_bmodes[i] = x->e_mbd.block[i].bmi;
+ best_bmodes[i] = xd->block[i].bmi;
}
}
@@ -4102,7 +4083,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
best_mbmode.mode >= NEARESTMV &&
best_mbmode.mode <= SPLITMV) {
++cpi->switchable_interp_count
- [get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
+ [vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
[vp8_switchable_interp_map[best_mbmode.interp_filter]];
}
@@ -4209,7 +4190,7 @@ void vp8_rd_pick_intra_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
int dist_y, dist_uv;
int y_skip, uv_skip;
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
+ xd->mode_info_context->mbmi.txfm_size = TX_8X8;
error_uv = rd_pick_intra_sbuv_mode(cpi, x, &rate_uv, &rate_uv_tokenonly,
&dist_uv, &uv_skip);
@@ -4218,12 +4199,12 @@ void vp8_rd_pick_intra_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
if (cpi->common.mb_no_coeff_skip && y_skip && uv_skip) {
*returnrate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
- vp8_cost_bit(get_pred_prob(cm, xd, PRED_MBSKIP), 1);
+ vp8_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 1);
*returndist = dist_y + (dist_uv >> 2);
} else {
*returnrate = rate_y + rate_uv;
if (cpi->common.mb_no_coeff_skip)
- *returnrate += vp8_cost_bit(get_pred_prob(cm, xd, PRED_MBSKIP), 0);
+ *returnrate += vp8_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
*returndist = dist_y + (dist_uv >> 2);
}
}
@@ -4315,7 +4296,7 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
mbmi->mode = mode16x16;
mbmi->uv_mode = modeuv;
rate = rateuv8x8 + rate16x16 - rateuv8x8_tokenonly - rate16x16_tokenonly +
- vp8_cost_bit(get_pred_prob(cm, xd, PRED_MBSKIP), 1);
+ vp8_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 1);
dist = dist16x16 + (distuv8x8 >> 2);
mbmi->txfm_size = txfm_size_16x16;
memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0,
@@ -4348,7 +4329,7 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
}
}
if (cpi->common.mb_no_coeff_skip)
- rate += vp8_cost_bit(get_pred_prob(cm, xd, PRED_MBSKIP), 0);
+ rate += vp8_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
} else {
if (error4x4 < error8x8) {
rate = rateuv;
@@ -4378,7 +4359,7 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
sizeof(x->mb_context[xd->mb_index].txfm_rd_diff));
}
if (cpi->common.mb_no_coeff_skip)
- rate += vp8_cost_bit(get_pred_prob(cm, xd, PRED_MBSKIP), 0);
+ rate += vp8_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
}
*returnrate = rate;
@@ -4719,7 +4700,7 @@ int64_t vp8_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
xd->dst.uv_stride);
compmode_cost =
- vp8_cost_bit(get_pred_prob(cm, xd, PRED_COMP), 0);
+ vp8_cost_bit(vp9_get_pred_prob(cm, xd, PRED_COMP), 0);
if (cpi->active_map_enabled && x->active_ptr[0] == 0) {
x->skip = 1;
@@ -4871,7 +4852,7 @@ int64_t vp8_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
* are only three options: Last/Golden, ARF/Last or Golden/ARF, or in
* other words if you present them in that order, the second one is
* always known if the first is known */
- compmode_cost = vp8_cost_bit(get_pred_prob(cm, xd, PRED_COMP), 1);
+ compmode_cost = vp8_cost_bit(vp9_get_pred_prob(cm, xd, PRED_COMP), 1);
}
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
@@ -4908,7 +4889,7 @@ int64_t vp8_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
// Cost the skip mb case
vp8_prob skip_prob =
- get_pred_prob(cm, xd, PRED_MBSKIP);
+ vp9_get_pred_prob(cm, xd, PRED_MBSKIP);
if (skip_prob) {
prob_skip_cost = vp8_cost_bit(skip_prob, 1);
@@ -4919,7 +4900,7 @@ int64_t vp8_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
}
// Add in the cost of the no skip flag.
else if (mb_skip_allowed) {
- int prob_skip_cost = vp8_cost_bit(get_pred_prob(cm, xd,
+ int prob_skip_cost = vp8_cost_bit(vp9_get_pred_prob(cm, xd,
PRED_MBSKIP), 0);
rate2 += prob_skip_cost;
other_cost += prob_skip_cost;
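Taken together, the rdopt.c changes follow one pattern: call sites that used to dispatch through the per-encoder function table (ENCODEMB_INVOKE on cpi->rtcd.encodemb) now name the function directly, the prediction-context helpers gain a vp9_ prefix, and repeated x->e_mbd accesses go through the local xd alias. A hedged illustration of the table-versus-direct-call difference (struct and member names here are illustrative, not the codec's actual definitions) is:

    /* Old style: an indirect call through a table filled in at init time. */
    typedef int (*block_error_fn)(const short *coeff, const short *dqcoeff, int size);
    struct encodemb_table_sketch { block_error_fn berr; };

    static int via_table(const struct encodemb_table_sketch *t,
                         const short *c, const short *dq) {
      return t->berr(c, dq, 16);
    }

    /* New style: the symbol is resolved by the generated RTCD layer, so the
     * call site is just the function name, e.g. vp8_block_error(c, dq, 16). */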
diff --git a/vp8/encoder/satd_c.c b/vp8/encoder/satd_c.c
index 2ce1b9937..4a76de36b 100644
--- a/vp8/encoder/satd_c.c
+++ b/vp8/encoder/satd_c.c
@@ -9,9 +9,8 @@
*/
#include <stdlib.h>
-#include "dct.h"
#include "vpx_ports/mem.h"
-
+#include "./vpx_rtcd.h"
unsigned int vp8_satd16x16_c(const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
diff --git a/vp8/encoder/segmentation.c b/vp8/encoder/segmentation.c
index e85bb45ce..fea816ca2 100644
--- a/vp8/encoder/segmentation.c
+++ b/vp8/encoder/segmentation.c
@@ -222,15 +222,15 @@ void choose_segmap_coding_method(VP8_COMP *cpi) {
if (cm->frame_type != KEY_FRAME) {
// Test to see if the segment id matches the predicted value.
int seg_predicted =
- (segment_id == get_pred_mb_segid(cm, segmap_index));
+ (segment_id == vp9_get_pred_mb_segid(cm, segmap_index));
// Get the segment id prediction context
pred_context =
- get_pred_context(cm, xd, PRED_SEG_ID);
+ vp9_get_pred_context(cm, xd, PRED_SEG_ID);
// Store the prediction status for this mb and update counts
// as appropriate
- set_pred_flag(xd, PRED_SEG_ID, seg_predicted);
+ vp9_set_pred_flag(xd, PRED_SEG_ID, seg_predicted);
temporal_predictor_count[pred_context][seg_predicted]++;
if (!seg_predicted)
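The segmentation hunk keeps the same temporal-prediction bookkeeping, only through the renamed vp9_ helpers: compare the coded segment id against the predicted one, store the outcome on the macroblock, and tally it per prediction context. A compact sketch of that bookkeeping (function and array names are illustrative) is:

    /* Record whether the segment id was predicted correctly, both on the
     * macroblock (for the bitstream flag) and in per-context counts (for
     * later probability estimation). */
    static void count_seg_prediction_sketch(int segment_id, int predicted_id,
                                            int pred_context,
                                            unsigned int counts[][2],
                                            unsigned char *mb_pred_flag) {
      const int hit = (segment_id == predicted_id);
      *mb_pred_flag = (unsigned char)hit;   /* cf. vp9_set_pred_flag() */
      counts[pred_context][hit]++;          /* cf. temporal_predictor_count */
    }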
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index a96b8e455..99f6b1fb1 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -297,7 +297,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
int has_y2_block;
int b;
int tx_size = xd->mode_info_context->mbmi.txfm_size;
- int mb_skip_context = get_pred_context(&cpi->common, xd, PRED_MBSKIP);
+ int mb_skip_context = vp9_get_pred_context(&cpi->common, xd, PRED_MBSKIP);
TOKENEXTRA *t_backup = *t;
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *) xd->above_context;
ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *) xd->left_context;
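vp8_tokenize_mb() derives its skip-flag context from the renamed vp9_get_pred_context(). For PRED_MBSKIP that context is commonly just the sum of the above and left macroblocks' skip flags (0, 1 or 2); treating that as the exact rule used here is an assumption:

    /* Assumed PRED_MBSKIP context: how many of the two causal neighbours
     * were coded as skipped. */
    static int mb_skip_context_sketch(int above_skip, int left_skip) {
      return above_skip + left_skip;
    }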
diff --git a/vp8/encoder/x86/dct_x86.h b/vp8/encoder/x86/dct_x86.h
deleted file mode 100644
index 19f6c1686..000000000
--- a/vp8/encoder/x86/dct_x86.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef DCT_X86_H
-#define DCT_X86_H
-
-
-/* Note:
- *
- * This platform is commonly built for runtime CPU detection. If you modify
- * any of the function mappings present in this file, be sure to also update
- * them in the function pointer initialization code
- */
-#if HAVE_MMX
-extern prototype_fdct(vp8_short_fdct4x4_mmx);
-extern prototype_fdct(vp8_short_fdct8x4_mmx);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-
-#undef vp8_fdct_short4x4
-#define vp8_fdct_short4x4 vp8_short_fdct4x4_mmx
-
-#undef vp8_fdct_short8x4
-#define vp8_fdct_short8x4 vp8_short_fdct8x4_mmx
-
-#undef vp8_fdct_fast4x4
-#define vp8_fdct_fast4x4 vp8_short_fdct4x4_mmx
-
-#undef vp8_fdct_fast8x4
-#define vp8_fdct_fast8x4 vp8_short_fdct8x4_mmx
-
-#endif
-
-#endif
-
-
-#if HAVE_SSE2
-extern prototype_fdct(vp8_short_fdct8x4_sse2);
-extern prototype_fdct(vp8_short_walsh4x4_sse2);
-
-extern prototype_fdct(vp8_short_fdct4x4_sse2);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-
-#undef vp8_fdct_short4x4
-#define vp8_fdct_short4x4 vp8_short_fdct4x4_sse2
-
-#undef vp8_fdct_short8x4
-#define vp8_fdct_short8x4 vp8_short_fdct8x4_sse2
-
-#undef vp8_fdct_fast4x4
-#define vp8_fdct_fast4x4 vp8_short_fdct4x4_sse2
-
-#undef vp8_fdct_fast8x4
-#define vp8_fdct_fast8x4 vp8_short_fdct8x4_sse2
-
-#undef vp8_fdct_walsh_short4x4
-#define vp8_fdct_walsh_short4x4 vp8_short_walsh4x4_sse2
-
-#endif
-
-
-#endif
-
-#endif
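With dct_x86.h (and encodemb_x86.h below) deleted, the compile-time #define remapping of symbols to their MMX/SSE2 variants is gone; per the diffstat, rtcd_defs.sh now declares these functions and the build generates the dispatch header. A hedged sketch of what that generated layer looks like for one function (the exact generated names and setup hook are assumptions) is:

    /* Per-variant prototypes emitted by the RTCD generator. */
    void vp8_short_fdct4x4_c(short *input, short *output, int pitch);
    void vp8_short_fdct4x4_sse2(short *input, short *output, int pitch);

    /* With runtime CPU detection, the public name is a function pointer that
     * the generated setup code points at the best available variant. */
    extern void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);

    /* Inside the generated setup (sketch):
     *   vp8_short_fdct4x4 = vp8_short_fdct4x4_c;
     *   if (flags & HAS_SSE2) vp8_short_fdct4x4 = vp8_short_fdct4x4_sse2;
     */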
diff --git a/vp8/encoder/x86/encodemb_x86.h b/vp8/encoder/x86/encodemb_x86.h
deleted file mode 100644
index 69b3edd66..000000000
--- a/vp8/encoder/x86/encodemb_x86.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef ENCODEMB_X86_H
-#define ENCODEMB_X86_H
-
-
-/* Note:
- *
- * This platform is commonly built for runtime CPU detection. If you modify
- * any of the function mappings present in this file, be sure to also update
- * them in the function pointer initialization code
- */
-#if HAVE_MMX
-extern prototype_berr(vp8_block_error_mmx);
-extern prototype_mberr(vp8_mbblock_error_mmx);
-extern prototype_mbuverr(vp8_mbuverror_mmx);
-extern prototype_subb(vp8_subtract_b_mmx);
-extern prototype_submby(vp8_subtract_mby_mmx);
-extern prototype_submbuv(vp8_subtract_mbuv_mmx);
-
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_encodemb_berr
-#define vp8_encodemb_berr vp8_block_error_mmx
-
-#undef vp8_encodemb_mberr
-#define vp8_encodemb_mberr vp8_mbblock_error_mmx
-
-#undef vp8_encodemb_mbuverr
-#define vp8_encodemb_mbuverr vp8_mbuverror_mmx
-
-#undef vp8_encodemb_subb
-#define vp8_encodemb_subb vp8_subtract_b_mmx
-
-#undef vp8_encodemb_submby
-#define vp8_encodemb_submby vp8_subtract_mby_mmx
-
-#undef vp8_encodemb_submbuv
-#define vp8_encodemb_submbuv vp8_subtract_mbuv_mmx
-
-#endif
-#endif
-
-
-#if HAVE_SSE2
-extern prototype_berr(vp8_block_error_xmm);
-extern prototype_mberr(vp8_mbblock_error_xmm);
-extern prototype_mbuverr(vp8_mbuverror_xmm);
-extern prototype_subb(vp8_subtract_b_sse2);
-extern prototype_submby(vp8_subtract_mby_sse2);
-extern prototype_submbuv(vp8_subtract_mbuv_sse2);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_encodemb_berr
-#define vp8_encodemb_berr vp8_block_error_xmm
-
-#undef vp8_encodemb_mberr
-#define vp8_encodemb_mberr vp8_mbblock_error_xmm
-
-#undef vp8_encodemb_mbuverr
-#define vp8_encodemb_mbuverr vp8_mbuverror_xmm
-
-#undef vp8_encodemb_subb
-#define vp8_encodemb_subb vp8_subtract_b_sse2
-
-#undef vp8_encodemb_submby
-#define vp8_encodemb_submby vp8_subtract_mby_sse2
-
-#undef vp8_encodemb_submbuv
-#define vp8_encodemb_submbuv vp8_subtract_mbuv_sse2
-
-#endif
-#endif
-
-
-#endif
diff --git a/vp8/encoder/x86/x86_csystemdependent.c b/vp8/encoder/x86/x86_csystemdependent.c
index a169b493e..053601939 100644
--- a/vp8/encoder/x86/x86_csystemdependent.c
+++ b/vp8/encoder/x86/x86_csystemdependent.c
@@ -88,25 +88,8 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi) {
*/
/* Override default functions with fastest ones for this CPU. */
-#if HAVE_MMX
- if (flags & HAS_MMX) {
- cpi->rtcd.encodemb.berr = vp8_block_error_mmx;
- cpi->rtcd.encodemb.mberr = vp8_mbblock_error_mmx;
- cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_mmx;
- cpi->rtcd.encodemb.subb = vp8_subtract_b_mmx;
- cpi->rtcd.encodemb.submby = vp8_subtract_mby_mmx;
- cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_mmx;
- }
-#endif
-
#if HAVE_SSE2
if (flags & HAS_SSE2) {
- cpi->rtcd.encodemb.berr = vp8_block_error_xmm;
- cpi->rtcd.encodemb.mberr = vp8_mbblock_error_xmm;
- cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_xmm;
- cpi->rtcd.encodemb.subb = vp8_subtract_b_sse2;
- cpi->rtcd.encodemb.submby = vp8_subtract_mby_sse2;
- cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_sse2;
cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_sse2;
}
diff --git a/vp8/vp8cx.mk b/vp8/vp8cx.mk
index 6d2f18080..ef6da7cab 100644
--- a/vp8/vp8cx.mk
+++ b/vp8/vp8cx.mk
@@ -46,7 +46,6 @@ VP8_CX_SRCS-yes += encoder/generic/csystemdependent.c
VP8_CX_SRCS-yes += encoder/block.h
VP8_CX_SRCS-yes += encoder/boolhuff.h
VP8_CX_SRCS-yes += encoder/bitstream.h
-VP8_CX_SRCS-yes += encoder/dct.h
VP8_CX_SRCS-yes += encoder/encodeintra.h
VP8_CX_SRCS-yes += encoder/encodemb.h
VP8_CX_SRCS-yes += encoder/encodemv.h
@@ -89,8 +88,6 @@ VP8_CX_SRCS-yes += encoder/mbgraph.c
VP8_CX_SRCS-yes += encoder/mbgraph.h
-VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/encodemb_x86.h
-VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/dct_x86.h
VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/mcomp_x86.h
VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/quantize_x86.h
VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/temporal_filter_x86.h