Diffstat (limited to 'vp8/encoder')
-rw-r--r--  vp8/encoder/encodeintra.c                 13
-rw-r--r--  vp8/encoder/encodeintra.h                  1
-rw-r--r--  vp8/encoder/encodemb.c                    55
-rw-r--r--  vp8/encoder/encodemb.h                     3
-rw-r--r--  vp8/encoder/firstpass.c                   99
-rw-r--r--  vp8/encoder/generic/csystemdependent.c     2
-rw-r--r--  vp8/encoder/mcomp.c                       90
-rw-r--r--  vp8/encoder/onyx_if.c                     37
-rw-r--r--  vp8/encoder/psnr.c                        86
-rw-r--r--  vp8/encoder/psnr.h                         1
-rw-r--r--  vp8/encoder/ratectrl.c                    36
-rw-r--r--  vp8/encoder/rdopt.c                       35
-rw-r--r--  vp8/encoder/tokenize.c                    12
-rw-r--r--  vp8/encoder/variance_c.c                 149
-rw-r--r--  vp8/encoder/x86/variance_mmx.c            33
-rw-r--r--  vp8/encoder/x86/x86_csystemdependent.c    46
16 files changed, 226 insertions, 472 deletions
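
Note: the theme of this change is symbol-visibility cleanup. Helpers called only inside their own translation unit lose their vp8_ prefix and become static, their prototypes are dropped from the headers, file-scope tables become static const, and dead code (vp8_update_mode_context, vp8_encode_inter16x16uv, vp8_calc_psnr, vp8_is_gf_update_needed, vp8_get_speed) is deleted outright. A minimal sketch of the recurring pattern, using one of the functions below:

    /* before: declared in encodemb.h, exported from encodemb.c,
     * but called from nowhere else */
    void vp8_build_dcblock(MACROBLOCK *x);

    /* after: no header declaration, internal linkage, prefix dropped;
     * the symbol no longer leaks into the library namespace and the
     * compiler is free to inline or discard it */
    static void build_dcblock(MACROBLOCK *x);
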
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index 7b81c8d95..44000063c 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -25,19 +25,6 @@
#define intra4x4pbias_rate 256
-void vp8_update_mode_context(int *abmode, int *lbmode, int i, int best_mode)
-{
- if (i < 12)
- {
- abmode[i+4] = best_mode;
- }
-
- if ((i & 3) != 3)
- {
- lbmode[i+1] = best_mode;
- }
-
-}
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
#else
diff --git a/vp8/encoder/encodeintra.h b/vp8/encoder/encodeintra.h
index b8b80f176..40930bc42 100644
--- a/vp8/encoder/encodeintra.h
+++ b/vp8/encoder/encodeintra.h
@@ -17,7 +17,6 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *, MACROBLOCK *mb);
void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode);
-void vp8_update_mode_context(int *abmode, int *lbmode, int i, int best_mode);
void vp8_encode_intra4x4block_rd(const VP8_ENCODER_RTCD *, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode);
#endif
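
Note: the deleted helper maintained the above/left prediction-mode contexts for the 16 4x4 luma subblocks, which are raster ordered (i = 4*row + col, 0..15). Restating the removed body with comments makes the indexing clear:

    static void update_mode_context(int *abmode, int *lbmode, int i, int best_mode)
    {
        /* block i is the above-neighbour of block i+4,
         * except in the bottom row (i >= 12) */
        if (i < 12)
            abmode[i + 4] = best_mode;

        /* ...and the left-neighbour of block i+1,
         * except in the rightmost column ((i & 3) == 3) */
        if ((i & 3) != 3)
            lbmode[i + 1] = best_mode;
    }
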
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index 30b8c4590..463dbcaa9 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -104,7 +104,7 @@ static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
}
-void vp8_build_dcblock(MACROBLOCK *x)
+static void build_dcblock(MACROBLOCK *x)
{
short *src_diff_ptr = &x->src_diff[384];
int i;
@@ -138,7 +138,7 @@ void vp8_transform_intra_mby(MACROBLOCK *x)
}
// build dc block from 16 y dc values
- vp8_build_dcblock(x);
+ build_dcblock(x);
// do 2nd order transform on the dc block
x->short_walsh4x4(&x->block[24].src_diff[0],
@@ -147,7 +147,7 @@ void vp8_transform_intra_mby(MACROBLOCK *x)
}
-void vp8_transform_mb(MACROBLOCK *x)
+static void transform_mb(MACROBLOCK *x)
{
int i;
@@ -159,7 +159,7 @@ void vp8_transform_mb(MACROBLOCK *x)
// build dc block from 16 y dc values
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
- vp8_build_dcblock(x);
+ build_dcblock(x);
for (i = 16; i < 24; i += 2)
{
@@ -174,7 +174,8 @@ void vp8_transform_mb(MACROBLOCK *x)
}
-void vp8_transform_mby(MACROBLOCK *x)
+
+static void transform_mby(MACROBLOCK *x)
{
int i;
@@ -187,7 +188,7 @@ void vp8_transform_mby(MACROBLOCK *x)
// build dc block from 16 y dc values
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
{
- vp8_build_dcblock(x);
+ build_dcblock(x);
x->short_walsh4x4(&x->block[24].src_diff[0],
&x->block[24].coeff[0], 8);
}
@@ -255,9 +256,9 @@ static const int plane_rd_mult[4]=
Y1_RD_MULT
};
-void vp8_optimize_b(MACROBLOCK *mb, int ib, int type,
- ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
- const VP8_ENCODER_RTCD *rtcd)
+static void optimize_b(MACROBLOCK *mb, int ib, int type,
+ ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
+ const VP8_ENCODER_RTCD *rtcd)
{
BLOCK *b;
BLOCKD *d;
@@ -501,7 +502,7 @@ void vp8_optimize_b(MACROBLOCK *mb, int ib, int type,
*a = *l = (d->eob != !type);
}
-void vp8_optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
+static void optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
{
int b;
int type;
@@ -522,20 +523,20 @@ void vp8_optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
for (b = 0; b < 16; b++)
{
- vp8_optimize_b(x, b, type,
+ optimize_b(x, b, type,
ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
}
for (b = 16; b < 24; b++)
{
- vp8_optimize_b(x, b, PLANE_TYPE_UV,
+ optimize_b(x, b, PLANE_TYPE_UV,
ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
}
if (has_2nd_order)
{
b=24;
- vp8_optimize_b(x, b, PLANE_TYPE_Y2,
+ optimize_b(x, b, PLANE_TYPE_Y2,
ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
}
}
@@ -569,7 +570,7 @@ void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
for (b = 0; b < 16; b++)
{
- vp8_optimize_b(x, b, type,
+ optimize_b(x, b, type,
ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
}
@@ -577,7 +578,7 @@ void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
if (has_2nd_order)
{
b=24;
- vp8_optimize_b(x, b, PLANE_TYPE_Y2,
+ optimize_b(x, b, PLANE_TYPE_Y2,
ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
}
}
@@ -603,7 +604,7 @@ void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
for (b = 16; b < 24; b++)
{
- vp8_optimize_b(x, b, PLANE_TYPE_UV,
+ optimize_b(x, b, PLANE_TYPE_UV,
ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
}
}
@@ -615,13 +616,13 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
vp8_subtract_mb(rtcd, x);
- vp8_transform_mb(x);
+ transform_mb(x);
vp8_quantize_mb(x);
#if !(CONFIG_REALTIME_ONLY)
if (x->optimize)
- vp8_optimize_mb(x, rtcd);
+ optimize_mb(x, rtcd);
#endif
vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
@@ -638,7 +639,7 @@ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, x->src.y_buffer, x->e_mbd.predictor, x->src.y_stride);
- vp8_transform_mby(x);
+ transform_mby(x);
vp8_quantize_mby(x);
@@ -649,22 +650,6 @@ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
}
-void vp8_encode_inter16x16uv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
-{
- vp8_build_inter_predictors_mbuv(&x->e_mbd);
-
- ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
-
- vp8_transform_mbuv(x);
-
- vp8_quantize_mbuv(x);
-
- vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
- vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
-}
-
-
void vp8_encode_inter16x16uvrd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
vp8_build_inter_predictors_mbuv(&x->e_mbd);
diff --git a/vp8/encoder/encodemb.h b/vp8/encoder/encodemb.h
index 08f75c3b1..8c93aa180 100644
--- a/vp8/encoder/encodemb.h
+++ b/vp8/encoder/encodemb.h
@@ -101,9 +101,6 @@ void vp8_build_dcblock(MACROBLOCK *b);
void vp8_transform_mb(MACROBLOCK *mb);
void vp8_transform_mbuv(MACROBLOCK *x);
void vp8_transform_intra_mby(MACROBLOCK *x);
-void Encode16x16Y(MACROBLOCK *x);
-void Encode16x16UV(MACROBLOCK *x);
-void vp8_encode_inter16x16uv(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp8_encode_inter16x16uvrd(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp8_optimize_mby(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_optimize_mbuv(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
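
Note: for context on the renamed helpers, build_dcblock() gathers the DC terms of the 16 4x4 luma transforms into block 24 (the second-order Y2 block), which then gets the 4x4 Walsh-Hadamard, as the transform_*() hunks above show. A sketch of that gather, assuming the usual VP8 buffer layout (the loop body is not part of this diff):

    static void build_dcblock(MACROBLOCK *x)
    {
        short *y2 = &x->src_diff[384];      /* block 24: second-order block */
        int i;

        for (i = 0; i < 16; i++)
            y2[i] = x->block[i].coeff[0];   /* DC of each 4x4 luma block */
    }
    /* then, as in transform_intra_mby():
     * x->short_walsh4x4(&x->block[24].src_diff[0],
     *                   &x->block[24].coeff[0], 8); */
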
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 774d9b6b5..6c9433b5f 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -67,7 +67,7 @@ static int vscale_lookup[7] = {0, 1, 1, 2, 2, 3, 3};
static int hscale_lookup[7] = {0, 0, 1, 1, 2, 2, 3};
-const int cq_level[QINDEX_RANGE] =
+static const int cq_level[QINDEX_RANGE] =
{
0,0,1,1,2,3,3,4,4,5,6,6,7,8,8,9,
9,10,11,11,12,13,13,14,15,15,16,17,17,18,19,20,
@@ -79,10 +79,9 @@ const int cq_level[QINDEX_RANGE] =
86,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100
};
-void vp8_find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame);
-int vp8_input_stats(VP8_COMP *cpi, FIRSTPASS_STATS *fps);
+static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame);
-int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
+static int encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
{
int i;
@@ -146,7 +145,7 @@ static double calculate_modified_err(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
/*start_pos = cpi->stats_in;
sum_iiratio = 0.0;
i = 0;
- while ( (i < 1) && vp8_input_stats(cpi,&next_frame) != EOF )
+ while ( (i < 1) && input_stats(cpi,&next_frame) != EOF )
{
next_iiratio = next_frame.intra_error / DOUBLE_DIVIDE_CHECK(next_frame.coded_error);
@@ -212,7 +211,7 @@ static const double weight_table[256] = {
1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000
};
-double vp8_simple_weight(YV12_BUFFER_CONFIG *source)
+static double simple_weight(YV12_BUFFER_CONFIG *source)
{
int i, j;
@@ -240,7 +239,7 @@ double vp8_simple_weight(YV12_BUFFER_CONFIG *source)
// This function returns the current per frame maximum bitrate target
-int frame_max_bits(VP8_COMP *cpi)
+static int frame_max_bits(VP8_COMP *cpi)
{
// Max allocation for a single frame based on the max section guidelines passed in and how many bits are left
int max_bits;
@@ -281,9 +280,9 @@ int frame_max_bits(VP8_COMP *cpi)
}
-void vp8_output_stats(const VP8_COMP *cpi,
- struct vpx_codec_pkt_list *pktlist,
- FIRSTPASS_STATS *stats)
+static void output_stats(const VP8_COMP *cpi,
+ struct vpx_codec_pkt_list *pktlist,
+ FIRSTPASS_STATS *stats)
{
struct vpx_codec_cx_pkt pkt;
pkt.kind = VPX_CODEC_STATS_PKT;
@@ -323,7 +322,7 @@ void vp8_output_stats(const VP8_COMP *cpi,
#endif
}
-int vp8_input_stats(VP8_COMP *cpi, FIRSTPASS_STATS *fps)
+static int input_stats(VP8_COMP *cpi, FIRSTPASS_STATS *fps)
{
if (cpi->stats_in >= cpi->stats_in_end)
return EOF;
@@ -333,7 +332,7 @@ int vp8_input_stats(VP8_COMP *cpi, FIRSTPASS_STATS *fps)
return 1;
}
-void vp8_zero_stats(FIRSTPASS_STATS *section)
+static void zero_stats(FIRSTPASS_STATS *section)
{
section->frame = 0.0;
section->intra_error = 0.0;
@@ -353,7 +352,7 @@ void vp8_zero_stats(FIRSTPASS_STATS *section)
section->count = 0.0;
section->duration = 1.0;
}
-void vp8_accumulate_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame)
+static void accumulate_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame)
{
section->frame += frame->frame;
section->intra_error += frame->intra_error;
@@ -373,7 +372,7 @@ void vp8_accumulate_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame)
section->count += frame->count;
section->duration += frame->duration;
}
-void vp8_avg_stats(FIRSTPASS_STATS *section)
+static void avg_stats(FIRSTPASS_STATS *section)
{
if (section->count < 1.0)
return;
@@ -397,15 +396,15 @@ void vp8_avg_stats(FIRSTPASS_STATS *section)
void vp8_init_first_pass(VP8_COMP *cpi)
{
- vp8_zero_stats(cpi->total_stats);
+ zero_stats(cpi->total_stats);
}
void vp8_end_first_pass(VP8_COMP *cpi)
{
- vp8_output_stats(cpi, cpi->output_pkt_list, cpi->total_stats);
+ output_stats(cpi, cpi->output_pkt_list, cpi->total_stats);
}
-void vp8_zz_motion_search( VP8_COMP *cpi, MACROBLOCK * x, YV12_BUFFER_CONFIG * recon_buffer, int * best_motion_err, int recon_yoffset )
+static void zz_motion_search( VP8_COMP *cpi, MACROBLOCK * x, YV12_BUFFER_CONFIG * recon_buffer, int * best_motion_err, int recon_yoffset )
{
MACROBLOCKD * const xd = & x->e_mbd;
BLOCK *b = &x->block[0];
@@ -424,7 +423,7 @@ void vp8_zz_motion_search( VP8_COMP *cpi, MACROBLOCK * x, YV12_BUFFER_CONFIG * r
VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16) ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
}
-void vp8_first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x, MV *ref_mv, MV *best_mv, YV12_BUFFER_CONFIG *recon_buffer, int *best_motion_err, int recon_yoffset )
+static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x, MV *ref_mv, MV *best_mv, YV12_BUFFER_CONFIG *recon_buffer, int *best_motion_err, int recon_yoffset )
{
MACROBLOCKD *const xd = & x->e_mbd;
BLOCK *b = &x->block[0];
@@ -575,7 +574,7 @@ void vp8_first_pass(VP8_COMP *cpi)
xd->left_available = (mb_col != 0);
// do intra 16x16 prediction
- this_error = vp8_encode_intra(cpi, x, use_dc_pred);
+ this_error = encode_intra(cpi, x, use_dc_pred);
// "intrapenalty" below deals with situations where the intra and inter error scores are very low (eg a plain black frame)
// We do not have special cases in first pass for 0,0 and nearest etc, so all inter modes carry an overhead cost estimate for the mv.
@@ -600,13 +599,13 @@ void vp8_first_pass(VP8_COMP *cpi)
int motion_error = INT_MAX;
// Simple 0,0 motion with no mv overhead
- vp8_zz_motion_search( cpi, x, lst_yv12, &motion_error, recon_yoffset );
+ zz_motion_search( cpi, x, lst_yv12, &motion_error, recon_yoffset );
d->bmi.mv.as_mv.row = 0;
d->bmi.mv.as_mv.col = 0;
// Test last reference frame using the previous best mv as the
// starting point (best reference) for the search
- vp8_first_pass_motion_search(cpi, x, &best_ref_mv.as_mv,
+ first_pass_motion_search(cpi, x, &best_ref_mv.as_mv,
&d->bmi.mv.as_mv, lst_yv12,
&motion_error, recon_yoffset);
@@ -614,7 +613,7 @@ void vp8_first_pass(VP8_COMP *cpi)
if (best_ref_mv.as_int)
{
tmp_err = INT_MAX;
- vp8_first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv,
+ first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv,
lst_yv12, &tmp_err, recon_yoffset);
if ( tmp_err < motion_error )
@@ -628,7 +627,7 @@ void vp8_first_pass(VP8_COMP *cpi)
// Experimental search in a second reference frame ((0,0) based only)
if (cm->current_video_frame > 1)
{
- vp8_first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv, gld_yv12, &gf_motion_error, recon_yoffset);
+ first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv, gld_yv12, &gf_motion_error, recon_yoffset);
if ((gf_motion_error < motion_error) && (gf_motion_error < this_error))
{
@@ -752,7 +751,7 @@ void vp8_first_pass(VP8_COMP *cpi)
fps.frame = cm->current_video_frame ;
fps.intra_error = intra_error >> 8;
fps.coded_error = coded_error >> 8;
- weight = vp8_simple_weight(cpi->Source);
+ weight = simple_weight(cpi->Source);
if (weight < 0.1)
@@ -796,8 +795,8 @@ void vp8_first_pass(VP8_COMP *cpi)
memcpy(cpi->this_frame_stats,
&fps,
sizeof(FIRSTPASS_STATS));
- vp8_output_stats(cpi, cpi->output_pkt_list, cpi->this_frame_stats);
- vp8_accumulate_stats(cpi->total_stats, &fps);
+ output_stats(cpi, cpi->output_pkt_list, cpi->this_frame_stats);
+ accumulate_stats(cpi->total_stats, &fps);
}
// Copy the previous Last Frame into the GF buffer if specific conditions for doing so are met
@@ -1168,7 +1167,7 @@ void vp8_init_second_pass(VP8_COMP *cpi)
double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth * cpi->oxcf.two_pass_vbrmin_section / 100);
- vp8_zero_stats(cpi->total_stats);
+ zero_stats(cpi->total_stats);
if (!cpi->stats_in_end)
return;
@@ -1202,7 +1201,7 @@ void vp8_init_second_pass(VP8_COMP *cpi)
cpi->kf_intra_err_min = KF_MB_INTRA_MIN * cpi->common.MBs;
cpi->gf_intra_err_min = GF_MB_INTRA_MIN * cpi->common.MBs;
- vp8_avg_stats(cpi->total_stats);
+ avg_stats(cpi->total_stats);
// Scan the first pass file and calculate an average Intra / Inter error score ratio for the sequence
{
@@ -1211,7 +1210,7 @@ void vp8_init_second_pass(VP8_COMP *cpi)
start_pos = cpi->stats_in; // Note starting "file" position
- while (vp8_input_stats(cpi, &this_frame) != EOF)
+ while (input_stats(cpi, &this_frame) != EOF)
{
IIRatio = this_frame.intra_error / DOUBLE_DIVIDE_CHECK(this_frame.coded_error);
IIRatio = (IIRatio < 1.0) ? 1.0 : (IIRatio > 20.0) ? 20.0 : IIRatio;
@@ -1232,7 +1231,7 @@ void vp8_init_second_pass(VP8_COMP *cpi)
cpi->modified_error_total = 0.0;
cpi->modified_error_used = 0.0;
- while (vp8_input_stats(cpi, &this_frame) != EOF)
+ while (input_stats(cpi, &this_frame) != EOF)
{
cpi->modified_error_total += calculate_modified_err(cpi, &this_frame);
}
@@ -1255,7 +1254,7 @@ void vp8_end_second_pass(VP8_COMP *cpi)
// This function gives an estimate of how badly we believe
// the prediction quality is decaying from frame to frame.
-double get_prediction_decay_rate(VP8_COMP *cpi, FIRSTPASS_STATS *next_frame)
+static double get_prediction_decay_rate(VP8_COMP *cpi, FIRSTPASS_STATS *next_frame)
{
double prediction_decay_rate;
double motion_decay;
@@ -1293,7 +1292,7 @@ double get_prediction_decay_rate(VP8_COMP *cpi, FIRSTPASS_STATS *next_frame)
// Function to test for a condition where a complex transition is followed
// by a static section. For example in slide shows where there is a fade
// between slides. This is to help with more optimal kf and gf positioning.
-BOOL detect_transition_to_still(
+static int detect_transition_to_still(
VP8_COMP *cpi,
int frame_interval,
int still_interval,
@@ -1318,7 +1317,7 @@ BOOL detect_transition_to_still(
// persists...
for ( j = 0; j < still_interval; j++ )
{
- if (EOF == vp8_input_stats(cpi, &tmp_next_frame))
+ if (EOF == input_stats(cpi, &tmp_next_frame))
break;
decay_rate = get_prediction_decay_rate(cpi, &tmp_next_frame);
@@ -1417,7 +1416,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
mod_err_per_mb_accumulator +=
mod_frame_err / DOUBLE_DIVIDE_CHECK((double)cpi->common.MBs);
- if (EOF == vp8_input_stats(cpi, &next_frame))
+ if (EOF == input_stats(cpi, &next_frame))
break;
// Accumulate motion stats.
@@ -1691,7 +1690,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
{
while (cpi->baseline_gf_interval < cpi->frames_to_key)
{
- if (EOF == vp8_input_stats(cpi, this_frame))
+ if (EOF == input_stats(cpi, this_frame))
break;
cpi->baseline_gf_interval++;
@@ -1870,16 +1869,16 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
FIRSTPASS_STATS sectionstats;
double Ratio;
- vp8_zero_stats(&sectionstats);
+ zero_stats(&sectionstats);
reset_fpf_position(cpi, start_pos);
for (i = 0 ; i < cpi->baseline_gf_interval ; i++)
{
- vp8_input_stats(cpi, &next_frame);
- vp8_accumulate_stats(&sectionstats, &next_frame);
+ input_stats(cpi, &next_frame);
+ accumulate_stats(&sectionstats, &next_frame);
}
- vp8_avg_stats(&sectionstats);
+ avg_stats(&sectionstats);
cpi->section_intra_rating =
sectionstats.intra_error /
@@ -1977,7 +1976,7 @@ void vp8_second_pass(VP8_COMP *cpi)
vp8_clear_system_state();
- if (EOF == vp8_input_stats(cpi, &this_frame))
+ if (EOF == input_stats(cpi, &this_frame))
return;
this_frame_error = this_frame.ssim_weighted_pred_err;
@@ -1998,7 +1997,7 @@ void vp8_second_pass(VP8_COMP *cpi)
{
// Define next KF group and assign bits to it
vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
- vp8_find_next_key_frame(cpi, &this_frame_copy);
+ find_next_key_frame(cpi, &this_frame_copy);
// Special case: error_resilient_mode does not make much sense for two pass with its current meaning, but this code is designed to stop
// outlandish behaviour if someone does set it when using two pass. It effectively disables GF groups.
@@ -2239,7 +2238,7 @@ static BOOL test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame, FIRST
old_boost_score = boost_score;
// Get the next frame details
- if (EOF == vp8_input_stats(cpi, &local_next_frame))
+ if (EOF == input_stats(cpi, &local_next_frame))
break;
}
@@ -2257,7 +2256,7 @@ static BOOL test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame, FIRST
return is_viable_kf;
}
-void vp8_find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
+static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
{
int i,j;
FIRSTPASS_STATS last_frame;
@@ -2317,7 +2316,7 @@ void vp8_find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
// load the next frame's stats
vpx_memcpy(&last_frame, this_frame, sizeof(*this_frame));
- vp8_input_stats(cpi, this_frame);
+ input_stats(cpi, this_frame);
// Provided that we are not at the end of the file...
if (cpi->oxcf.auto_key
@@ -2395,7 +2394,7 @@ void vp8_find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
kf_group_coded_err += tmp_frame.coded_error;
// Load the next frame's stats
- vp8_input_stats(cpi, &tmp_frame);
+ input_stats(cpi, &tmp_frame);
}
// Reset to the start of the group
@@ -2500,7 +2499,7 @@ void vp8_find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
double motion_decay;
double motion_pct;
- if (EOF == vp8_input_stats(cpi, &next_frame))
+ if (EOF == input_stats(cpi, &next_frame))
break;
if (next_frame.intra_error > cpi->kf_intra_err_min)
@@ -2535,16 +2534,16 @@ void vp8_find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
FIRSTPASS_STATS sectionstats;
double Ratio;
- vp8_zero_stats(&sectionstats);
+ zero_stats(&sectionstats);
reset_fpf_position(cpi, start_position);
for (i = 0 ; i < cpi->frames_to_key ; i++)
{
- vp8_input_stats(cpi, &next_frame);
- vp8_accumulate_stats(&sectionstats, &next_frame);
+ input_stats(cpi, &next_frame);
+ accumulate_stats(&sectionstats, &next_frame);
}
- vp8_avg_stats(&sectionstats);
+ avg_stats(&sectionstats);
cpi->section_intra_rating = sectionstats.intra_error / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
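
Note: all first-pass consumers now go through the static input_stats() / zero_stats() / accumulate_stats() / avg_stats() family. The recurring idiom, assembled from the hunks above (declarations assumed):

    FIRSTPASS_STATS frame, section;
    FIRSTPASS_STATS *start_pos = cpi->stats_in;  /* note starting "file" position */

    zero_stats(&section);
    while (input_stats(cpi, &frame) != EOF)      /* walk the whole stats buffer */
        accumulate_stats(&section, &frame);
    avg_stats(&section);                         /* normalise the sums by count */

    reset_fpf_position(cpi, start_pos);          /* rewind for the next scan */
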
diff --git a/vp8/encoder/generic/csystemdependent.c b/vp8/encoder/generic/csystemdependent.c
index 81108fe96..1d672bef9 100644
--- a/vp8/encoder/generic/csystemdependent.c
+++ b/vp8/encoder/generic/csystemdependent.c
@@ -17,8 +17,6 @@
void vp8_arch_x86_encoder_init(VP8_COMP *cpi);
void vp8_arch_arm_encoder_init(VP8_COMP *cpi);
-
-void (*vp8_fast_quantize_b)(BLOCK *b, BLOCKD *d);
extern void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d);
void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index c210c1de2..de6642b75 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -43,7 +43,7 @@ int vp8_mv_bit_cost(MV *mv, MV *ref, int *mvcost[2], int Weight)
return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col) >> 1]) * Weight) >> 7;
}
-int vp8_mv_err_cost(MV *mv, MV *ref, int *mvcost[2], int error_per_bit)
+static int mv_err_cost(MV *mv, MV *ref, int *mvcost[2], int error_per_bit)
{
//int i;
//return ((mvcost[0][(mv->row - ref->row)>>1] + mvcost[1][(mv->col - ref->col)>>1] + 128) * error_per_bit) >> 8;
@@ -221,7 +221,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
// calculate central point error
besterr = vfp->vf(y, d->pre_stride, z, b->src_stride, &sse);
- besterr += vp8_mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
+ besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// TODO: Each subsequent iteration checks at least one point in common with the last iteration (could be 2 if diag selected)
while (--halfiters)
@@ -337,13 +337,13 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv,
// calculate central point error
bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, &sse);
- bestmse += vp8_mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
+ bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// go left then right and check error
this_mv.row = startmv.row;
this_mv.col = ((startmv.col - 8) | 4);
left = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
- left += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ left += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (left < bestmse)
{
@@ -353,7 +353,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv,
this_mv.col += 8;
right = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
- right += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ right += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (right < bestmse)
{
@@ -365,7 +365,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv,
this_mv.col = startmv.col;
this_mv.row = ((startmv.row - 8) | 4);
up = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
- up += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ up += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (up < bestmse)
{
@@ -375,7 +375,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv,
this_mv.row += 8;
down = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
- down += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ down += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (down < bestmse)
{
@@ -415,7 +415,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv,
break;
}
- diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (diag < bestmse)
{
@@ -451,7 +451,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv,
left = vfp->svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);
}
- left += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ left += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (left < bestmse)
{
@@ -461,7 +461,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv,
this_mv.col += 4;
right = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
- right += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ right += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (right < bestmse)
{
@@ -483,7 +483,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv,
up = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
}
- up += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ up += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (up < bestmse)
{
@@ -493,7 +493,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv,
this_mv.row += 4;
down = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
- down += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ down += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (down < bestmse)
{
@@ -582,7 +582,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv,
break;
}
- diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (diag < bestmse)
{
@@ -621,13 +621,13 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestm
// calculate central point error
bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, &sse);
- bestmse += vp8_mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
+ bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// go left then right and check error
this_mv.row = startmv.row;
this_mv.col = ((startmv.col - 8) | 4);
left = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
- left += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ left += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (left < bestmse)
{
@@ -637,7 +637,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestm
this_mv.col += 8;
right = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
- right += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ right += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (right < bestmse)
{
@@ -649,7 +649,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestm
this_mv.col = startmv.col;
this_mv.row = ((startmv.row - 8) | 4);
up = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
- up += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ up += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (up < bestmse)
{
@@ -659,7 +659,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestm
this_mv.row += 8;
down = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
- down += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ down += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (down < bestmse)
{
@@ -697,7 +697,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestm
break;
}
- diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (diag < bestmse)
{
@@ -709,7 +709,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestm
this_mv.col = (this_mv.col - 8) | 4;
this_mv.row = (this_mv.row - 8) | 4;
diag = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
- diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (diag < bestmse)
{
@@ -719,7 +719,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestm
this_mv.col += 8;
diag = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
- diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (diag < bestmse)
{
@@ -730,7 +730,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestm
this_mv.col = (this_mv.col - 8) | 4;
this_mv.row = startmv.row + 4;
diag = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
- diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (diag < bestmse)
{
@@ -740,7 +740,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestm
this_mv.col += 8;
diag = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);
- diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (diag < bestmse)
{
@@ -894,7 +894,7 @@ cal_neighbors:
best_mv->row = br;
best_mv->col = bc;
- return vfp->vf(src, src_stride, PRE(br, bc), d->pre_stride, &thiserr) + vp8_mv_err_cost(best_mv, center_mv, mvcost, error_per_bit) ;
+ return vfp->vf(src, src_stride, PRE(br, bc), d->pre_stride, &thiserr) + mv_err_cost(best_mv, center_mv, mvcost, error_per_bit) ;
}
#undef MVC
#undef PRE
@@ -955,7 +955,7 @@ int vp8_diamond_search_sad
(ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
{
// Check the starting position
- bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
+ bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
}
// search_param determines the length of the initial step and hence the number of iterations
@@ -986,7 +986,7 @@ int vp8_diamond_search_sad
{
this_mv.row = this_row_offset << 3;
this_mv.col = this_col_offset << 3;
- thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
+ thissad += mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
{
@@ -1017,7 +1017,7 @@ int vp8_diamond_search_sad
return INT_MAX;
return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
- + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
+ + mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
}
int vp8_diamond_search_sadx4
@@ -1071,7 +1071,7 @@ int vp8_diamond_search_sadx4
(ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
{
// Check the starting position
- bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
+ bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
}
// search_param determines the length of the initial step and hence the number of iterations
@@ -1113,7 +1113,7 @@ int vp8_diamond_search_sadx4
{
this_mv.row = (best_mv->row + ss[i].mv.row) << 3;
this_mv.col = (best_mv->col + ss[i].mv.col) << 3;
- sad_array[t] += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
+ sad_array[t] += mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
if (sad_array[t] < bestsad)
{
@@ -1142,7 +1142,7 @@ int vp8_diamond_search_sadx4
{
this_mv.row = this_row_offset << 3;
this_mv.col = this_col_offset << 3;
- thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
+ thissad += mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
{
@@ -1173,7 +1173,7 @@ int vp8_diamond_search_sadx4
return INT_MAX;
return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
- + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
+ + mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
}
@@ -1215,8 +1215,8 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int erro
{
// Baseline value at the centre
- //bestsad = fn_ptr->sf( what,what_stride,bestaddress,in_what_stride) + (int)sqrt(vp8_mv_err_cost(ref_mv,ref_mv, mvcost,error_per_bit*14));
- bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
+ //bestsad = fn_ptr->sf( what,what_stride,bestaddress,in_what_stride) + (int)sqrt(mv_err_cost(ref_mv,ref_mv, mvcost,error_per_bit*14));
+ bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
}
// Apply further limits to prevent us from searching using vectors that stretch beyond the UMV border
@@ -1242,9 +1242,9 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int erro
thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
this_mv.col = c << 3;
- //thissad += (int)sqrt(vp8_mv_err_cost(&this_mv,ref_mv, mvcost,error_per_bit*14));
+ //thissad += (int)sqrt(mv_err_cost(&this_mv,ref_mv, mvcost,error_per_bit*14));
//thissad += error_per_bit * mv_bits_sadcost[mv_bits(&this_mv, ref_mv, mvcost)];
- thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit); //mv_bits(error_per_bit, &this_mv, ref_mv, mvsadcost);
+ thissad += mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit); //mv_bits(error_per_bit, &this_mv, ref_mv, mvsadcost);
if (thissad < bestsad)
{
@@ -1263,7 +1263,7 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int erro
if (bestsad < INT_MAX)
return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
- + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
+ + mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
else
return INT_MAX;
}
@@ -1306,7 +1306,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
(ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
{
// Baseline value at the centre
- bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
+ bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
}
// Apply further limits to prevent us from searching using vectors that stretch beyond the UMV border
@@ -1341,7 +1341,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
if (thissad < bestsad)
{
this_mv.col = c << 3;
- thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
+ thissad += mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
{
@@ -1364,7 +1364,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
if (thissad < bestsad)
{
this_mv.col = c << 3;
- thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
+ thissad += mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
{
@@ -1386,7 +1386,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
if (bestsad < INT_MAX)
return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
- + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
+ + mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
else
return INT_MAX;
}
@@ -1430,7 +1430,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
(ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
{
// Baseline value at the centre
- bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
+ bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
}
// Apply further limits to prevent us from searching using vectors that stretch beyond the UMV border
@@ -1465,7 +1465,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
if (thissad < bestsad)
{
this_mv.col = c << 3;
- thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
+ thissad += mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
{
@@ -1494,7 +1494,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
if (thissad < bestsad)
{
this_mv.col = c << 3;
- thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
+ thissad += mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
{
@@ -1517,7 +1517,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
if (thissad < bestsad)
{
this_mv.col = c << 3;
- thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
+ thissad += mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
{
@@ -1538,7 +1538,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
if (bestsad < INT_MAX)
return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
- + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
+ + mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
else
return INT_MAX;
}
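
Note: every candidate in these searches is scored as distortion plus mv_err_cost(). Only the call sites appear in the diff; a sketch of the rate term, modelled on vp8_mv_bit_cost() at the top of the file (the real function's rounding may differ):

    static int mv_err_cost(MV *mv, MV *ref, int *mvcost[2], int error_per_bit)
    {
        /* mvcost[] is indexed by the half-pel component delta; scale the
         * bit cost of the mv difference into the same units as SAD/variance */
        return ((mvcost[0][(mv->row - ref->row) >> 1] +
                 mvcost[1][(mv->col - ref->col) >> 1]) * error_per_bit) >> 7;
    }
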
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 8965634fe..931c51a66 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -70,7 +70,6 @@ extern void vp8_yv12_copy_src_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_
int vp8_estimate_entropy_savings(VP8_COMP *cpi);
int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
-int vp8_calc_low_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi);
@@ -261,7 +260,7 @@ static void setup_features(VP8_COMP *cpi)
}
-void vp8_dealloc_compressor_data(VP8_COMP *cpi)
+static void dealloc_compressor_data(VP8_COMP *cpi)
{
vpx_free(cpi->tplist);
cpi->tplist = NULL;
@@ -1453,7 +1452,7 @@ rescale(int val, int num, int denom)
}
-void vp8_init_config(VP8_PTR ptr, VP8_CONFIG *oxcf)
+static void init_config(VP8_PTR ptr, VP8_CONFIG *oxcf)
{
VP8_COMP *cpi = (VP8_COMP *)(ptr);
VP8_COMMON *cm = &cpi->common;
@@ -1844,7 +1843,7 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
vp8_create_common(&cpi->common);
vp8_cmachine_specific_config(cpi);
- vp8_init_config((VP8_PTR)cpi, oxcf);
+ init_config((VP8_PTR)cpi, oxcf);
memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob));
cpi->common.current_video_frame = 0;
@@ -2345,7 +2344,7 @@ void vp8_remove_compressor(VP8_PTR *ptr)
vp8cx_remove_encoder_threads(cpi);
#endif
- vp8_dealloc_compressor_data(cpi);
+ dealloc_compressor_data(cpi);
vpx_free(cpi->mb.ss);
vpx_free(cpi->tok);
vpx_free(cpi->cyclic_refresh_map);
@@ -4593,18 +4592,8 @@ static void encode_frame_to_data_rate
}
-int vp8_is_gf_update_needed(VP8_PTR ptr)
-{
- VP8_COMP *cpi = (VP8_COMP *) ptr;
- int ret_val;
-
- ret_val = cpi->gf_update_recommended;
- cpi->gf_update_recommended = 0;
-
- return ret_val;
-}
-void vp8_check_gf_quality(VP8_COMP *cpi)
+static void check_gf_quality(VP8_COMP *cpi)
{
VP8_COMMON *cm = &cpi->common;
int gf_active_pct = (100 * cpi->gf_active_count) / (cm->mb_rows * cm->mb_cols);
@@ -4853,7 +4842,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
if (start_frame < 0)
start_frame += cpi->oxcf.lag_in_frames;
- besterr = vp8_calc_low_ss_err(&cpi->src_buffer[cpi->last_alt_ref_sei].source_buffer,
+ besterr = calc_low_ss_err(&cpi->src_buffer[cpi->last_alt_ref_sei].source_buffer,
&cpi->src_buffer[start_frame].source_buffer, IF_RTCD(&cpi->rtcd.variance));
for (i = 0; i < 7; i++)
@@ -4862,7 +4851,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
cpi->oxcf.arnr_strength = i;
vp8_temporal_filter_prepare_c(cpi);
- thiserr = vp8_calc_low_ss_err(&cpi->alt_ref_buffer.source_buffer,
+ thiserr = calc_low_ss_err(&cpi->alt_ref_buffer.source_buffer,
&cpi->src_buffer[start_frame].source_buffer, IF_RTCD(&cpi->rtcd.variance));
if (10 * thiserr < besterr * 8)
@@ -5005,7 +4994,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
if (cpi->compressor_speed == 2)
{
- vp8_check_gf_quality(cpi);
+ check_gf_quality(cpi);
vpx_usec_timer_start(&tsctimer);
vpx_usec_timer_start(&ticktimer);
}
@@ -5384,7 +5373,9 @@ int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const
return Total;
}
-int vp8_calc_low_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd)
+
+
+static int calc_low_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd)
{
int i, j;
int Total = 0;
@@ -5412,11 +5403,7 @@ int vp8_calc_low_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, co
return Total;
}
-int vp8_get_speed(VP8_PTR c)
-{
- VP8_COMP *cpi = (VP8_COMP *) c;
- return cpi->Speed;
-}
+
int vp8_get_quantizer(VP8_PTR c)
{
VP8_COMP *cpi = (VP8_COMP *) c;
diff --git a/vp8/encoder/psnr.c b/vp8/encoder/psnr.c
index 96b0ea890..5119bb8aa 100644
--- a/vp8/encoder/psnr.c
+++ b/vp8/encoder/psnr.c
@@ -29,89 +29,3 @@ double vp8_mse2psnr(double Samples, double Peak, double Mse)
return psnr;
}
-
-double vp8_calc_psnr(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, double *YPsnr, double *UPsnr, double *VPsnr, double *sq_error)
-{
- int i, j;
- int Diff;
- double frame_psnr;
- double Total;
- double grand_total;
- unsigned char *src = source->y_buffer;
- unsigned char *dst = dest->y_buffer;
-
- Total = 0.0;
- grand_total = 0.0;
-
- // Loop through the Y plane raw and reconstruction data, summing the squared differences
- for (i = 0; i < source->y_height; i++)
- {
-
- for (j = 0; j < source->y_width; j++)
- {
- Diff = (int)(src[j]) - (int)(dst[j]);
- Total += Diff * Diff;
- }
-
- src += source->y_stride;
- dst += dest->y_stride;
- }
-
- // Work out Y PSNR
- *YPsnr = vp8_mse2psnr(source->y_height * source->y_width, 255.0, Total);
- grand_total += Total;
- Total = 0;
-
-
- // Loop through the U plane
- src = source->u_buffer;
- dst = dest->u_buffer;
-
- for (i = 0; i < source->uv_height; i++)
- {
-
- for (j = 0; j < source->uv_width; j++)
- {
- Diff = (int)(src[j]) - (int)(dst[j]);
- Total += Diff * Diff;
- }
-
- src += source->uv_stride;
- dst += dest->uv_stride;
- }
-
- // Work out U PSNR
- *UPsnr = vp8_mse2psnr(source->uv_height * source->uv_width, 255.0, Total);
- grand_total += Total;
- Total = 0;
-
-
- // V PSNR
- src = source->v_buffer;
- dst = dest->v_buffer;
-
- for (i = 0; i < source->uv_height; i++)
- {
-
- for (j = 0; j < source->uv_width; j++)
- {
- Diff = (int)(src[j]) - (int)(dst[j]);
- Total += Diff * Diff;
- }
-
- src += source->uv_stride;
- dst += dest->uv_stride;
- }
-
- // Work out UV PSNR
- *VPsnr = vp8_mse2psnr(source->uv_height * source->uv_width, 255.0, Total);
- grand_total += Total;
- Total = 0;
-
- // Work out total PSNR
- frame_psnr = vp8_mse2psnr(source->y_height * source->y_width * 3 / 2 , 255.0, grand_total);
-
- *sq_error = 1.0 * grand_total;
-
- return frame_psnr;
-}
diff --git a/vp8/encoder/psnr.h b/vp8/encoder/psnr.h
index 8ae444823..7f6269abe 100644
--- a/vp8/encoder/psnr.h
+++ b/vp8/encoder/psnr.h
@@ -13,6 +13,5 @@
#define __INC_PSNR_H
extern double vp8_mse2psnr(double Samples, double Peak, double Mse);
-extern double vp8_calc_psnr(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, double *YPsnr, double *UPsnr, double *VPsnr, double *sq_error);
#endif
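
Note: with vp8_calc_psnr() gone, psnr.c keeps only the MSE-to-PSNR conversion. For reference, the deleted code fed it per-plane sums of squared differences, so the helper computes the standard formula (the zero-MSE cap below is an assumption, not taken from this diff):

    #include <math.h>

    /* PSNR = 10 * log10(Peak^2 / (Mse / Samples)); callers passed
     * vp8_mse2psnr(height * width, 255.0, sum_of_squared_diffs) */
    double mse2psnr(double Samples, double Peak, double Mse)    /* sketch */
    {
        if (Mse <= 0.0)
            return 60.0;    /* cap for identical frames (assumed value) */
        return 10.0 * log10(Peak * Peak * Samples / Mse);
    }
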
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index 9797f5f25..e2c6327ef 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -90,7 +90,7 @@ const int vp8_bits_per_mb[2][QINDEX_RANGE] =
}
};
-const int vp8_kf_boost_qadjustment[QINDEX_RANGE] =
+static const int kf_boost_qadjustment[QINDEX_RANGE] =
{
128, 129, 130, 131, 132, 133, 134, 135,
136, 137, 138, 139, 140, 141, 142, 143,
@@ -154,7 +154,7 @@ const int vp8_gf_boost_qadjustment[QINDEX_RANGE] =
};
*/
-const int vp8_kf_gf_boost_qlimits[QINDEX_RANGE] =
+static const int kf_gf_boost_qlimits[QINDEX_RANGE] =
{
150, 155, 160, 165, 170, 175, 180, 185,
190, 195, 200, 205, 210, 215, 220, 225,
@@ -175,14 +175,14 @@ const int vp8_kf_gf_boost_qlimits[QINDEX_RANGE] =
};
// % adjustment to target kf size based on separation from previous frame
-const int vp8_kf_boost_seperationt_adjustment[16] =
+static const int kf_boost_seperation_adjustment[16] =
{
30, 40, 50, 55, 60, 65, 70, 75,
80, 85, 90, 95, 100, 100, 100, 100,
};
-const int vp8_gf_adjust_table[101] =
+static const int gf_adjust_table[101] =
{
100,
115, 130, 145, 160, 175, 190, 200, 210, 220, 230,
@@ -197,13 +197,13 @@ const int vp8_gf_adjust_table[101] =
400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
};
-const int vp8_gf_intra_useage_adjustment[20] =
+static const int gf_intra_usage_adjustment[20] =
{
125, 120, 115, 110, 105, 100, 95, 85, 80, 75,
70, 65, 60, 55, 50, 50, 50, 50, 50, 50,
};
-const int vp8_gf_interval_table[101] =
+static const int gf_interval_table[101] =
{
7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
@@ -353,7 +353,7 @@ void vp8_calc_auto_iframe_target_size(VP8_COMP *cpi)
kf_boost = (int)(2 * cpi->output_frame_rate - 16);
// adjustment up based on q
- kf_boost = kf_boost * vp8_kf_boost_qadjustment[cpi->ni_av_qi] / 100;
+ kf_boost = kf_boost * kf_boost_qadjustment[cpi->ni_av_qi] / 100;
// frame separation adjustment ( down)
if (cpi->frames_since_key < cpi->output_frame_rate / 2)
@@ -488,10 +488,10 @@ static void calc_gf_params(VP8_COMP *cpi)
Boost = GFQ_ADJUSTMENT;
// Adjust based upon most recently measured intra usage
- Boost = Boost * vp8_gf_intra_useage_adjustment[(cpi->this_frame_percent_intra < 15) ? cpi->this_frame_percent_intra : 14] / 100;
+ Boost = Boost * gf_intra_usage_adjustment[(cpi->this_frame_percent_intra < 15) ? cpi->this_frame_percent_intra : 14] / 100;
// Adjust gf boost based upon GF usage since last GF
- Boost = Boost * vp8_gf_adjust_table[gf_frame_useage] / 100;
+ Boost = Boost * gf_adjust_table[gf_frame_useage] / 100;
#endif
}
@@ -503,8 +503,8 @@ static void calc_gf_params(VP8_COMP *cpi)
}
// Apply an upper limit based on Q for 1 pass encodes
- if (Boost > vp8_kf_gf_boost_qlimits[Q] && (cpi->pass == 0))
- Boost = vp8_kf_gf_boost_qlimits[Q];
+ if (Boost > kf_gf_boost_qlimits[Q] && (cpi->pass == 0))
+ Boost = kf_gf_boost_qlimits[Q];
// Apply lower limits to boost.
else if (Boost < 110)
@@ -539,8 +539,8 @@ static void calc_gf_params(VP8_COMP *cpi)
if (cpi->last_boost >= 1500)
cpi->frames_till_gf_update_due ++;
- if (vp8_gf_interval_table[gf_frame_useage] > cpi->frames_till_gf_update_due)
- cpi->frames_till_gf_update_due = vp8_gf_interval_table[gf_frame_useage];
+ if (gf_interval_table[gf_frame_useage] > cpi->frames_till_gf_update_due)
+ cpi->frames_till_gf_update_due = gf_interval_table[gf_frame_useage];
if (cpi->frames_till_gf_update_due > cpi->max_gf_interval)
cpi->frames_till_gf_update_due = cpi->max_gf_interval;
@@ -594,17 +594,17 @@ void vp8_calc_iframe_target_size(VP8_COMP *cpi)
// between key frames.
// Adjust boost based upon ambient Q
- Boost = vp8_kf_boost_qadjustment[Q];
+ Boost = kf_boost_qadjustment[Q];
// Make the Key frame boost less if the separation from the previous key frame is small
if (cpi->frames_since_key < 16)
- Boost = Boost * vp8_kf_boost_seperationt_adjustment[cpi->frames_since_key] / 100;
+ Boost = Boost * kf_boost_seperation_adjustment[cpi->frames_since_key] / 100;
else
- Boost = Boost * vp8_kf_boost_seperationt_adjustment[15] / 100;
+ Boost = Boost * kf_boost_seperation_adjustment[15] / 100;
// Apply limits on boost
- if (Boost > vp8_kf_gf_boost_qlimits[Q])
- Boost = vp8_kf_gf_boost_qlimits[Q];
+ if (Boost > kf_gf_boost_qlimits[Q])
+ Boost = kf_gf_boost_qlimits[Q];
else if (Boost < 120)
Boost = 120;
}
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index c706c575f..a125cc481 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -53,7 +53,7 @@ extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
-const int vp8_auto_speed_thresh[17] =
+static const int auto_speed_thresh[17] =
{
1000,
200,
@@ -353,7 +353,7 @@ void vp8_auto_select_speed(VP8_COMP *cpi)
}
}
- if (milliseconds_for_compress * 100 > cpi->avg_encode_time * vp8_auto_speed_thresh[cpi->Speed])
+ if (milliseconds_for_compress * 100 > cpi->avg_encode_time * auto_speed_thresh[cpi->Speed])
{
cpi->Speed -= 1;
cpi->avg_pick_mode_time = 0;
@@ -1000,13 +1000,6 @@ static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x, int const *labels
return distortion;
}
-unsigned char vp8_mbsplit_offset2[4][16] = {
- { 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- { 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- { 0, 2, 8, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
-};
-
static const unsigned int segmentation_to_sseshift[4] = {3, 3, 2, 0};
@@ -1034,8 +1027,8 @@ typedef struct
} BEST_SEG_INFO;
-void vp8_rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, BEST_SEG_INFO *bsi,
- unsigned int segmentation)
+static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
+ BEST_SEG_INFO *bsi, unsigned int segmentation)
{
int i;
int const *labels;
@@ -1153,7 +1146,7 @@ void vp8_rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, BEST_SEG_INFO *bsi,
int sadpb = x->sadperbit4;
// find first label
- n = vp8_mbsplit_offset2[segmentation][i];
+ n = vp8_mbsplit_offset[segmentation][i];
c = &x->block[n];
e = &x->e_mbd.block[n];
@@ -1332,16 +1325,16 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
{
/* for now, we will keep the original segmentation order
when in best quality mode */
- vp8_rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
- vp8_rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
- vp8_rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
- vp8_rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
+ rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
+ rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
}
else
{
int sr;
- vp8_rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
if (bsi.segment_rd < best_rd)
{
@@ -1380,7 +1373,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
sr = MAXF((abs(bsi.sv_mvp[1].row - bsi.sv_mvp[3].row))>>3, (abs(bsi.sv_mvp[1].col - bsi.sv_mvp[3].col))>>3);
vp8_cal_step_param(sr, &bsi.sv_istep[1]);
- vp8_rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
}
/* block 16X8 */
@@ -1391,7 +1384,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
sr = MAXF((abs(bsi.sv_mvp[2].row - bsi.sv_mvp[3].row))>>3, (abs(bsi.sv_mvp[2].col - bsi.sv_mvp[3].col))>>3);
vp8_cal_step_param(sr, &bsi.sv_istep[1]);
- vp8_rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
+ rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
}
/* If 8x8 is better than 16x8/8x16, then do 4x4 search */
@@ -1399,7 +1392,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8) /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
{
bsi.mvp = &bsi.sv_mvp[0];
- vp8_rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
+ rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
}
/* restore UMV window */
@@ -1432,7 +1425,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
{
int j;
- j = vp8_mbsplit_offset2[bsi.segment_num][i];
+ j = vp8_mbsplit_offset[bsi.segment_num][i];
x->partition_info->bmi[i].mode = x->e_mbd.block[j].bmi.mode;
x->partition_info->bmi[i].mv.as_mv = x->e_mbd.block[j].bmi.mv.as_mv;
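
Note: vp8_mbsplit_offset2 was a duplicate of the common vp8_mbsplit_offset table, which maps each segment of a macroblock split to the raster index (0..15, four per row) of its first 4x4 block. Reading the deleted copy:

    /* 16x8 split: segments start at blocks 0 and 8 (top/bottom halves)
     * 8x16 split: segments start at blocks 0 and 2 (left/right halves)
     * 8x8  split: segments start at blocks 0, 2, 8, 10
     * 4x4  split: all 16 blocks are their own segment */
    n = vp8_mbsplit_offset[segmentation][i];   /* first block of segment i */
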
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index 5e018634f..e3f423f8a 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -26,9 +26,9 @@ _int64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [vp8_coef
void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
void vp8_fix_contexts(MACROBLOCKD *x);
-TOKENVALUE vp8_dct_value_tokens[DCT_MAX_VALUE*2];
+static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE*2];
const TOKENVALUE *vp8_dct_value_tokens_ptr;
-int vp8_dct_value_cost[DCT_MAX_VALUE*2];
+static int dct_value_cost[DCT_MAX_VALUE*2];
const int *vp8_dct_value_cost_ptr;
#if 0
int skip_true_count = 0;
@@ -37,7 +37,7 @@ int skip_false_count = 0;
static void fill_value_tokens()
{
- TOKENVALUE *const t = vp8_dct_value_tokens + DCT_MAX_VALUE;
+ TOKENVALUE *const t = dct_value_tokens + DCT_MAX_VALUE;
vp8_extra_bit_struct *const e = vp8_extra_bits;
int i = -DCT_MAX_VALUE;
@@ -81,7 +81,7 @@ static void fill_value_tokens()
cost += vp8_treed_cost(p->tree, p->prob, extra >> 1, Length);
cost += vp8_cost_bit(vp8_prob_half, extra & 1); /* sign */
- vp8_dct_value_cost[i + DCT_MAX_VALUE] = cost;
+ dct_value_cost[i + DCT_MAX_VALUE] = cost;
}
}
@@ -89,8 +89,8 @@ static void fill_value_tokens()
}
while (++i < DCT_MAX_VALUE);
- vp8_dct_value_tokens_ptr = vp8_dct_value_tokens + DCT_MAX_VALUE;
- vp8_dct_value_cost_ptr = vp8_dct_value_cost + DCT_MAX_VALUE;
+ vp8_dct_value_tokens_ptr = dct_value_tokens + DCT_MAX_VALUE;
+ vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
}
static void tokenize2nd_order_b
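
Note: the token and cost tables become static, but other translation units still read them through the exported pointers, which are biased by DCT_MAX_VALUE so that a signed coefficient value indexes them directly:

    /* dct_value_cost[] spans values -DCT_MAX_VALUE .. DCT_MAX_VALUE-1;
     * the public pointer sits at the midpoint */
    static int dct_value_cost[DCT_MAX_VALUE * 2];
    const int *vp8_dct_value_cost_ptr;   /* = dct_value_cost + DCT_MAX_VALUE */

    /* hypothetical use from another file: v may be negative */
    int coeff_cost = vp8_dct_value_cost_ptr[v];
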
diff --git a/vp8/encoder/variance_c.c b/vp8/encoder/variance_c.c
index 95ec96cec..ede07c8db 100644
--- a/vp8/encoder/variance_c.c
+++ b/vp8/encoder/variance_c.c
@@ -10,33 +10,8 @@
#include "variance.h"
+#include "vp8/common/filter.h"
-const int vp8_six_tap[8][6] =
-{
- { 0, 0, 128, 0, 0, 0 }, // note that 1/8 pel positions are just as per alpha -0.5 bicubic
- { 0, -6, 123, 12, -1, 0 },
- { 2, -11, 108, 36, -8, 1 }, // New 1/4 pel 6 tap filter
- { 0, -9, 93, 50, -6, 0 },
- { 3, -16, 77, 77, -16, 3 }, // New 1/2 pel 6 tap filter
- { 0, -6, 50, 93, -9, 0 },
- { 1, -8, 36, 108, -11, 2 }, // New 1/4 pel 6 tap filter
- { 0, -1, 12, 123, -6, 0 }
-};
-
-
-const int VP8_FILTER_WEIGHT = 128;
-const int VP8_FILTER_SHIFT = 7;
-const int vp8_bilinear_taps[8][2] =
-{
- { 128, 0 },
- { 112, 16 },
- { 96, 32 },
- { 80, 48 },
- { 64, 64 },
- { 48, 80 },
- { 32, 96 },
- { 16, 112 }
-};
unsigned int vp8_get_mb_ss_c
(
@@ -56,7 +31,7 @@ unsigned int vp8_get_mb_ss_c
}
-void vp8_variance(
+static void variance(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@@ -98,7 +73,7 @@ vp8_get8x8var_c
)
{
- vp8_variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8, SSE, Sum);
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8, SSE, Sum);
return (*SSE - (((*Sum) * (*Sum)) >> 6));
}
@@ -114,7 +89,7 @@ vp8_get16x16var_c
)
{
- vp8_variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, SSE, Sum);
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, SSE, Sum);
return (*SSE - (((*Sum) * (*Sum)) >> 8));
}
@@ -132,7 +107,7 @@ unsigned int vp8_variance16x16_c(
int avg;
- vp8_variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
*sse = var;
return (var - ((avg * avg) >> 8));
}
@@ -148,7 +123,7 @@ unsigned int vp8_variance8x16_c(
int avg;
- vp8_variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 16, &var, &avg);
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 16, &var, &avg);
*sse = var;
return (var - ((avg * avg) >> 7));
}
@@ -164,7 +139,7 @@ unsigned int vp8_variance16x8_c(
int avg;
- vp8_variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 8, &var, &avg);
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 8, &var, &avg);
*sse = var;
return (var - ((avg * avg) >> 7));
}
@@ -181,7 +156,7 @@ unsigned int vp8_variance8x8_c(
int avg;
- vp8_variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8, &var, &avg);
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8, &var, &avg);
*sse = var;
return (var - ((avg * avg) >> 6));
}
@@ -197,7 +172,7 @@ unsigned int vp8_variance4x4_c(
int avg;
- vp8_variance(src_ptr, source_stride, ref_ptr, recon_stride, 4, 4, &var, &avg);
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 4, 4, &var, &avg);
*sse = var;
return (var - ((avg * avg) >> 4));
}
@@ -213,7 +188,7 @@ unsigned int vp8_mse16x16_c(
unsigned int var;
int avg;
- vp8_variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
*sse = var;
return var;
}
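
All of these helpers share one identity: with SSE the sum of squared differences and Sum their signed sum over an N-pixel block, SSE - Sum*Sum/N equals the sum of squared deviations (N times the population variance), and the right shift is log2(N): 4 for 4x4, 6 for 8x8, 7 for 8x16/16x8, 8 for 16x16. vp8_mse16x16_c simply skips the mean correction. A standalone check of the arithmetic (demo data, not encoder code):

/* Verifies SSE - (Sum*Sum >> 6) matches the sum of squared deviations
 * for an 8x8 block (N = 64), up to the flooring of the shift.
 * The residual data below is arbitrary demo input. */
#include <stdio.h>

int main(void)
{
    int d[64], i;
    long sse = 0, sum = 0;
    double dev = 0.0, mean;

    for (i = 0; i < 64; i++)
        d[i] = (i % 7) - 3;   /* arbitrary residuals */

    for (i = 0; i < 64; i++)
    {
        sse += d[i] * d[i];
        sum += d[i];
    }

    mean = (double)sum / 64;
    for (i = 0; i < 64; i++)
        dev += (d[i] - mean) * (d[i] - mean);

    /* prints "261 vs 260.86": equal up to the floor in >> 6 */
    printf("%ld vs %.2f\n", sse - ((sum * sum) >> 6), dev);
    return 0;
}
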
@@ -247,7 +222,7 @@ unsigned int vp8_mse16x16_c(
* to the next.
*
****************************************************************************/
-void vp8e_filter_block2d_bil_first_pass
+static void var_filter_block2d_bil_first_pass
(
const unsigned char *src_ptr,
unsigned short *output_ptr,
@@ -255,7 +230,7 @@ void vp8e_filter_block2d_bil_first_pass
int pixel_step,
unsigned int output_height,
unsigned int output_width,
- const int *vp8_filter
+ const short *vp8_filter
)
{
unsigned int i, j;
@@ -305,7 +280,7 @@ void vp8e_filter_block2d_bil_first_pass
* to the next.
*
****************************************************************************/
-void vp8e_filter_block2d_bil_second_pass
+static void var_filter_block2d_bil_second_pass
(
const unsigned short *src_ptr,
unsigned char *output_ptr,
@@ -313,7 +288,7 @@ void vp8e_filter_block2d_bil_second_pass
unsigned int pixel_step,
unsigned int output_height,
unsigned int output_width,
- const int *vp8_filter
+ const short *vp8_filter
)
{
unsigned int i, j;
@@ -338,52 +313,6 @@ void vp8e_filter_block2d_bil_second_pass
}
-/****************************************************************************
- *
- * ROUTINE : filter_block2d_bil
- *
- * INPUTS : UINT8 *src_ptr : Pointer to source block.
- * UINT32 src_pixels_per_line : Stride of input block.
- * INT32 *HFilter : Array of 2 horizontal filter taps.
- * INT32 *VFilter : Array of 2 vertical filter taps.
- *
- * OUTPUTS : UINT16 *output_ptr : Pointer to filtered block.
- *
- * RETURNS : void
- *
- * FUNCTION : 2-D filters an 8x8 input block by applying a 2-tap
- * bi-linear filter horizontally followed by a 2-tap
- * bi-linear filter vertically on the result.
- *
- * SPECIAL NOTES : The intermediate horizontally filtered block must produce
- * 1 more point than the input block in each column. This
- * is to ensure that the 2-tap filter has one extra data-point
- * at the top of each column so filter taps do not extend
- * beyond data. Thus the output of the first stage filter
- * is an 8x9 (hx_v) block.
- *
- ****************************************************************************/
-void vp8e_filter_block2d_bil
-(
- const unsigned char *src_ptr,
- unsigned char *output_ptr,
- unsigned int src_pixels_per_line,
- int *HFilter,
- int *VFilter
-)
-{
-
- unsigned short FData[20*16]; // Temp data bufffer used in filtering
-
- // First filter 1-D horizontally...
- vp8e_filter_block2d_bil_first_pass(src_ptr, FData, src_pixels_per_line, 1, 9, 8, HFilter);
-
- // then 1-D vertically...
- vp8e_filter_block2d_bil_second_pass(FData, output_ptr, 8, 8, 8, 8, VFilter);
-}
-
-
-
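
The deleted wrapper's comment still describes what the two renamed static passes implement: a separable 2-tap bilinear filter, run horizontally into an intermediate block one row taller than the output so the vertical taps never reach past the data, with taps summing to 128 (VP8_FILTER_WEIGHT) and a rounded shift by 7 (VP8_FILTER_SHIFT). A single-tap sketch of the arithmetic both passes apply per pixel (bil_tap is an illustrative helper, not an encoder function):

/* One 2-tap bilinear step with VP8's 128/7 weight/shift pair.
 * bil_tap() is a stand-in for the inner loop of either pass. */
#include <stdio.h>

#define FILTER_WEIGHT 128
#define FILTER_SHIFT  7

static unsigned char bil_tap(unsigned char a, unsigned char b,
                             const short *taps)
{
    /* taps sum to FILTER_WEIGHT; adding half the weight rounds */
    int v = (a * taps[0] + b * taps[1] + (FILTER_WEIGHT >> 1))
            >> FILTER_SHIFT;
    return (unsigned char)v;
}

int main(void)
{
    static const short half_pel[2] = { 64, 64 };   /* offset 4 of 8 */
    printf("%u\n", bil_tap(10, 21, half_pel));     /* avg 15.5 rounds to 16 */
    return 0;
}
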
unsigned int vp8_sub_pixel_variance4x4_c
(
const unsigned char *src_ptr,
@@ -396,17 +325,17 @@ unsigned int vp8_sub_pixel_variance4x4_c
)
{
unsigned char temp2[20*16];
- const int *HFilter, *VFilter;
+ const short *HFilter, *VFilter;
        unsigned short FData3[5*4]; // Temp data buffer used in filtering
- HFilter = vp8_bilinear_taps[xoffset];
- VFilter = vp8_bilinear_taps[yoffset];
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
        // First filter 1-D horizontally
- vp8e_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 5, 4, HFilter);
+ var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 5, 4, HFilter);
        // Now filter vertically
- vp8e_filter_block2d_bil_second_pass(FData3, temp2, 4, 4, 4, 4, VFilter);
+ var_filter_block2d_bil_second_pass(FData3, temp2, 4, 4, 4, 4, VFilter);
return vp8_variance4x4_c(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
}
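
Each sub-pixel variance function follows the same three steps: select the 2-tap rows for the fractional x/y offsets, run the two bilinear passes into a temp block, then score that block with the matching whole-pel variance. The tap table now lives in vp8/common/filter.h; its rows are the values deleted above. A sketch of just the selection step:

/* Selecting bilinear taps by fractional offset. The table values are
 * the ones removed above (shared as vp8_bilinear_filters after this
 * change); the local array here exists only for the demo. */
#include <stdio.h>

static const short bilinear_filters[8][2] =
{
    { 128,   0 }, { 112,  16 }, {  96,  32 }, {  80,  48 },
    {  64,  64 }, {  48,  80 }, {  32,  96 }, {  16, 112 }
};

int main(void)
{
    int xoffset = 4, yoffset = 2;   /* 1/2-pel in x, 1/4-pel in y */
    const short *HFilter = bilinear_filters[xoffset];
    const short *VFilter = bilinear_filters[yoffset];

    printf("H: %d %d  V: %d %d\n",
           HFilter[0], HFilter[1], VFilter[0], VFilter[1]);
    return 0;
}
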
@@ -425,13 +354,13 @@ unsigned int vp8_sub_pixel_variance8x8_c
{
    unsigned short FData3[9*8]; // Temp data buffer used in filtering
unsigned char temp2[20*16];
- const int *HFilter, *VFilter;
+ const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_taps[xoffset];
- VFilter = vp8_bilinear_taps[yoffset];
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
- vp8e_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 8, HFilter);
- vp8e_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 8, 8, VFilter);
+ var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 8, HFilter);
+ var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 8, 8, VFilter);
return vp8_variance8x8_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
}
@@ -449,13 +378,13 @@ unsigned int vp8_sub_pixel_variance16x16_c
{
    unsigned short FData3[17*16]; // Temp data buffer used in filtering
unsigned char temp2[20*16];
- const int *HFilter, *VFilter;
+ const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_taps[xoffset];
- VFilter = vp8_bilinear_taps[yoffset];
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
- vp8e_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 16, HFilter);
- vp8e_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 16, 16, VFilter);
+ var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 16, HFilter);
+ var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 16, 16, VFilter);
return vp8_variance16x16_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
}
@@ -525,13 +454,13 @@ unsigned int vp8_sub_pixel_variance16x8_c
{
    unsigned short FData3[16*9]; // Temp data buffer used in filtering
unsigned char temp2[20*16];
- const int *HFilter, *VFilter;
+ const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_taps[xoffset];
- VFilter = vp8_bilinear_taps[yoffset];
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
- vp8e_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 16, HFilter);
- vp8e_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 8, 16, VFilter);
+ var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 16, HFilter);
+ var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 8, 16, VFilter);
return vp8_variance16x8_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
}
@@ -549,15 +478,15 @@ unsigned int vp8_sub_pixel_variance8x16_c
{
    unsigned short FData3[9*16]; // Temp data buffer used in filtering
unsigned char temp2[20*16];
- const int *HFilter, *VFilter;
+ const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_taps[xoffset];
- VFilter = vp8_bilinear_taps[yoffset];
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
- vp8e_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 8, HFilter);
- vp8e_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 16, 8, VFilter);
+ var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 8, HFilter);
+ var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 16, 8, VFilter);
return vp8_variance8x16_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
}
diff --git a/vp8/encoder/x86/variance_mmx.c b/vp8/encoder/x86/variance_mmx.c
index 07358c0c7..e107cb19d 100644
--- a/vp8/encoder/x86/variance_mmx.c
+++ b/vp8/encoder/x86/variance_mmx.c
@@ -92,39 +92,6 @@ extern unsigned int vp8_get16x16pred_error_mmx
);
-void vp8_test_get_mb_ss(void)
-{
- short zz[] =
- {
- -4, -4, -4, -4, 4, 4, 4, 4, -4, -4, -4, -4, 4, 4, 4, 4,
- -2, -2, -2, -2, 2, 2, 2, 2, -2, -2, -2, -2, 2, 2, 2, 2,
- -4, -4, -4, -4, 4, 4, 4, 4, -4, -4, -4, -4, 4, 4, 4, 4,
- -4, -4, -4, -4, 4, 4, 4, 4, -4, -4, -4, -4, 4, 4, 4, 4,
- -4, -4, -4, -4, 4, 4, 4, 4, -4, -4, -4, -4, 4, 4, 4, 4,
- -4, -4, -4, -4, 4, 4, 4, 4, -4, -4, -4, -4, 4, 4, 4, 4,
- -4, -4, -4, -4, 4, 4, 4, 4, -4, -4, -4, -4, 4, 4, 4, 4,
- -4, -4, -4, -4, 4, 4, 4, 4, -4, -4, -4, -4, 4, 4, 4, 4,
- -4, -4, -4, -4, 4, 4, 4, 4, -4, -4, -4, -4, 4, 4, 4, 4,
- -4, -4, -4, -4, 4, 4, 4, 4, -4, -4, -4, -4, 4, 4, 4, 4,
- -4, -4, -4, -4, 4, 4, 4, 4, -4, -4, -4, -4, 4, 4, 4, 4,
- -4, -4, -4, -4, 4, 4, 4, 4, -4, -4, -4, -4, 4, 4, 4, 4,
- -4, -4, -4, -4, 4, 4, 4, 4, -4, -4, -4, -4, 4, 4, 4, 4,
- -3, -3, -3, -3, 3, 3, 3, 3, -3, -3, -3, -3, 3, 3, 3, 3,
- -4, -4, -4, -4, 4, 4, 4, 4, -4, -4, -4, -4, 4, 4, 4, 4,
- -4, -4, -4, -4, 4, 4, 4, 4, -4, -4, -4, -4, 4, 4, 4, 4,
- };
- int s = 0, x = vp8_get_mb_ss_mmx(zz);
- {
- int y;
-
- for (y = 0; y < 256; y++)
- s += (zz[y] * zz[y]);
- }
-
- x += 0;
-}
-
-
unsigned int vp8_get16x16var_mmx(
const unsigned char *src_ptr,
int source_stride,
diff --git a/vp8/encoder/x86/x86_csystemdependent.c b/vp8/encoder/x86/x86_csystemdependent.c
index 5ab364147..8327fad60 100644
--- a/vp8/encoder/x86/x86_csystemdependent.c
+++ b/vp8/encoder/x86/x86_csystemdependent.c
@@ -16,7 +16,7 @@
#if HAVE_MMX
-void vp8_short_fdct8x4_mmx(short *input, short *output, int pitch)
+static void short_fdct8x4_mmx(short *input, short *output, int pitch)
{
vp8_short_fdct4x4_mmx(input, output, pitch);
vp8_short_fdct4x4_mmx(input + 4, output + 16, pitch);
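
The wrapper above builds the 8x4 transform from two 4x4 calls: input + 4 steps four samples to the right, and output + 16 skips past the first block's sixteen coefficients. A shape-only sketch of that composition (dummy_fdct4x4 is an identity stand-in for the MMX kernel; pitch is in bytes, as in the encoder):

/* An 8x4 transform expressed as two 4x4 transforms, mirroring
 * short_fdct8x4_mmx's offsets. dummy_fdct4x4 is a stand-in only. */
#include <stdio.h>

static void dummy_fdct4x4(const short *input, short *output, int pitch)
{
    int r, c;
    /* identity copy in transform layout: 4x4 samples -> 16 coeffs */
    for (r = 0; r < 4; r++)
        for (c = 0; c < 4; c++)
            output[r * 4 + c] = input[r * (pitch / 2) + c];
}

static void fdct8x4(const short *input, short *output, int pitch)
{
    dummy_fdct4x4(input,     output,      pitch);  /* left 4x4  */
    dummy_fdct4x4(input + 4, output + 16, pitch);  /* right 4x4 */
}

int main(void)
{
    short in[4 * 8], out[32];
    int i;

    for (i = 0; i < 32; i++)
        in[i] = (short)i;
    fdct8x4(in, out, 16);   /* pitch in bytes: 8 shorts per row */
    printf("%d %d\n", out[0], out[16]);   /* 0 and 4: left/right origins */
    return 0;
}
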
@@ -26,7 +26,7 @@ int vp8_fast_quantize_b_impl_mmx(short *coeff_ptr, short *zbin_ptr,
short *qcoeff_ptr, short *dequant_ptr,
short *scan_mask, short *round_ptr,
short *quant_ptr, short *dqcoeff_ptr);
-void vp8_fast_quantize_b_mmx(BLOCK *b, BLOCKD *d)
+static void fast_quantize_b_mmx(BLOCK *b, BLOCKD *d)
{
short *scan_mask = vp8_default_zig_zag_mask;//d->scan_order_mask_ptr;
short *coeff_ptr = b->coeff;
@@ -51,7 +51,7 @@ void vp8_fast_quantize_b_mmx(BLOCK *b, BLOCKD *d)
}
int vp8_mbblock_error_mmx_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
-int vp8_mbblock_error_mmx(MACROBLOCK *mb, int dc)
+static int mbblock_error_mmx(MACROBLOCK *mb, int dc)
{
short *coeff_ptr = mb->block[0].coeff;
short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
@@ -59,7 +59,7 @@ int vp8_mbblock_error_mmx(MACROBLOCK *mb, int dc)
}
int vp8_mbuverror_mmx_impl(short *s_ptr, short *d_ptr);
-int vp8_mbuverror_mmx(MACROBLOCK *mb)
+static int mbuverror_mmx(MACROBLOCK *mb)
{
short *s_ptr = &mb->coeff[256];
short *d_ptr = &mb->e_mbd.dqcoeff[256];
@@ -69,7 +69,7 @@ int vp8_mbuverror_mmx(MACROBLOCK *mb)
void vp8_subtract_b_mmx_impl(unsigned char *z, int src_stride,
short *diff, unsigned char *predictor,
int pitch);
-void vp8_subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch)
+static void subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch)
{
unsigned char *z = *(be->base_src) + be->src;
unsigned int src_stride = be->src_stride;
@@ -85,7 +85,7 @@ int vp8_fast_quantize_b_impl_sse2(short *coeff_ptr,
short *qcoeff_ptr, short *dequant_ptr,
const short *inv_scan_order, short *round_ptr,
short *quant_ptr, short *dqcoeff_ptr);
-void vp8_fast_quantize_b_sse2(BLOCK *b, BLOCKD *d)
+static void fast_quantize_b_sse2(BLOCK *b, BLOCKD *d)
{
short *scan_mask = vp8_default_zig_zag_mask;//d->scan_order_mask_ptr;
short *coeff_ptr = b->coeff;
@@ -115,7 +115,7 @@ int vp8_regular_quantize_b_impl_sse2(short *coeff_ptr, short *zbin_ptr,
short *zbin_boost_ptr,
short *quant_shift_ptr);
-void vp8_regular_quantize_b_sse2(BLOCK *b,BLOCKD *d)
+static void regular_quantize_b_sse2(BLOCK *b,BLOCKD *d)
{
d->eob = vp8_regular_quantize_b_impl_sse2(b->coeff,
b->zbin,
@@ -131,7 +131,7 @@ void vp8_regular_quantize_b_sse2(BLOCK *b,BLOCKD *d)
}
int vp8_mbblock_error_xmm_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
-int vp8_mbblock_error_xmm(MACROBLOCK *mb, int dc)
+static int mbblock_error_xmm(MACROBLOCK *mb, int dc)
{
short *coeff_ptr = mb->block[0].coeff;
short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
@@ -139,7 +139,7 @@ int vp8_mbblock_error_xmm(MACROBLOCK *mb, int dc)
}
int vp8_mbuverror_xmm_impl(short *s_ptr, short *d_ptr);
-int vp8_mbuverror_xmm(MACROBLOCK *mb)
+static int mbuverror_xmm(MACROBLOCK *mb)
{
short *s_ptr = &mb->coeff[256];
short *d_ptr = &mb->e_mbd.dqcoeff[256];
@@ -149,7 +149,7 @@ int vp8_mbuverror_xmm(MACROBLOCK *mb)
void vp8_subtract_b_sse2_impl(unsigned char *z, int src_stride,
short *diff, unsigned char *predictor,
int pitch);
-void vp8_subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch)
+static void subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch)
{
unsigned char *z = *(be->base_src) + be->src;
unsigned int src_stride = be->src_stride;
@@ -165,7 +165,7 @@ int vp8_fast_quantize_b_impl_ssse3(short *coeff_ptr,
short *qcoeff_ptr, short *dequant_ptr,
short *round_ptr,
short *quant_ptr, short *dqcoeff_ptr);
-void vp8_fast_quantize_b_ssse3(BLOCK *b, BLOCKD *d)
+static void fast_quantize_b_ssse3(BLOCK *b, BLOCKD *d)
{
d->eob = vp8_fast_quantize_b_impl_ssse3(
b->coeff,
@@ -251,20 +251,20 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_mmx;
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_mmx;
- cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_mmx;
+ cpi->rtcd.fdct.short8x4 = short_fdct8x4_mmx;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_mmx;
- cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_mmx;
+ cpi->rtcd.fdct.fast8x4 = short_fdct8x4_mmx;
cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
cpi->rtcd.encodemb.berr = vp8_block_error_mmx;
- cpi->rtcd.encodemb.mberr = vp8_mbblock_error_mmx;
- cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_mmx;
- cpi->rtcd.encodemb.subb = vp8_subtract_b_mmx;
+ cpi->rtcd.encodemb.mberr = mbblock_error_mmx;
+ cpi->rtcd.encodemb.mbuverr = mbuverror_mmx;
+ cpi->rtcd.encodemb.subb = subtract_b_mmx;
cpi->rtcd.encodemb.submby = vp8_subtract_mby_mmx;
cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_mmx;
- /*cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_mmx;*/
+ /*cpi->rtcd.quantize.fastquantb = fast_quantize_b_mmx;*/
}
#endif
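
Demoting these wrappers to static is safe because nothing refers to them by name after this hunk: they are reachable only through the rtcd function-pointer table filled in at init time. A reduced sketch of that dispatch pattern (the struct and has_sse2() below are illustrative stand-ins, not the real VP8_COMP layout):

/* Runtime-selected function pointers, as in the rtcd tables.
 * Both impls compute the same result; only the table is exported. */
#include <stdio.h>

typedef void (*quantize_fn)(const short *in, short *out);

static void quantize_c(const short *in, short *out)    { *out = *in / 4; }
static void quantize_sse2(const short *in, short *out) { *out = *in / 4; }

struct rtcd { quantize_fn fastquantb; };

static int has_sse2(void) { return 0; }   /* stand-in for CPU detection */

int main(void)
{
    struct rtcd tab;
    short in = 40, out = 0;

    tab.fastquantb = has_sse2() ? quantize_sse2 : quantize_c;
    tab.fastquantb(&in, &out);   /* callers never name the static impls */
    printf("%d\n", out);
    return 0;
}
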
@@ -311,16 +311,16 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_sse2 ;
cpi->rtcd.encodemb.berr = vp8_block_error_xmm;
- cpi->rtcd.encodemb.mberr = vp8_mbblock_error_xmm;
- cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_xmm;
- cpi->rtcd.encodemb.subb = vp8_subtract_b_sse2;
+ cpi->rtcd.encodemb.mberr = mbblock_error_xmm;
+ cpi->rtcd.encodemb.mbuverr = mbuverror_xmm;
+ cpi->rtcd.encodemb.subb = subtract_b_sse2;
cpi->rtcd.encodemb.submby = vp8_subtract_mby_sse2;
cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_sse2;
#if ARCH_X86
- cpi->rtcd.quantize.quantb = vp8_regular_quantize_b_sse2;
+ cpi->rtcd.quantize.quantb = regular_quantize_b_sse2;
#endif
- cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_sse2;
+ cpi->rtcd.quantize.fastquantb = fast_quantize_b_sse2;
#if !(CONFIG_REALTIME_ONLY)
cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_sse2;
@@ -358,7 +358,7 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_ssse3;
cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_ssse3;
- cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_ssse3;
+ cpi->rtcd.quantize.fastquantb = fast_quantize_b_ssse3;
#if CONFIG_PSNR
#if ARCH_X86_64