author     John Koleszar <jkoleszar@google.com>   2011-08-11 13:01:45 -0400
committer  John Koleszar <jkoleszar@google.com>   2011-08-11 13:01:45 -0400
commit     a16cd74ba15a43cb2e13284938c8b0c51bfbf3af (patch)
tree       b49d9728ac04a8316420305b0538dac0d6db758b /vp8/encoder
parent     62400028e22b3544eac09724afe45f3cd55766a1 (diff)
parent     939f64f68efc837a98a5c80913a4bde50b79854f (diff)
Merge remote branch 'internal/upstream-experimental' into HEAD
Conflicts:
	vp8/decoder/detokenize.c
	vp8/decoder/onyxd_if.c
	vp8/vp8_common.mk

Change-Id: Ifca1108186a8bc715da86a44021ee2fa5550b5b8
Diffstat (limited to 'vp8/encoder')
-rw-r--r--  vp8/encoder/encodeframe.c     |  28
-rw-r--r--  vp8/encoder/firstpass.c       |  40
-rw-r--r--  vp8/encoder/mcomp.c           | 195
-rw-r--r--  vp8/encoder/onyx_if.c         |  31
-rw-r--r--  vp8/encoder/onyx_int.h        |  28
-rw-r--r--  vp8/encoder/pickinter.c       |   2
-rw-r--r--  vp8/encoder/picklpf.c         |   2
-rw-r--r--  vp8/encoder/ratectrl.c        |   6
-rw-r--r--  vp8/encoder/rdopt.c           |  12
-rw-r--r--  vp8/encoder/ssim.c            |  34
-rw-r--r--  vp8/encoder/temporal_filter.c |   1
11 files changed, 185 insertions(+), 194 deletions(-)
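
Note (not part of the patch): most of the changes below replace the encoder's ad-hoc 64-bit types (INT64, long long) with the standard int64_t, so the wide arithmetic behaves the same across compilers. A minimal sketch, with made-up numbers, of the kind of overflow such accumulators guard against:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Made-up numbers: ~8160 macroblocks in a 1080p frame, each
         * contributing an error on the order of 3e5.  The running total
         * no longer fits in 32 bits, which is why the accumulators in
         * this patch move to int64_t. */
        const int mbs = 8160;
        const int per_mb_error = 300000;
        int64_t total = 0;
        int i;

        for (i = 0; i < mbs; i++)
            total += per_mb_error;

        printf("total error    : %lld\n", (long long)total);
        printf("fits in int32? : %s\n", total <= INT32_MAX ? "yes" : "no");
        return 0;
    }
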
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index ba5409754..1c4a93636 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -336,7 +336,7 @@ static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x,
// Calculate an "average" mb activity value for the frame
#define ACT_MEDIAN 0
-static void calc_av_activity( VP8_COMP *cpi, INT64 activity_sum )
+static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
{
#if ACT_MEDIAN
// Find median: Simple n^2 algorithm for experimentation
@@ -404,9 +404,9 @@ static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
VP8_COMMON *const cm = & cpi->common;
int mb_row, mb_col;
- INT64 act;
- INT64 a;
- INT64 b;
+ int64_t act;
+ int64_t a;
+ int64_t b;
#if OUTPUT_NORM_ACT_STATS
FILE *f = fopen("norm_act.stt", "a");
@@ -470,7 +470,7 @@ static void build_activity_map( VP8_COMP *cpi )
int mb_row, mb_col;
unsigned int mb_activity;
- INT64 activity_sum = 0;
+ int64_t activity_sum = 0;
// for each macroblock row in image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
@@ -537,15 +537,15 @@ void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
x->errorperbit = x->rdmult * 100 /(110 * x->rddiv);
x->errorperbit += (x->errorperbit==0);
#else
- INT64 a;
- INT64 b;
- INT64 act = *(x->mb_activity_ptr);
+ int64_t a;
+ int64_t b;
+ int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + (2*cpi->activity_avg);
b = (2*act) + cpi->activity_avg;
- x->rdmult = (unsigned int)(((INT64)x->rdmult*b + (a>>1))/a);
+ x->rdmult = (unsigned int)(((int64_t)x->rdmult*b + (a>>1))/a);
x->errorperbit = x->rdmult * 100 /(110 * x->rddiv);
x->errorperbit += (x->errorperbit==0);
#endif
@@ -1444,18 +1444,18 @@ static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
#if USE_ACT_INDEX
x->act_zbin_adj = *(x->mb_activity_ptr);
#else
- INT64 a;
- INT64 b;
- INT64 act = *(x->mb_activity_ptr);
+ int64_t a;
+ int64_t b;
+ int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + 4*cpi->activity_avg;
b = 4*act + cpi->activity_avg;
if ( act > cpi->activity_avg )
- x->act_zbin_adj = (int)(((INT64)b + (a>>1))/a) - 1;
+ x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1;
else
- x->act_zbin_adj = 1 - (int)(((INT64)a + (b>>1))/b);
+ x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b);
#endif
}
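
Note (not part of the patch): vp8_activity_masking() and adjust_act_zbin() above scale the RD multiplier and zero-bin by the ratio of local to frame-average activity using round-to-nearest integer division. A standalone sketch of that pattern with invented values:

    #include <stdint.h>
    #include <stdio.h>

    /* Round-to-nearest integer scaling, mirroring the
     *   rdmult = (rdmult * b + (a >> 1)) / a
     * pattern above; a and b weight local activity against the frame
     * average as in vp8_activity_masking().  Values are illustrative. */
    static unsigned int scale_rdmult(unsigned int rdmult, int64_t act,
                                     int64_t activity_avg)
    {
        int64_t a = act + 2 * activity_avg;   /* denominator */
        int64_t b = 2 * act + activity_avg;   /* numerator   */

        return (unsigned int)(((int64_t)rdmult * b + (a >> 1)) / a);
    }

    int main(void)
    {
        /* A flat block gets its RD multiplier lowered relative to the
         * frame average; a busy block gets it raised. */
        printf("flat block : %u\n", scale_rdmult(100, 1000, 4000));
        printf("busy block : %u\n", scale_rdmult(100, 8000, 4000));
        return 0;
    }
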
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 5422365f8..b063e45af 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -530,8 +530,8 @@ void vp8_first_pass(VP8_COMP *cpi)
YV12_BUFFER_CONFIG *gld_yv12 = &cm->yv12_fb[cm->gld_fb_idx];
int recon_y_stride = lst_yv12->y_stride;
int recon_uv_stride = lst_yv12->uv_stride;
- long long intra_error = 0;
- long long coded_error = 0;
+ int64_t intra_error = 0;
+ int64_t coded_error = 0;
int sum_mvr = 0, sum_mvc = 0;
int sum_mvr_abs = 0, sum_mvc_abs = 0;
@@ -620,7 +620,7 @@ void vp8_first_pass(VP8_COMP *cpi)
this_error += intrapenalty;
// Cumulative intra error total
- intra_error += (long long)this_error;
+ intra_error += (int64_t)this_error;
// Set up limit values for motion vectors to prevent them extending outside the UMV borders
x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
@@ -757,7 +757,7 @@ void vp8_first_pass(VP8_COMP *cpi)
}
}
- coded_error += (long long)this_error;
+ coded_error += (int64_t)this_error;
// adjust to the next column of macroblocks
x->src.y_buffer += 16;
@@ -1219,8 +1219,8 @@ void vp8_init_second_pass(VP8_COMP *cpi)
cpi->twopass.total_coded_error_left = cpi->twopass.total_stats->coded_error;
cpi->twopass.start_tot_err_left = cpi->twopass.total_error_left;
- //cpi->twopass.bits_left = (long long)(cpi->twopass.total_stats->count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
- //cpi->twopass.bits_left -= (long long)(cpi->twopass.total_stats->count * two_pass_min_rate / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
+ //cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats->count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
+ //cpi->twopass.bits_left -= (int64_t)(cpi->twopass.total_stats->count * two_pass_min_rate / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
// each frame can have a different duration, as the frame rate in the source
// isn't guaranteed to be constant. The frame rate prior to the first frame
@@ -1230,8 +1230,8 @@ void vp8_init_second_pass(VP8_COMP *cpi)
vp8_new_frame_rate(cpi, 10000000.0 * cpi->twopass.total_stats->count / cpi->twopass.total_stats->duration);
cpi->output_frame_rate = cpi->oxcf.frame_rate;
- cpi->twopass.bits_left = (long long)(cpi->twopass.total_stats->duration * cpi->oxcf.target_bandwidth / 10000000.0) ;
- cpi->twopass.bits_left -= (long long)(cpi->twopass.total_stats->duration * two_pass_min_rate / 10000000.0);
+ cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats->duration * cpi->oxcf.target_bandwidth / 10000000.0) ;
+ cpi->twopass.bits_left -= (int64_t)(cpi->twopass.total_stats->duration * two_pass_min_rate / 10000000.0);
cpi->twopass.clip_bits_total = cpi->twopass.bits_left;
// Calculate a minimum intra value to be used in determining the IIratio
@@ -2671,16 +2671,16 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
int max_bits = frame_max_bits(cpi);
// Maximum bits for the kf group
- long long max_grp_bits;
+ int64_t max_grp_bits;
// Default allocation based on bits left and relative
// complexity of the section
- cpi->twopass.kf_group_bits = (long long)( cpi->twopass.bits_left *
+ cpi->twopass.kf_group_bits = (int64_t)( cpi->twopass.bits_left *
( kf_group_err /
cpi->twopass.modified_error_left ));
// Clip based on maximum per frame rate defined by the user.
- max_grp_bits = (long long)max_bits * (long long)cpi->twopass.frames_to_key;
+ max_grp_bits = (int64_t)max_bits * (int64_t)cpi->twopass.frames_to_key;
if (cpi->twopass.kf_group_bits > max_grp_bits)
cpi->twopass.kf_group_bits = max_grp_bits;
@@ -2697,19 +2697,19 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
int high_water_mark = (opt_buffer_lvl +
cpi->oxcf.maximum_buffer_size) >> 1;
- long long av_group_bits;
+ int64_t av_group_bits;
// Av bits per frame * number of frames
- av_group_bits = (long long)cpi->av_per_frame_bandwidth *
- (long long)cpi->twopass.frames_to_key;
+ av_group_bits = (int64_t)cpi->av_per_frame_bandwidth *
+ (int64_t)cpi->twopass.frames_to_key;
// We are at or above the maximum.
if (cpi->buffer_level >= high_water_mark)
{
- long long min_group_bits;
+ int64_t min_group_bits;
min_group_bits = av_group_bits +
- (long long)(buffer_lvl -
+ (int64_t)(buffer_lvl -
high_water_mark);
if (cpi->twopass.kf_group_bits < min_group_bits)
@@ -2718,11 +2718,11 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
// We are above optimal but below the maximum
else if (cpi->twopass.kf_group_bits < av_group_bits)
{
- long long bits_below_av = av_group_bits -
+ int64_t bits_below_av = av_group_bits -
cpi->twopass.kf_group_bits;
cpi->twopass.kf_group_bits +=
- (long long)((double)bits_below_av *
+ (int64_t)((double)bits_below_av *
(double)(buffer_lvl - opt_buffer_lvl) /
(double)(high_water_mark - opt_buffer_lvl));
}
@@ -3043,8 +3043,8 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
}
else
{
- long long clip_bits = (long long)(cpi->twopass.total_stats->count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
- long long over_spend = cpi->oxcf.starting_buffer_level - cpi->buffer_level;
+ int64_t clip_bits = (int64_t)(cpi->twopass.total_stats->count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
+ int64_t over_spend = cpi->oxcf.starting_buffer_level - cpi->buffer_level;
if ((last_kf_resampled && (kf_q > cpi->worst_quality)) || // If triggered last time the threshold for triggering again is reduced
((kf_q > cpi->worst_quality) && // Projected Q higher than allowed and ...
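
Note (not part of the patch): the key-frame group allocation touched above gives the group a share of the remaining two-pass bit budget proportional to its share of the remaining modified error, clamped to max_bits per frame. A sketch in the same int64_t arithmetic, with hypothetical inputs:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the allocation changed above:
     *   kf_group_bits = bits_left * (kf_group_err / modified_error_left)
     * clamped to max_bits * frames_to_key.  All names and values are
     * illustrative stand-ins, not the encoder's real state. */
    static int64_t kf_group_allocation(int64_t bits_left,
                                       double kf_group_err,
                                       double modified_error_left,
                                       int max_bits, int frames_to_key)
    {
        int64_t kf_group_bits =
            (int64_t)(bits_left * (kf_group_err / modified_error_left));
        int64_t max_grp_bits = (int64_t)max_bits * (int64_t)frames_to_key;

        if (kf_group_bits > max_grp_bits)
            kf_group_bits = max_grp_bits;

        return kf_group_bits;
    }

    int main(void)
    {
        /* 20 Mbit left, this group holds 10% of the remaining error,
         * 150 frames to the next key frame, 20000 bits/frame cap. */
        printf("%lld bits for the kf group\n",
               (long long)kf_group_allocation(20000000, 1.0, 10.0, 20000, 150));
        return 0;
    }
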
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index 58b524f82..aebcfce70 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -15,6 +15,7 @@
#include <stdio.h>
#include <limits.h>
#include <math.h>
+#include "vp8/common/findnearmv.h"
#ifdef ENTROPY_STATS
static int mv_ref_ct [31] [4] [2];
@@ -342,12 +343,26 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int bestmse = INT_MAX;
int_mv startmv;
int_mv this_mv;
- unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
unsigned char *z = (*(b->base_src) + b->src);
int left, right, up, down, diag;
unsigned int sse;
int whichdir ;
int thismse;
+ int y_stride;
+
+#if ARCH_X86 || ARCH_X86_64
+ MACROBLOCKD *xd = &x->e_mbd;
+ unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ unsigned char *y;
+
+ y_stride = 32;
+ /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
+ vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
+ y = xd->y_buf + y_stride + 1;
+#else
+ unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ y_stride = d->pre_stride;
+#endif
// central mv
bestmv->as_mv.row <<= 3;
@@ -355,14 +370,14 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
startmv = *bestmv;
// calculate central point error
- bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
+ bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = bestmse;
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// go left then right and check error
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
- thismse = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (left < bestmse)
@@ -374,7 +389,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
this_mv.as_mv.col += 8;
- thismse = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (right < bestmse)
@@ -388,7 +403,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
// go up then down and check error
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
- thismse = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (up < bestmse)
@@ -400,7 +415,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
this_mv.as_mv.row += 8;
- thismse = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (down < bestmse)
@@ -423,23 +438,23 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
case 0:
this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
- thismse = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
break;
case 1:
this_mv.as_mv.col += 4;
this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
- thismse = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
break;
case 2:
this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
this_mv.as_mv.row += 4;
- thismse = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
break;
case 3:
default:
this_mv.as_mv.col += 4;
this_mv.as_mv.row += 4;
- thismse = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
break;
}
@@ -458,7 +473,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
// time to check quarter pels.
if (bestmv->as_mv.row < startmv.as_mv.row)
- y -= d->pre_stride;
+ y -= y_stride;
if (bestmv->as_mv.col < startmv.as_mv.col)
y--;
@@ -473,12 +488,12 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
if (startmv.as_mv.col & 7)
{
this_mv.as_mv.col = startmv.as_mv.col - 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -492,7 +507,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
this_mv.as_mv.col += 4;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (right < bestmse)
@@ -509,12 +524,12 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
if (startmv.as_mv.row & 7)
{
this_mv.as_mv.row = startmv.as_mv.row - 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
- thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
}
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -528,7 +543,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
this_mv.as_mv.row += 4;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (down < bestmse)
@@ -558,12 +573,12 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
if (startmv.as_mv.col & 7)
{
this_mv.as_mv.col -= 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);;
+ thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);;
}
}
else
@@ -573,12 +588,12 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
if (startmv.as_mv.col & 7)
{
this_mv.as_mv.col -= 2;
- thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
}
else
{
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - d->pre_stride - 1, d->pre_stride, 6, 6, z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride - 1, y_stride, 6, 6, z, b->src_stride, &sse);
}
}
@@ -589,12 +604,12 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
if (startmv.as_mv.row & 7)
{
this_mv.as_mv.row -= 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
- thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
}
break;
@@ -604,19 +619,19 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
if (startmv.as_mv.col & 7)
{
this_mv.as_mv.col -= 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
break;
case 3:
this_mv.as_mv.col += 2;
this_mv.as_mv.row += 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
break;
}
@@ -633,7 +648,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
return bestmse;
}
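
Note (not part of the patch): the whichdir index used twice above (for the half-pel and quarter-pel diagonals) picks the diagonal between the better of left/right and the better of up/down. A toy illustration:

    #include <stdio.h>

    int main(void)
    {
        /* Example costs only.  whichdir selects the diagonal between the
         * better of left/right and the better of up/down, exactly as in
         * the switches above:
         *   0 = up-left, 1 = up-right, 2 = down-left, 3 = down-right */
        int left = 120, right = 150, up = 200, down = 180;
        int whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);

        printf("whichdir = %d (left beat right, down beat up -> down-left)\n",
               whichdir);
        return 0;
    }
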
-int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
+int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
int error_per_bit,
const vp8_variance_fn_ptr_t *vfp,
@@ -643,11 +658,26 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
int bestmse = INT_MAX;
int_mv startmv;
int_mv this_mv;
- unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
unsigned char *z = (*(b->base_src) + b->src);
int left, right, up, down, diag;
unsigned int sse;
+ int whichdir ;
int thismse;
+ int y_stride;
+
+#if ARCH_X86 || ARCH_X86_64
+ MACROBLOCKD *xd = &x->e_mbd;
+ unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ unsigned char *y;
+
+ y_stride = 32;
+ /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
+ vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
+ y = xd->y_buf + y_stride + 1;
+#else
+ unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ y_stride = d->pre_stride;
+#endif
// central mv
bestmv->as_mv.row <<= 3;
@@ -655,14 +685,14 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
startmv = *bestmv;
// calculate central point error
- bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
+ bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = bestmse;
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// go left then right and check error
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
- thismse = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (left < bestmse)
@@ -674,7 +704,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
}
this_mv.as_mv.col += 8;
- thismse = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (right < bestmse)
@@ -688,7 +718,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
// go up then down and check error
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
- thismse = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (up < bestmse)
@@ -700,7 +730,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
}
this_mv.as_mv.row += 8;
- thismse = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (down < bestmse)
@@ -711,8 +741,6 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
- // somewhat strangely not doing all the diagonals for half pel is slower than doing them.
-#if 0
// now check 1 more diagonal -
whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
this_mv = startmv;
@@ -720,76 +748,28 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
switch (whichdir)
{
case 0:
- this_mv.col = (this_mv.col - 8) | 4;
- this_mv.row = (this_mv.row - 8) | 4;
- diag = vfp->svf(y - 1 - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
break;
case 1:
- this_mv.col += 4;
- this_mv.row = (this_mv.row - 8) | 4;
- diag = vfp->svf(y - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
break;
case 2:
- this_mv.col = (this_mv.col - 8) | 4;
- this_mv.row += 4;
- diag = vfp->svf(y - 1, d->pre_stride, 4, 4, z, b->src_stride, &sse);
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
break;
case 3:
- this_mv.col += 4;
- this_mv.row += 4;
- diag = vfp->svf(y, d->pre_stride, 4, 4, z, b->src_stride, &sse);
+ default:
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
break;
}
- diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-
- if (diag < bestmse)
- {
- *bestmv = this_mv;
- bestmse = diag;
- }
-
-#else
- this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
- this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
- thismse = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-
- if (diag < bestmse)
- {
- *bestmv = this_mv;
- bestmse = diag;
- *distortion = thismse;
- *sse1 = sse;
- }
-
- this_mv.as_mv.col += 8;
- thismse = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-
- if (diag < bestmse)
- {
- *bestmv = this_mv;
- bestmse = diag;
- *distortion = thismse;
- *sse1 = sse;
- }
-
- this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
- this_mv.as_mv.row = startmv.as_mv.row + 4;
- thismse = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-
- if (diag < bestmse)
- {
- *bestmv = this_mv;
- bestmse = diag;
- *distortion = thismse;
- *sse1 = sse;
- }
-
- this_mv.as_mv.col += 8;
- thismse = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (diag < bestmse)
@@ -800,7 +780,6 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
-#endif
return bestmse;
}
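
Note (not part of the patch): on x86, both sub-pixel routines above now stage an 18-row neighbourhood of the reference into a stride-32 scratch buffer (xd->y_buf) before searching, so reads such as y - 1 and y - y_stride stay inside a small, cache-friendly block. A plain-C stand-in for that copy, with an invented layout:

    #include <stdio.h>
    #include <string.h>

    /* Plain-C stand-in for the vfp->copymem() call used above: copy
     * `rows` rows of 32 bytes from the reference area (src_stride wide)
     * into a stride-32 scratch buffer.  The patch uses an optimized
     * copy routine; this only illustrates the layout. */
    static void copy_to_scratch(const unsigned char *src, int src_stride,
                                unsigned char *dst, int dst_stride, int rows)
    {
        int r;
        for (r = 0; r < rows; r++)
            memcpy(dst + r * dst_stride, src + r * src_stride, 32);
    }

    int main(void)
    {
        unsigned char ref[64 * 64];      /* pretend reference frame area   */
        unsigned char scratch[32 * 18];  /* 18 rows x 32 cols, as above    */
        int pre_stride = 64;
        unsigned char *y0 = ref + 10 * pre_stride + 10;  /* best full-pel  */
        unsigned char *y;
        int y_stride = 32;

        memset(ref, 0x80, sizeof(ref));

        /* Start one pixel up and left of y0 so that y - 1 and
         * y - y_stride still fall inside the scratch buffer. */
        copy_to_scratch(y0 - 1 - pre_stride, pre_stride, scratch, y_stride, 18);
        y = scratch + y_stride + 1;

        printf("centre pixel in scratch: %d\n", y[0]);
        return 0;
    }
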
@@ -866,7 +845,7 @@ int vp8_hex_search
unsigned char *what = (*(b->base_src) + b->src);
int what_stride = b->src_stride;
int in_what_stride = d->pre_stride;
- int br = ref_mv->as_mv.row, bc = ref_mv->as_mv.col;
+ int br, bc;
int_mv this_mv;
unsigned int bestsad = 0x7fffffff;
unsigned int thissad;
@@ -880,6 +859,11 @@ int vp8_hex_search
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+ // adjust ref_mv to make sure it is within MV range
+ vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ br = ref_mv->as_mv.row;
+ bc = ref_mv->as_mv.col;
+
// Work out the start point for the search
base_offset = (unsigned char *)(*(d->base_pre) + d->pre);
this_offset = base_offset + (br * (d->pre_stride)) + bc;
@@ -1043,8 +1027,8 @@ int vp8_diamond_search_sad
int best_site = 0;
int last_site = 0;
- int ref_row = ref_mv->as_mv.row;
- int ref_col = ref_mv->as_mv.col;
+ int ref_row;
+ int ref_col;
int this_row_offset;
int this_col_offset;
search_site *ss;
@@ -1057,8 +1041,10 @@ int vp8_diamond_search_sad
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+ vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ ref_row = ref_mv->as_mv.row;
+ ref_col = ref_mv->as_mv.col;
*num00 = 0;
-
best_mv->as_mv.row = ref_row;
best_mv->as_mv.col = ref_col;
@@ -1162,8 +1148,8 @@ int vp8_diamond_search_sadx4
int best_site = 0;
int last_site = 0;
- int ref_row = ref_mv->as_mv.row;
- int ref_col = ref_mv->as_mv.col;
+ int ref_row;
+ int ref_col;
int this_row_offset;
int this_col_offset;
search_site *ss;
@@ -1176,6 +1162,9 @@ int vp8_diamond_search_sadx4
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+ vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ ref_row = ref_mv->as_mv.row;
+ ref_col = ref_mv->as_mv.col;
*num00 = 0;
best_mv->as_mv.row = ref_row;
best_mv->as_mv.col = ref_col;
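
Note (not part of the patch): the clamp added to vp8_hex_search and both diamond searches keeps the starting MV inside the legal search window before it is turned into a reference-frame offset; the equivalent clamps in pickinter.c and rdopt.c are removed further down. The real vp8_clamp_mv lives in vp8/common/findnearmv.h; the version below is only an illustration with the same argument order:

    #include <stdio.h>

    typedef struct { short row, col; } mv_t;   /* stand-in for int_mv/MV */

    /* Illustrative clamp with the same argument order as the calls added
     * above; not the library's implementation. */
    static void clamp_mv(mv_t *mv, int col_min, int col_max,
                         int row_min, int row_max)
    {
        if (mv->col < col_min) mv->col = col_min;
        if (mv->col > col_max) mv->col = col_max;
        if (mv->row < row_min) mv->row = row_min;
        if (mv->row > row_max) mv->row = row_max;
    }

    int main(void)
    {
        /* A predicted MV outside the UMV border is pulled back into the
         * legal window before the search forms the offset
         *   ref + row * pre_stride + col,
         * so it never reads outside the padded reference frame. */
        mv_t mv = { -45, 300 };
        clamp_mv(&mv, -32, 287, -32, 159);
        printf("clamped mv: row %d, col %d\n", mv.row, mv.col);
        return 0;
    }
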
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index b9537d1ba..f862fd0e4 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -9,6 +9,7 @@
*/
+#include "vpx_config.h"
#include "vp8/common/onyxc_int.h"
#include "onyx_int.h"
#include "vp8/common/systemdependent.h"
@@ -24,7 +25,9 @@
#include "segmentation.h"
#include "vp8/common/g_common.h"
#include "vpx_scale/yv12extend.h"
+#if CONFIG_POSTPROC
#include "vp8/common/postproc.h"
+#endif
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/swapyv12buffer.h"
#include "vp8/common/threading.h"
@@ -2729,6 +2732,8 @@ static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
cpi->Source = &cpi->scaled_source;
#endif
}
+ else
+ cpi->Source = sd;
}
@@ -3340,7 +3345,7 @@ void loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm)
static void update_buffer_level(VP8_COMP *cpi)
{
- long long tmp;
+ int64_t tmp;
/* Update the buffered average bitrate.
*
@@ -3410,7 +3415,7 @@ static void update_buffer_level(VP8_COMP *cpi)
*/
if(cpi->total_actual_bits > cpi->oxcf.starting_buffer_level)
{
- tmp = (long long)cpi->buffered_av_per_frame_bandwidth
+ tmp = (int64_t)cpi->buffered_av_per_frame_bandwidth
* cpi->oxcf.maximum_buffer_size
/ cpi->av_per_frame_bandwidth;
cpi->buffer_level = cpi->oxcf.maximum_buffer_size
@@ -3428,7 +3433,7 @@ static void update_buffer_level(VP8_COMP *cpi)
*/
if(cpi->total_actual_bits > cpi->oxcf.starting_buffer_level)
{
- long long decayed_overshoot;
+ int64_t decayed_overshoot;
decayed_overshoot = cpi->accumulated_overshoot;
decayed_overshoot *= (cpi->oxcf.maximum_buffer_size
@@ -4796,22 +4801,22 @@ static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest,
{
double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
*cpi->oxcf.two_pass_vbrmin_section / 100);
- cpi->twopass.bits_left += (long long)(two_pass_min_rate / cpi->oxcf.frame_rate);
+ cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->oxcf.frame_rate);
}
}
#endif
//For ARM NEON, d8-d15 are callee-saved registers, and need to be saved by us.
#if HAVE_ARMV7
-extern void vp8_push_neon(INT64 *store);
-extern void vp8_pop_neon(INT64 *store);
+extern void vp8_push_neon(int64_t *store);
+extern void vp8_pop_neon(int64_t *store);
#endif
-int vp8_receive_raw_frame(VP8_PTR ptr, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, INT64 time_stamp, INT64 end_time)
+int vp8_receive_raw_frame(VP8_PTR ptr, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time)
{
#if HAVE_ARMV7
- INT64 store_reg[8];
+ int64_t store_reg[8];
#endif
VP8_COMP *cpi = (VP8_COMP *) ptr;
VP8_COMMON *cm = &cpi->common;
@@ -4862,10 +4867,10 @@ static int frame_is_reference(const VP8_COMP *cpi)
}
-int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, INT64 *time_stamp, INT64 *time_end, int flush)
+int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, int64_t *time_stamp, int64_t *time_end, int flush)
{
#if HAVE_ARMV7
- INT64 store_reg[8];
+ int64_t store_reg[8];
#endif
VP8_COMP *cpi = (VP8_COMP *) ptr;
VP8_COMMON *cm = &cpi->common;
@@ -4972,7 +4977,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
// adjust frame rates based on timestamps given
if (!cm->refresh_alt_ref_frame)
{
- long long this_duration;
+ int64_t this_duration;
int step = 0;
if (cpi->source->ts_start == cpi->first_time_stamp_ever)
@@ -4982,7 +4987,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
}
else
{
- long long last_duration;
+ int64_t last_duration;
this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
last_duration = cpi->last_end_time_stamp_seen
@@ -5158,7 +5163,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
int y_samples = orig->y_height * orig->y_width ;
int uv_samples = orig->uv_height * orig->uv_width ;
int t_samples = y_samples + 2 * uv_samples;
- long long sq_error;
+ int64_t sq_error;
ye = calc_plane_error(orig->y_buffer, orig->y_stride,
recon->y_buffer, recon->y_stride, orig->y_width, orig->y_height,
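
Note (not part of the patch): sq_error above widens to int64_t because it accumulates squared pixel errors over all three planes of a frame. A sketch of the standard PSNR computed from such a sum (the encoder uses its own helpers):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Standard PSNR from a 64-bit sum of squared errors over `samples`
     * 8-bit pixels.  Illustrative only; not the encoder's helper. */
    static double psnr_from_sse(int64_t sq_error, int64_t samples)
    {
        if (sq_error == 0)
            return 100.0;   /* arbitrary cap for identical frames */
        return 10.0 * log10(255.0 * 255.0 * (double)samples / (double)sq_error);
    }

    int main(void)
    {
        /* 1080p 4:2:0: y + 2 * uv samples, with a made-up error total. */
        int64_t y_samples = 1920 * 1080;
        int64_t uv_samples = 960 * 540;
        int64_t t_samples = y_samples + 2 * uv_samples;

        printf("global PSNR: %.2f dB\n", psnr_from_sse(3110400, t_samples));
        return 0;
    }
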
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index a27822da6..460da21d4 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -321,10 +321,10 @@ typedef struct VP8_COMP
CODING_CONTEXT coding_context;
// Rate targetting variables
- long long prediction_error;
- long long last_prediction_error;
- long long intra_error;
- long long last_intra_error;
+ int64_t prediction_error;
+ int64_t last_prediction_error;
+ int64_t intra_error;
+ int64_t last_intra_error;
int this_frame_target;
int projected_frame_size;
@@ -347,7 +347,7 @@ typedef struct VP8_COMP
int baseline_gf_interval;
int active_arnr_frames; // <= cpi->oxcf.arnr_max_frames
- INT64 key_frame_count;
+ int64_t key_frame_count;
int prior_key_frame_distance[KEY_FRAME_CONTEXT];
int per_frame_bandwidth; // Current section per frame bandwidth target
int av_per_frame_bandwidth; // Average frame size target for clip
@@ -358,9 +358,9 @@ typedef struct VP8_COMP
int inter_frame_target;
double output_frame_rate;
- long long last_time_stamp_seen;
- long long last_end_time_stamp_seen;
- long long first_time_stamp_ever;
+ int64_t last_time_stamp_seen;
+ int64_t last_end_time_stamp_seen;
+ int64_t first_time_stamp_ever;
int ni_av_qi;
int ni_tot_qi;
@@ -371,7 +371,7 @@ typedef struct VP8_COMP
int zbin_mode_boost;
int zbin_mode_boost_enabled;
- INT64 total_byte_count;
+ int64_t total_byte_count;
int buffered_mode;
@@ -384,7 +384,7 @@ typedef struct VP8_COMP
int long_rolling_target_bits;
int long_rolling_actual_bits;
- long long total_actual_bits;
+ int64_t total_actual_bits;
int total_target_vs_actual; // debug stats
int worst_quality;
@@ -541,8 +541,8 @@ typedef struct VP8_COMP
FIRSTPASS_STATS *this_frame_stats;
FIRSTPASS_STATS *stats_in, *stats_in_end, *stats_in_start;
int first_pass_done;
- long long bits_left;
- long long clip_bits_total;
+ int64_t bits_left;
+ int64_t clip_bits_total;
double avg_iiratio;
double modified_error_total;
double modified_error_used;
@@ -562,10 +562,10 @@ typedef struct VP8_COMP
int gf_group_error_left; // Remaining error from uncoded frames in a gf group. Two pass use only
// Projected total bits available for a key frame group of frames
- long long kf_group_bits;
+ int64_t kf_group_bits;
// Error score of frames still to be coded in kf group
- long long kf_group_error_left;
+ int64_t kf_group_error_left;
int gf_group_bits; // Projected Bits available for a group of frames including 1 GF or ARF
int gf_bits; // Bits for the golden frame or ARF - 2 pass only
diff --git a/vp8/encoder/pickinter.c b/vp8/encoder/pickinter.c
index 725e44e62..9cdaf7d53 100644
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -669,8 +669,6 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
mvp_full.as_mv.col = mvp.as_mv.col>>3;
mvp_full.as_mv.row = mvp.as_mv.row>>3;
- /* adjust mvp to make sure it is within MV range */
- vp8_clamp_mv(&mvp_full, col_min, col_max, row_min, row_max);
}else
{
mvp.as_int = best_ref_mv.as_int;
diff --git a/vp8/encoder/picklpf.c b/vp8/encoder/picklpf.c
index 49de62d7d..beefe8d8e 100644
--- a/vp8/encoder/picklpf.c
+++ b/vp8/encoder/picklpf.c
@@ -161,7 +161,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
if (cm->sharpness_level != cm->last_sharpness_level)
{
vp8_loop_filter_update_sharpness(&cm->lf_info, cm->sharpness_level);
- cm->last_sharpness_level = cm->last_sharpness_level;
+ cm->last_sharpness_level = cm->sharpness_level;
}
// Start the search at the previous frame filter level unless it is now out of range.
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index dea690b13..69066e5ab 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -897,9 +897,9 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
if ( critical_buffer_level >
(cpi->oxcf.optimal_buffer_level >> 2) )
{
- INT64 qadjustment_range =
+ int64_t qadjustment_range =
cpi->worst_quality - cpi->ni_av_qi;
- INT64 above_base =
+ int64_t above_base =
(critical_buffer_level -
(cpi->oxcf.optimal_buffer_level >> 2));
@@ -1163,7 +1163,7 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
if (cpi->pass==0
&& cpi->common.refresh_golden_frame
&& cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
- long long adjust;
+ int64_t adjust;
/*
frames_in_buffer = cpi->oxcf.maximum_buffer_size
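
Note (not part of the patch): qadjustment_range and above_base widen to int64_t because buffer levels are measured in bits and their products with a Q range can exceed 32 bits. A sketch with invented numbers and an illustrative (not the encoder's) scaling step:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Invented numbers: a 6 s buffer at 16 Mbit/s is 96,000,000 bits.
         * Scaling a Q range by a fraction of such a level needs 64-bit
         * intermediates, which is why the two variables above widen. */
        int64_t critical_buffer_level = 96000000;
        int64_t optimal_buffer_level  = 30000000;
        int64_t qadjustment_range = 63 - 20;          /* worst Q - average Q */
        int64_t above_base =
            critical_buffer_level - (optimal_buffer_level >> 2);

        /* Illustrative proportional step, not the encoder's exact formula. */
        int64_t q_step = above_base * qadjustment_range / critical_buffer_level;

        printf("above_base * range = %lld (exceeds INT32_MAX)\n",
               (long long)(above_base * qadjustment_range));
        printf("illustrative q step = %lld\n", (long long)q_step);
        return 0;
    }
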
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 3d77bb35a..952977094 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -728,7 +728,7 @@ static int rd_pick_intra4x4mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, int *Rate,
int cost = mb->mbmode_cost [xd->frame_type] [B_PRED];
int distortion = 0;
int tot_rate_y = 0;
- long long total_rd = 0;
+ int64_t total_rd = 0;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
@@ -770,11 +770,11 @@ static int rd_pick_intra4x4mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, int *Rate,
mic->bmi[i].as_mode = best_mode;
- if(total_rd >= (long long)best_rd)
+ if(total_rd >= (int64_t)best_rd)
break;
}
- if(total_rd >= (long long)best_rd)
+ if(total_rd >= (int64_t)best_rd)
return INT_MAX;
*Rate = cost;
@@ -1244,6 +1244,9 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
// Should we do a full search (best quality only)
if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000)
{
+ /* Check if mvp_full is within the range. */
+ vp8_clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+
thissme = cpi->full_search_sad(x, c, e, &mvp_full,
sadpb, 16, v_fn_ptr,
x->mvcost, bsi->ref_mv);
@@ -2079,9 +2082,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mvp_full.as_mv.col = mvp.as_mv.col>>3;
mvp_full.as_mv.row = mvp.as_mv.row>>3;
- /* adjust mvp to make sure it is within MV range */
- vp8_clamp_mv(&mvp_full, col_min, col_max, row_min, row_max);
-
// Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
if (x->mv_col_min < col_min )
x->mv_col_min = col_min;
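
Note (not part of the patch): with the explicit mvp clamp removed above, rdopt.c still intersects the UMV search window with the window of legal MVs around the predictor before the diamond search. A small sketch of that intersection with made-up bounds:

    #include <stdio.h>

    /* Intersect the UMV search window with the window of legal MVs around
     * the predictor, as in the lines kept above.  Bounds are made up. */
    static void intersect_window(int *mv_col_min, int *mv_col_max,
                                 int *mv_row_min, int *mv_row_max,
                                 int col_min, int col_max,
                                 int row_min, int row_max)
    {
        if (*mv_col_min < col_min) *mv_col_min = col_min;
        if (*mv_col_max > col_max) *mv_col_max = col_max;
        if (*mv_row_min < row_min) *mv_row_min = row_min;
        if (*mv_row_max > row_max) *mv_row_max = row_max;
    }

    int main(void)
    {
        int mv_col_min = -80, mv_col_max = 80;
        int mv_row_min = -80, mv_row_max = 80;

        intersect_window(&mv_col_min, &mv_col_max, &mv_row_min, &mv_row_max,
                         -48, 64, -32, 96);

        printf("col: [%d, %d]  row: [%d, %d]\n",
               mv_col_min, mv_col_max, mv_row_min, mv_row_max);
        return 0;
    }
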
diff --git a/vp8/encoder/ssim.c b/vp8/encoder/ssim.c
index b8701e813..fea756f7b 100644
--- a/vp8/encoder/ssim.c
+++ b/vp8/encoder/ssim.c
@@ -73,8 +73,8 @@ void ssim_parms_8x8_c
}
}
-const static long long cc1 = 26634; // (64^2*(.01*255)^2
-const static long long cc2 = 239708; // (64^2*(.03*255)^2
+const static int64_t cc1 = 26634; // (64^2*(.01*255)^2
+const static int64_t cc2 = 239708; // (64^2*(.03*255)^2
static double similarity
(
@@ -86,19 +86,19 @@ static double similarity
int count
)
{
- long long ssim_n, ssim_d;
- long long c1, c2;
+ int64_t ssim_n, ssim_d;
+ int64_t c1, c2;
//scale the constants by number of pixels
c1 = (cc1*count*count)>>12;
c2 = (cc2*count*count)>>12;
- ssim_n = (2*sum_s*sum_r+ c1)*((long long) 2*count*sum_sxr-
- (long long) 2*sum_s*sum_r+c2);
+ ssim_n = (2*sum_s*sum_r+ c1)*((int64_t) 2*count*sum_sxr-
+ (int64_t) 2*sum_s*sum_r+c2);
ssim_d = (sum_s*sum_s +sum_r*sum_r+c1)*
- ((long long)count*sum_sq_s-(long long)sum_s*sum_s +
- (long long)count*sum_sq_r-(long long) sum_r*sum_r +c2) ;
+ ((int64_t)count*sum_sq_s-(int64_t)sum_s*sum_s +
+ (int64_t)count*sum_sq_r-(int64_t) sum_r*sum_r +c2) ;
return ssim_n * 1.0 / ssim_d;
}
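
Note (not part of the patch): with C1 = (0.01*255)^2 and C2 = (0.03*255)^2, the value returned by similarity() corresponds to the standard SSIM expression (2*mu_s*mu_r + C1)(2*cov + C2) / ((mu_s^2 + mu_r^2 + C1)(var_s + var_r + C2)); cc1/cc2 above are C1 and C2 pre-scaled by 64^2 (= 1<<12) so that (cc*count*count)>>12 gives C*count^2 and the whole ratio can be evaluated on integer sums. A floating-point cross-check with made-up 8x8 sums:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical 8x8 block sums (count = 64). */
        double count    = 64;
        double sum_s    = 6400,   sum_r    = 6464;   /* pixel sums            */
        double sum_sq_s = 650000, sum_sq_r = 660000; /* sums of squares       */
        double sum_sxr  = 652000;                    /* sum of cross products */

        double c1 = (0.01 * 255) * (0.01 * 255);
        double c2 = (0.03 * 255) * (0.03 * 255);

        double mu_s  = sum_s / count, mu_r = sum_r / count;
        double var_s = sum_sq_s / count - mu_s * mu_s;
        double var_r = sum_sq_r / count - mu_r * mu_r;
        double cov   = sum_sxr / count - mu_s * mu_r;

        double ssim = (2 * mu_s * mu_r + c1) * (2 * cov + c2) /
                      ((mu_s * mu_s + mu_r * mu_r + c1) * (var_s + var_r + c2));

        printf("SSIM = %f\n", ssim);
        return 0;
    }
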
@@ -124,11 +124,11 @@ long dssim(unsigned char *s,int sp, unsigned char *r,int rp,
const vp8_variance_rtcd_vtable_t *rtcd)
{
unsigned long sum_s=0,sum_r=0,sum_sq_s=0,sum_sq_r=0,sum_sxr=0;
- long long ssim3;
- long long ssim_n1,ssim_n2;
- long long ssim_d1,ssim_d2;
- long long ssim_t1,ssim_t2;
- long long c1, c2;
+ int64_t ssim3;
+ int64_t ssim_n1,ssim_n2;
+ int64_t ssim_d1,ssim_d2;
+ int64_t ssim_t1,ssim_t2;
+ int64_t c1, c2;
// normalize by 256/64
c1 = cc1*16;
@@ -137,12 +137,12 @@ long dssim(unsigned char *s,int sp, unsigned char *r,int rp,
rtcd->ssimpf(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
ssim_n1 = (2*sum_s*sum_r+ c1);
- ssim_n2 =((long long) 2*256*sum_sxr-(long long) 2*sum_s*sum_r+c2);
+ ssim_n2 =((int64_t) 2*256*sum_sxr-(int64_t) 2*sum_s*sum_r+c2);
- ssim_d1 =((long long)sum_s*sum_s +(long long)sum_r*sum_r+c1);
+ ssim_d1 =((int64_t)sum_s*sum_s +(int64_t)sum_r*sum_r+c1);
- ssim_d2 = (256 * (long long) sum_sq_s-(long long) sum_s*sum_s +
- (long long) 256*sum_sq_r-(long long) sum_r*sum_r +c2) ;
+ ssim_d2 = (256 * (int64_t) sum_sq_s-(int64_t) sum_s*sum_s +
+ (int64_t) 256*sum_sq_r-(int64_t) sum_r*sum_r +c2) ;
ssim_t1 = 256 - 256 * ssim_n1 / ssim_d1;
ssim_t2 = 256 - 256 * ssim_n2 / ssim_d2;
diff --git a/vp8/encoder/temporal_filter.c b/vp8/encoder/temporal_filter.c
index 19913a9b1..b9ade1c6c 100644
--- a/vp8/encoder/temporal_filter.c
+++ b/vp8/encoder/temporal_filter.c
@@ -24,7 +24,6 @@
#include "segmentation.h"
#include "vp8/common/g_common.h"
#include "vpx_scale/yv12extend.h"
-#include "vp8/common/postproc.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/swapyv12buffer.h"
#include "vp8/common/threading.h"