author    John Koleszar <jkoleszar@google.com>  2011-08-11 13:01:45 -0400
committer John Koleszar <jkoleszar@google.com>  2011-08-11 13:01:45 -0400
commit    a16cd74ba15a43cb2e13284938c8b0c51bfbf3af (patch)
tree      b49d9728ac04a8316420305b0538dac0d6db758b /vp8
parent    62400028e22b3544eac09724afe45f3cd55766a1 (diff)
parent    939f64f68efc837a98a5c80913a4bde50b79854f (diff)

Merge remote branch 'internal/upstream-experimental' into HEAD

Conflicts:
    vp8/decoder/detokenize.c
    vp8/decoder/onyxd_if.c
    vp8/vp8_common.mk

Change-Id: Ifca1108186a8bc715da86a44021ee2fa5550b5b8
Diffstat (limited to 'vp8')
-rw-r--r--  vp8/common/asm_com_offsets.c | 8
-rw-r--r--  vp8/common/onyx.h | 4
-rw-r--r--  vp8/common/onyxc_int.h | 6
-rw-r--r--  vp8/common/onyxd.h | 4
-rw-r--r--  vp8/decoder/arm/arm_dsystemdependent.c | 1
-rw-r--r--  vp8/decoder/detokenize.c | 1
-rw-r--r--  vp8/decoder/onyxd_if.c | 16
-rw-r--r--  vp8/decoder/onyxd_int.h | 4
-rw-r--r--  vp8/encoder/encodeframe.c | 28
-rw-r--r--  vp8/encoder/firstpass.c | 40
-rw-r--r--  vp8/encoder/mcomp.c | 195
-rw-r--r--  vp8/encoder/onyx_if.c | 31
-rw-r--r--  vp8/encoder/onyx_int.h | 28
-rw-r--r--  vp8/encoder/pickinter.c | 2
-rw-r--r--  vp8/encoder/picklpf.c | 2
-rw-r--r--  vp8/encoder/ratectrl.c | 6
-rw-r--r--  vp8/encoder/rdopt.c | 12
-rw-r--r--  vp8/encoder/ssim.c | 34
-rw-r--r--  vp8/encoder/temporal_filter.c | 1
-rw-r--r--  vp8/vp8_common.mk | 7
-rw-r--r--  vp8/vp8_cx_iface.c | 9
-rw-r--r--  vp8/vp8_dx_iface.c | 2
-rw-r--r--  vp8/vp8cx_arm.mk | 7
-rw-r--r--  vp8/vp8dx.mk | 1
-rw-r--r--  vp8/vp8dx_arm.mk | 3

25 files changed, 232 insertions(+), 220 deletions(-)
diff --git a/vp8/common/asm_com_offsets.c b/vp8/common/asm_com_offsets.c
index e167d2654..e135a4d78 100644
--- a/vp8/common/asm_com_offsets.c
+++ b/vp8/common/asm_com_offsets.c
@@ -9,6 +9,8 @@
*/
+#include "vpx_config.h"
+#include "vpx/vpx_codec.h"
#include "vpx_ports/asm_offsets.h"
#include "vpx_scale/yv12config.h"
@@ -25,8 +27,14 @@ DEFINE(yv12_buffer_config_y_buffer, offsetof(YV12_BUFFER_CONFIG, y_b
DEFINE(yv12_buffer_config_u_buffer, offsetof(YV12_BUFFER_CONFIG, u_buffer));
DEFINE(yv12_buffer_config_v_buffer, offsetof(YV12_BUFFER_CONFIG, v_buffer));
DEFINE(yv12_buffer_config_border, offsetof(YV12_BUFFER_CONFIG, border));
+DEFINE(VP8BORDERINPIXELS_VAL, VP8BORDERINPIXELS);
END
/* add asserts for any offset that is not supported by assembly code */
/* add asserts for any size that is not supported by assembly code */
+
+#if HAVE_ARMV7
+/* vp8_yv12_extend_frame_borders_neon makes several assumptions based on this */
+ct_assert(VP8BORDERINPIXELS_VAL, VP8BORDERINPIXELS == 32)
+#endif
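
The hunk above exports VP8BORDERINPIXELS to the assembly build and pins it to 32, since the NEON border-extension routine assumes exactly that border width; the ct_assert comes from the vpx_ports/asm_offsets.h header included above. Purely as an illustration of the idea (a sketch, not the macro libvpx actually defines), a compile-time assert can be built from the negative-array-size idiom so that a violated condition breaks the build instead of failing at run time:

    /* Hypothetical sketch, not the vpx_ports/asm_offsets.h definition. */
    #define MY_CT_ASSERT(name, cond) typedef char assert_##name[(cond) ? 1 : -1]

    #define MY_BORDER_IN_PIXELS 32
    MY_CT_ASSERT(border_is_32, MY_BORDER_IN_PIXELS == 32);  /* compiles */
    /* MY_CT_ASSERT(border_is_48, MY_BORDER_IN_PIXELS == 48);  array of size -1: build error */
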
diff --git a/vp8/common/onyx.h b/vp8/common/onyx.h
index 545798ac7..015b4c4d4 100644
--- a/vp8/common/onyx.h
+++ b/vp8/common/onyx.h
@@ -211,8 +211,8 @@ extern "C"
// receive a frames worth of data caller can assume that a copy of this frame is made
// and not just a copy of the pointer..
- int vp8_receive_raw_frame(VP8_PTR comp, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, INT64 time_stamp, INT64 end_time_stamp);
- int vp8_get_compressed_data(VP8_PTR comp, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, INT64 *time_stamp, INT64 *time_end, int flush);
+ int vp8_receive_raw_frame(VP8_PTR comp, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time_stamp);
+ int vp8_get_compressed_data(VP8_PTR comp, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, int64_t *time_stamp, int64_t *time_end, int flush);
int vp8_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags);
int vp8_use_as_reference(VP8_PTR comp, int ref_frame_flags);
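
Much of this merge swaps the local INT64 alias and bare long long for the C99 int64_t from <stdint.h>, which guarantees a 64-bit width on every platform instead of relying on the compiler's choice. A tiny standalone illustration (assumed example, not libvpx code) of why the encoder's bit-count arithmetic wants an explicit 64-bit type:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* Casting one operand forces a 64-bit multiply; the same product in
         * plain int arithmetic would overflow on platforms with 32-bit int. */
        int64_t bits_left = (int64_t)90000 * 250000;
        printf("bits_left = %" PRId64 "\n", bits_left);
        return 0;
    }
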
diff --git a/vp8/common/onyxc_int.h b/vp8/common/onyxc_int.h
index 77e98450c..f84bd6e4a 100644
--- a/vp8/common/onyxc_int.h
+++ b/vp8/common/onyxc_int.h
@@ -19,7 +19,9 @@
#include "entropy.h"
#include "idct.h"
#include "recon.h"
+#if CONFIG_POSTPROC
#include "postproc.h"
+#endif
/*#ifdef PACKET_TESTING*/
#include "header.h"
@@ -78,7 +80,9 @@ typedef struct VP8_COMMON_RTCD
vp8_recon_rtcd_vtable_t recon;
vp8_subpix_rtcd_vtable_t subpix;
vp8_loopfilter_rtcd_vtable_t loopfilter;
+#if CONFIG_POSTPROC
vp8_postproc_rtcd_vtable_t postproc;
+#endif
int flags;
#else
int unused;
@@ -205,7 +209,9 @@ typedef struct VP8Common
#if CONFIG_MULTITHREAD
int processor_core_count;
#endif
+#if CONFIG_POSTPROC
struct postproc_state postproc_state;
+#endif
} VP8_COMMON;
#endif
diff --git a/vp8/common/onyxd.h b/vp8/common/onyxd.h
index 2dfdc66da..d3e5c2fa7 100644
--- a/vp8/common/onyxd.h
+++ b/vp8/common/onyxd.h
@@ -54,8 +54,8 @@ extern "C"
int vp8dx_get_setting(VP8D_PTR comp, VP8D_SETTING oxst);
- int vp8dx_receive_compressed_data(VP8D_PTR comp, unsigned long size, const unsigned char *dest, INT64 time_stamp);
- int vp8dx_get_raw_frame(VP8D_PTR comp, YV12_BUFFER_CONFIG *sd, INT64 *time_stamp, INT64 *time_end_stamp, vp8_ppflags_t *flags);
+ int vp8dx_receive_compressed_data(VP8D_PTR comp, unsigned long size, const unsigned char *dest, int64_t time_stamp);
+ int vp8dx_get_raw_frame(VP8D_PTR comp, YV12_BUFFER_CONFIG *sd, int64_t *time_stamp, int64_t *time_end_stamp, vp8_ppflags_t *flags);
vpx_codec_err_t vp8dx_get_reference(VP8D_PTR comp, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd);
vpx_codec_err_t vp8dx_set_reference(VP8D_PTR comp, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd);
diff --git a/vp8/decoder/arm/arm_dsystemdependent.c b/vp8/decoder/arm/arm_dsystemdependent.c
index 79e2e1ba9..6ce471217 100644
--- a/vp8/decoder/arm/arm_dsystemdependent.c
+++ b/vp8/decoder/arm/arm_dsystemdependent.c
@@ -13,7 +13,6 @@
#include "vpx_ports/arm.h"
#include "vp8/common/blockd.h"
#include "vp8/common/pragmas.h"
-#include "vp8/common/postproc.h"
#include "vp8/decoder/dequantize.h"
#include "vp8/decoder/onyxd_int.h"
diff --git a/vp8/decoder/detokenize.c b/vp8/decoder/detokenize.c
index 025f9dfe4..f8a6e7eb2 100644
--- a/vp8/decoder/detokenize.c
+++ b/vp8/decoder/detokenize.c
@@ -83,7 +83,6 @@ static const unsigned char cat6_prob[12] =
{ 129, 130, 133, 140, 153, 177, 196, 230, 243, 254, 254, 0 };
#endif
-
void vp8_reset_mb_tokens_context(MACROBLOCKD *x)
{
/* Clear entropy contexts for Y2 blocks */
diff --git a/vp8/decoder/onyxd_if.c b/vp8/decoder/onyxd_if.c
index 7c84b6efb..71f323282 100644
--- a/vp8/decoder/onyxd_if.c
+++ b/vp8/decoder/onyxd_if.c
@@ -244,8 +244,8 @@ vpx_codec_err_t vp8dx_set_reference(VP8D_PTR ptr, VP8_REFFRAME ref_frame_flag, Y
/*For ARM NEON, d8-d15 are callee-saved registers, and need to be saved by us.*/
#if HAVE_ARMV7
-extern void vp8_push_neon(INT64 *store);
-extern void vp8_pop_neon(INT64 *store);
+extern void vp8_push_neon(int64_t *store);
+extern void vp8_pop_neon(int64_t *store);
#endif
static int get_free_fb (VP8_COMMON *cm)
@@ -344,10 +344,10 @@ static void vp8_print_yuv_rec_mb(VP8_COMMON *cm, int mb_row, int mb_col)
}
*/
-int vp8dx_receive_compressed_data(VP8D_PTR ptr, unsigned long size, const unsigned char *source, INT64 time_stamp)
+int vp8dx_receive_compressed_data(VP8D_PTR ptr, unsigned long size, const unsigned char *source, int64_t time_stamp)
{
#if HAVE_ARMV7
- INT64 dx_store_reg[8];
+ int64_t dx_store_reg[8];
#endif
VP8D_COMP *pbi = (VP8D_COMP *) ptr;
VP8_COMMON *cm = &pbi->common;
@@ -554,9 +554,9 @@ int vp8dx_receive_compressed_data(VP8D_PTR ptr, unsigned long size, const unsign
#if 0
{
int i;
- INT64 earliest_time = pbi->dr[0].time_stamp;
- INT64 latest_time = pbi->dr[0].time_stamp;
- INT64 time_diff = 0;
+ int64_t earliest_time = pbi->dr[0].time_stamp;
+ int64_t latest_time = pbi->dr[0].time_stamp;
+ int64_t time_diff = 0;
int bytes = 0;
pbi->dr[pbi->common.current_video_frame&0xf].size = pbi->bc.pos + pbi->bc2.pos + 4;;
@@ -596,7 +596,7 @@ int vp8dx_receive_compressed_data(VP8D_PTR ptr, unsigned long size, const unsign
pbi->common.error.setjmp = 0;
return retcode;
}
-int vp8dx_get_raw_frame(VP8D_PTR ptr, YV12_BUFFER_CONFIG *sd, INT64 *time_stamp, INT64 *time_end_stamp, vp8_ppflags_t *flags)
+int vp8dx_get_raw_frame(VP8D_PTR ptr, YV12_BUFFER_CONFIG *sd, int64_t *time_stamp, int64_t *time_end_stamp, vp8_ppflags_t *flags)
{
int ret = -1;
VP8D_COMP *pbi = (VP8D_COMP *) ptr;
diff --git a/vp8/decoder/onyxd_int.h b/vp8/decoder/onyxd_int.h
index bb05ea826..a7d8b5a35 100644
--- a/vp8/decoder/onyxd_int.h
+++ b/vp8/decoder/onyxd_int.h
@@ -40,7 +40,7 @@ typedef struct
typedef struct
{
- INT64 time_stamp;
+ int64_t time_stamp;
int size;
} DATARATE;
@@ -121,7 +121,7 @@ typedef struct VP8Decompressor
#endif
vp8_reader *mbc;
- INT64 last_time_stamp;
+ int64_t last_time_stamp;
int ready_for_new_data;
DATARATE dr[16];
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index ba5409754..1c4a93636 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -336,7 +336,7 @@ static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x,
// Calculate an "average" mb activity value for the frame
#define ACT_MEDIAN 0
-static void calc_av_activity( VP8_COMP *cpi, INT64 activity_sum )
+static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
{
#if ACT_MEDIAN
// Find median: Simple n^2 algorithm for experimentation
@@ -404,9 +404,9 @@ static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
VP8_COMMON *const cm = & cpi->common;
int mb_row, mb_col;
- INT64 act;
- INT64 a;
- INT64 b;
+ int64_t act;
+ int64_t a;
+ int64_t b;
#if OUTPUT_NORM_ACT_STATS
FILE *f = fopen("norm_act.stt", "a");
@@ -470,7 +470,7 @@ static void build_activity_map( VP8_COMP *cpi )
int mb_row, mb_col;
unsigned int mb_activity;
- INT64 activity_sum = 0;
+ int64_t activity_sum = 0;
// for each macroblock row in image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
@@ -537,15 +537,15 @@ void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
x->errorperbit = x->rdmult * 100 /(110 * x->rddiv);
x->errorperbit += (x->errorperbit==0);
#else
- INT64 a;
- INT64 b;
- INT64 act = *(x->mb_activity_ptr);
+ int64_t a;
+ int64_t b;
+ int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + (2*cpi->activity_avg);
b = (2*act) + cpi->activity_avg;
- x->rdmult = (unsigned int)(((INT64)x->rdmult*b + (a>>1))/a);
+ x->rdmult = (unsigned int)(((int64_t)x->rdmult*b + (a>>1))/a);
x->errorperbit = x->rdmult * 100 /(110 * x->rddiv);
x->errorperbit += (x->errorperbit==0);
#endif
@@ -1444,18 +1444,18 @@ static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
#if USE_ACT_INDEX
x->act_zbin_adj = *(x->mb_activity_ptr);
#else
- INT64 a;
- INT64 b;
- INT64 act = *(x->mb_activity_ptr);
+ int64_t a;
+ int64_t b;
+ int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + 4*cpi->activity_avg;
b = 4*act + cpi->activity_avg;
if ( act > cpi->activity_avg )
- x->act_zbin_adj = (int)(((INT64)b + (a>>1))/a) - 1;
+ x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1;
else
- x->act_zbin_adj = 1 - (int)(((INT64)a + (b>>1))/b);
+ x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b);
#endif
}
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 5422365f8..b063e45af 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -530,8 +530,8 @@ void vp8_first_pass(VP8_COMP *cpi)
YV12_BUFFER_CONFIG *gld_yv12 = &cm->yv12_fb[cm->gld_fb_idx];
int recon_y_stride = lst_yv12->y_stride;
int recon_uv_stride = lst_yv12->uv_stride;
- long long intra_error = 0;
- long long coded_error = 0;
+ int64_t intra_error = 0;
+ int64_t coded_error = 0;
int sum_mvr = 0, sum_mvc = 0;
int sum_mvr_abs = 0, sum_mvc_abs = 0;
@@ -620,7 +620,7 @@ void vp8_first_pass(VP8_COMP *cpi)
this_error += intrapenalty;
// Cumulative intra error total
- intra_error += (long long)this_error;
+ intra_error += (int64_t)this_error;
// Set up limit values for motion vectors to prevent them extending outside the UMV borders
x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
@@ -757,7 +757,7 @@ void vp8_first_pass(VP8_COMP *cpi)
}
}
- coded_error += (long long)this_error;
+ coded_error += (int64_t)this_error;
// adjust to the next column of macroblocks
x->src.y_buffer += 16;
@@ -1219,8 +1219,8 @@ void vp8_init_second_pass(VP8_COMP *cpi)
cpi->twopass.total_coded_error_left = cpi->twopass.total_stats->coded_error;
cpi->twopass.start_tot_err_left = cpi->twopass.total_error_left;
- //cpi->twopass.bits_left = (long long)(cpi->twopass.total_stats->count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
- //cpi->twopass.bits_left -= (long long)(cpi->twopass.total_stats->count * two_pass_min_rate / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
+ //cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats->count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
+ //cpi->twopass.bits_left -= (int64_t)(cpi->twopass.total_stats->count * two_pass_min_rate / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
// each frame can have a different duration, as the frame rate in the source
// isn't guaranteed to be constant. The frame rate prior to the first frame
@@ -1230,8 +1230,8 @@ void vp8_init_second_pass(VP8_COMP *cpi)
vp8_new_frame_rate(cpi, 10000000.0 * cpi->twopass.total_stats->count / cpi->twopass.total_stats->duration);
cpi->output_frame_rate = cpi->oxcf.frame_rate;
- cpi->twopass.bits_left = (long long)(cpi->twopass.total_stats->duration * cpi->oxcf.target_bandwidth / 10000000.0) ;
- cpi->twopass.bits_left -= (long long)(cpi->twopass.total_stats->duration * two_pass_min_rate / 10000000.0);
+ cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats->duration * cpi->oxcf.target_bandwidth / 10000000.0) ;
+ cpi->twopass.bits_left -= (int64_t)(cpi->twopass.total_stats->duration * two_pass_min_rate / 10000000.0);
cpi->twopass.clip_bits_total = cpi->twopass.bits_left;
// Calculate a minimum intra value to be used in determining the IIratio
@@ -2671,16 +2671,16 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
int max_bits = frame_max_bits(cpi);
// Maximum bits for the kf group
- long long max_grp_bits;
+ int64_t max_grp_bits;
// Default allocation based on bits left and relative
// complexity of the section
- cpi->twopass.kf_group_bits = (long long)( cpi->twopass.bits_left *
+ cpi->twopass.kf_group_bits = (int64_t)( cpi->twopass.bits_left *
( kf_group_err /
cpi->twopass.modified_error_left ));
// Clip based on maximum per frame rate defined by the user.
- max_grp_bits = (long long)max_bits * (long long)cpi->twopass.frames_to_key;
+ max_grp_bits = (int64_t)max_bits * (int64_t)cpi->twopass.frames_to_key;
if (cpi->twopass.kf_group_bits > max_grp_bits)
cpi->twopass.kf_group_bits = max_grp_bits;
@@ -2697,19 +2697,19 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
int high_water_mark = (opt_buffer_lvl +
cpi->oxcf.maximum_buffer_size) >> 1;
- long long av_group_bits;
+ int64_t av_group_bits;
// Av bits per frame * number of frames
- av_group_bits = (long long)cpi->av_per_frame_bandwidth *
- (long long)cpi->twopass.frames_to_key;
+ av_group_bits = (int64_t)cpi->av_per_frame_bandwidth *
+ (int64_t)cpi->twopass.frames_to_key;
// We are at or above the maximum.
if (cpi->buffer_level >= high_water_mark)
{
- long long min_group_bits;
+ int64_t min_group_bits;
min_group_bits = av_group_bits +
- (long long)(buffer_lvl -
+ (int64_t)(buffer_lvl -
high_water_mark);
if (cpi->twopass.kf_group_bits < min_group_bits)
@@ -2718,11 +2718,11 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
// We are above optimal but below the maximum
else if (cpi->twopass.kf_group_bits < av_group_bits)
{
- long long bits_below_av = av_group_bits -
+ int64_t bits_below_av = av_group_bits -
cpi->twopass.kf_group_bits;
cpi->twopass.kf_group_bits +=
- (long long)((double)bits_below_av *
+ (int64_t)((double)bits_below_av *
(double)(buffer_lvl - opt_buffer_lvl) /
(double)(high_water_mark - opt_buffer_lvl));
}
@@ -3043,8 +3043,8 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
}
else
{
- long long clip_bits = (long long)(cpi->twopass.total_stats->count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
- long long over_spend = cpi->oxcf.starting_buffer_level - cpi->buffer_level;
+ int64_t clip_bits = (int64_t)(cpi->twopass.total_stats->count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.frame_rate));
+ int64_t over_spend = cpi->oxcf.starting_buffer_level - cpi->buffer_level;
if ((last_kf_resampled && (kf_q > cpi->worst_quality)) || // If triggered last time the threshold for triggering again is reduced
((kf_q > cpi->worst_quality) && // Projected Q higher than allowed and ...
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index 58b524f82..aebcfce70 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -15,6 +15,7 @@
#include <stdio.h>
#include <limits.h>
#include <math.h>
+#include "vp8/common/findnearmv.h"
#ifdef ENTROPY_STATS
static int mv_ref_ct [31] [4] [2];
@@ -342,12 +343,26 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int bestmse = INT_MAX;
int_mv startmv;
int_mv this_mv;
- unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
unsigned char *z = (*(b->base_src) + b->src);
int left, right, up, down, diag;
unsigned int sse;
int whichdir ;
int thismse;
+ int y_stride;
+
+#if ARCH_X86 || ARCH_X86_64
+ MACROBLOCKD *xd = &x->e_mbd;
+ unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ unsigned char *y;
+
+ y_stride = 32;
+ /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
+ vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
+ y = xd->y_buf + y_stride + 1;
+#else
+ unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ y_stride = d->pre_stride;
+#endif
// central mv
bestmv->as_mv.row <<= 3;
@@ -355,14 +370,14 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
startmv = *bestmv;
// calculate central point error
- bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
+ bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = bestmse;
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// go left then right and check error
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
- thismse = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (left < bestmse)
@@ -374,7 +389,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
this_mv.as_mv.col += 8;
- thismse = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (right < bestmse)
@@ -388,7 +403,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
// go up then down and check error
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
- thismse = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (up < bestmse)
@@ -400,7 +415,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
this_mv.as_mv.row += 8;
- thismse = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (down < bestmse)
@@ -423,23 +438,23 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
case 0:
this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
- thismse = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
break;
case 1:
this_mv.as_mv.col += 4;
this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
- thismse = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
break;
case 2:
this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
this_mv.as_mv.row += 4;
- thismse = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
break;
case 3:
default:
this_mv.as_mv.col += 4;
this_mv.as_mv.row += 4;
- thismse = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
break;
}
@@ -458,7 +473,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
// time to check quarter pels.
if (bestmv->as_mv.row < startmv.as_mv.row)
- y -= d->pre_stride;
+ y -= y_stride;
if (bestmv->as_mv.col < startmv.as_mv.col)
y--;
@@ -473,12 +488,12 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
if (startmv.as_mv.col & 7)
{
this_mv.as_mv.col = startmv.as_mv.col - 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -492,7 +507,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
this_mv.as_mv.col += 4;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (right < bestmse)
@@ -509,12 +524,12 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
if (startmv.as_mv.row & 7)
{
this_mv.as_mv.row = startmv.as_mv.row - 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
- thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
}
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -528,7 +543,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
this_mv.as_mv.row += 4;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (down < bestmse)
@@ -558,12 +573,12 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
if (startmv.as_mv.col & 7)
{
this_mv.as_mv.col -= 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);;
+ thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);;
}
}
else
@@ -573,12 +588,12 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
if (startmv.as_mv.col & 7)
{
this_mv.as_mv.col -= 2;
- thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
}
else
{
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - d->pre_stride - 1, d->pre_stride, 6, 6, z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride - 1, y_stride, 6, 6, z, b->src_stride, &sse);
}
}
@@ -589,12 +604,12 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
if (startmv.as_mv.row & 7)
{
this_mv.as_mv.row -= 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
- thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
}
break;
@@ -604,19 +619,19 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
if (startmv.as_mv.col & 7)
{
this_mv.as_mv.col -= 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
break;
case 3:
this_mv.as_mv.col += 2;
this_mv.as_mv.row += 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
break;
}
@@ -633,7 +648,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
return bestmse;
}
-int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
+int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
int error_per_bit,
const vp8_variance_fn_ptr_t *vfp,
@@ -643,11 +658,26 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
int bestmse = INT_MAX;
int_mv startmv;
int_mv this_mv;
- unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
unsigned char *z = (*(b->base_src) + b->src);
int left, right, up, down, diag;
unsigned int sse;
+ int whichdir ;
int thismse;
+ int y_stride;
+
+#if ARCH_X86 || ARCH_X86_64
+ MACROBLOCKD *xd = &x->e_mbd;
+ unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ unsigned char *y;
+
+ y_stride = 32;
+ /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
+ vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
+ y = xd->y_buf + y_stride + 1;
+#else
+ unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ y_stride = d->pre_stride;
+#endif
// central mv
bestmv->as_mv.row <<= 3;
@@ -655,14 +685,14 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
startmv = *bestmv;
// calculate central point error
- bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
+ bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = bestmse;
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// go left then right and check error
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
- thismse = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (left < bestmse)
@@ -674,7 +704,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
}
this_mv.as_mv.col += 8;
- thismse = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (right < bestmse)
@@ -688,7 +718,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
// go up then down and check error
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
- thismse = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (up < bestmse)
@@ -700,7 +730,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
}
this_mv.as_mv.row += 8;
- thismse = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (down < bestmse)
@@ -711,8 +741,6 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
- // somewhat strangely not doing all the diagonals for half pel is slower than doing them.
-#if 0
// now check 1 more diagonal -
whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
this_mv = startmv;
@@ -720,76 +748,28 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
switch (whichdir)
{
case 0:
- this_mv.col = (this_mv.col - 8) | 4;
- this_mv.row = (this_mv.row - 8) | 4;
- diag = vfp->svf(y - 1 - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
break;
case 1:
- this_mv.col += 4;
- this_mv.row = (this_mv.row - 8) | 4;
- diag = vfp->svf(y - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
break;
case 2:
- this_mv.col = (this_mv.col - 8) | 4;
- this_mv.row += 4;
- diag = vfp->svf(y - 1, d->pre_stride, 4, 4, z, b->src_stride, &sse);
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
break;
case 3:
- this_mv.col += 4;
- this_mv.row += 4;
- diag = vfp->svf(y, d->pre_stride, 4, 4, z, b->src_stride, &sse);
+ default:
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
break;
}
- diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-
- if (diag < bestmse)
- {
- *bestmv = this_mv;
- bestmse = diag;
- }
-
-#else
- this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
- this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
- thismse = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-
- if (diag < bestmse)
- {
- *bestmv = this_mv;
- bestmse = diag;
- *distortion = thismse;
- *sse1 = sse;
- }
-
- this_mv.as_mv.col += 8;
- thismse = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-
- if (diag < bestmse)
- {
- *bestmv = this_mv;
- bestmse = diag;
- *distortion = thismse;
- *sse1 = sse;
- }
-
- this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
- this_mv.as_mv.row = startmv.as_mv.row + 4;
- thismse = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-
- if (diag < bestmse)
- {
- *bestmv = this_mv;
- bestmse = diag;
- *distortion = thismse;
- *sse1 = sse;
- }
-
- this_mv.as_mv.col += 8;
- thismse = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (diag < bestmse)
@@ -800,7 +780,6 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
-#endif
return bestmse;
}
@@ -866,7 +845,7 @@ int vp8_hex_search
unsigned char *what = (*(b->base_src) + b->src);
int what_stride = b->src_stride;
int in_what_stride = d->pre_stride;
- int br = ref_mv->as_mv.row, bc = ref_mv->as_mv.col;
+ int br, bc;
int_mv this_mv;
unsigned int bestsad = 0x7fffffff;
unsigned int thissad;
@@ -880,6 +859,11 @@ int vp8_hex_search
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+ // adjust ref_mv to make sure it is within MV range
+ vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ br = ref_mv->as_mv.row;
+ bc = ref_mv->as_mv.col;
+
// Work out the start point for the search
base_offset = (unsigned char *)(*(d->base_pre) + d->pre);
this_offset = base_offset + (br * (d->pre_stride)) + bc;
@@ -1043,8 +1027,8 @@ int vp8_diamond_search_sad
int best_site = 0;
int last_site = 0;
- int ref_row = ref_mv->as_mv.row;
- int ref_col = ref_mv->as_mv.col;
+ int ref_row;
+ int ref_col;
int this_row_offset;
int this_col_offset;
search_site *ss;
@@ -1057,8 +1041,10 @@ int vp8_diamond_search_sad
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+ vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ ref_row = ref_mv->as_mv.row;
+ ref_col = ref_mv->as_mv.col;
*num00 = 0;
-
best_mv->as_mv.row = ref_row;
best_mv->as_mv.col = ref_col;
@@ -1162,8 +1148,8 @@ int vp8_diamond_search_sadx4
int best_site = 0;
int last_site = 0;
- int ref_row = ref_mv->as_mv.row;
- int ref_col = ref_mv->as_mv.col;
+ int ref_row;
+ int ref_col;
int this_row_offset;
int this_col_offset;
search_site *ss;
@@ -1176,6 +1162,9 @@ int vp8_diamond_search_sadx4
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+ vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ ref_row = ref_mv->as_mv.row;
+ ref_col = ref_mv->as_mv.col;
*num00 = 0;
best_mv->as_mv.row = ref_row;
best_mv->as_mv.col = ref_col;
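
Note the pattern in the mcomp.c hunks above: vp8_hex_search, vp8_diamond_search_sad and vp8_diamond_search_sadx4 now clamp ref_mv into the macroblock's legal motion-vector window before deriving br/bc and ref_row/ref_col, while the matching vp8_clamp_mv calls are dropped from pickinter.c and rdopt.c further down. As a rough sketch of the clamping idea only (libvpx's own int_mv and vp8_clamp_mv, pulled in via the findnearmv.h include added at the top of mcomp.c, differ in layout):

    /* Simplified stand-in type; illustration only. */
    typedef struct { int row, col; } mv_sketch_t;

    static void clamp_mv_sketch(mv_sketch_t *mv, int col_min, int col_max,
                                int row_min, int row_max)
    {
        /* Pull each component back inside the allowed search window. */
        if (mv->col < col_min)      mv->col = col_min;
        else if (mv->col > col_max) mv->col = col_max;

        if (mv->row < row_min)      mv->row = row_min;
        else if (mv->row > row_max) mv->row = row_max;
    }
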
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index b9537d1ba..f862fd0e4 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -9,6 +9,7 @@
*/
+#include "vpx_config.h"
#include "vp8/common/onyxc_int.h"
#include "onyx_int.h"
#include "vp8/common/systemdependent.h"
@@ -24,7 +25,9 @@
#include "segmentation.h"
#include "vp8/common/g_common.h"
#include "vpx_scale/yv12extend.h"
+#if CONFIG_POSTPROC
#include "vp8/common/postproc.h"
+#endif
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/swapyv12buffer.h"
#include "vp8/common/threading.h"
@@ -2729,6 +2732,8 @@ static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
cpi->Source = &cpi->scaled_source;
#endif
}
+ else
+ cpi->Source = sd;
}
@@ -3340,7 +3345,7 @@ void loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm)
static void update_buffer_level(VP8_COMP *cpi)
{
- long long tmp;
+ int64_t tmp;
/* Update the buffered average bitrate.
*
@@ -3410,7 +3415,7 @@ static void update_buffer_level(VP8_COMP *cpi)
*/
if(cpi->total_actual_bits > cpi->oxcf.starting_buffer_level)
{
- tmp = (long long)cpi->buffered_av_per_frame_bandwidth
+ tmp = (int64_t)cpi->buffered_av_per_frame_bandwidth
* cpi->oxcf.maximum_buffer_size
/ cpi->av_per_frame_bandwidth;
cpi->buffer_level = cpi->oxcf.maximum_buffer_size
@@ -3428,7 +3433,7 @@ static void update_buffer_level(VP8_COMP *cpi)
*/
if(cpi->total_actual_bits > cpi->oxcf.starting_buffer_level)
{
- long long decayed_overshoot;
+ int64_t decayed_overshoot;
decayed_overshoot = cpi->accumulated_overshoot;
decayed_overshoot *= (cpi->oxcf.maximum_buffer_size
@@ -4796,22 +4801,22 @@ static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest,
{
double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
*cpi->oxcf.two_pass_vbrmin_section / 100);
- cpi->twopass.bits_left += (long long)(two_pass_min_rate / cpi->oxcf.frame_rate);
+ cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->oxcf.frame_rate);
}
}
#endif
//For ARM NEON, d8-d15 are callee-saved registers, and need to be saved by us.
#if HAVE_ARMV7
-extern void vp8_push_neon(INT64 *store);
-extern void vp8_pop_neon(INT64 *store);
+extern void vp8_push_neon(int64_t *store);
+extern void vp8_pop_neon(int64_t *store);
#endif
-int vp8_receive_raw_frame(VP8_PTR ptr, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, INT64 time_stamp, INT64 end_time)
+int vp8_receive_raw_frame(VP8_PTR ptr, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time)
{
#if HAVE_ARMV7
- INT64 store_reg[8];
+ int64_t store_reg[8];
#endif
VP8_COMP *cpi = (VP8_COMP *) ptr;
VP8_COMMON *cm = &cpi->common;
@@ -4862,10 +4867,10 @@ static int frame_is_reference(const VP8_COMP *cpi)
}
-int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, INT64 *time_stamp, INT64 *time_end, int flush)
+int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, int64_t *time_stamp, int64_t *time_end, int flush)
{
#if HAVE_ARMV7
- INT64 store_reg[8];
+ int64_t store_reg[8];
#endif
VP8_COMP *cpi = (VP8_COMP *) ptr;
VP8_COMMON *cm = &cpi->common;
@@ -4972,7 +4977,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
// adjust frame rates based on timestamps given
if (!cm->refresh_alt_ref_frame)
{
- long long this_duration;
+ int64_t this_duration;
int step = 0;
if (cpi->source->ts_start == cpi->first_time_stamp_ever)
@@ -4982,7 +4987,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
}
else
{
- long long last_duration;
+ int64_t last_duration;
this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
last_duration = cpi->last_end_time_stamp_seen
@@ -5158,7 +5163,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
int y_samples = orig->y_height * orig->y_width ;
int uv_samples = orig->uv_height * orig->uv_width ;
int t_samples = y_samples + 2 * uv_samples;
- long long sq_error;
+ int64_t sq_error;
ye = calc_plane_error(orig->y_buffer, orig->y_stride,
recon->y_buffer, recon->y_stride, orig->y_width, orig->y_height,
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index a27822da6..460da21d4 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -321,10 +321,10 @@ typedef struct VP8_COMP
CODING_CONTEXT coding_context;
// Rate targetting variables
- long long prediction_error;
- long long last_prediction_error;
- long long intra_error;
- long long last_intra_error;
+ int64_t prediction_error;
+ int64_t last_prediction_error;
+ int64_t intra_error;
+ int64_t last_intra_error;
int this_frame_target;
int projected_frame_size;
@@ -347,7 +347,7 @@ typedef struct VP8_COMP
int baseline_gf_interval;
int active_arnr_frames; // <= cpi->oxcf.arnr_max_frames
- INT64 key_frame_count;
+ int64_t key_frame_count;
int prior_key_frame_distance[KEY_FRAME_CONTEXT];
int per_frame_bandwidth; // Current section per frame bandwidth target
int av_per_frame_bandwidth; // Average frame size target for clip
@@ -358,9 +358,9 @@ typedef struct VP8_COMP
int inter_frame_target;
double output_frame_rate;
- long long last_time_stamp_seen;
- long long last_end_time_stamp_seen;
- long long first_time_stamp_ever;
+ int64_t last_time_stamp_seen;
+ int64_t last_end_time_stamp_seen;
+ int64_t first_time_stamp_ever;
int ni_av_qi;
int ni_tot_qi;
@@ -371,7 +371,7 @@ typedef struct VP8_COMP
int zbin_mode_boost;
int zbin_mode_boost_enabled;
- INT64 total_byte_count;
+ int64_t total_byte_count;
int buffered_mode;
@@ -384,7 +384,7 @@ typedef struct VP8_COMP
int long_rolling_target_bits;
int long_rolling_actual_bits;
- long long total_actual_bits;
+ int64_t total_actual_bits;
int total_target_vs_actual; // debug stats
int worst_quality;
@@ -541,8 +541,8 @@ typedef struct VP8_COMP
FIRSTPASS_STATS *this_frame_stats;
FIRSTPASS_STATS *stats_in, *stats_in_end, *stats_in_start;
int first_pass_done;
- long long bits_left;
- long long clip_bits_total;
+ int64_t bits_left;
+ int64_t clip_bits_total;
double avg_iiratio;
double modified_error_total;
double modified_error_used;
@@ -562,10 +562,10 @@ typedef struct VP8_COMP
int gf_group_error_left; // Remaining error from uncoded frames in a gf group. Two pass use only
// Projected total bits available for a key frame group of frames
- long long kf_group_bits;
+ int64_t kf_group_bits;
// Error score of frames still to be coded in kf group
- long long kf_group_error_left;
+ int64_t kf_group_error_left;
int gf_group_bits; // Projected Bits available for a group of frames including 1 GF or ARF
int gf_bits; // Bits for the golden frame or ARF - 2 pass only
diff --git a/vp8/encoder/pickinter.c b/vp8/encoder/pickinter.c
index 725e44e62..9cdaf7d53 100644
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -669,8 +669,6 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
mvp_full.as_mv.col = mvp.as_mv.col>>3;
mvp_full.as_mv.row = mvp.as_mv.row>>3;
- /* adjust mvp to make sure it is within MV range */
- vp8_clamp_mv(&mvp_full, col_min, col_max, row_min, row_max);
}else
{
mvp.as_int = best_ref_mv.as_int;
diff --git a/vp8/encoder/picklpf.c b/vp8/encoder/picklpf.c
index 49de62d7d..beefe8d8e 100644
--- a/vp8/encoder/picklpf.c
+++ b/vp8/encoder/picklpf.c
@@ -161,7 +161,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
if (cm->sharpness_level != cm->last_sharpness_level)
{
vp8_loop_filter_update_sharpness(&cm->lf_info, cm->sharpness_level);
- cm->last_sharpness_level = cm->last_sharpness_level;
+ cm->last_sharpness_level = cm->sharpness_level;
}
// Start the search at the previous frame filter level unless it is now out of range.
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index dea690b13..69066e5ab 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -897,9 +897,9 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
if ( critical_buffer_level >
(cpi->oxcf.optimal_buffer_level >> 2) )
{
- INT64 qadjustment_range =
+ int64_t qadjustment_range =
cpi->worst_quality - cpi->ni_av_qi;
- INT64 above_base =
+ int64_t above_base =
(critical_buffer_level -
(cpi->oxcf.optimal_buffer_level >> 2));
@@ -1163,7 +1163,7 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
if (cpi->pass==0
&& cpi->common.refresh_golden_frame
&& cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
- long long adjust;
+ int64_t adjust;
/*
frames_in_buffer = cpi->oxcf.maximum_buffer_size
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 3d77bb35a..952977094 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -728,7 +728,7 @@ static int rd_pick_intra4x4mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, int *Rate,
int cost = mb->mbmode_cost [xd->frame_type] [B_PRED];
int distortion = 0;
int tot_rate_y = 0;
- long long total_rd = 0;
+ int64_t total_rd = 0;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
@@ -770,11 +770,11 @@ static int rd_pick_intra4x4mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, int *Rate,
mic->bmi[i].as_mode = best_mode;
- if(total_rd >= (long long)best_rd)
+ if(total_rd >= (int64_t)best_rd)
break;
}
- if(total_rd >= (long long)best_rd)
+ if(total_rd >= (int64_t)best_rd)
return INT_MAX;
*Rate = cost;
@@ -1244,6 +1244,9 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
// Should we do a full search (best quality only)
if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000)
{
+ /* Check if mvp_full is within the range. */
+ vp8_clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+
thissme = cpi->full_search_sad(x, c, e, &mvp_full,
sadpb, 16, v_fn_ptr,
x->mvcost, bsi->ref_mv);
@@ -2079,9 +2082,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mvp_full.as_mv.col = mvp.as_mv.col>>3;
mvp_full.as_mv.row = mvp.as_mv.row>>3;
- /* adjust mvp to make sure it is within MV range */
- vp8_clamp_mv(&mvp_full, col_min, col_max, row_min, row_max);
-
// Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
if (x->mv_col_min < col_min )
x->mv_col_min = col_min;
diff --git a/vp8/encoder/ssim.c b/vp8/encoder/ssim.c
index b8701e813..fea756f7b 100644
--- a/vp8/encoder/ssim.c
+++ b/vp8/encoder/ssim.c
@@ -73,8 +73,8 @@ void ssim_parms_8x8_c
}
}
-const static long long cc1 = 26634; // (64^2*(.01*255)^2
-const static long long cc2 = 239708; // (64^2*(.03*255)^2
+const static int64_t cc1 = 26634; // (64^2*(.01*255)^2
+const static int64_t cc2 = 239708; // (64^2*(.03*255)^2
static double similarity
(
@@ -86,19 +86,19 @@ static double similarity
int count
)
{
- long long ssim_n, ssim_d;
- long long c1, c2;
+ int64_t ssim_n, ssim_d;
+ int64_t c1, c2;
//scale the constants by number of pixels
c1 = (cc1*count*count)>>12;
c2 = (cc2*count*count)>>12;
- ssim_n = (2*sum_s*sum_r+ c1)*((long long) 2*count*sum_sxr-
- (long long) 2*sum_s*sum_r+c2);
+ ssim_n = (2*sum_s*sum_r+ c1)*((int64_t) 2*count*sum_sxr-
+ (int64_t) 2*sum_s*sum_r+c2);
ssim_d = (sum_s*sum_s +sum_r*sum_r+c1)*
- ((long long)count*sum_sq_s-(long long)sum_s*sum_s +
- (long long)count*sum_sq_r-(long long) sum_r*sum_r +c2) ;
+ ((int64_t)count*sum_sq_s-(int64_t)sum_s*sum_s +
+ (int64_t)count*sum_sq_r-(int64_t) sum_r*sum_r +c2) ;
return ssim_n * 1.0 / ssim_d;
}
@@ -124,11 +124,11 @@ long dssim(unsigned char *s,int sp, unsigned char *r,int rp,
const vp8_variance_rtcd_vtable_t *rtcd)
{
unsigned long sum_s=0,sum_r=0,sum_sq_s=0,sum_sq_r=0,sum_sxr=0;
- long long ssim3;
- long long ssim_n1,ssim_n2;
- long long ssim_d1,ssim_d2;
- long long ssim_t1,ssim_t2;
- long long c1, c2;
+ int64_t ssim3;
+ int64_t ssim_n1,ssim_n2;
+ int64_t ssim_d1,ssim_d2;
+ int64_t ssim_t1,ssim_t2;
+ int64_t c1, c2;
// normalize by 256/64
c1 = cc1*16;
@@ -137,12 +137,12 @@ long dssim(unsigned char *s,int sp, unsigned char *r,int rp,
rtcd->ssimpf(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
ssim_n1 = (2*sum_s*sum_r+ c1);
- ssim_n2 =((long long) 2*256*sum_sxr-(long long) 2*sum_s*sum_r+c2);
+ ssim_n2 =((int64_t) 2*256*sum_sxr-(int64_t) 2*sum_s*sum_r+c2);
- ssim_d1 =((long long)sum_s*sum_s +(long long)sum_r*sum_r+c1);
+ ssim_d1 =((int64_t)sum_s*sum_s +(int64_t)sum_r*sum_r+c1);
- ssim_d2 = (256 * (long long) sum_sq_s-(long long) sum_s*sum_s +
- (long long) 256*sum_sq_r-(long long) sum_r*sum_r +c2) ;
+ ssim_d2 = (256 * (int64_t) sum_sq_s-(int64_t) sum_s*sum_s +
+ (int64_t) 256*sum_sq_r-(int64_t) sum_r*sum_r +c2) ;
ssim_t1 = 256 - 256 * ssim_n1 / ssim_d1;
ssim_t2 = 256 - 256 * ssim_n2 / ssim_d2;
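
The cc1/cc2 constants in the ssim.c hunk follow the usual SSIM stabilizers C1 = (K1*L)^2 and C2 = (K2*L)^2 with K1 = 0.01, K2 = 0.03 and L = 255, pre-scaled by 64^2 = 4096 as the in-line comments say; the change here only retypes them from long long to int64_t. A standalone arithmetic check (an assumed example, not part of libvpx) reproduces the values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const double k1 = 0.01, k2 = 0.03, L = 255.0;
        int64_t cc1 = (int64_t)(4096.0 * (k1 * L) * (k1 * L)); /* 26634  */
        int64_t cc2 = (int64_t)(4096.0 * (k2 * L) * (k2 * L)); /* 239708 */
        printf("cc1=%lld cc2=%lld\n", (long long)cc1, (long long)cc2);
        return 0;
    }
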
diff --git a/vp8/encoder/temporal_filter.c b/vp8/encoder/temporal_filter.c
index 19913a9b1..b9ade1c6c 100644
--- a/vp8/encoder/temporal_filter.c
+++ b/vp8/encoder/temporal_filter.c
@@ -24,7 +24,6 @@
#include "segmentation.h"
#include "vp8/common/g_common.h"
#include "vpx_scale/yv12extend.h"
-#include "vp8/common/postproc.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/swapyv12buffer.h"
#include "vp8/common/threading.h"
diff --git a/vp8/vp8_common.mk b/vp8/vp8_common.mk
index 91e1e6cf8..1a883b8ab 100644
--- a/vp8/vp8_common.mk
+++ b/vp8/vp8_common.mk
@@ -15,6 +15,7 @@ VP8_COMMON_SRCS-yes += common/ppflags.h
VP8_COMMON_SRCS-yes += common/onyx.h
VP8_COMMON_SRCS-yes += common/onyxd.h
VP8_COMMON_SRCS-yes += common/alloccommon.c
+VP8_COMMON_SRCS-yes += common/asm_com_offsets.c
VP8_COMMON_SRCS-yes += common/blockd.c
VP8_COMMON_SRCS-yes += common/coefupdateprobs.h
VP8_COMMON_SRCS-yes += common/debugmodes.c
@@ -101,19 +102,21 @@ VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/postproc_sse2.asm
endif
# common (c)
-VP8_COMMON_SRCS-$(ARCH_ARM) += common/asm_com_offsets.c
ifeq ($(CONFIG_CSM),yes)
VP8_COMMON_SRCS-yes += common/maskingmv.c
VP8_COMMON_SRCS-$(HAVE_SSE3) += common/x86/mask_sse3.asm
endif
VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/arm_systemdependent.c
-
VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/bilinearfilter_arm.c
VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/bilinearfilter_arm.h
VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/filter_arm.c
+VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/idct_arm.h
VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/loopfilter_arm.c
+VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/loopfilter_arm.h
+VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/recon_arm.h
VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/reconintra_arm.c
+VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/subpixel_arm.h
# common (armv6)
VP8_COMMON_SRCS-$(HAVE_ARMV6) += common/arm/armv6/bilinearfilter_v6$(ASM)
diff --git a/vp8/vp8_cx_iface.c b/vp8/vp8_cx_iface.c
index 199678817..9f703a3c4 100644
--- a/vp8/vp8_cx_iface.c
+++ b/vp8/vp8_cx_iface.c
@@ -40,6 +40,7 @@ struct vp8_extracfg
unsigned int experimental;
vp8e_tuning tuning;
unsigned int cq_level; /* constrained quality level */
+ unsigned int rc_max_intra_bitrate_pct;
};
@@ -73,6 +74,7 @@ static const struct extraconfig_map extracfg_map[] =
0, /* experimental mode */
0, /* tuning*/
10, /* cq_level */
+ 0, /* rc_max_intra_bitrate_pct */
}
}
};
@@ -308,7 +310,7 @@ static vpx_codec_err_t set_vp8e_config(VP8_CONFIG *oxcf,
}
oxcf->target_bandwidth = cfg.rc_target_bitrate;
- oxcf->rc_max_intra_bitrate_pct = cfg.rc_max_intra_bitrate_pct;
+ oxcf->rc_max_intra_bitrate_pct = vp8_cfg.rc_max_intra_bitrate_pct;
oxcf->best_allowed_q = cfg.rc_min_quantizer;
oxcf->worst_allowed_q = cfg.rc_max_quantizer;
@@ -465,6 +467,7 @@ static vpx_codec_err_t set_param(vpx_codec_alg_priv_t *ctx,
MAP(VP8E_SET_ARNR_TYPE , xcfg.arnr_type);
MAP(VP8E_SET_TUNING, xcfg.tuning);
MAP(VP8E_SET_CQ_LEVEL, xcfg.cq_level);
+ MAP(VP8E_SET_MAX_INTRA_BITRATE_PCT, xcfg.rc_max_intra_bitrate_pct);
}
@@ -745,7 +748,7 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
{
unsigned int lib_flags;
YV12_BUFFER_CONFIG sd;
- INT64 dst_time_stamp, dst_end_time_stamp;
+ int64_t dst_time_stamp, dst_end_time_stamp;
unsigned long size, cx_data_sz;
unsigned char *cx_data;
@@ -1107,6 +1110,7 @@ static vpx_codec_ctrl_fn_map_t vp8e_ctf_maps[] =
{VP8E_SET_ARNR_TYPE , set_param},
{VP8E_SET_TUNING, set_param},
{VP8E_SET_CQ_LEVEL, set_param},
+ {VP8E_SET_MAX_INTRA_BITRATE_PCT, set_param},
{ -1, NULL},
};
@@ -1139,7 +1143,6 @@ static vpx_codec_enc_cfg_map_t vp8e_usage_cfg_map[] =
{0}, /* rc_twopass_stats_in */
#endif
256, /* rc_target_bandwidth */
- 0, /* rc_max_intra_bitrate_pct */
4, /* rc_min_quantizer */
63, /* rc_max_quantizer */
100, /* rc_undershoot_pct */
diff --git a/vp8/vp8_dx_iface.c b/vp8/vp8_dx_iface.c
index 13a072bff..ad8cd5e95 100644
--- a/vp8/vp8_dx_iface.c
+++ b/vp8/vp8_dx_iface.c
@@ -427,7 +427,7 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
if (!res && ctx->pbi)
{
YV12_BUFFER_CONFIG sd;
- INT64 time_stamp = 0, time_end_stamp = 0;
+ int64_t time_stamp = 0, time_end_stamp = 0;
vp8_ppflags_t flags = {0};
if (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)
diff --git a/vp8/vp8cx_arm.mk b/vp8/vp8cx_arm.mk
index 599bf4753..e8dbd5d7e 100644
--- a/vp8/vp8cx_arm.mk
+++ b/vp8/vp8cx_arm.mk
@@ -15,9 +15,12 @@
# encoder
VP8_CX_SRCS-$(ARCH_ARM) += encoder/arm/arm_csystemdependent.c
-VP8_CX_SRCS-$(ARCH_ARM) += encoder/arm/quantize_arm.c
-VP8_CX_SRCS-$(ARCH_ARM) += encoder/arm/picklpf_arm.c
VP8_CX_SRCS-$(ARCH_ARM) += encoder/arm/dct_arm.c
+VP8_CX_SRCS-$(ARCH_ARM) += encoder/arm/dct_arm.h
+VP8_CX_SRCS-$(ARCH_ARM) += encoder/arm/encodemb_arm.h
+VP8_CX_SRCS-$(ARCH_ARM) += encoder/arm/picklpf_arm.c
+VP8_CX_SRCS-$(ARCH_ARM) += encoder/arm/quantize_arm.c
+VP8_CX_SRCS-$(ARCH_ARM) += encoder/arm/quantize_arm.h
VP8_CX_SRCS-$(ARCH_ARM) += encoder/arm/variance_arm.c
VP8_CX_SRCS-$(ARCH_ARM) += encoder/arm/variance_arm.h
diff --git a/vp8/vp8dx.mk b/vp8/vp8dx.mk
index 85d6f513e..d88b595fb 100644
--- a/vp8/vp8dx.mk
+++ b/vp8/vp8dx.mk
@@ -48,6 +48,7 @@ VP8_DX_SRCS-yes += vp8_dx_iface.c
#INCLUDES += common
#INCLUDES += decoder
+VP8_DX_SRCS-yes += decoder/asm_dec_offsets.c
VP8_DX_SRCS-yes += decoder/dboolhuff.c
VP8_DX_SRCS-yes += decoder/decodemv.c
VP8_DX_SRCS-yes += decoder/decodframe.c
diff --git a/vp8/vp8dx_arm.mk b/vp8/vp8dx_arm.mk
index 583562b97..6bde42f4c 100644
--- a/vp8/vp8dx_arm.mk
+++ b/vp8/vp8dx_arm.mk
@@ -12,9 +12,8 @@
#VP8_DX_SRCS list is modified according to different platforms.
VP8_DX_SRCS-$(ARCH_ARM) += decoder/arm/arm_dsystemdependent.c
-VP8_CX_SRCS-$(ARCH_ARM) += decoder/asm_dec_offsets.c
-
VP8_DX_SRCS-$(ARCH_ARM) += decoder/arm/dequantize_arm.c
+VP8_DX_SRCS-$(ARCH_ARM) += decoder/arm/dequantize_arm.h
#File list for armv6
VP8_DX_SRCS-$(HAVE_ARMV6) += decoder/arm/armv6/dequant_dc_idct_v6$(ASM)