author     John Koleszar <jkoleszar@google.com>  2013-02-24 20:55:14 -0800
committer  John Koleszar <jkoleszar@google.com>  2013-02-26 23:54:23 -0800
commit     eb939f45b8ffde47e160d545114d68ddd3606b90 (patch)
tree       bfbd390adf5895b76d63fb7813b91fa77694f1c1 /vp9/encoder/vp9_encodeframe.c
parent     c7805395fd4aab2abadce9d34aaf07853cfc5e6d (diff)
Spatial resampling of ZEROMV predictors
This patch allows coding frames using references of different resolution,
in ZEROMV mode. For compound prediction, either reference may be scaled.

To test, I use the resize_test and enable WRITE_RECON_BUFFER in
vp9_onyxd_if.c. It's also useful to apply this patch to
test/i420_video_source.h:

--- a/test/i420_video_source.h
+++ b/test/i420_video_source.h
@@ -93,6 +93,7 @@ class I420VideoSource : public VideoSource {

   virtual void FillFrame() {
     // Read a frame from input_file.
+    if (frame_ != 3)
     if (fread(img_->img_data, raw_sz_, 1, input_file_) == 0) {
       limit_ = frame_;
     }

This forces the frame that the resolution changes on to be coded with no
motion, only scaling, and improves the quality of the result.

Change-Id: I1ee75d19a437ff801192f767fd02a36bcbd1d496
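For orientation, here is a minimal sketch of the coordinate mapping the message above describes: in ZEROMV mode there is no motion vector, so the predictor is simply the co-located block of the reference, rescaled when the reference has a different resolution than the frame being coded. The struct and function names below are illustrative assumptions, not the libvpx API.

#include <stdio.h>

struct sketch_scale {
  int num, den;  /* ratio: reference dimension over current-frame dimension */
};

static int scale_value(int v, struct sketch_scale s) {
  return v * s.num / s.den;
}

int main(void) {
  /* Example: a 320x240 frame predicted from a 640x480 reference. */
  struct sketch_scale x_scale = { 640, 320 };
  struct sketch_scale y_scale = { 480, 240 };
  int mb_row = 3, mb_col = 5;  /* position of a 16x16 macroblock */

  /* ZEROMV: no motion vector, so only the co-located block is fetched,
   * rescaled through the per-reference scale factors. */
  int ref_x = scale_value(mb_col * 16, x_scale);
  int ref_y = scale_value(mb_row * 16, y_scale);

  printf("MB (%d,%d) -> reference pixel offset (%d,%d)\n",
         mb_row, mb_col, ref_x, ref_y);
  return 0;
}

Compiled and run, this maps macroblock (3,5) of the 320x240 frame to pixel offset (160,96) in the 640x480 reference.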
Diffstat (limited to 'vp9/encoder/vp9_encodeframe.c')
-rw-r--r--  vp9/encoder/vp9_encodeframe.c  36
1 file changed, 21 insertions, 15 deletions
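Every hunk below follows the same shape: setup_pred_block() gains two trailing arguments carrying the Y and UV scale factors of the reference being attached (NULL for the never-scaled source and destination buffers), and the inter predictor builders gain mb_row/mb_col so the block position can be rescaled into the reference. A rough, hedged sketch of that call shape, using stand-in types rather than the real vp9 structs:

/* Stand-in types for illustration only; the real definitions live in the
 * vp9 encoder headers and differ from these. */
struct sf_sketch  { int x_num, x_den, y_num, y_den; };
struct buf_sketch { unsigned char *y, *u, *v; int y_stride, uv_stride; };
struct pred_sketch {
  struct buf_sketch buf;
  const struct sf_sketch *scale;     /* NULL => plane is not scaled */
  const struct sf_sketch *scale_uv;
};

/* Mirrors the extended signature seen in the hunks below: a block position
 * plus optional luma/chroma scale factors for the reference being set up. */
void setup_pred_block_sketch(struct pred_sketch *pb,
                             const struct buf_sketch *src,
                             int mb_row, int mb_col,
                             const struct sf_sketch *sf,
                             const struct sf_sketch *sf_uv) {
  pb->buf = *src;   /* in the real helper the plane pointers are also
                       offset to the block position (mb_row, mb_col) */
  pb->scale = sf;
  pb->scale_uv = sf_uv;
  (void)mb_row;
  (void)mb_col;
}

As the hunks show, the destination and source blocks pass NULL, NULL, while the first and second reference blocks pass &xd->scale_factor[i] and &xd->scale_factor_uv[i].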
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index fec5a7c61..eaed1a964 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -654,7 +654,7 @@ static void set_offsets(VP9_COMP *cpi,
   // Set up destination pointers
   setup_pred_block(&xd->dst,
                    &cm->yv12_fb[dst_fb_idx],
-                   mb_row, mb_col);
+                   mb_row, mb_col, NULL, NULL);
   /* Set up limit values for MV components to prevent them from
    * extending beyond the UMV borders assuming 16x16 block size */
@@ -679,7 +679,7 @@ static void set_offsets(VP9_COMP *cpi,
   xd->right_available = (mb_col + block_size < cm->cur_tile_mb_col_end);
   /* set up source buffers */
-  setup_pred_block(&x->src, cpi->Source, mb_row, mb_col);
+  setup_pred_block(&x->src, cpi->Source, mb_row, mb_col, NULL, NULL);
   /* R/D setup */
   x->rddiv = cpi->RDDIV;
@@ -1272,9 +1272,6 @@ static void encode_frame_internal(VP9_COMP *cpi) {
   totalrate = 0;
-  // Functions setup for all frame types so we can use MC in AltRef
-  vp9_setup_interp_filters(xd, cm->mcomp_filter_type, cm);
-
   // Reset frame count of inter 0,0 motion vector usage.
   cpi->inter_zz_count = 0;
@@ -2100,7 +2097,8 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
     setup_pred_block(&xd->pre,
                      &cpi->common.yv12_fb[ref_fb_idx],
-                     mb_row, mb_col);
+                     mb_row, mb_col,
+                     &xd->scale_factor[0], &xd->scale_factor_uv[0]);
     if (mbmi->second_ref_frame > 0) {
       int second_ref_fb_idx;
@@ -2114,11 +2112,12 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
       setup_pred_block(&xd->second_pre,
                        &cpi->common.yv12_fb[second_ref_fb_idx],
-                       mb_row, mb_col);
+                       mb_row, mb_col,
+                       &xd->scale_factor[1], &xd->scale_factor_uv[1]);
     }
     if (!x->skip) {
-      vp9_encode_inter16x16(x);
+      vp9_encode_inter16x16(x, mb_row, mb_col);
       // Clear mb_skip_coeff if mb_no_coeff_skip is not set
       if (!cpi->common.mb_no_coeff_skip)
@@ -2130,7 +2129,8 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
                                          xd->dst.u_buffer,
                                          xd->dst.v_buffer,
                                          xd->dst.y_stride,
-                                         xd->dst.uv_stride);
+                                         xd->dst.uv_stride,
+                                         mb_row, mb_col);
 #if CONFIG_COMP_INTERINTRA_PRED
       if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
         vp9_build_interintra_16x16_predictors_mb(xd,
@@ -2327,7 +2327,8 @@ static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
     setup_pred_block(&xd->pre,
                      &cpi->common.yv12_fb[ref_fb_idx],
-                     mb_row, mb_col);
+                     mb_row, mb_col,
+                     &xd->scale_factor[0], &xd->scale_factor_uv[0]);
     if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
       int second_ref_fb_idx;
@@ -2341,12 +2342,14 @@ static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
       setup_pred_block(&xd->second_pre,
                        &cpi->common.yv12_fb[second_ref_fb_idx],
-                       mb_row, mb_col);
+                       mb_row, mb_col,
+                       &xd->scale_factor[1], &xd->scale_factor_uv[1]);
     }
     vp9_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer,
                                        xd->dst.u_buffer, xd->dst.v_buffer,
-                                       xd->dst.y_stride, xd->dst.uv_stride);
+                                       xd->dst.y_stride, xd->dst.uv_stride,
+                                       mb_row, mb_col);
   }
   if (xd->mode_info_context->mbmi.txfm_size == TX_32X32) {
@@ -2553,7 +2556,8 @@ static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
     setup_pred_block(&xd->pre,
                      &cpi->common.yv12_fb[ref_fb_idx],
-                     mb_row, mb_col);
+                     mb_row, mb_col,
+                     &xd->scale_factor[0], &xd->scale_factor_uv[0]);
     if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
       int second_ref_fb_idx;
@@ -2567,12 +2571,14 @@ static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
       setup_pred_block(&xd->second_pre,
                        &cpi->common.yv12_fb[second_ref_fb_idx],
-                       mb_row, mb_col);
+                       mb_row, mb_col,
+                       &xd->scale_factor[1], &xd->scale_factor_uv[1]);
     }
     vp9_build_inter64x64_predictors_sb(xd, xd->dst.y_buffer,
                                        xd->dst.u_buffer, xd->dst.v_buffer,
-                                       xd->dst.y_stride, xd->dst.uv_stride);
+                                       xd->dst.y_stride, xd->dst.uv_stride,
+                                       mb_row, mb_col);
   }
   if (xd->mode_info_context->mbmi.txfm_size == TX_32X32) {