summaryrefslogtreecommitdiff
path: root/vp9/common
diff options
context:
space:
mode:
Diffstat (limited to 'vp9/common')
-rw-r--r--  vp9/common/vp9_alloccommon.c  | 112
-rw-r--r--  vp9/common/vp9_alloccommon.h  |   2
-rw-r--r--  vp9/common/vp9_entropymode.c  |   8
-rw-r--r--  vp9/common/vp9_mvref_common.c |  15
-rw-r--r--  vp9/common/vp9_mvref_common.h |   4
-rw-r--r--  vp9/common/vp9_onyxc_int.h    |  80
-rw-r--r--  vp9/common/vp9_reconinter.c   | 349
-rw-r--r--  vp9/common/vp9_reconinter.h   |  34
8 files changed, 239 insertions, 365 deletions
diff --git a/vp9/common/vp9_alloccommon.c b/vp9/common/vp9_alloccommon.c
index 2f75af575..8b04d1b43 100644
--- a/vp9/common/vp9_alloccommon.c
+++ b/vp9/common/vp9_alloccommon.c
@@ -17,6 +17,24 @@
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_systemdependent.h"
+// TODO(hkuang): Don't need to lock the whole pool after implementing atomic
+// frame reference count.
+void lock_buffer_pool(BufferPool *const pool) {
+#if CONFIG_MULTITHREAD
+ pthread_mutex_lock(&pool->pool_mutex);
+#else
+ (void)pool;
+#endif
+}
+
+void unlock_buffer_pool(BufferPool *const pool) {
+#if CONFIG_MULTITHREAD
+ pthread_mutex_unlock(&pool->pool_mutex);
+#else
+ (void)pool;
+#endif
+}
+
void vp9_set_mb_mi(VP9_COMMON *cm, int width, int height) {
const int aligned_width = ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2);
const int aligned_height = ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2);
@@ -30,18 +48,54 @@ void vp9_set_mb_mi(VP9_COMMON *cm, int width, int height) {
cm->MBs = cm->mb_rows * cm->mb_cols;
}
+static int alloc_seg_map(VP9_COMMON *cm, int seg_map_size) {
+ int i;
+
+ for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
+ cm->seg_map_array[i] = (uint8_t *)vpx_calloc(seg_map_size, 1);
+ if (cm->seg_map_array[i] == NULL)
+ return 1;
+ }
+
+ // Init the index.
+ cm->seg_map_idx = 0;
+ cm->prev_seg_map_idx = 1;
+
+ cm->current_frame_seg_map = cm->seg_map_array[cm->seg_map_idx];
+ if (!cm->frame_parallel_decode)
+ cm->last_frame_seg_map = cm->seg_map_array[cm->prev_seg_map_idx];
+
+ return 0;
+}
+
+static void free_seg_map(VP9_COMMON *cm) {
+ int i;
+
+ for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
+ vpx_free(cm->seg_map_array[i]);
+ cm->seg_map_array[i] = NULL;
+ }
+
+ cm->current_frame_seg_map = NULL;
+
+ if (!cm->frame_parallel_decode) {
+ cm->last_frame_seg_map = NULL;
+ }
+}
+
void vp9_free_ref_frame_buffers(VP9_COMMON *cm) {
+ BufferPool *const pool = cm->buffer_pool;
int i;
for (i = 0; i < FRAME_BUFFERS; ++i) {
- if (cm->frame_bufs[i].ref_count > 0 &&
- cm->frame_bufs[i].raw_frame_buffer.data != NULL) {
- cm->release_fb_cb(cm->cb_priv, &cm->frame_bufs[i].raw_frame_buffer);
- cm->frame_bufs[i].ref_count = 0;
+ if (pool->frame_bufs[i].ref_count > 0 &&
+ pool->frame_bufs[i].raw_frame_buffer.data != NULL) {
+ pool->release_fb_cb(pool->cb_priv, &pool->frame_bufs[i].raw_frame_buffer);
+ pool->frame_bufs[i].ref_count = 0;
}
- vpx_free(cm->frame_bufs[i].mvs);
- cm->frame_bufs[i].mvs = NULL;
- vp9_free_frame_buffer(&cm->frame_bufs[i].buf);
+ vpx_free(pool->frame_bufs[i].mvs);
+ pool->frame_bufs[i].mvs = NULL;
+ vp9_free_frame_buffer(&pool->frame_bufs[i].buf);
}
#if CONFIG_VP9_POSTPROC
@@ -52,8 +106,7 @@ void vp9_free_ref_frame_buffers(VP9_COMMON *cm) {
void vp9_free_context_buffers(VP9_COMMON *cm) {
cm->free_mi(cm);
- vpx_free(cm->last_frame_seg_map);
- cm->last_frame_seg_map = NULL;
+ free_seg_map(cm);
vpx_free(cm->above_context);
cm->above_context = NULL;
vpx_free(cm->above_seg_context);
@@ -67,8 +120,10 @@ int vp9_alloc_context_buffers(VP9_COMMON *cm, int width, int height) {
if (cm->alloc_mi(cm, cm->mi_stride * calc_mi_size(cm->mi_rows)))
goto fail;
- cm->last_frame_seg_map = (uint8_t *)vpx_calloc(cm->mi_rows * cm->mi_cols, 1);
- if (!cm->last_frame_seg_map) goto fail;
+ // Create the segmentation map structure and set to 0.
+ free_seg_map(cm);
+ if (alloc_seg_map(cm, cm->mi_rows * cm->mi_cols))
+ goto fail;
cm->above_context = (ENTROPY_CONTEXT *)vpx_calloc(
2 * mi_cols_aligned_to_sb(cm->mi_cols) * MAX_MB_PLANE,
@@ -87,14 +142,15 @@ int vp9_alloc_context_buffers(VP9_COMMON *cm, int width, int height) {
}
static void init_frame_bufs(VP9_COMMON *cm) {
+ BufferPool *const pool = cm->buffer_pool;
int i;
cm->new_fb_idx = FRAME_BUFFERS - 1;
- cm->frame_bufs[cm->new_fb_idx].ref_count = 1;
+ pool->frame_bufs[cm->new_fb_idx].ref_count = 1;
for (i = 0; i < REF_FRAMES; ++i) {
cm->ref_frame_map[i] = i;
- cm->frame_bufs[i].ref_count = 1;
+ pool->frame_bufs[i].ref_count = 1;
}
}
@@ -106,8 +162,9 @@ int vp9_alloc_ref_frame_buffers(VP9_COMMON *cm, int width, int height) {
vp9_free_ref_frame_buffers(cm);
for (i = 0; i < FRAME_BUFFERS; ++i) {
- cm->frame_bufs[i].ref_count = 0;
- if (vp9_alloc_frame_buffer(&cm->frame_bufs[i].buf, width, height,
+ BufferPool *const pool = cm->buffer_pool;
+ pool->frame_bufs[i].ref_count = 0;
+ if (vp9_alloc_frame_buffer(&pool->frame_bufs[i].buf, width, height,
ss_x, ss_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
@@ -115,15 +172,15 @@ int vp9_alloc_ref_frame_buffers(VP9_COMMON *cm, int width, int height) {
VP9_ENC_BORDER_IN_PIXELS,
cm->byte_alignment) < 0)
goto fail;
- if (cm->frame_bufs[i].mvs == NULL) {
- cm->frame_bufs[i].mvs =
+ if (pool->frame_bufs[i].mvs == NULL) {
+ pool->frame_bufs[i].mvs =
(MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
- sizeof(*cm->frame_bufs[i].mvs));
- if (cm->frame_bufs[i].mvs == NULL)
+ sizeof(*pool->frame_bufs[i].mvs));
+ if (pool->frame_bufs[i].mvs == NULL)
goto fail;
- cm->frame_bufs[i].mi_rows = cm->mi_rows;
- cm->frame_bufs[i].mi_cols = cm->mi_cols;
+ pool->frame_bufs[i].mi_rows = cm->mi_rows;
+ pool->frame_bufs[i].mi_cols = cm->mi_cols;
}
}
@@ -149,7 +206,6 @@ int vp9_alloc_ref_frame_buffers(VP9_COMMON *cm, int width, int height) {
void vp9_remove_common(VP9_COMMON *cm) {
vp9_free_ref_frame_buffers(cm);
vp9_free_context_buffers(cm);
- vp9_free_internal_frame_buffers(&cm->int_frame_buffers);
vpx_free(cm->fc);
cm->fc = NULL;
@@ -159,6 +215,16 @@ void vp9_remove_common(VP9_COMMON *cm) {
void vp9_init_context_buffers(VP9_COMMON *cm) {
cm->setup_mi(cm);
- if (cm->last_frame_seg_map)
+ if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
vpx_memset(cm->last_frame_seg_map, 0, cm->mi_rows * cm->mi_cols);
}
+
+void vp9_swap_current_and_last_seg_map(VP9_COMMON *cm) {
+ // Swap indices.
+ const int tmp = cm->seg_map_idx;
+ cm->seg_map_idx = cm->prev_seg_map_idx;
+ cm->prev_seg_map_idx = tmp;
+
+ cm->current_frame_seg_map = cm->seg_map_array[cm->seg_map_idx];
+ cm->last_frame_seg_map = cm->seg_map_array[cm->prev_seg_map_idx];
+}
diff --git a/vp9/common/vp9_alloccommon.h b/vp9/common/vp9_alloccommon.h
index 955bb9ec5..09da74e49 100644
--- a/vp9/common/vp9_alloccommon.h
+++ b/vp9/common/vp9_alloccommon.h
@@ -32,6 +32,8 @@ void vp9_free_state_buffers(struct VP9Common *cm);
void vp9_set_mb_mi(struct VP9Common *cm, int width, int height);
+void vp9_swap_current_and_last_seg_map(struct VP9Common *cm);
+
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/vp9/common/vp9_entropymode.c b/vp9/common/vp9_entropymode.c
index 47e5164d7..7ba078b2b 100644
--- a/vp9/common/vp9_entropymode.c
+++ b/vp9/common/vp9_entropymode.c
@@ -428,9 +428,13 @@ void vp9_setup_past_independence(VP9_COMMON *cm) {
int i;
vp9_clearall_segfeatures(&cm->seg);
cm->seg.abs_delta = SEGMENT_DELTADATA;
- if (cm->last_frame_seg_map)
+
+ if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
vpx_memset(cm->last_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
+ if (cm->current_frame_seg_map)
+ vpx_memset(cm->current_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
+
// Reset the mode ref deltas for loop filter
vp9_zero(lf->last_ref_deltas);
vp9_zero(lf->last_mode_deltas);
@@ -455,7 +459,7 @@ void vp9_setup_past_independence(VP9_COMMON *cm) {
}
// prev_mip will only be allocated in encoder.
- if (frame_is_intra_only(cm) && cm->prev_mip)
+ if (frame_is_intra_only(cm) && cm->prev_mip && !cm->frame_parallel_decode)
vpx_memset(cm->prev_mip, 0, cm->mi_stride * (cm->mi_rows + 1) *
sizeof(*cm->prev_mip));
diff --git a/vp9/common/vp9_mvref_common.c b/vp9/common/vp9_mvref_common.c
index 2fb070097..51aa82411 100644
--- a/vp9/common/vp9_mvref_common.c
+++ b/vp9/common/vp9_mvref_common.c
@@ -17,7 +17,8 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
const TileInfo *const tile,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list,
- int block, int mi_row, int mi_col) {
+ int block, int mi_row, int mi_col,
+ find_mv_refs_sync sync, void *const data) {
const int *ref_sign_bias = cm->ref_frame_sign_bias;
int i, refmv_count = 0;
const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
@@ -68,6 +69,11 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
}
}
+ // Synchronize here for frame parallel decode if sync function is provided.
+ if (sync != NULL) {
+ sync(data, mi_row);
+ }
+
// Check the last frame's mode and mv info.
if (cm->use_prev_frame_mvs) {
if (prev_frame_mvs->ref_frame[0] == ref_frame) {
@@ -133,9 +139,10 @@ void vp9_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
const TileInfo *const tile,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list,
- int mi_row, int mi_col) {
+ int mi_row, int mi_col,
+ find_mv_refs_sync sync, void *const data) {
find_mv_refs_idx(cm, xd, tile, mi, ref_frame, mv_ref_list, -1,
- mi_row, mi_col);
+ mi_row, mi_col, sync, data);
}
static void lower_mv_precision(MV *mv, int allow_hp) {
@@ -173,7 +180,7 @@ void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
assert(MAX_MV_REF_CANDIDATES == 2);
find_mv_refs_idx(cm, xd, tile, mi, mi->mbmi.ref_frame[ref], mv_list, block,
- mi_row, mi_col);
+ mi_row, mi_col, NULL, NULL);
near_mv->as_int = 0;
switch (block) {
diff --git a/vp9/common/vp9_mvref_common.h b/vp9/common/vp9_mvref_common.h
index 0d4ac3e8e..f1df52146 100644
--- a/vp9/common/vp9_mvref_common.h
+++ b/vp9/common/vp9_mvref_common.h
@@ -207,10 +207,12 @@ static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
}
+typedef void (*find_mv_refs_sync)(void *const data, int mi_row);
void vp9_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
const TileInfo *const tile,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
- int_mv *mv_ref_list, int mi_row, int mi_col);
+ int_mv *mv_ref_list, int mi_row, int mi_col,
+ find_mv_refs_sync sync, void *const data);
// check a list of motion vectors by sad score using a number rows of pixels
// above and a number cols of pixels in the left to select the one with best
diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h
index 1a957bc99..cfb0a98e5 100644
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -20,6 +20,7 @@
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_frame_buffers.h"
#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_thread.h"
#include "vp9/common/vp9_tile_common.h"
#if CONFIG_VP9_POSTPROC
@@ -35,14 +36,19 @@ extern "C" {
#define REF_FRAMES_LOG2 3
#define REF_FRAMES (1 << REF_FRAMES_LOG2)
-// 1 scratch frame for the new frame, 3 for scaled references on the encoder
+// 4 scratch frames for the new frames to support a maximum of 4 cores decoding
+// in parallel, 3 for scaled references on the encoder.
+// TODO(hkuang): Add ondemand frame buffers instead of hardcoding the number
+// of framebuffers.
// TODO(jkoleszar): These 3 extra references could probably come from the
// normal reference pool.
-#define FRAME_BUFFERS (REF_FRAMES + 4)
+#define FRAME_BUFFERS (REF_FRAMES + 7)
#define FRAME_CONTEXTS_LOG2 2
#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2)
+#define NUM_PING_PONG_BUFFERS 2
+
extern const struct {
PARTITION_CONTEXT above;
PARTITION_CONTEXT left;
@@ -68,8 +74,40 @@ typedef struct {
int mi_cols;
vpx_codec_frame_buffer_t raw_frame_buffer;
YV12_BUFFER_CONFIG buf;
+
+  // The following variables will only be used in frame parallel decode.
+
+ // frame_worker_owner indicates which FrameWorker owns this buffer. NULL means
+ // that no FrameWorker owns, or is decoding, this buffer.
+ VP9Worker *frame_worker_owner;
+
+  // row and col indicate the position (in real pixel units) to which the
+  // frame has been decoded. They are reset to -1 when decoding begins and
+  // set to INT_MAX when the frame is fully decoded.
+ int row;
+ int col;
} RefCntBuffer;
+typedef struct {
+ // Protect BufferPool from being accessed by several FrameWorkers at
+ // the same time during frame parallel decode.
+ // TODO(hkuang): Try to use atomic variable instead of locking the whole pool.
+#if CONFIG_MULTITHREAD
+ pthread_mutex_t pool_mutex;
+#endif
+
+ // Private data associated with the frame buffer callbacks.
+ void *cb_priv;
+
+ vpx_get_frame_buffer_cb_fn_t get_fb_cb;
+ vpx_release_frame_buffer_cb_fn_t release_fb_cb;
+
+ RefCntBuffer frame_bufs[FRAME_BUFFERS];
+
+ // Frame buffers allocated internally by the codec.
+ InternalFrameBufferList int_frame_buffers;
+} BufferPool;
+
typedef struct VP9Common {
struct vpx_internal_error_info error;
@@ -96,7 +134,6 @@ typedef struct VP9Common {
#endif
YV12_BUFFER_CONFIG *frame_to_show;
- RefCntBuffer frame_bufs[FRAME_BUFFERS];
RefCntBuffer *prev_frame;
// TODO(hkuang): Combine this with cur_buf in macroblockd.
@@ -104,6 +141,10 @@ typedef struct VP9Common {
int ref_frame_map[REF_FRAMES]; /* maps fb_idx to reference slot */
+ // Prepare ref_frame_map for the next frame.
+ // Only used in frame parallel decode.
+ int next_ref_frame_map[REF_FRAMES];
+
// TODO(jkoleszar): could expand active_ref_idx to 4, with 0 as intra, and
// roll new_fb_idx into it.
@@ -170,7 +211,12 @@ typedef struct VP9Common {
int use_prev_frame_mvs;
// Persistent mb segment id map used in prediction.
- unsigned char *last_frame_seg_map;
+ int seg_map_idx;
+ int prev_seg_map_idx;
+
+ uint8_t *seg_map_array[NUM_PING_PONG_BUFFERS];
+ uint8_t *last_frame_seg_map;
+ uint8_t *current_frame_seg_map;
INTERP_FILTER interp_filter;
@@ -183,6 +229,10 @@ typedef struct VP9Common {
struct loopfilter lf;
struct segmentation seg;
+ // TODO(hkuang): Remove this as it is the same as frame_parallel_decode
+ // in pbi.
+ int frame_parallel_decode; // frame-based threading.
+
// Context probabilities for reference frame prediction
MV_REFERENCE_FRAME comp_fixed_ref;
MV_REFERENCE_FRAME comp_var_ref[2];
@@ -218,31 +268,43 @@ typedef struct VP9Common {
// Handles memory for the codec.
InternalFrameBufferList int_frame_buffers;
+ // External BufferPool passed from outside.
+ BufferPool *buffer_pool;
+
PARTITION_CONTEXT *above_seg_context;
ENTROPY_CONTEXT *above_context;
} VP9_COMMON;
+// TODO(hkuang): Don't need to lock the whole pool after implementing atomic
+// frame reference count.
+void lock_buffer_pool(BufferPool *const pool);
+void unlock_buffer_pool(BufferPool *const pool);
+
static INLINE YV12_BUFFER_CONFIG *get_ref_frame(VP9_COMMON *cm, int index) {
if (index < 0 || index >= REF_FRAMES)
return NULL;
if (cm->ref_frame_map[index] < 0)
return NULL;
assert(cm->ref_frame_map[index] < FRAME_BUFFERS);
- return &cm->frame_bufs[cm->ref_frame_map[index]].buf;
+ return &cm->buffer_pool->frame_bufs[cm->ref_frame_map[index]].buf;
}
static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(VP9_COMMON *cm) {
- return &cm->frame_bufs[cm->new_fb_idx].buf;
+ return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf;
}
static INLINE int get_free_fb(VP9_COMMON *cm) {
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
int i;
- for (i = 0; i < FRAME_BUFFERS; i++)
- if (cm->frame_bufs[i].ref_count == 0)
+
+ lock_buffer_pool(cm->buffer_pool);
+ for (i = 0; i < FRAME_BUFFERS; ++i)
+ if (frame_bufs[i].ref_count == 0)
break;
assert(i < FRAME_BUFFERS);
- cm->frame_bufs[i].ref_count = 1;
+ frame_bufs[i].ref_count = 1;
+ unlock_buffer_pool(cm->buffer_pool);
return i;
}
diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c
index 3ba3cb542..ed3ea7e1f 100644
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -20,97 +20,7 @@
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
-static void build_mc_border(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- int x, int y, int b_w, int b_h, int w, int h) {
- // Get a pointer to the start of the real data for this row.
- const uint8_t *ref_row = src - x - y * src_stride;
-
- if (y >= h)
- ref_row += (h - 1) * src_stride;
- else if (y > 0)
- ref_row += y * src_stride;
-
- do {
- int right = 0, copy;
- int left = x < 0 ? -x : 0;
-
- if (left > b_w)
- left = b_w;
-
- if (x + b_w > w)
- right = x + b_w - w;
-
- if (right > b_w)
- right = b_w;
-
- copy = b_w - left - right;
-
- if (left)
- memset(dst, ref_row[0], left);
-
- if (copy)
- memcpy(dst + left, ref_row + x + left, copy);
-
- if (right)
- memset(dst + left + copy, ref_row[w - 1], right);
-
- dst += dst_stride;
- ++y;
-
- if (y > 0 && y < h)
- ref_row += src_stride;
- } while (--b_h);
-}
-
-#if CONFIG_VP9_HIGHBITDEPTH
-static void high_build_mc_border(const uint8_t *src8, int src_stride,
- uint16_t *dst, int dst_stride,
- int x, int y, int b_w, int b_h,
- int w, int h) {
- // Get a pointer to the start of the real data for this row.
- const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
- const uint16_t *ref_row = src - x - y * src_stride;
-
- if (y >= h)
- ref_row += (h - 1) * src_stride;
- else if (y > 0)
- ref_row += y * src_stride;
-
- do {
- int right = 0, copy;
- int left = x < 0 ? -x : 0;
-
- if (left > b_w)
- left = b_w;
-
- if (x + b_w > w)
- right = x + b_w - w;
-
- if (right > b_w)
- right = b_w;
-
- copy = b_w - left - right;
-
- if (left)
- vpx_memset16(dst, ref_row[0], left);
-
- if (copy)
- memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
-
- if (right)
- vpx_memset16(dst + left + copy, ref_row[w - 1], right);
-
- dst += dst_stride;
- ++y;
-
- if (y > 0 && y < h)
- ref_row += src_stride;
- } while (--b_h);
-}
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
-static void inter_predictor(const uint8_t *src, int src_stride,
+void inter_predictor(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int subpel_x,
const int subpel_y,
@@ -123,29 +33,8 @@ static void inter_predictor(const uint8_t *src, int src_stride,
kernel[subpel_x], xs, kernel[subpel_y], ys, w, h);
}
-void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const MV *src_mv,
- const struct scale_factors *sf,
- int w, int h, int ref,
- const InterpKernel *kernel,
- enum mv_precision precision,
- int x, int y) {
- const int is_q4 = precision == MV_PRECISION_Q4;
- const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
- is_q4 ? src_mv->col : src_mv->col * 2 };
- MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
- const int subpel_x = mv.col & SUBPEL_MASK;
- const int subpel_y = mv.row & SUBPEL_MASK;
-
- src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
-
- inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
- sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
-}
-
#if CONFIG_VP9_HIGHBITDEPTH
-static void high_inter_predictor(const uint8_t *src, int src_stride,
+void high_inter_predictor(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int subpel_x,
const int subpel_y,
@@ -180,6 +69,27 @@ void vp9_highbd_build_inter_predictor(const uint8_t *src, int src_stride,
}
#endif // CONFIG_VP9_HIGHBITDEPTH
+void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const MV *src_mv,
+ const struct scale_factors *sf,
+ int w, int h, int ref,
+ const InterpKernel *kernel,
+ enum mv_precision precision,
+ int x, int y) {
+ const int is_q4 = precision == MV_PRECISION_Q4;
+ const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
+ is_q4 ? src_mv->col : src_mv->col * 2 };
+ MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
+ const int subpel_x = mv.col & SUBPEL_MASK;
+ const int subpel_y = mv.row & SUBPEL_MASK;
+
+ src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
+
+ inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
+ sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
+}
+
static INLINE int round_mv_comp_q4(int value) {
return (value < 0 ? value - 2 : value + 2) / 4;
}
@@ -234,8 +144,8 @@ MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv,
return clamped_mv;
}
-static MV average_split_mvs(const struct macroblockd_plane *pd,
- const MODE_INFO *mi, int ref, int block) {
+MV average_split_mvs(const struct macroblockd_plane *pd,
+ const MODE_INFO *mi, int ref, int block) {
const int ss_idx = ((pd->subsampling_x > 0) << 1) | (pd->subsampling_y > 0);
MV res = {0, 0};
switch (ss_idx) {
@@ -257,7 +167,7 @@ static MV average_split_mvs(const struct macroblockd_plane *pd,
return res;
}
-static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
+void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
int bw, int bh,
int x, int y, int w, int h,
int mi_x, int mi_y) {
@@ -365,213 +275,6 @@ void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
MAX_MB_PLANE - 1);
}
-// TODO(jingning): This function serves as a placeholder for decoder prediction
-// using on demand border extension. It should be moved to /decoder/ directory.
-static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
- int bw, int bh,
- int x, int y, int w, int h,
- int mi_x, int mi_y) {
- struct macroblockd_plane *const pd = &xd->plane[plane];
- const MODE_INFO *mi = xd->mi[0].src_mi;
- const int is_compound = has_second_ref(&mi->mbmi);
- const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
- int ref;
-
- for (ref = 0; ref < 1 + is_compound; ++ref) {
- const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
- struct buf_2d *const pre_buf = &pd->pre[ref];
- struct buf_2d *const dst_buf = &pd->dst;
- uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
- const MV mv = mi->mbmi.sb_type < BLOCK_8X8
- ? average_split_mvs(pd, mi, ref, block)
- : mi->mbmi.mv[ref].as_mv;
-
- const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
- pd->subsampling_x,
- pd->subsampling_y);
-
- MV32 scaled_mv;
- int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, buf_stride,
- subpel_x, subpel_y;
- uint8_t *ref_frame, *buf_ptr;
- const YV12_BUFFER_CONFIG *ref_buf = xd->block_refs[ref]->buf;
- const int is_scaled = vp9_is_scaled(sf);
-
- // Get reference frame pointer, width and height.
- if (plane == 0) {
- frame_width = ref_buf->y_crop_width;
- frame_height = ref_buf->y_crop_height;
- ref_frame = ref_buf->y_buffer;
- } else {
- frame_width = ref_buf->uv_crop_width;
- frame_height = ref_buf->uv_crop_height;
- ref_frame = plane == 1 ? ref_buf->u_buffer : ref_buf->v_buffer;
- }
-
- if (is_scaled) {
- // Co-ordinate of containing block to pixel precision.
- int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
- int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
-
- // Co-ordinate of the block to 1/16th pixel precision.
- x0_16 = (x_start + x) << SUBPEL_BITS;
- y0_16 = (y_start + y) << SUBPEL_BITS;
-
- // Co-ordinate of current block in reference frame
- // to 1/16th pixel precision.
- x0_16 = sf->scale_value_x(x0_16, sf);
- y0_16 = sf->scale_value_y(y0_16, sf);
-
- // Map the top left corner of the block into the reference frame.
- x0 = sf->scale_value_x(x_start + x, sf);
- y0 = sf->scale_value_y(y_start + y, sf);
-
- // Scale the MV and incorporate the sub-pixel offset of the block
- // in the reference frame.
- scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
- xs = sf->x_step_q4;
- ys = sf->y_step_q4;
- } else {
- // Co-ordinate of containing block to pixel precision.
- x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
- y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
-
- // Co-ordinate of the block to 1/16th pixel precision.
- x0_16 = x0 << SUBPEL_BITS;
- y0_16 = y0 << SUBPEL_BITS;
-
- scaled_mv.row = mv_q4.row;
- scaled_mv.col = mv_q4.col;
- xs = ys = 16;
- }
- subpel_x = scaled_mv.col & SUBPEL_MASK;
- subpel_y = scaled_mv.row & SUBPEL_MASK;
-
- // Calculate the top left corner of the best matching block in the
- // reference frame.
- x0 += scaled_mv.col >> SUBPEL_BITS;
- y0 += scaled_mv.row >> SUBPEL_BITS;
- x0_16 += scaled_mv.col;
- y0_16 += scaled_mv.row;
-
- // Get reference block pointer.
- buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
- buf_stride = pre_buf->stride;
-
- // Do border extension if there is motion or the
- // width/height is not a multiple of 8 pixels.
- if (is_scaled || scaled_mv.col || scaled_mv.row ||
- (frame_width & 0x7) || (frame_height & 0x7)) {
- // Get reference block bottom right coordinate.
- int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1;
- int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
- int x_pad = 0, y_pad = 0;
-
- if (subpel_x || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
- x0 -= VP9_INTERP_EXTEND - 1;
- x1 += VP9_INTERP_EXTEND;
- x_pad = 1;
- }
-
- if (subpel_y || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
- y0 -= VP9_INTERP_EXTEND - 1;
- y1 += VP9_INTERP_EXTEND;
- y_pad = 1;
- }
-
- // Skip border extension if block is inside the frame.
- if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
- y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
- uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0;
- // Extend the border.
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- high_build_mc_border(buf_ptr1,
- pre_buf->stride,
- xd->mc_buf_high,
- x1 - x0 + 1,
- x0,
- y0,
- x1 - x0 + 1,
- y1 - y0 + 1,
- frame_width,
- frame_height);
- buf_stride = x1 - x0 + 1;
- buf_ptr = CONVERT_TO_BYTEPTR(xd->mc_buf_high) +
- y_pad * 3 * buf_stride + x_pad * 3;
- } else {
- build_mc_border(buf_ptr1,
- pre_buf->stride,
- xd->mc_buf,
- x1 - x0 + 1,
- x0,
- y0,
- x1 - x0 + 1,
- y1 - y0 + 1,
- frame_width,
- frame_height);
- buf_stride = x1 - x0 + 1;
- buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
- }
-#else
- build_mc_border(buf_ptr1,
- pre_buf->stride,
- xd->mc_buf,
- x1 - x0 + 1,
- x0,
- y0,
- x1 - x0 + 1,
- y1 - y0 + 1,
- frame_width,
- frame_height);
- buf_stride = x1 - x0 + 1;
- buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
-#endif // CONFIG_VP9_HIGHBITDEPTH
- }
- }
-
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
- subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
- } else {
- inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
- subpel_y, sf, w, h, ref, kernel, xs, ys);
- }
-#else
- inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
- subpel_y, sf, w, h, ref, kernel, xs, ys);
-#endif // CONFIG_VP9_HIGHBITDEPTH
- }
-}
-
-void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
- BLOCK_SIZE bsize) {
- int plane;
- const int mi_x = mi_col * MI_SIZE;
- const int mi_y = mi_row * MI_SIZE;
- for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
- const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
- &xd->plane[plane]);
- const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
- const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
- const int bw = 4 * num_4x4_w;
- const int bh = 4 * num_4x4_h;
-
- if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) {
- int i = 0, x, y;
- assert(bsize == BLOCK_8X8);
- for (y = 0; y < num_4x4_h; ++y)
- for (x = 0; x < num_4x4_w; ++x)
- dec_build_inter_predictors(xd, plane, i++, bw, bh,
- 4 * x, 4 * y, 4, 4, mi_x, mi_y);
- } else {
- dec_build_inter_predictors(xd, plane, 0, bw, bh,
- 0, 0, bw, bh, mi_x, mi_y);
- }
- }
-}
-
void vp9_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
const YV12_BUFFER_CONFIG *src,
int mi_row, int mi_col) {
diff --git a/vp9/common/vp9_reconinter.h b/vp9/common/vp9_reconinter.h
index 3eaf07cf8..d5ecf85b4 100644
--- a/vp9/common/vp9_reconinter.h
+++ b/vp9/common/vp9_reconinter.h
@@ -18,6 +18,37 @@
extern "C" {
#endif
+void inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int subpel_x,
+ const int subpel_y,
+ const struct scale_factors *sf,
+ int w, int h, int ref,
+ const InterpKernel *kernel,
+ int xs, int ys);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void high_inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int subpel_x,
+ const int subpel_y,
+ const struct scale_factors *sf,
+ int w, int h, int ref,
+ const InterpKernel *kernel,
+ int xs, int ys, int bd);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+MV average_split_mvs(const struct macroblockd_plane *pd, const MODE_INFO *mi,
+ int ref, int block);
+
+MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv,
+ int bw, int bh, int ss_x, int ss_y);
+
+void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
+ int bw, int bh,
+ int x, int y, int w, int h,
+ int mi_x, int mi_y);
+
void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize);
@@ -27,9 +58,6 @@ void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize);
-void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
- BLOCK_SIZE bsize);
-
void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const MV *mv_q3,