-rwxr-xr-x  build/make/gen_msvs_sln.sh            25
-rw-r--r--  vp8/vp8_dx_iface.c                    86
-rw-r--r--  vp9/common/vp9_blockd.h               12
-rw-r--r--  vp9/common/vp9_entropymode.c          69
-rw-r--r--  vp9/common/vp9_entropymode.h          25
-rw-r--r--  vp9/common/vp9_onyxc_int.h            18
-rw-r--r--  vp9/common/vp9_postproc.c              2
-rw-r--r--  vp9/common/vp9_pred_common.c         340
-rw-r--r--  vp9/common/vp9_pred_common.h           6
-rw-r--r--  vp9/decoder/vp9_decodemv.c            79
-rw-r--r--  vp9/decoder/vp9_decodframe.c          18
-rw-r--r--  vp9/encoder/vp9_bitstream.c           32
-rw-r--r--  vp9/encoder/vp9_encodeframe.c         28
-rw-r--r--  vp9/encoder/vp9_onyx_int.h             4
-rw-r--r--  vp9/encoder/vp9_ratectrl.c             8
-rw-r--r--  vp9/vp9_dx_iface.c                   235
-rw-r--r--  vpx/internal/vpx_codec_internal.h     26
-rw-r--r--  vpx/src/vpx_codec.c                   49
18 files changed, 476 insertions, 586 deletions
diff --git a/build/make/gen_msvs_sln.sh b/build/make/gen_msvs_sln.sh
index 5a8c79368..f9fc69428 100755
--- a/build/make/gen_msvs_sln.sh
+++ b/build/make/gen_msvs_sln.sh
@@ -74,8 +74,13 @@ parse_project() {
# assume that all projects have the same list of possible configurations,
# so overwriting old config_lists is not a problem
- config_list=`grep -A1 '<Configuration' $file |
- grep Name | cut -d\" -f2`
+ if [ "$sfx" = "vcproj" ]; then
+ config_list=`grep -A1 '<Configuration' $file |
+ grep Name | cut -d\" -f2`
+ else
+ config_list=`grep -B1 'Label="Configuration"' $file |
+ grep Condition | cut -d\' -f4`
+ fi
proj_list="${proj_list} ${var}"
}
@@ -168,9 +173,14 @@ process_makefile() {
IFS=$'\r'$'\n'
local TAB=$'\t'
cat <<EOF
-found_devenv := \$(shell which devenv.com >/dev/null 2>&1 && echo yes)
+ifeq (\$(CONFIG_VS_VERSION),7)
+MSBUILD_TOOL := devenv.com
+else
+MSBUILD_TOOL := msbuild.exe
+endif
+found_devenv := \$(shell which \$(MSBUILD_TOOL) >/dev/null 2>&1 && echo yes)
.nodevenv.once:
-${TAB}@echo " * devenv.com not found in path."
+${TAB}@echo " * \$(MSBUILD_TOOL) not found in path."
${TAB}@echo " * "
${TAB}@echo " * You will have to build all configurations manually using the"
${TAB}@echo " * Visual Studio IDE. To allow make to build them automatically,"
@@ -195,16 +205,17 @@ ${TAB}rm -rf "$platform"/"$config"
ifneq (\$(found_devenv),)
ifeq (\$(CONFIG_VS_VERSION),7)
$nows_sln_config: $outfile
-${TAB}devenv.com $outfile -build "$config"
+${TAB}\$(MSBUILD_TOOL) $outfile -build "$config"
else
$nows_sln_config: $outfile
-${TAB}devenv.com $outfile -build "$sln_config"
+${TAB}\$(MSBUILD_TOOL) $outfile -m -t:Build \\
+${TAB}${TAB}-p:Configuration="$config" -p:Platform="$platform"
endif
else
$nows_sln_config: $outfile .nodevenv.once
-${TAB}@echo " * Skipping build of $sln_config (devenv.com not in path)."
+${TAB}@echo " * Skipping build of $sln_config (\$(MSBUILD_TOOL) not in path)."
${TAB}@echo " * "
endif
diff --git a/vp8/vp8_dx_iface.c b/vp8/vp8_dx_iface.c
index c826f696d..b552b846a 100644
--- a/vp8/vp8_dx_iface.c
+++ b/vp8/vp8_dx_iface.c
@@ -41,15 +41,6 @@ typedef enum
static unsigned long vp8_priv_sz(const vpx_codec_dec_cfg_t *si, vpx_codec_flags_t);
-typedef struct
-{
- unsigned int id;
- unsigned long sz;
- unsigned int align;
- unsigned int flags;
- unsigned long(*calc_sz)(const vpx_codec_dec_cfg_t *, vpx_codec_flags_t);
-} mem_req_t;
-
static const mem_req_t vp8_mem_req_segs[] =
{
{VP8_SEG_ALG_PRIV, 0, 8, VPX_CODEC_MEM_ZERO, vp8_priv_sz},
@@ -93,65 +84,6 @@ static unsigned long vp8_priv_sz(const vpx_codec_dec_cfg_t *si, vpx_codec_flags_
return sizeof(vpx_codec_alg_priv_t);
}
-
-static void vp8_mmap_dtor(vpx_codec_mmap_t *mmap)
-{
- free(mmap->priv);
-}
-
-static vpx_codec_err_t vp8_mmap_alloc(vpx_codec_mmap_t *mmap)
-{
- vpx_codec_err_t res;
- unsigned int align;
-
- align = mmap->align ? mmap->align - 1 : 0;
-
- if (mmap->flags & VPX_CODEC_MEM_ZERO)
- mmap->priv = calloc(1, mmap->sz + align);
- else
- mmap->priv = malloc(mmap->sz + align);
-
- res = (mmap->priv) ? VPX_CODEC_OK : VPX_CODEC_MEM_ERROR;
- mmap->base = (void *)((((uintptr_t)mmap->priv) + align) & ~(uintptr_t)align);
- mmap->dtor = vp8_mmap_dtor;
- return res;
-}
-
-static vpx_codec_err_t vp8_validate_mmaps(const vp8_stream_info_t *si,
- const vpx_codec_mmap_t *mmaps,
- vpx_codec_flags_t init_flags)
-{
- int i;
- vpx_codec_err_t res = VPX_CODEC_OK;
-
- for (i = 0; i < NELEMENTS(vp8_mem_req_segs) - 1; i++)
- {
- /* Ensure the segment has been allocated */
- if (!mmaps[i].base)
- {
- res = VPX_CODEC_MEM_ERROR;
- break;
- }
-
- /* Verify variable size segment is big enough for the current si. */
- if (vp8_mem_req_segs[i].calc_sz)
- {
- vpx_codec_dec_cfg_t cfg;
-
- cfg.w = si->w;
- cfg.h = si->h;
-
- if (mmaps[i].sz < vp8_mem_req_segs[i].calc_sz(&cfg, init_flags))
- {
- res = VPX_CODEC_MEM_ERROR;
- break;
- }
- }
- }
-
- return res;
-}
-
static void vp8_init_ctx(vpx_codec_ctx_t *ctx, const vpx_codec_mmap_t *mmap)
{
int i;
@@ -178,16 +110,6 @@ static void vp8_init_ctx(vpx_codec_ctx_t *ctx, const vpx_codec_mmap_t *mmap)
}
}
-static void *mmap_lkup(vpx_codec_alg_priv_t *ctx, unsigned int id)
-{
- int i;
-
- for (i = 0; i < NELEMENTS(ctx->mmaps); i++)
- if (ctx->mmaps[i].id == id)
- return ctx->mmaps[i].base;
-
- return NULL;
-}
static void vp8_finalize_mmaps(vpx_codec_alg_priv_t *ctx)
{
/* nothing to clean up */
@@ -214,7 +136,7 @@ static vpx_codec_err_t vp8_init(vpx_codec_ctx_t *ctx,
mmap.align = vp8_mem_req_segs[0].align;
mmap.flags = vp8_mem_req_segs[0].flags;
- res = vp8_mmap_alloc(&mmap);
+ res = vpx_mmap_alloc(&mmap);
if (res != VPX_CODEC_OK) return res;
vp8_init_ctx(ctx, &mmap);
@@ -488,7 +410,7 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
ctx->mmaps[i].sz = vp8_mem_req_segs[i].calc_sz(&cfg,
ctx->base.init_flags);
- res = vp8_mmap_alloc(&ctx->mmaps[i]);
+ res = vpx_mmap_alloc(&ctx->mmaps[i]);
}
if (!res)
@@ -500,7 +422,9 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
/* Initialize the decoder instance on the first frame*/
if (!res && !ctx->decoder_init)
{
- res = vp8_validate_mmaps(&ctx->si, ctx->mmaps, ctx->base.init_flags);
+ res = vpx_validate_mmaps(&ctx->si, ctx->mmaps,
+ vp8_mem_req_segs, NELEMENTS(vp8_mem_req_segs),
+ ctx->base.init_flags);
if (!res)
{
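
The vp8_mmap_dtor/vp8_mmap_alloc/vp8_validate_mmaps helpers deleted above are replaced by shared vpx_* equivalents, presumably the additions to vpx/internal/vpx_codec_internal.h and vpx/src/vpx_codec.c listed in the diffstat. A minimal sketch of the shared allocator, assuming the removed logic moves over essentially verbatim (vpx_mmap_dtor is a hypothetical free()-based destructor, not shown in this excerpt):

#include <stdint.h>
#include <stdlib.h>
#include "vpx/vpx_codec.h"   /* vpx_codec_mmap_t, vpx_codec_err_t */

static void vpx_mmap_dtor(vpx_codec_mmap_t *mmap) {
  free(mmap->priv);
}

vpx_codec_err_t vpx_mmap_alloc(vpx_codec_mmap_t *mmap) {
  const unsigned int align = mmap->align ? mmap->align - 1 : 0;

  /* Over-allocate so the base pointer can be rounded up to the alignment. */
  if (mmap->flags & VPX_CODEC_MEM_ZERO)
    mmap->priv = calloc(1, mmap->sz + align);
  else
    mmap->priv = malloc(mmap->sz + align);

  if (!mmap->priv)
    return VPX_CODEC_MEM_ERROR;

  mmap->base = (void *)(((uintptr_t)mmap->priv + align) & ~(uintptr_t)align);
  mmap->dtor = vpx_mmap_dtor;
  return VPX_CODEC_OK;
}
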
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index 9ec6c187b..c98ca90b9 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -615,11 +615,11 @@ static int txfrm_block_to_raster_block(MACROBLOCKD *xd,
int ss_txfrm_size) {
const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
const int txwl = ss_txfrm_size / 2;
- const int tx_cols_lg2 = bwl - txwl;
- const int tx_cols = 1 << tx_cols_lg2;
+ const int tx_cols_log2 = bwl - txwl;
+ const int tx_cols = 1 << tx_cols_log2;
const int raster_mb = block >> ss_txfrm_size;
const int x = (raster_mb & (tx_cols - 1)) << (txwl);
- const int y = raster_mb >> tx_cols_lg2 << (txwl);
+ const int y = raster_mb >> tx_cols_log2 << (txwl);
return x + (y << bwl);
}
@@ -630,11 +630,11 @@ static void txfrm_block_to_raster_xy(MACROBLOCKD *xd,
int *x, int *y) {
const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
const int txwl = ss_txfrm_size / 2;
- const int tx_cols_lg2 = bwl - txwl;
- const int tx_cols = 1 << tx_cols_lg2;
+ const int tx_cols_log2 = bwl - txwl;
+ const int tx_cols = 1 << tx_cols_log2;
const int raster_mb = block >> ss_txfrm_size;
*x = (raster_mb & (tx_cols - 1)) << (txwl);
- *y = raster_mb >> tx_cols_lg2 << (txwl);
+ *y = raster_mb >> tx_cols_log2 << (txwl);
}
static void extend_for_intra(MACROBLOCKD* const xd, int plane, int block,
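
(The two hunks above only rename tx_cols_lg2 to tx_cols_log2; the index arithmetic is unchanged. As a standalone illustration with assumed sizes, not code from the tree: for a 16x16 block, bwl = 2 in 4x4 units, tiled with 8x8 transforms, txwl = 1, the fourth transform block lands at 4x4 raster offset 10.)

#include <stdio.h>

/* Same arithmetic as txfrm_block_to_raster_block(), stripped of the
 * MACROBLOCKD plumbing for illustration only. */
static int txfrm_to_raster(int block, int bwl, int txwl, int ss_txfrm_size) {
  const int tx_cols_log2 = bwl - txwl;
  const int tx_cols = 1 << tx_cols_log2;
  const int raster_mb = block >> ss_txfrm_size;
  const int x = (raster_mb & (tx_cols - 1)) << txwl;
  const int y = (raster_mb >> tx_cols_log2) << txwl;
  return x + (y << bwl);
}

int main(void) {
  /* ss_txfrm_size = 2 * txwl = 2, so block 12 maps to raster_mb 3. */
  printf("%d\n", txfrm_to_raster(12, 2, 1, 2));   /* prints 10 */
  return 0;
}
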
diff --git a/vp9/common/vp9_entropymode.c b/vp9/common/vp9_entropymode.c
index 0b5f2ea46..d78f1266c 100644
--- a/vp9/common/vp9_entropymode.c
+++ b/vp9/common/vp9_entropymode.c
@@ -50,8 +50,9 @@ static const vp9_prob default_if_uv_probs[VP9_INTRA_MODES]
{ 101, 21, 107, 181, 192, 103, 19, 67, 125 } /* y = tm */
};
-const vp9_prob vp9_partition_probs[NUM_FRAME_TYPES][NUM_PARTITION_CONTEXTS]
- [PARTITION_TYPES - 1] = {
+static const vp9_prob default_partition_probs[NUM_FRAME_TYPES]
+ [NUM_PARTITION_CONTEXTS]
+ [PARTITION_TYPES - 1] = {
{ /* frame_type = keyframe */
/* 8x8 -> 4x4 */
{ 158, 97, 94 } /* a/l both not split */,
@@ -159,20 +160,15 @@ static const vp9_prob default_single_ref_p[REF_CONTEXTS][2] = {
{ 238, 247 }
};
-const vp9_prob vp9_default_tx_probs_32x32p[TX_SIZE_CONTEXTS]
- [TX_SIZE_MAX_SB - 1] = {
- { 3, 136, 37, },
- { 5, 52, 13, },
-};
-const vp9_prob vp9_default_tx_probs_16x16p[TX_SIZE_CONTEXTS]
- [TX_SIZE_MAX_SB - 2] = {
- { 20, 152, },
- { 15, 101, },
-};
-const vp9_prob vp9_default_tx_probs_8x8p[TX_SIZE_CONTEXTS]
- [TX_SIZE_MAX_SB - 3] = {
- { 100, },
- { 66, },
+static const struct tx_probs default_tx_probs = {
+ { { 3, 136, 37 },
+ { 5, 52, 13 } },
+
+ { { 20, 152 },
+ { 15, 101 } },
+
+ { { 100 },
+ { 66 } }
};
void tx_counts_to_branch_counts_32x32(unsigned int *tx_count_32x32p,
@@ -202,24 +198,30 @@ void tx_counts_to_branch_counts_8x8(unsigned int *tx_count_8x8p,
ct_8x8p[0][1] = tx_count_8x8p[TX_8X8];
}
-const vp9_prob vp9_default_mbskip_probs[MBSKIP_CONTEXTS] = {
+static const vp9_prob default_mbskip_probs[MBSKIP_CONTEXTS] = {
192, 128, 64
};
+static const vp9_prob default_switchable_interp_prob[VP9_SWITCHABLE_FILTERS+1]
+ [VP9_SWITCHABLE_FILTERS-1] = {
+ { 235, 162, },
+ { 36, 255, },
+ { 34, 3, },
+ { 149, 144, },
+};
+
void vp9_init_mbmode_probs(VP9_COMMON *cm) {
vp9_copy(cm->fc.uv_mode_prob, default_if_uv_probs);
vp9_copy(cm->kf_uv_mode_prob, default_kf_uv_probs);
vp9_copy(cm->fc.y_mode_prob, default_if_y_probs);
- vp9_copy(cm->fc.switchable_interp_prob, vp9_switchable_interp_prob);
- vp9_copy(cm->fc.partition_prob, vp9_partition_probs);
+ vp9_copy(cm->fc.switchable_interp_prob, default_switchable_interp_prob);
+ vp9_copy(cm->fc.partition_prob, default_partition_probs);
vp9_copy(cm->fc.intra_inter_prob, default_intra_inter_p);
vp9_copy(cm->fc.comp_inter_prob, default_comp_inter_p);
vp9_copy(cm->fc.comp_ref_prob, default_comp_ref_p);
vp9_copy(cm->fc.single_ref_prob, default_single_ref_p);
- vp9_copy(cm->fc.tx_probs_32x32p, vp9_default_tx_probs_32x32p);
- vp9_copy(cm->fc.tx_probs_16x16p, vp9_default_tx_probs_16x16p);
- vp9_copy(cm->fc.tx_probs_8x8p, vp9_default_tx_probs_8x8p);
- vp9_copy(cm->fc.mbskip_probs, vp9_default_mbskip_probs);
+ cm->fc.tx_probs = default_tx_probs;
+ vp9_copy(cm->fc.mbskip_probs, default_mbskip_probs);
}
const vp9_tree_index vp9_switchable_interp_tree[VP9_SWITCHABLE_FILTERS*2-2] = {
@@ -229,14 +231,7 @@ const vp9_tree_index vp9_switchable_interp_tree[VP9_SWITCHABLE_FILTERS*2-2] = {
struct vp9_token vp9_switchable_interp_encodings[VP9_SWITCHABLE_FILTERS];
const INTERPOLATIONFILTERTYPE vp9_switchable_interp[VP9_SWITCHABLE_FILTERS] = {
EIGHTTAP, EIGHTTAP_SMOOTH, EIGHTTAP_SHARP};
-const int vp9_switchable_interp_map[SWITCHABLE+1] = {1, 0, 2, -1, -1};
-const vp9_prob vp9_switchable_interp_prob [VP9_SWITCHABLE_FILTERS+1]
- [VP9_SWITCHABLE_FILTERS-1] = {
- { 235, 162, },
- { 36, 255, },
- { 34, 3, },
- { 149, 144, },
-};
+const int vp9_switchable_interp_map[SWITCHABLE + 1] = {1, 0, 2, -1, -1};
// Indicates if the filter is interpolating or non-interpolating
const int vp9_is_interpolating_filter[SWITCHABLE + 1] = {1, 1, 1, 1, -1};
@@ -419,21 +414,21 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) {
unsigned int branch_ct_32x32p[TX_SIZE_MAX_SB - 1][2];
for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
- tx_counts_to_branch_counts_8x8(fc->tx_count_8x8p[i], branch_ct_8x8p);
+ tx_counts_to_branch_counts_8x8(fc->tx_counts.p8x8[i], branch_ct_8x8p);
for (j = 0; j < TX_SIZE_MAX_SB - 3; ++j)
- fc->tx_probs_8x8p[i][j] = update_tx_ct(fc->pre_tx_probs_8x8p[i][j],
+ fc->tx_probs.p8x8[i][j] = update_tx_ct(fc->pre_tx_probs.p8x8[i][j],
branch_ct_8x8p[j]);
- tx_counts_to_branch_counts_16x16(fc->tx_count_16x16p[i],
+ tx_counts_to_branch_counts_16x16(fc->tx_counts.p16x16[i],
branch_ct_16x16p);
for (j = 0; j < TX_SIZE_MAX_SB - 2; ++j)
- fc->tx_probs_16x16p[i][j] = update_tx_ct(fc->pre_tx_probs_16x16p[i][j],
+      fc->tx_probs.p16x16[i][j] = update_tx_ct(fc->pre_tx_probs.p16x16[i][j],
branch_ct_16x16p[j]);
- tx_counts_to_branch_counts_32x32(fc->tx_count_32x32p[i],
+ tx_counts_to_branch_counts_32x32(fc->tx_counts.p32x32[i],
branch_ct_32x32p);
for (j = 0; j < TX_SIZE_MAX_SB - 1; ++j)
- fc->tx_probs_32x32p[i][j] = update_tx_ct(fc->pre_tx_probs_32x32p[i][j],
+ fc->tx_probs.p32x32[i][j] = update_tx_ct(fc->pre_tx_probs.p32x32[i][j],
branch_ct_32x32p[j]);
}
}
diff --git a/vp9/common/vp9_entropymode.h b/vp9/common/vp9_entropymode.h
index aa8aec7d2..d22c4afb2 100644
--- a/vp9/common/vp9_entropymode.h
+++ b/vp9/common/vp9_entropymode.h
@@ -21,6 +21,18 @@
// #define MODE_STATS
+struct tx_probs {
+ vp9_prob p32x32[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 1];
+ vp9_prob p16x16[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 2];
+  vp9_prob p8x8[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 3];
+};
+
+struct tx_counts {
+ unsigned int p32x32[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB];
+ unsigned int p16x16[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 1];
+ unsigned int p8x8[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 2];
+};
+
extern int vp9_mv_cont(const int_mv *l, const int_mv *a);
@@ -40,9 +52,6 @@ extern struct vp9_token vp9_sb_mv_ref_encoding_array[VP9_INTER_MODES];
// probability models for partition information
extern const vp9_tree_index vp9_partition_tree[];
extern struct vp9_token vp9_partition_encodings[PARTITION_TYPES];
-extern const vp9_prob vp9_partition_probs[NUM_FRAME_TYPES]
- [NUM_PARTITION_CONTEXTS]
- [PARTITION_TYPES - 1];
void vp9_entropy_mode_init(void);
@@ -77,16 +86,6 @@ extern const vp9_tree_index vp9_switchable_interp_tree
extern struct vp9_token vp9_switchable_interp_encodings[VP9_SWITCHABLE_FILTERS];
-extern const vp9_prob vp9_switchable_interp_prob[VP9_SWITCHABLE_FILTERS + 1]
- [VP9_SWITCHABLE_FILTERS - 1];
-
-extern const vp9_prob vp9_default_tx_probs_32x32p[TX_SIZE_CONTEXTS]
- [TX_SIZE_MAX_SB - 1];
-extern const vp9_prob vp9_default_tx_probs_16x16p[TX_SIZE_CONTEXTS]
- [TX_SIZE_MAX_SB - 2];
-extern const vp9_prob vp9_default_tx_probs_8x8p[TX_SIZE_CONTEXTS]
- [TX_SIZE_MAX_SB - 3];
-
extern void tx_counts_to_branch_counts_32x32(unsigned int *tx_count_32x32p,
unsigned int (*ct_32x32p)[2]);
extern void tx_counts_to_branch_counts_16x16(unsigned int *tx_count_16x16p,
diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h
index b7025cac7..46fcb2f2c 100644
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -28,7 +28,7 @@
// Define the number of candidate reference buffers.
#define NUM_REF_FRAMES 8
-#define NUM_REF_FRAMES_LG2 3
+#define NUM_REF_FRAMES_LOG2 3
#define ALLOWED_REFS_PER_FRAME 3
@@ -37,8 +37,8 @@
// normal reference pool.
#define NUM_YV12_BUFFERS (NUM_REF_FRAMES + 4)
-#define NUM_FRAME_CONTEXTS_LG2 2
-#define NUM_FRAME_CONTEXTS (1 << NUM_FRAME_CONTEXTS_LG2)
+#define NUM_FRAME_CONTEXTS_LOG2 2
+#define NUM_FRAME_CONTEXTS (1 << NUM_FRAME_CONTEXTS_LOG2)
#define MAX_LAG_BUFFERS 25
@@ -89,15 +89,9 @@ typedef struct frame_contexts {
unsigned int comp_ref_count[REF_CONTEXTS][2];
// tx_probs
- vp9_prob tx_probs_32x32p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 1];
- vp9_prob tx_probs_16x16p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 2];
- vp9_prob tx_probs_8x8p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 3];
- vp9_prob pre_tx_probs_32x32p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 1];
- vp9_prob pre_tx_probs_16x16p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 2];
- vp9_prob pre_tx_probs_8x8p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 3];
- unsigned int tx_count_32x32p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB];
- unsigned int tx_count_16x16p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 1];
- unsigned int tx_count_8x8p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 2];
+ struct tx_probs tx_probs;
+ struct tx_probs pre_tx_probs;
+ struct tx_counts tx_counts;
// mbskip
vp9_prob mbskip_probs[MBSKIP_CONTEXTS];
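
Grouping the nine loose arrays into struct tx_probs / struct tx_counts is what lets the later hunks (update_frame_context, vp9_save_coding_context, vp9_restore_coding_context) replace three vp9_copy() calls with one struct assignment. A minimal sketch of the idiom; the helper names are illustrative, not part of the patch:

#include "vp9/common/vp9_onyxc_int.h"   /* FRAME_CONTEXT, struct tx_probs */

/* One assignment copies p32x32, p16x16 and p8x8 together. */
static void save_tx_probs(const FRAME_CONTEXT *fc, struct tx_probs *out) {
  *out = fc->tx_probs;
}

static void restore_tx_probs(FRAME_CONTEXT *fc, const struct tx_probs *in) {
  fc->tx_probs = *in;
}
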
diff --git a/vp9/common/vp9_postproc.c b/vp9/common/vp9_postproc.c
index 728a641aa..44e87c861 100644
--- a/vp9/common/vp9_postproc.c
+++ b/vp9/common/vp9_postproc.c
@@ -411,7 +411,7 @@ static void fillrd(struct postproc_state *state, int q, int a) {
}
- for (next = next; next < 256; next++)
+ for (; next < 256; next++)
char_dist[next] = 0;
}
diff --git a/vp9/common/vp9_pred_common.c b/vp9/common/vp9_pred_common.c
index b7abf7cd7..5a31175d4 100644
--- a/vp9/common/vp9_pred_common.c
+++ b/vp9/common/vp9_pred_common.c
@@ -71,30 +71,30 @@ unsigned char vp9_get_pred_context_switchable_interp(const VP9_COMMON *cm,
}
// Returns a context number for the given MB prediction signal
unsigned char vp9_get_pred_context_intra_inter(const VP9_COMMON *cm,
- const MACROBLOCKD *xd) {
+ const MACROBLOCKD *xd) {
int pred_context;
const MODE_INFO *const mi = xd->mode_info_context;
- const MODE_INFO *const above_mi = mi - cm->mode_info_stride;
- const MODE_INFO *const left_mi = mi - 1;
- const int left_in_image = xd->left_available && left_mi->mbmi.mb_in_image;
- const int above_in_image = xd->up_available && above_mi->mbmi.mb_in_image;
+ const MB_MODE_INFO *const above_mbmi = &mi[-cm->mode_info_stride].mbmi;
+ const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
+ const int left_in_image = xd->left_available && left_mbmi->mb_in_image;
+ const int above_in_image = xd->up_available && above_mbmi->mb_in_image;
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
// The prediction flags in these dummy entries are initialised to 0.
if (above_in_image && left_in_image) { // both edges available
- if (left_mi->mbmi.ref_frame[0] == INTRA_FRAME &&
- above_mi->mbmi.ref_frame[0] == INTRA_FRAME) { // intra/intra (3)
+ if (left_mbmi->ref_frame[0] == INTRA_FRAME &&
+ above_mbmi->ref_frame[0] == INTRA_FRAME) { // intra/intra (3)
pred_context = 3;
} else { // intra/inter (1) or inter/inter (0)
- pred_context = left_mi->mbmi.ref_frame[0] == INTRA_FRAME ||
- above_mi->mbmi.ref_frame[0] == INTRA_FRAME;
+ pred_context = left_mbmi->ref_frame[0] == INTRA_FRAME ||
+ above_mbmi->ref_frame[0] == INTRA_FRAME;
}
} else if (above_in_image || left_in_image) { // one edge available
- const MODE_INFO *edge = above_in_image ? above_mi : left_mi;
+ const MB_MODE_INFO *edge_mbmi = above_in_image ? above_mbmi : left_mbmi;
// inter: 0, intra: 2
- pred_context = 2 * (edge->mbmi.ref_frame[0] == INTRA_FRAME);
+ pred_context = 2 * (edge_mbmi->ref_frame[0] == INTRA_FRAME);
} else {
pred_context = 0;
}
@@ -106,42 +106,39 @@ unsigned char vp9_get_pred_context_comp_inter_inter(const VP9_COMMON *cm,
const MACROBLOCKD *xd) {
int pred_context;
const MODE_INFO *const mi = xd->mode_info_context;
- const MODE_INFO *const above_mi = mi - cm->mode_info_stride;
- const MODE_INFO *const left_mi = mi - 1;
- const int left_in_image = xd->left_available && left_mi->mbmi.mb_in_image;
- const int above_in_image = xd->up_available && above_mi->mbmi.mb_in_image;
+ const MB_MODE_INFO *const above_mbmi = &mi[-cm->mode_info_stride].mbmi;
+ const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
+ const int left_in_image = xd->left_available && left_mbmi->mb_in_image;
+ const int above_in_image = xd->up_available && above_mbmi->mb_in_image;
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
// The prediction flags in these dummy entries are initialised to 0.
if (above_in_image && left_in_image) { // both edges available
- if (above_mi->mbmi.ref_frame[1] <= INTRA_FRAME &&
- left_mi->mbmi.ref_frame[1] <= INTRA_FRAME) {
+ if (above_mbmi->ref_frame[1] <= INTRA_FRAME &&
+ left_mbmi->ref_frame[1] <= INTRA_FRAME)
// neither edge uses comp pred (0/1)
- pred_context = ((above_mi->mbmi.ref_frame[0] == cm->comp_fixed_ref) ^
- (left_mi->mbmi.ref_frame[0] == cm->comp_fixed_ref));
- } else if (above_mi->mbmi.ref_frame[1] <= INTRA_FRAME) {
+ pred_context = (above_mbmi->ref_frame[0] == cm->comp_fixed_ref) ^
+ (left_mbmi->ref_frame[0] == cm->comp_fixed_ref);
+ else if (above_mbmi->ref_frame[1] <= INTRA_FRAME)
// one of two edges uses comp pred (2/3)
- pred_context = 2 +
- (above_mi->mbmi.ref_frame[0] == cm->comp_fixed_ref ||
- above_mi->mbmi.ref_frame[0] == INTRA_FRAME);
- } else if (left_mi->mbmi.ref_frame[1] <= INTRA_FRAME) {
+ pred_context = 2 + (above_mbmi->ref_frame[0] == cm->comp_fixed_ref ||
+ above_mbmi->ref_frame[0] == INTRA_FRAME);
+ else if (left_mbmi->ref_frame[1] <= INTRA_FRAME)
// one of two edges uses comp pred (2/3)
- pred_context = 2 +
- (left_mi->mbmi.ref_frame[0] == cm->comp_fixed_ref ||
- left_mi->mbmi.ref_frame[0] == INTRA_FRAME);
- } else { // both edges use comp pred (4)
+ pred_context = 2 + (left_mbmi->ref_frame[0] == cm->comp_fixed_ref ||
+ left_mbmi->ref_frame[0] == INTRA_FRAME);
+ else // both edges use comp pred (4)
pred_context = 4;
- }
} else if (above_in_image || left_in_image) { // one edge available
- const MODE_INFO *edge = above_in_image ? above_mi : left_mi;
+ const MB_MODE_INFO *edge_mbmi = above_in_image ? above_mbmi : left_mbmi;
- if (edge->mbmi.ref_frame[1] <= INTRA_FRAME) {
+ if (edge_mbmi->ref_frame[1] <= INTRA_FRAME)
// edge does not use comp pred (0/1)
- pred_context = edge->mbmi.ref_frame[0] == cm->comp_fixed_ref;
- } else { // edge uses comp pred (3)
+ pred_context = edge_mbmi->ref_frame[0] == cm->comp_fixed_ref;
+ else
+ // edge uses comp pred (3)
pred_context = 3;
- }
} else { // no edges available (1)
pred_context = 1;
}
@@ -154,10 +151,10 @@ unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
const MACROBLOCKD *xd) {
int pred_context;
const MODE_INFO *const mi = xd->mode_info_context;
- const MODE_INFO *const above_mi = mi - cm->mode_info_stride;
- const MODE_INFO *const left_mi = mi - 1;
- const int left_in_image = xd->left_available && left_mi->mbmi.mb_in_image;
- const int above_in_image = xd->up_available && above_mi->mbmi.mb_in_image;
+ const MB_MODE_INFO *const above_mbmi = &mi[-cm->mode_info_stride].mbmi;
+ const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
+ const int left_in_image = xd->left_available && left_mbmi->mb_in_image;
+ const int above_in_image = xd->up_available && above_mbmi->mb_in_image;
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
@@ -166,33 +163,32 @@ unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
const int var_ref_idx = !fix_ref_idx;
if (above_in_image && left_in_image) { // both edges available
- if (above_mi->mbmi.ref_frame[0] == INTRA_FRAME
- && left_mi->mbmi.ref_frame[0] == INTRA_FRAME) { // intra/intra (2)
+ if (above_mbmi->ref_frame[0] == INTRA_FRAME &&
+ left_mbmi->ref_frame[0] == INTRA_FRAME) { // intra/intra (2)
pred_context = 2;
- } else if (above_mi->mbmi.ref_frame[0] == INTRA_FRAME
- || left_mi->mbmi.ref_frame[0] == INTRA_FRAME) { // intra/inter
- const MODE_INFO *edge =
- above_mi->mbmi.ref_frame[0] == INTRA_FRAME ? left_mi : above_mi;
-
- if (edge->mbmi.ref_frame[1] <= INTRA_FRAME) { // single pred (1/3)
- pred_context = 1 + 2 * edge->mbmi.ref_frame[0] != cm->comp_var_ref[1];
- } else { // comp pred (1/3)
- pred_context = 1 + 2 * edge->mbmi.ref_frame[var_ref_idx]
+ } else if (above_mbmi->ref_frame[0] == INTRA_FRAME ||
+ left_mbmi->ref_frame[0] == INTRA_FRAME) { // intra/inter
+ const MB_MODE_INFO *edge_mbmi = above_mbmi->ref_frame[0] == INTRA_FRAME ?
+ left_mbmi : above_mbmi;
+
+ if (edge_mbmi->ref_frame[1] <= INTRA_FRAME) // single pred (1/3)
+ pred_context = 1 + 2 * edge_mbmi->ref_frame[0] != cm->comp_var_ref[1];
+ else // comp pred (1/3)
+ pred_context = 1 + 2 * edge_mbmi->ref_frame[var_ref_idx]
!= cm->comp_var_ref[1];
- }
} else { // inter/inter
- int l_sg = left_mi->mbmi.ref_frame[1] <= INTRA_FRAME;
- int a_sg = above_mi->mbmi.ref_frame[1] <= INTRA_FRAME;
- MV_REFERENCE_FRAME vrfa = a_sg ? above_mi->mbmi.ref_frame[0]
- : above_mi->mbmi.ref_frame[var_ref_idx];
- MV_REFERENCE_FRAME vrfl = l_sg ? left_mi->mbmi.ref_frame[0]
- : left_mi->mbmi.ref_frame[var_ref_idx];
+ int l_sg = left_mbmi->ref_frame[1] <= INTRA_FRAME;
+ int a_sg = above_mbmi->ref_frame[1] <= INTRA_FRAME;
+ MV_REFERENCE_FRAME vrfa = a_sg ? above_mbmi->ref_frame[0]
+ : above_mbmi->ref_frame[var_ref_idx];
+ MV_REFERENCE_FRAME vrfl = l_sg ? left_mbmi->ref_frame[0]
+ : left_mbmi->ref_frame[var_ref_idx];
if (vrfa == vrfl && cm->comp_var_ref[1] == vrfa) {
pred_context = 0;
} else if (l_sg && a_sg) { // single/single
- if ((vrfa == cm->comp_fixed_ref && vrfl == cm->comp_var_ref[0])
- || (vrfl == cm->comp_fixed_ref && vrfa == cm->comp_var_ref[0]))
+ if ((vrfa == cm->comp_fixed_ref && vrfl == cm->comp_var_ref[0]) ||
+ (vrfl == cm->comp_fixed_ref && vrfa == cm->comp_var_ref[0]))
pred_context = 4;
else if (vrfa == vrfl)
pred_context = 3;
@@ -214,16 +210,15 @@ unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
}
}
} else if (above_in_image || left_in_image) { // one edge available
- const MODE_INFO *edge = above_in_image ? above_mi : left_mi;
+ const MB_MODE_INFO *edge_mbmi = above_in_image ? above_mbmi : left_mbmi;
- if (edge->mbmi.ref_frame[0] == INTRA_FRAME) {
+ if (edge_mbmi->ref_frame[0] == INTRA_FRAME)
pred_context = 2;
- } else if (edge->mbmi.ref_frame[1] > INTRA_FRAME) {
- pred_context = 4 * edge->mbmi.ref_frame[var_ref_idx]
+ else if (edge_mbmi->ref_frame[1] > INTRA_FRAME)
+ pred_context = 4 * edge_mbmi->ref_frame[var_ref_idx]
!= cm->comp_var_ref[1];
- } else {
- pred_context = 3 * edge->mbmi.ref_frame[0] != cm->comp_var_ref[1];
- }
+ else
+ pred_context = 3 * edge_mbmi->ref_frame[0] != cm->comp_var_ref[1];
} else { // no edges available (2)
pred_context = 2;
}
@@ -235,70 +230,61 @@ unsigned char vp9_get_pred_context_single_ref_p1(const VP9_COMMON *cm,
const MACROBLOCKD *xd) {
int pred_context;
const MODE_INFO *const mi = xd->mode_info_context;
- const MODE_INFO *const above_mi = mi - cm->mode_info_stride;
- const MODE_INFO *const left_mi = mi - 1;
- const int left_in_image = xd->left_available && left_mi->mbmi.mb_in_image;
- const int above_in_image = xd->up_available && above_mi->mbmi.mb_in_image;
+ const MB_MODE_INFO *const above_mbmi = &mi[-cm->mode_info_stride].mbmi;
+ const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
+ const int left_in_image = xd->left_available && left_mbmi->mb_in_image;
+ const int above_in_image = xd->up_available && above_mbmi->mb_in_image;
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
// The prediction flags in these dummy entries are initialised to 0.
if (above_in_image && left_in_image) { // both edges available
- if (above_mi->mbmi.ref_frame[0] == INTRA_FRAME
- && left_mi->mbmi.ref_frame[0] == INTRA_FRAME) {
+ if (above_mbmi->ref_frame[0] == INTRA_FRAME &&
+ left_mbmi->ref_frame[0] == INTRA_FRAME) {
pred_context = 2;
- } else if (above_mi->mbmi.ref_frame[0] == INTRA_FRAME
- || left_mi->mbmi.ref_frame[0] == INTRA_FRAME) {
- const MODE_INFO *edge =
- above_mi->mbmi.ref_frame[0] == INTRA_FRAME ? left_mi : above_mi;
-
- if (edge->mbmi.ref_frame[1] <= INTRA_FRAME) {
- pred_context = 4 * (edge->mbmi.ref_frame[0] == LAST_FRAME);
- } else {
- pred_context = 1
- + (edge->mbmi.ref_frame[0] == LAST_FRAME
- || edge->mbmi.ref_frame[1] == LAST_FRAME);
- }
- } else if (above_mi->mbmi.ref_frame[1] <= INTRA_FRAME
- && left_mi->mbmi.ref_frame[1] <= INTRA_FRAME) {
- pred_context = 2 * (above_mi->mbmi.ref_frame[0] == LAST_FRAME)
- + 2 * (left_mi->mbmi.ref_frame[0] == LAST_FRAME);
- } else if (above_mi->mbmi.ref_frame[1] > INTRA_FRAME
- && left_mi->mbmi.ref_frame[1] > INTRA_FRAME) {
- pred_context = 1
- + (above_mi->mbmi.ref_frame[0] == LAST_FRAME
- || above_mi->mbmi.ref_frame[1] == LAST_FRAME
- || left_mi->mbmi.ref_frame[0] == LAST_FRAME
- || left_mi->mbmi.ref_frame[1] == LAST_FRAME);
+ } else if (above_mbmi->ref_frame[0] == INTRA_FRAME ||
+ left_mbmi->ref_frame[0] == INTRA_FRAME) {
+ const MB_MODE_INFO *edge_mbmi = above_mbmi->ref_frame[0] == INTRA_FRAME ?
+ left_mbmi : above_mbmi;
+
+ if (edge_mbmi->ref_frame[1] <= INTRA_FRAME)
+ pred_context = 4 * (edge_mbmi->ref_frame[0] == LAST_FRAME);
+ else
+ pred_context = 1 + (edge_mbmi->ref_frame[0] == LAST_FRAME ||
+ edge_mbmi->ref_frame[1] == LAST_FRAME);
+ } else if (above_mbmi->ref_frame[1] <= INTRA_FRAME &&
+ left_mbmi->ref_frame[1] <= INTRA_FRAME) {
+ pred_context = 2 * (above_mbmi->ref_frame[0] == LAST_FRAME) +
+ 2 * (left_mbmi->ref_frame[0] == LAST_FRAME);
+ } else if (above_mbmi->ref_frame[1] > INTRA_FRAME &&
+ left_mbmi->ref_frame[1] > INTRA_FRAME) {
+ pred_context = 1 + (above_mbmi->ref_frame[0] == LAST_FRAME ||
+ above_mbmi->ref_frame[1] == LAST_FRAME ||
+ left_mbmi->ref_frame[0] == LAST_FRAME ||
+ left_mbmi->ref_frame[1] == LAST_FRAME);
} else {
- MV_REFERENCE_FRAME rfs =
- above_mi->mbmi.ref_frame[1] <= INTRA_FRAME ?
- above_mi->mbmi.ref_frame[0] : left_mi->mbmi.ref_frame[0];
- MV_REFERENCE_FRAME crf1 =
- above_mi->mbmi.ref_frame[1] > INTRA_FRAME ?
- above_mi->mbmi.ref_frame[0] : left_mi->mbmi.ref_frame[0];
- MV_REFERENCE_FRAME crf2 =
- above_mi->mbmi.ref_frame[1] > INTRA_FRAME ?
- above_mi->mbmi.ref_frame[1] : left_mi->mbmi.ref_frame[1];
-
- if (rfs == LAST_FRAME) {
+ MV_REFERENCE_FRAME rfs = above_mbmi->ref_frame[1] <= INTRA_FRAME ?
+ above_mbmi->ref_frame[0] : left_mbmi->ref_frame[0];
+ MV_REFERENCE_FRAME crf1 = above_mbmi->ref_frame[1] > INTRA_FRAME ?
+ above_mbmi->ref_frame[0] : left_mbmi->ref_frame[0];
+ MV_REFERENCE_FRAME crf2 = above_mbmi->ref_frame[1] > INTRA_FRAME ?
+ above_mbmi->ref_frame[1] : left_mbmi->ref_frame[1];
+
+ if (rfs == LAST_FRAME)
pred_context = 3 + (crf1 == LAST_FRAME || crf2 == LAST_FRAME);
- } else {
+ else
pred_context = crf1 == LAST_FRAME || crf2 == LAST_FRAME;
- }
}
} else if (above_in_image || left_in_image) { // one edge available
- const MODE_INFO *edge = above_in_image ? above_mi : left_mi;
+ const MB_MODE_INFO *edge_mbmi = above_in_image ? above_mbmi : left_mbmi;
- if (edge->mbmi.ref_frame[0] == INTRA_FRAME) {
+ if (edge_mbmi->ref_frame[0] == INTRA_FRAME)
pred_context = 2;
- } else if (edge->mbmi.ref_frame[1] <= INTRA_FRAME) {
- pred_context = 4 * (edge->mbmi.ref_frame[0] == LAST_FRAME);
- } else {
- pred_context = 1
- + (edge->mbmi.ref_frame[0] == LAST_FRAME
- || edge->mbmi.ref_frame[1] == LAST_FRAME);
- }
+ else if (edge_mbmi->ref_frame[1] <= INTRA_FRAME)
+ pred_context = 4 * (edge_mbmi->ref_frame[0] == LAST_FRAME);
+ else
+ pred_context = 1 + (edge_mbmi->ref_frame[0] == LAST_FRAME ||
+ edge_mbmi->ref_frame[1] == LAST_FRAME);
} else { // no edges available (2)
pred_context = 2;
}
@@ -310,95 +296,85 @@ unsigned char vp9_get_pred_context_single_ref_p2(const VP9_COMMON *cm,
const MACROBLOCKD *xd) {
int pred_context;
const MODE_INFO *const mi = xd->mode_info_context;
- const MODE_INFO *const above_mi = mi - cm->mode_info_stride;
- const MODE_INFO *const left_mi = mi - 1;
- const int left_in_image = xd->left_available && left_mi->mbmi.mb_in_image;
- const int above_in_image = xd->up_available && above_mi->mbmi.mb_in_image;
+ const MB_MODE_INFO *const above_mbmi = &mi[-cm->mode_info_stride].mbmi;
+ const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
+ const int left_in_image = xd->left_available && left_mbmi->mb_in_image;
+ const int above_in_image = xd->up_available && above_mbmi->mb_in_image;
+
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
// The prediction flags in these dummy entries are initialised to 0.
if (above_in_image && left_in_image) { // both edges available
- if (above_mi->mbmi.ref_frame[0] == INTRA_FRAME
- && left_mi->mbmi.ref_frame[0] == INTRA_FRAME) {
+ if (above_mbmi->ref_frame[0] == INTRA_FRAME &&
+ left_mbmi->ref_frame[0] == INTRA_FRAME) {
pred_context = 2;
- } else if (above_mi->mbmi.ref_frame[0] == INTRA_FRAME
- || left_mi->mbmi.ref_frame[0] == INTRA_FRAME) {
- const MODE_INFO *edge =
- above_mi->mbmi.ref_frame[0] == INTRA_FRAME ? left_mi : above_mi;
+ } else if (above_mbmi->ref_frame[0] == INTRA_FRAME ||
+ left_mbmi->ref_frame[0] == INTRA_FRAME) {
+ const MB_MODE_INFO *edge_mbmi = above_mbmi->ref_frame[0] == INTRA_FRAME ?
+ left_mbmi : above_mbmi;
- if (edge->mbmi.ref_frame[1] <= INTRA_FRAME) {
- if (edge->mbmi.ref_frame[0] == LAST_FRAME) {
+ if (edge_mbmi->ref_frame[1] <= INTRA_FRAME) {
+ if (edge_mbmi->ref_frame[0] == LAST_FRAME)
pred_context = 3;
- } else {
- pred_context = 4 * (edge->mbmi.ref_frame[0] == GOLDEN_FRAME);
- }
+ else
+ pred_context = 4 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME);
} else {
- pred_context = 1
- + 2
- * (edge->mbmi.ref_frame[0] == GOLDEN_FRAME
- || edge->mbmi.ref_frame[1] == GOLDEN_FRAME);
+ pred_context = 1 + 2 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME ||
+ edge_mbmi->ref_frame[1] == GOLDEN_FRAME);
}
- } else if (above_mi->mbmi.ref_frame[1] <= INTRA_FRAME
- && left_mi->mbmi.ref_frame[1] <= INTRA_FRAME) {
- if (above_mi->mbmi.ref_frame[0] == LAST_FRAME
- && left_mi->mbmi.ref_frame[0] == LAST_FRAME) {
+ } else if (above_mbmi->ref_frame[1] <= INTRA_FRAME &&
+ left_mbmi->ref_frame[1] <= INTRA_FRAME) {
+ if (above_mbmi->ref_frame[0] == LAST_FRAME &&
+ left_mbmi->ref_frame[0] == LAST_FRAME) {
pred_context = 3;
- } else if (above_mi->mbmi.ref_frame[0] == LAST_FRAME
- || left_mi->mbmi.ref_frame[0] == LAST_FRAME) {
- const MODE_INFO *edge =
- above_mi->mbmi.ref_frame[0] == LAST_FRAME ? left_mi : above_mi;
+ } else if (above_mbmi->ref_frame[0] == LAST_FRAME ||
+ left_mbmi->ref_frame[0] == LAST_FRAME) {
+ const MB_MODE_INFO *edge_mbmi = above_mbmi->ref_frame[0] == LAST_FRAME ?
+ left_mbmi : above_mbmi;
- pred_context = 4 * (edge->mbmi.ref_frame[0] == GOLDEN_FRAME);
+ pred_context = 4 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME);
} else {
- pred_context = 2 * (above_mi->mbmi.ref_frame[0] == GOLDEN_FRAME)
- + 2 * (left_mi->mbmi.ref_frame[0] == GOLDEN_FRAME);
+ pred_context = 2 * (above_mbmi->ref_frame[0] == GOLDEN_FRAME) +
+ 2 * (left_mbmi->ref_frame[0] == GOLDEN_FRAME);
}
- } else if (above_mi->mbmi.ref_frame[1] > INTRA_FRAME
- && left_mi->mbmi.ref_frame[1] > INTRA_FRAME) {
- if (above_mi->mbmi.ref_frame[0] == left_mi->mbmi.ref_frame[0]
- && above_mi->mbmi.ref_frame[1] == left_mi->mbmi.ref_frame[1]) {
- pred_context = 3
- * (above_mi->mbmi.ref_frame[0] == GOLDEN_FRAME
- || above_mi->mbmi.ref_frame[1] == GOLDEN_FRAME
- || left_mi->mbmi.ref_frame[0] == GOLDEN_FRAME
- || left_mi->mbmi.ref_frame[1] == GOLDEN_FRAME);
- } else {
+ } else if (above_mbmi->ref_frame[1] > INTRA_FRAME &&
+ left_mbmi->ref_frame[1] > INTRA_FRAME) {
+ if (above_mbmi->ref_frame[0] == left_mbmi->ref_frame[0] &&
+ above_mbmi->ref_frame[1] == left_mbmi->ref_frame[1])
+ pred_context = 3 * (above_mbmi->ref_frame[0] == GOLDEN_FRAME ||
+ above_mbmi->ref_frame[1] == GOLDEN_FRAME ||
+ left_mbmi->ref_frame[0] == GOLDEN_FRAME ||
+ left_mbmi->ref_frame[1] == GOLDEN_FRAME);
+ else
pred_context = 2;
- }
} else {
- MV_REFERENCE_FRAME rfs =
- above_mi->mbmi.ref_frame[1] <= INTRA_FRAME ?
- above_mi->mbmi.ref_frame[0] : left_mi->mbmi.ref_frame[0];
- MV_REFERENCE_FRAME crf1 =
- above_mi->mbmi.ref_frame[1] > INTRA_FRAME ?
- above_mi->mbmi.ref_frame[0] : left_mi->mbmi.ref_frame[0];
- MV_REFERENCE_FRAME crf2 =
- above_mi->mbmi.ref_frame[1] > INTRA_FRAME ?
- above_mi->mbmi.ref_frame[1] : left_mi->mbmi.ref_frame[1];
-
- if (rfs == GOLDEN_FRAME) {
+ MV_REFERENCE_FRAME rfs = above_mbmi->ref_frame[1] <= INTRA_FRAME ?
+ above_mbmi->ref_frame[0] : left_mbmi->ref_frame[0];
+ MV_REFERENCE_FRAME crf1 = above_mbmi->ref_frame[1] > INTRA_FRAME ?
+ above_mbmi->ref_frame[0] : left_mbmi->ref_frame[0];
+ MV_REFERENCE_FRAME crf2 = above_mbmi->ref_frame[1] > INTRA_FRAME ?
+ above_mbmi->ref_frame[1] : left_mbmi->ref_frame[1];
+
+ if (rfs == GOLDEN_FRAME)
pred_context = 3 + (crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME);
- } else if (rfs == ALTREF_FRAME) {
+ else if (rfs == ALTREF_FRAME)
pred_context = crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME;
- } else {
+ else
pred_context = 1 + 2 * (crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME);
- }
}
} else if (above_in_image || left_in_image) { // one edge available
- const MODE_INFO *edge = above_in_image ? above_mi : left_mi;
+ const MB_MODE_INFO *edge_mbmi = above_in_image ? above_mbmi : left_mbmi;
- if (edge->mbmi.ref_frame[0] == INTRA_FRAME
- || (edge->mbmi.ref_frame[0] == LAST_FRAME
- && edge->mbmi.ref_frame[1] <= INTRA_FRAME)) {
+ if (edge_mbmi->ref_frame[0] == INTRA_FRAME ||
+ (edge_mbmi->ref_frame[0] == LAST_FRAME &&
+ edge_mbmi->ref_frame[1] <= INTRA_FRAME))
pred_context = 2;
- } else if (edge->mbmi.ref_frame[1] <= INTRA_FRAME) {
- pred_context = 4 * (edge->mbmi.ref_frame[0] == GOLDEN_FRAME);
- } else {
- pred_context = 3
- * (edge->mbmi.ref_frame[0] == GOLDEN_FRAME
- || edge->mbmi.ref_frame[1] == GOLDEN_FRAME);
- }
+ else if (edge_mbmi->ref_frame[1] <= INTRA_FRAME)
+ pred_context = 4 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME);
+ else
+ pred_context = 3 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME ||
+ edge_mbmi->ref_frame[1] == GOLDEN_FRAME);
} else { // no edges available (2)
pred_context = 2;
}
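
The pattern applied throughout vp9_pred_common.c above: bind MB_MODE_INFO pointers to the above/left neighbors once, instead of repeatedly dereferencing mi->mbmi through MODE_INFO pointers. A small sketch of that idiom using the same field names as the diff; the helper itself is illustrative, not part of the patch:

#include "vp9/common/vp9_onyxc_int.h"   /* VP9_COMMON; pulls in the MACROBLOCKD/MODE_INFO types */

static void bind_neighbors(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                           const MB_MODE_INFO **above, const MB_MODE_INFO **left) {
  const MODE_INFO *const mi = xd->mode_info_context;
  *above = &mi[-cm->mode_info_stride].mbmi;   /* one row up */
  *left = &mi[-1].mbmi;                       /* one entry to the left */
}
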
diff --git a/vp9/common/vp9_pred_common.h b/vp9/common/vp9_pred_common.h
index fa0ced4c6..305e57cba 100644
--- a/vp9/common/vp9_pred_common.h
+++ b/vp9/common/vp9_pred_common.h
@@ -118,11 +118,11 @@ static INLINE const vp9_prob *vp9_get_pred_probs_tx_size(const VP9_COMMON *cm,
const MODE_INFO *const mi = xd->mode_info_context;
const int pred_context = vp9_get_pred_context_tx_size(cm, xd);
if (mi->mbmi.sb_type < BLOCK_SIZE_MB16X16)
- return cm->fc.tx_probs_8x8p[pred_context];
+ return cm->fc.tx_probs.p8x8[pred_context];
else if (mi->mbmi.sb_type < BLOCK_SIZE_SB32X32)
- return cm->fc.tx_probs_16x16p[pred_context];
+ return cm->fc.tx_probs.p16x16[pred_context];
else
- return cm->fc.tx_probs_32x32p[pred_context];
+ return cm->fc.tx_probs.p32x32[pred_context];
}
#endif // VP9_COMMON_VP9_PRED_COMMON_H_
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index 22be55f23..e99ab9c8c 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -60,11 +60,11 @@ static TX_SIZE read_selected_txfm_size(VP9_COMMON *cm, MACROBLOCKD *xd,
}
if (bsize >= BLOCK_SIZE_SB32X32)
- cm->fc.tx_count_32x32p[context][txfm_size]++;
+ cm->fc.tx_counts.p32x32[context][txfm_size]++;
else if (bsize >= BLOCK_SIZE_MB16X16)
- cm->fc.tx_count_16x16p[context][txfm_size]++;
+ cm->fc.tx_counts.p16x16[context][txfm_size]++;
else
- cm->fc.tx_count_8x8p[context][txfm_size]++;
+ cm->fc.tx_counts.p8x8[context][txfm_size]++;
return txfm_size;
}
@@ -108,14 +108,46 @@ static int read_intra_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col,
MACROBLOCKD *const xd = &pbi->mb;
struct segmentation *const seg = &xd->seg;
const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
+ int segment_id;
- if (seg->enabled && seg->update_map) {
- const int segment_id = read_segment_id(r, seg);
- set_segment_id(&pbi->common, bsize, mi_row, mi_col, segment_id);
- return segment_id;
- } else {
+ if (!seg->enabled)
+ return 0; // Default for disabled segmentation
+
+ if (!seg->update_map)
return 0;
+
+ segment_id = read_segment_id(r, seg);
+ set_segment_id(&pbi->common, bsize, mi_row, mi_col, segment_id);
+ return segment_id;
+}
+
+static int read_inter_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col,
+ vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ struct segmentation *const seg = &xd->seg;
+ const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
+ int pred_segment_id, segment_id;
+
+ if (!seg->enabled)
+ return 0; // Default for disabled segmentation
+
+ pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
+ bsize, mi_row, mi_col);
+ if (!seg->update_map)
+ return pred_segment_id;
+
+ if (seg->temporal_update) {
+ const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(xd);
+ const int pred_flag = vp9_read(r, pred_prob);
+ vp9_set_pred_flag_seg_id(xd, bsize, pred_flag);
+ segment_id = pred_flag ? pred_segment_id
+ : read_segment_id(r, seg);
+ } else {
+ segment_id = read_segment_id(r, seg);
}
+ set_segment_id(cm, bsize, mi_row, mi_col, segment_id);
+ return segment_id;
}
static uint8_t read_skip_coeff(VP9D_COMP *pbi, int segment_id, vp9_reader *r) {
@@ -352,37 +384,6 @@ static INLINE COMPPREDMODE_TYPE read_comp_pred_mode(vp9_reader *r) {
return mode;
}
-static int read_inter_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col,
- vp9_reader *r) {
- VP9_COMMON *const cm = &pbi->common;
- MACROBLOCKD *const xd = &pbi->mb;
- struct segmentation *const seg = &xd->seg;
- const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
- int pred_segment_id;
- int segment_id;
-
- if (!seg->enabled)
- return 0; // Default for disabled segmentation
-
- pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
- bsize, mi_row, mi_col);
- if (!seg->update_map)
- return pred_segment_id;
-
- if (seg->temporal_update) {
- const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(xd);
- const int pred_flag = vp9_read(r, pred_prob);
- vp9_set_pred_flag_seg_id(xd, bsize, pred_flag);
- segment_id = pred_flag ? pred_segment_id
- : read_segment_id(r, seg);
- } else {
- segment_id = read_segment_id(r, seg);
- }
- set_segment_id(cm, bsize, mi_row, mi_col, segment_id);
- return segment_id;
-}
-
-
static INLINE void assign_and_clamp_mv(int_mv *dst, const int_mv *src,
int mb_to_left_edge,
int mb_to_right_edge,
diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c
index 48be06946..5fd9a9fb8 100644
--- a/vp9/decoder/vp9_decodframe.c
+++ b/vp9/decoder/vp9_decodframe.c
@@ -68,17 +68,17 @@ static void read_tx_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
for (j = 0; j < TX_SIZE_MAX_SB - 3; ++j)
if (vp9_read(r, VP9_MODE_UPDATE_PROB))
- vp9_diff_update_prob(r, &fc->tx_probs_8x8p[i][j]);
+ vp9_diff_update_prob(r, &fc->tx_probs.p8x8[i][j]);
for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
for (j = 0; j < TX_SIZE_MAX_SB - 2; ++j)
if (vp9_read(r, VP9_MODE_UPDATE_PROB))
- vp9_diff_update_prob(r, &fc->tx_probs_16x16p[i][j]);
+ vp9_diff_update_prob(r, &fc->tx_probs.p16x16[i][j]);
for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
for (j = 0; j < TX_SIZE_MAX_SB - 1; ++j)
if (vp9_read(r, VP9_MODE_UPDATE_PROB))
- vp9_diff_update_prob(r, &fc->tx_probs_32x32p[i][j]);
+ vp9_diff_update_prob(r, &fc->tx_probs.p32x32[i][j]);
}
static void mb_init_dequantizer(VP9_COMMON *pc, MACROBLOCKD *xd) {
@@ -604,9 +604,7 @@ static void update_frame_context(FRAME_CONTEXT *fc) {
fc->pre_nmvc = fc->nmvc;
vp9_copy(fc->pre_switchable_interp_prob, fc->switchable_interp_prob);
vp9_copy(fc->pre_inter_mode_probs, fc->inter_mode_probs);
- vp9_copy(fc->pre_tx_probs_8x8p, fc->tx_probs_8x8p);
- vp9_copy(fc->pre_tx_probs_16x16p, fc->tx_probs_16x16p);
- vp9_copy(fc->pre_tx_probs_32x32p, fc->tx_probs_32x32p);
+ fc->pre_tx_probs = fc->tx_probs;
vp9_copy(fc->pre_mbskip_probs, fc->mbskip_probs);
vp9_zero(fc->coef_counts);
@@ -621,9 +619,7 @@ static void update_frame_context(FRAME_CONTEXT *fc) {
vp9_zero(fc->comp_inter_count);
vp9_zero(fc->single_ref_count);
vp9_zero(fc->comp_ref_count);
- vp9_zero(fc->tx_count_8x8p);
- vp9_zero(fc->tx_count_16x16p);
- vp9_zero(fc->tx_count_32x32p);
+ vp9_zero(fc->tx_counts);
vp9_zero(fc->mbskip_count);
}
@@ -868,7 +864,7 @@ static size_t read_uncompressed_header(VP9D_COMP *pbi,
pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES);
for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
- const int ref = vp9_rb_read_literal(rb, NUM_REF_FRAMES_LG2);
+ const int ref = vp9_rb_read_literal(rb, NUM_REF_FRAMES_LOG2);
cm->active_ref_idx[i] = cm->ref_frame_map[ref];
cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
}
@@ -893,7 +889,7 @@ static size_t read_uncompressed_header(VP9D_COMP *pbi,
cm->frame_parallel_decoding_mode = 1;
}
- cm->frame_context_idx = vp9_rb_read_literal(rb, NUM_FRAME_CONTEXTS_LG2);
+ cm->frame_context_idx = vp9_rb_read_literal(rb, NUM_FRAME_CONTEXTS_LOG2);
if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode || cm->intra_only)
vp9_setup_past_independence(cm, xd);
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index 98242db3d..d34909c65 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -1075,28 +1075,26 @@ static void encode_txfm_probs(VP9_COMP *cpi, vp9_writer *w) {
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
- tx_counts_to_branch_counts_8x8(cm->fc.tx_count_8x8p[i],
+ tx_counts_to_branch_counts_8x8(cm->fc.tx_counts.p8x8[i],
ct_8x8p);
- for (j = 0; j < TX_SIZE_MAX_SB - 3; j++) {
- vp9_cond_prob_diff_update(w, &cm->fc.tx_probs_8x8p[i][j],
+ for (j = 0; j < TX_SIZE_MAX_SB - 3; j++)
+ vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j],
VP9_MODE_UPDATE_PROB, ct_8x8p[j]);
- }
}
+
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
- tx_counts_to_branch_counts_16x16(cm->fc.tx_count_16x16p[i],
+ tx_counts_to_branch_counts_16x16(cm->fc.tx_counts.p16x16[i],
ct_16x16p);
- for (j = 0; j < TX_SIZE_MAX_SB - 2; j++) {
- vp9_cond_prob_diff_update(w, &cm->fc.tx_probs_16x16p[i][j],
+ for (j = 0; j < TX_SIZE_MAX_SB - 2; j++)
+ vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j],
VP9_MODE_UPDATE_PROB, ct_16x16p[j]);
- }
}
+
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
- tx_counts_to_branch_counts_32x32(cm->fc.tx_count_32x32p[i],
- ct_32x32p);
- for (j = 0; j < TX_SIZE_MAX_SB - 1; j++) {
- vp9_cond_prob_diff_update(w, &cm->fc.tx_probs_32x32p[i][j],
+ tx_counts_to_branch_counts_32x32(cm->fc.tx_counts.p32x32[i], ct_32x32p);
+ for (j = 0; j < TX_SIZE_MAX_SB - 1; j++)
+ vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j],
VP9_MODE_UPDATE_PROB, ct_32x32p[j]);
- }
}
#ifdef MODE_STATS
if (!cpi->dummy_packing)
@@ -1352,7 +1350,7 @@ static void write_uncompressed_header(VP9_COMP *cpi,
int i;
vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES);
for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
- vp9_wb_write_literal(wb, refs[i], NUM_REF_FRAMES_LG2);
+ vp9_wb_write_literal(wb, refs[i], NUM_REF_FRAMES_LOG2);
vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[LAST_FRAME + i]);
}
@@ -1370,7 +1368,7 @@ static void write_uncompressed_header(VP9_COMP *cpi,
vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
}
- vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LG2);
+ vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LOG2);
encode_loopfilter(cm, xd, wb);
encode_quantization(cm, wb);
@@ -1505,9 +1503,7 @@ void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
vp9_copy(fc->pre_comp_inter_prob, fc->comp_inter_prob);
vp9_copy(fc->pre_comp_ref_prob, fc->comp_ref_prob);
vp9_copy(fc->pre_single_ref_prob, fc->single_ref_prob);
- vp9_copy(fc->pre_tx_probs_8x8p, fc->tx_probs_8x8p);
- vp9_copy(fc->pre_tx_probs_16x16p, fc->tx_probs_16x16p);
- vp9_copy(fc->pre_tx_probs_32x32p, fc->tx_probs_32x32p);
+ fc->pre_tx_probs = fc->tx_probs;
vp9_copy(fc->pre_mbskip_probs, fc->mbskip_probs);
first_part_size = write_compressed_header(cpi, data);
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index d0827afca..dc741c9e8 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -1733,9 +1733,7 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) {
vp9_zero(cpi->comp_inter_count);
vp9_zero(cpi->single_ref_count);
vp9_zero(cpi->comp_ref_count);
- vp9_zero(cm->fc.tx_count_32x32p);
- vp9_zero(cm->fc.tx_count_16x16p);
- vp9_zero(cm->fc.tx_count_8x8p);
+ vp9_zero(cm->fc.tx_counts);
vp9_zero(cm->fc.mbskip_count);
// Note: this memset assumes above_context[0], [1] and [2]
@@ -2177,28 +2175,28 @@ void vp9_encode_frame(VP9_COMP *cpi) {
int count32x32 = 0;
for (i = 0; i < TX_SIZE_CONTEXTS; i++)
- count4x4 += cm->fc.tx_count_32x32p[i][TX_4X4];
+ count4x4 += cm->fc.tx_counts.p32x32[i][TX_4X4];
for (i = 0; i < TX_SIZE_CONTEXTS; i++)
- count4x4 += cm->fc.tx_count_16x16p[i][TX_4X4];
+ count4x4 += cm->fc.tx_counts.p16x16[i][TX_4X4];
for (i = 0; i < TX_SIZE_CONTEXTS; i++)
- count4x4 += cm->fc.tx_count_8x8p[i][TX_4X4];
+ count4x4 += cm->fc.tx_counts.p8x8[i][TX_4X4];
for (i = 0; i < TX_SIZE_CONTEXTS; i++)
- count8x8_lp += cm->fc.tx_count_32x32p[i][TX_8X8];
+ count8x8_lp += cm->fc.tx_counts.p32x32[i][TX_8X8];
for (i = 0; i < TX_SIZE_CONTEXTS; i++)
- count8x8_lp += cm->fc.tx_count_16x16p[i][TX_8X8];
+ count8x8_lp += cm->fc.tx_counts.p16x16[i][TX_8X8];
for (i = 0; i < TX_SIZE_CONTEXTS; i++)
- count8x8_8x8p += cm->fc.tx_count_8x8p[i][TX_8X8];
+ count8x8_8x8p += cm->fc.tx_counts.p8x8[i][TX_8X8];
for (i = 0; i < TX_SIZE_CONTEXTS; i++)
- count16x16_16x16p += cm->fc.tx_count_16x16p[i][TX_16X16];
+ count16x16_16x16p += cm->fc.tx_counts.p16x16[i][TX_16X16];
for (i = 0; i < TX_SIZE_CONTEXTS; i++)
- count16x16_lp += cm->fc.tx_count_32x32p[i][TX_16X16];
+ count16x16_lp += cm->fc.tx_counts.p32x32[i][TX_16X16];
for (i = 0; i < TX_SIZE_CONTEXTS; i++)
- count32x32 += cm->fc.tx_count_32x32p[i][TX_32X32];
+ count32x32 += cm->fc.tx_counts.p32x32[i][TX_32X32];
if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0
&& count32x32 == 0) {
@@ -2374,11 +2372,11 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP)))) {
const int context = vp9_get_pred_context_tx_size(cm, xd);
if (bsize >= BLOCK_SIZE_SB32X32) {
- cm->fc.tx_count_32x32p[context][mbmi->txfm_size]++;
+ cm->fc.tx_counts.p32x32[context][mbmi->txfm_size]++;
} else if (bsize >= BLOCK_SIZE_MB16X16) {
- cm->fc.tx_count_16x16p[context][mbmi->txfm_size]++;
+ cm->fc.tx_counts.p16x16[context][mbmi->txfm_size]++;
} else {
- cm->fc.tx_count_8x8p[context][mbmi->txfm_size]++;
+ cm->fc.tx_counts.p8x8[context][mbmi->txfm_size]++;
}
} else {
int x, y;
diff --git a/vp9/encoder/vp9_onyx_int.h b/vp9/encoder/vp9_onyx_int.h
index 6e9fd0f51..d3822c201 100644
--- a/vp9/encoder/vp9_onyx_int.h
+++ b/vp9/encoder/vp9_onyx_int.h
@@ -89,9 +89,7 @@ typedef struct {
int inter_mode_counts[INTER_MODE_CONTEXTS][VP9_INTER_MODES - 1][2];
vp9_prob inter_mode_probs[INTER_MODE_CONTEXTS][VP9_INTER_MODES - 1];
- vp9_prob tx_probs_8x8p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 3];
- vp9_prob tx_probs_16x16p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 2];
- vp9_prob tx_probs_32x32p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 1];
+ struct tx_probs tx_probs;
vp9_prob mbskip_probs[MBSKIP_CONTEXTS];
} CODING_CONTEXT;
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index 733932821..8683965c4 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -142,9 +142,7 @@ void vp9_save_coding_context(VP9_COMP *cpi) {
vp9_copy(cc->coef_probs, cm->fc.coef_probs);
vp9_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
- vp9_copy(cc->tx_probs_8x8p, cm->fc.tx_probs_8x8p);
- vp9_copy(cc->tx_probs_16x16p, cm->fc.tx_probs_16x16p);
- vp9_copy(cc->tx_probs_32x32p, cm->fc.tx_probs_32x32p);
+ cc->tx_probs = cm->fc.tx_probs;
vp9_copy(cc->mbskip_probs, cm->fc.mbskip_probs);
}
@@ -183,9 +181,7 @@ void vp9_restore_coding_context(VP9_COMP *cpi) {
vp9_copy(cm->fc.coef_probs, cc->coef_probs);
vp9_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob);
- vp9_copy(cm->fc.tx_probs_8x8p, cc->tx_probs_8x8p);
- vp9_copy(cm->fc.tx_probs_16x16p, cc->tx_probs_16x16p);
- vp9_copy(cm->fc.tx_probs_32x32p, cc->tx_probs_32x32p);
+ cm->fc.tx_probs = cc->tx_probs;
vp9_copy(cm->fc.mbskip_probs, cc->mbskip_probs);
}
diff --git a/vp9/vp9_dx_iface.c b/vp9/vp9_dx_iface.c
index 3bfb35813..05029b92e 100644
--- a/vp9/vp9_dx_iface.c
+++ b/vp9/vp9_dx_iface.c
@@ -19,36 +19,29 @@
#include "decoder/vp9_onyxd_int.h"
#include "vp9/vp9_iface_common.h"
-#define VP8_CAP_POSTPROC (CONFIG_POSTPROC ? VPX_CODEC_CAP_POSTPROC : 0)
-typedef vpx_codec_stream_info_t vp8_stream_info_t;
+#define VP9_CAP_POSTPROC (CONFIG_POSTPROC ? VPX_CODEC_CAP_POSTPROC : 0)
+typedef vpx_codec_stream_info_t vp9_stream_info_t;
/* Structures for handling memory allocations */
typedef enum {
- VP8_SEG_ALG_PRIV = 256,
- VP8_SEG_MAX
+ VP9_SEG_ALG_PRIV = 256,
+ VP9_SEG_MAX
} mem_seg_id_t;
#define NELEMENTS(x) ((int)(sizeof(x)/sizeof(x[0])))
-static unsigned long vp8_priv_sz(const vpx_codec_dec_cfg_t *si, vpx_codec_flags_t);
+static unsigned long priv_sz(const vpx_codec_dec_cfg_t *si,
+ vpx_codec_flags_t flags);
-typedef struct {
- unsigned int id;
- unsigned long sz;
- unsigned int align;
- unsigned int flags;
- unsigned long(*calc_sz)(const vpx_codec_dec_cfg_t *, vpx_codec_flags_t);
-} mem_req_t;
-
-static const mem_req_t vp8_mem_req_segs[] = {
- {VP8_SEG_ALG_PRIV, 0, 8, VPX_CODEC_MEM_ZERO, vp8_priv_sz},
- {VP8_SEG_MAX, 0, 0, 0, NULL}
+static const mem_req_t vp9_mem_req_segs[] = {
+ {VP9_SEG_ALG_PRIV, 0, 8, VPX_CODEC_MEM_ZERO, priv_sz},
+ {VP9_SEG_MAX, 0, 0, 0, NULL}
};
struct vpx_codec_alg_priv {
vpx_codec_priv_t base;
- vpx_codec_mmap_t mmaps[NELEMENTS(vp8_mem_req_segs) - 1];
+ vpx_codec_mmap_t mmaps[NELEMENTS(vp9_mem_req_segs) - 1];
vpx_codec_dec_cfg_t cfg;
- vp8_stream_info_t si;
+ vp9_stream_info_t si;
int defer_alloc;
int decoder_init;
VP9D_PTR pbi;
@@ -67,8 +60,8 @@ struct vpx_codec_alg_priv {
int invert_tile_order;
};
-static unsigned long vp8_priv_sz(const vpx_codec_dec_cfg_t *si,
- vpx_codec_flags_t flags) {
+static unsigned long priv_sz(const vpx_codec_dec_cfg_t *si,
+ vpx_codec_flags_t flags) {
/* Although this declaration is constant, we can't use it in the requested
* segments list because we want to define the requested segments list
* before defining the private type (so that the number of memory maps is
@@ -78,59 +71,7 @@ static unsigned long vp8_priv_sz(const vpx_codec_dec_cfg_t *si,
return sizeof(vpx_codec_alg_priv_t);
}
-
-static void vp8_mmap_dtor(vpx_codec_mmap_t *mmap) {
- free(mmap->priv);
-}
-
-static vpx_codec_err_t vp8_mmap_alloc(vpx_codec_mmap_t *mmap) {
- vpx_codec_err_t res;
- unsigned int align;
-
- align = mmap->align ? mmap->align - 1 : 0;
-
- if (mmap->flags & VPX_CODEC_MEM_ZERO)
- mmap->priv = calloc(1, mmap->sz + align);
- else
- mmap->priv = malloc(mmap->sz + align);
-
- res = (mmap->priv) ? VPX_CODEC_OK : VPX_CODEC_MEM_ERROR;
- mmap->base = (void *)((((uintptr_t)mmap->priv) + align) & ~(uintptr_t)align);
- mmap->dtor = vp8_mmap_dtor;
- return res;
-}
-
-static vpx_codec_err_t vp8_validate_mmaps(const vp8_stream_info_t *si,
- const vpx_codec_mmap_t *mmaps,
- vpx_codec_flags_t init_flags) {
- int i;
- vpx_codec_err_t res = VPX_CODEC_OK;
-
- for (i = 0; i < NELEMENTS(vp8_mem_req_segs) - 1; i++) {
- /* Ensure the segment has been allocated */
- if (!mmaps[i].base) {
- res = VPX_CODEC_MEM_ERROR;
- break;
- }
-
- /* Verify variable size segment is big enough for the current si. */
- if (vp8_mem_req_segs[i].calc_sz) {
- vpx_codec_dec_cfg_t cfg;
-
- cfg.w = si->w;
- cfg.h = si->h;
-
- if (mmaps[i].sz < vp8_mem_req_segs[i].calc_sz(&cfg, init_flags)) {
- res = VPX_CODEC_MEM_ERROR;
- break;
- }
- }
- }
-
- return res;
-}
-
-static void vp8_init_ctx(vpx_codec_ctx_t *ctx, const vpx_codec_mmap_t *mmap) {
+static void vp9_init_ctx(vpx_codec_ctx_t *ctx, const vpx_codec_mmap_t *mmap) {
int i;
ctx->priv = mmap->base;
@@ -139,7 +80,7 @@ static void vp8_init_ctx(vpx_codec_ctx_t *ctx, const vpx_codec_mmap_t *mmap) {
ctx->priv->alg_priv = mmap->base;
for (i = 0; i < NELEMENTS(ctx->priv->alg_priv->mmaps); i++)
- ctx->priv->alg_priv->mmaps[i].id = vp8_mem_req_segs[i].id;
+ ctx->priv->alg_priv->mmaps[i].id = vp9_mem_req_segs[i].id;
ctx->priv->alg_priv->mmaps[0] = *mmap;
ctx->priv->alg_priv->si.sz = sizeof(ctx->priv->alg_priv->si);
@@ -152,20 +93,11 @@ static void vp8_init_ctx(vpx_codec_ctx_t *ctx, const vpx_codec_mmap_t *mmap) {
}
}
-static void *mmap_lkup(vpx_codec_alg_priv_t *ctx, unsigned int id) {
- int i;
-
- for (i = 0; i < NELEMENTS(ctx->mmaps); i++)
- if (ctx->mmaps[i].id == id)
- return ctx->mmaps[i].base;
-
- return NULL;
-}
-static void vp8_finalize_mmaps(vpx_codec_alg_priv_t *ctx) {
+static void vp9_finalize_mmaps(vpx_codec_alg_priv_t *ctx) {
/* nothing to clean up */
}
-static vpx_codec_err_t vp8_init(vpx_codec_ctx_t *ctx,
+static vpx_codec_err_t vp9_init(vpx_codec_ctx_t *ctx,
vpx_codec_priv_enc_mr_cfg_t *data) {
vpx_codec_err_t res = VPX_CODEC_OK;
@@ -176,15 +108,15 @@ static vpx_codec_err_t vp8_init(vpx_codec_ctx_t *ctx,
if (!ctx->priv) {
vpx_codec_mmap_t mmap;
- mmap.id = vp8_mem_req_segs[0].id;
+ mmap.id = vp9_mem_req_segs[0].id;
mmap.sz = sizeof(vpx_codec_alg_priv_t);
- mmap.align = vp8_mem_req_segs[0].align;
- mmap.flags = vp8_mem_req_segs[0].flags;
+ mmap.align = vp9_mem_req_segs[0].align;
+ mmap.flags = vp9_mem_req_segs[0].flags;
- res = vp8_mmap_alloc(&mmap);
+ res = vpx_mmap_alloc(&mmap);
if (!res) {
- vp8_init_ctx(ctx, &mmap);
+ vp9_init_ctx(ctx, &mmap);
ctx->priv->alg_priv->defer_alloc = 1;
/*post processing level initialized to do nothing */
@@ -194,7 +126,7 @@ static vpx_codec_err_t vp8_init(vpx_codec_ctx_t *ctx,
return res;
}
-static vpx_codec_err_t vp8_destroy(vpx_codec_alg_priv_t *ctx) {
+static vpx_codec_err_t vp9_destroy(vpx_codec_alg_priv_t *ctx) {
int i;
vp9_remove_decompressor(ctx->pbi);
@@ -207,7 +139,7 @@ static vpx_codec_err_t vp8_destroy(vpx_codec_alg_priv_t *ctx) {
return VPX_CODEC_OK;
}
-static vpx_codec_err_t vp8_peek_si(const uint8_t *data,
+static vpx_codec_err_t vp9_peek_si(const uint8_t *data,
unsigned int data_sz,
vpx_codec_stream_info_t *si) {
vpx_codec_err_t res = VPX_CODEC_OK;
@@ -238,13 +170,13 @@ static vpx_codec_err_t vp8_peek_si(const uint8_t *data,
return res;
}
-static vpx_codec_err_t vp8_get_si(vpx_codec_alg_priv_t *ctx,
+static vpx_codec_err_t vp9_get_si(vpx_codec_alg_priv_t *ctx,
vpx_codec_stream_info_t *si) {
unsigned int sz;
- if (si->sz >= sizeof(vp8_stream_info_t))
- sz = sizeof(vp8_stream_info_t);
+ if (si->sz >= sizeof(vp9_stream_info_t))
+ sz = sizeof(vp9_stream_info_t);
else
sz = sizeof(vpx_codec_stream_info_t);
@@ -294,27 +226,29 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
cfg.w = ctx->si.w;
cfg.h = ctx->si.h;
- ctx->mmaps[i].id = vp8_mem_req_segs[i].id;
- ctx->mmaps[i].sz = vp8_mem_req_segs[i].sz;
- ctx->mmaps[i].align = vp8_mem_req_segs[i].align;
- ctx->mmaps[i].flags = vp8_mem_req_segs[i].flags;
+ ctx->mmaps[i].id = vp9_mem_req_segs[i].id;
+ ctx->mmaps[i].sz = vp9_mem_req_segs[i].sz;
+ ctx->mmaps[i].align = vp9_mem_req_segs[i].align;
+ ctx->mmaps[i].flags = vp9_mem_req_segs[i].flags;
if (!ctx->mmaps[i].sz)
- ctx->mmaps[i].sz = vp8_mem_req_segs[i].calc_sz(&cfg,
+ ctx->mmaps[i].sz = vp9_mem_req_segs[i].calc_sz(&cfg,
ctx->base.init_flags);
- res = vp8_mmap_alloc(&ctx->mmaps[i]);
+ res = vpx_mmap_alloc(&ctx->mmaps[i]);
}
if (!res)
- vp8_finalize_mmaps(ctx);
+ vp9_finalize_mmaps(ctx);
ctx->defer_alloc = 0;
}
/* Initialize the decoder instance on the first frame*/
if (!res && !ctx->decoder_init) {
- res = vp8_validate_mmaps(&ctx->si, ctx->mmaps, ctx->base.init_flags);
+ res = vpx_validate_mmaps(&ctx->si, ctx->mmaps,
+ vp9_mem_req_segs, NELEMENTS(vp9_mem_req_segs),
+ ctx->base.init_flags);
if (!res) {
VP9D_CONFIG oxcf;
@@ -484,7 +418,7 @@ static vpx_codec_err_t vp9_decode(vpx_codec_alg_priv_t *ctx,
return res;
}
-static vpx_image_t *vp8_get_frame(vpx_codec_alg_priv_t *ctx,
+static vpx_image_t *vp9_get_frame(vpx_codec_alg_priv_t *ctx,
vpx_codec_iter_t *iter) {
vpx_image_t *img = NULL;
@@ -502,24 +436,22 @@ static vpx_image_t *vp8_get_frame(vpx_codec_alg_priv_t *ctx,
return img;
}
-
-static
-vpx_codec_err_t vp8_xma_get_mmap(const vpx_codec_ctx_t *ctx,
- vpx_codec_mmap_t *mmap,
- vpx_codec_iter_t *iter) {
+static vpx_codec_err_t vp9_xma_get_mmap(const vpx_codec_ctx_t *ctx,
+ vpx_codec_mmap_t *mmap,
+ vpx_codec_iter_t *iter) {
vpx_codec_err_t res;
const mem_req_t *seg_iter = *iter;
/* Get address of next segment request */
do {
if (!seg_iter)
- seg_iter = vp8_mem_req_segs;
- else if (seg_iter->id != VP8_SEG_MAX)
+ seg_iter = vp9_mem_req_segs;
+ else if (seg_iter->id != VP9_SEG_MAX)
seg_iter++;
*iter = (vpx_codec_iter_t)seg_iter;
- if (seg_iter->id != VP8_SEG_MAX) {
+ if (seg_iter->id != VP9_SEG_MAX) {
mmap->id = seg_iter->id;
mmap->sz = seg_iter->sz;
mmap->align = seg_iter->align;
@@ -536,15 +468,15 @@ vpx_codec_err_t vp8_xma_get_mmap(const vpx_codec_ctx_t *ctx,
return res;
}
-static vpx_codec_err_t vp8_xma_set_mmap(vpx_codec_ctx_t *ctx,
+static vpx_codec_err_t vp9_xma_set_mmap(vpx_codec_ctx_t *ctx,
const vpx_codec_mmap_t *mmap) {
vpx_codec_err_t res = VPX_CODEC_MEM_ERROR;
int i, done;
if (!ctx->priv) {
- if (mmap->id == VP8_SEG_ALG_PRIV) {
+ if (mmap->id == VP9_SEG_ALG_PRIV) {
if (!ctx->priv) {
- vp8_init_ctx(ctx, mmap);
+ vp9_init_ctx(ctx, mmap);
res = VPX_CODEC_OK;
}
}
@@ -565,17 +497,16 @@ static vpx_codec_err_t vp8_xma_set_mmap(vpx_codec_ctx_t *ctx,
}
if (done && !res) {
- vp8_finalize_mmaps(ctx->priv->alg_priv);
+ vp9_finalize_mmaps(ctx->priv->alg_priv);
res = ctx->iface->init(ctx, NULL);
}
return res;
}
-
-static vpx_codec_err_t vp9_set_reference(vpx_codec_alg_priv_t *ctx,
- int ctr_id,
- va_list args) {
+static vpx_codec_err_t set_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
@@ -592,9 +523,9 @@ static vpx_codec_err_t vp9_set_reference(vpx_codec_alg_priv_t *ctx,
}
-static vpx_codec_err_t vp9_copy_reference(vpx_codec_alg_priv_t *ctx,
- int ctr_id,
- va_list args) {
+static vpx_codec_err_t copy_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
@@ -627,9 +558,9 @@ static vpx_codec_err_t get_reference(vpx_codec_alg_priv_t *ctx,
}
}
-static vpx_codec_err_t vp8_set_postproc(vpx_codec_alg_priv_t *ctx,
- int ctr_id,
- va_list args) {
+static vpx_codec_err_t set_postproc(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
#if CONFIG_POSTPROC
vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *);
@@ -645,9 +576,9 @@ static vpx_codec_err_t vp8_set_postproc(vpx_codec_alg_priv_t *ctx,
#endif
}
-static vpx_codec_err_t vp8_set_dbg_options(vpx_codec_alg_priv_t *ctx,
- int ctrl_id,
- va_list args) {
+static vpx_codec_err_t set_dbg_options(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args) {
#if CONFIG_POSTPROC_VISUALIZER && CONFIG_POSTPROC
int data = va_arg(args, int);
@@ -666,9 +597,9 @@ static vpx_codec_err_t vp8_set_dbg_options(vpx_codec_alg_priv_t *ctx,
#endif
}
-static vpx_codec_err_t vp8_get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
- int ctrl_id,
- va_list args) {
+static vpx_codec_err_t get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args) {
int *update_info = va_arg(args, int *);
VP9D_COMP *pbi = (VP9D_COMP *)ctx->pbi;
@@ -681,9 +612,9 @@ static vpx_codec_err_t vp8_get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
}
-static vpx_codec_err_t vp8_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
- int ctrl_id,
- va_list args) {
+static vpx_codec_err_t get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args) {
int *corrupted = va_arg(args, int *);
@@ -705,15 +636,15 @@ static vpx_codec_err_t set_invert_tile_order(vpx_codec_alg_priv_t *ctx,
}
static vpx_codec_ctrl_fn_map_t ctf_maps[] = {
- {VP8_SET_REFERENCE, vp9_set_reference},
- {VP8_COPY_REFERENCE, vp9_copy_reference},
- {VP8_SET_POSTPROC, vp8_set_postproc},
- {VP8_SET_DBG_COLOR_REF_FRAME, vp8_set_dbg_options},
- {VP8_SET_DBG_COLOR_MB_MODES, vp8_set_dbg_options},
- {VP8_SET_DBG_COLOR_B_MODES, vp8_set_dbg_options},
- {VP8_SET_DBG_DISPLAY_MV, vp8_set_dbg_options},
- {VP8D_GET_LAST_REF_UPDATES, vp8_get_last_ref_updates},
- {VP8D_GET_FRAME_CORRUPTED, vp8_get_frame_corrupted},
+ {VP8_SET_REFERENCE, set_reference},
+ {VP8_COPY_REFERENCE, copy_reference},
+ {VP8_SET_POSTPROC, set_postproc},
+ {VP8_SET_DBG_COLOR_REF_FRAME, set_dbg_options},
+ {VP8_SET_DBG_COLOR_MB_MODES, set_dbg_options},
+ {VP8_SET_DBG_COLOR_B_MODES, set_dbg_options},
+ {VP8_SET_DBG_DISPLAY_MV, set_dbg_options},
+ {VP8D_GET_LAST_REF_UPDATES, get_last_ref_updates},
+ {VP8D_GET_FRAME_CORRUPTED, get_frame_corrupted},
{VP9_GET_REFERENCE, get_reference},
{VP9_INVERT_TILE_DECODE_ORDER, set_invert_tile_order},
{ -1, NULL},
@@ -726,18 +657,18 @@ static vpx_codec_ctrl_fn_map_t ctf_maps[] = {
CODEC_INTERFACE(vpx_codec_vp9_dx) = {
"WebM Project VP9 Decoder" VERSION_STRING,
VPX_CODEC_INTERNAL_ABI_VERSION,
- VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC,
+ VPX_CODEC_CAP_DECODER | VP9_CAP_POSTPROC,
/* vpx_codec_caps_t caps; */
- vp8_init, /* vpx_codec_init_fn_t init; */
- vp8_destroy, /* vpx_codec_destroy_fn_t destroy; */
+ vp9_init, /* vpx_codec_init_fn_t init; */
+ vp9_destroy, /* vpx_codec_destroy_fn_t destroy; */
ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
- vp8_xma_get_mmap, /* vpx_codec_get_mmap_fn_t get_mmap; */
- vp8_xma_set_mmap, /* vpx_codec_set_mmap_fn_t set_mmap; */
+ vp9_xma_get_mmap, /* vpx_codec_get_mmap_fn_t get_mmap; */
+ vp9_xma_set_mmap, /* vpx_codec_set_mmap_fn_t set_mmap; */
{
- vp8_peek_si, /* vpx_codec_peek_si_fn_t peek_si; */
- vp8_get_si, /* vpx_codec_get_si_fn_t get_si; */
+ vp9_peek_si, /* vpx_codec_peek_si_fn_t peek_si; */
+ vp9_get_si, /* vpx_codec_get_si_fn_t get_si; */
vp9_decode, /* vpx_codec_decode_fn_t decode; */
- vp8_get_frame, /* vpx_codec_frame_get_fn_t frame_get; */
+ vp9_get_frame, /* vpx_codec_frame_get_fn_t frame_get; */
},
{
/* encoder functions */
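
Note: the control-ID table above (ctf_maps) keeps the VP8_* control IDs even though the handlers lose their vp8_/vp9_ prefixes, so applications keep driving the VP9 decoder through the same vpx_codec_control() calls. A minimal caller sketch, assuming the standard postproc controls from vpx/vp8.h (illustrative only, not part of this change; enable_deblocking is a hypothetical helper name):

    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"

    static void enable_deblocking(vpx_codec_ctx_t *decoder) {
      /* VP8_SET_POSTPROC still routes to set_postproc() in the table above. */
      vp8_postproc_cfg_t pp = { VP8_DEBLOCK, 4, 0 };  /* flags, level, noise */
      vpx_codec_control(decoder, VP8_SET_POSTPROC, &pp);
    }
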
diff --git a/vpx/internal/vpx_codec_internal.h b/vpx/internal/vpx_codec_internal.h
index 0b057de4c..05fed977e 100644
--- a/vpx/internal/vpx_codec_internal.h
+++ b/vpx/internal/vpx_codec_internal.h
@@ -474,4 +474,30 @@ static void vpx_internal_error(struct vpx_internal_error_info *info,
if (info->setjmp)
longjmp(info->jmp, info->error_code);
}
+
+//------------------------------------------------------------------------------
+// mmap interface
+
+typedef struct {
+ unsigned int id;
+ unsigned long sz;
+ unsigned int align;
+ unsigned int flags;
+ unsigned long (*calc_sz)(const vpx_codec_dec_cfg_t *, vpx_codec_flags_t);
+} mem_req_t;
+
+// Allocates mmap.priv and sets mmap.base based on mmap.sz/align/flags
+// requirements.
+// Returns #VPX_CODEC_OK on success, #VPX_CODEC_MEM_ERROR otherwise.
+vpx_codec_err_t vpx_mmap_alloc(vpx_codec_mmap_t *mmap);
+
+// Frees the allocation backing mmap.base (mmap.priv) made by vpx_mmap_alloc().
+void vpx_mmap_dtor(vpx_codec_mmap_t *mmap);
+
+// Checks that each mmap meets the size requirement specified by mem_reqs.
+// Returns #VPX_CODEC_OK on success, #VPX_CODEC_MEM_ERROR otherwise.
+vpx_codec_err_t vpx_validate_mmaps(const vpx_codec_stream_info_t *si,
+ const vpx_codec_mmap_t *mmaps,
+ const mem_req_t *mem_reqs, int nreqs,
+ vpx_codec_flags_t init_flags);
#endif
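
The block above is the shared mmap interface that replaces the per-codec copies removed earlier in this patch. A minimal sketch of how a decoder fills and allocates one segment with these helpers, mirroring the decode_one() changes shown above (alloc_seg is a hypothetical name used only for illustration):

    #include "vpx/internal/vpx_codec_internal.h"

    static vpx_codec_err_t alloc_seg(const mem_req_t *req,
                                     const vpx_codec_dec_cfg_t *cfg,
                                     vpx_codec_flags_t init_flags,
                                     vpx_codec_mmap_t *mmap) {
      mmap->id    = req->id;
      mmap->sz    = req->sz;
      mmap->align = req->align;
      mmap->flags = req->flags;

      /* Variable-size segments compute their size from the decoder config. */
      if (!mmap->sz && req->calc_sz)
        mmap->sz = req->calc_sz(cfg, init_flags);

      return vpx_mmap_alloc(mmap);  /* sets mmap->base, mmap->priv, mmap->dtor */
    }

On failure the caller releases any segments already allocated through their dtor, which points at vpx_mmap_dtor().
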
diff --git a/vpx/src/vpx_codec.c b/vpx/src/vpx_codec.c
index 61d7f4c18..1f664ae49 100644
--- a/vpx/src/vpx_codec.c
+++ b/vpx/src/vpx_codec.c
@@ -14,6 +14,7 @@
*
*/
#include <stdarg.h>
+#include <stdlib.h>
#include "vpx/vpx_integer.h"
#include "vpx/internal/vpx_codec_internal.h"
#include "vpx_version.h"
@@ -133,3 +134,51 @@ vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx,
return SAVE_STATUS(ctx, res);
}
+
+//------------------------------------------------------------------------------
+// mmap interface
+
+vpx_codec_err_t vpx_mmap_alloc(vpx_codec_mmap_t *mmap) {
+ unsigned int align = mmap->align ? mmap->align - 1 : 0;
+
+ if (mmap->flags & VPX_CODEC_MEM_ZERO)
+ mmap->priv = calloc(1, mmap->sz + align);
+ else
+ mmap->priv = malloc(mmap->sz + align);
+
+ if (mmap->priv == NULL) return VPX_CODEC_MEM_ERROR;
+ mmap->base = (void *)((((uintptr_t)mmap->priv) + align) & ~(uintptr_t)align);
+ mmap->dtor = vpx_mmap_dtor;
+ return VPX_CODEC_OK;
+}
+
+void vpx_mmap_dtor(vpx_codec_mmap_t *mmap) {
+ free(mmap->priv);
+}
+
+vpx_codec_err_t vpx_validate_mmaps(const vpx_codec_stream_info_t *si,
+ const vpx_codec_mmap_t *mmaps,
+ const mem_req_t *mem_reqs, int nreqs,
+ vpx_codec_flags_t init_flags) {
+ int i;
+
+ for (i = 0; i < nreqs - 1; ++i) {
+ /* Ensure the segment has been allocated */
+ if (mmaps[i].base == NULL) {
+ return VPX_CODEC_MEM_ERROR;
+ }
+
+ /* Verify variable size segment is big enough for the current si. */
+ if (mem_reqs[i].calc_sz != NULL) {
+ vpx_codec_dec_cfg_t cfg;
+
+ cfg.w = si->w;
+ cfg.h = si->h;
+
+ if (mmaps[i].sz < mem_reqs[i].calc_sz(&cfg, init_flags)) {
+ return VPX_CODEC_MEM_ERROR;
+ }
+ }
+ }
+ return VPX_CODEC_OK;
+}
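
For reference, the alignment arithmetic in vpx_mmap_alloc() over-allocates by align - 1 bytes and rounds the raw pointer up to the next alignment boundary. A tiny worked sketch with assumed values (not taken from the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      const uintptr_t mask = 32 - 1;       /* mmap->align == 32           */
      const uintptr_t priv = 0x1004;       /* pretend malloc() result     */
      const uintptr_t base = (priv + mask) & ~mask;
      assert(base == 0x1020);              /* next 32-byte boundary       */
      assert(base - priv <= mask);         /* slack fits in sz + mask     */
      return 0;
    }
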