author    clang-format <noreply@google.com>  2016-07-26 20:20:13 -0700
committer James Zern <jzern@google.com>      2016-08-03 14:29:31 -0700
commit    08131055e4d55003104c0be787c4870d97b6e86f
tree      cd6d10da5533492aef1bc1f159b419150f726e41
parent    8ff40f8beccf17234240acedb8966b5634fb0c6e
vp9/decoder,vp9/*.[hc]: apply clang-format
Change-Id: Ic38ea06c7b2fb3e8e94a4c0910e82672a1acaea7
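
The change below is purely mechanical reformatting. The exact command used is not recorded in the commit; as a rough, assumption-labeled sketch, it could be reproduced by running clang-format in place over the files listed in the diffstat, relying on the .clang-format settings at the libvpx tree root:

#!/usr/bin/env python3
# Hypothetical reproduction sketch -- the actual invocation is not part of
# this commit. It runs clang-format in place over the files touched here,
# assuming clang-format is on PATH and the tree's .clang-format is used.
import subprocess

FILES = [
    "vp9/decoder/vp9_decodeframe.c", "vp9/decoder/vp9_decodeframe.h",
    "vp9/decoder/vp9_decodemv.c",    "vp9/decoder/vp9_decodemv.h",
    "vp9/decoder/vp9_decoder.c",     "vp9/decoder/vp9_decoder.h",
    "vp9/decoder/vp9_detokenize.c",  "vp9/decoder/vp9_detokenize.h",
    "vp9/decoder/vp9_dsubexp.c",     "vp9/decoder/vp9_dsubexp.h",
    "vp9/decoder/vp9_dthread.c",     "vp9/decoder/vp9_dthread.h",
    "vp9/vp9_cx_iface.c", "vp9/vp9_dx_iface.c",
    "vp9/vp9_dx_iface.h", "vp9/vp9_iface_common.h",
]

# Rewrite each file in place; style comes from the project's .clang-format.
subprocess.check_call(["clang-format", "-i"] + FILES)
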
-rw-r--r--  vp9/decoder/vp9_decodeframe.c   655
-rw-r--r--  vp9/decoder/vp9_decodeframe.h    10
-rw-r--r--  vp9/decoder/vp9_decodemv.c      291
-rw-r--r--  vp9/decoder/vp9_decodemv.h        5
-rw-r--r--  vp9/decoder/vp9_decoder.c        65
-rw-r--r--  vp9/decoder/vp9_decoder.h        16
-rw-r--r--  vp9/decoder/vp9_detokenize.c     60
-rw-r--r--  vp9/decoder/vp9_detokenize.h      7
-rw-r--r--  vp9/decoder/vp9_dsubexp.c        32
-rw-r--r--  vp9/decoder/vp9_dsubexp.h         3
-rw-r--r--  vp9/decoder/vp9_dthread.c        31
-rw-r--r--  vp9/decoder/vp9_dthread.h         2
-rw-r--r--  vp9/vp9_cx_iface.c              762
-rw-r--r--  vp9/vp9_dx_iface.c              252
-rw-r--r--  vp9/vp9_dx_iface.h               56
-rw-r--r--  vp9/vp9_iface_common.h           35
16 files changed, 1026 insertions(+), 1256 deletions(-)
diff --git a/vp9/decoder/vp9_decodeframe.c b/vp9/decoder/vp9_decodeframe.c
index 3199b98aa..66dbbc62c 100644
--- a/vp9/decoder/vp9_decodeframe.c
+++ b/vp9/decoder/vp9_decodeframe.c
@@ -48,20 +48,19 @@
static int is_compound_reference_allowed(const VP9_COMMON *cm) {
int i;
for (i = 1; i < REFS_PER_FRAME; ++i)
- if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1])
- return 1;
+ if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1]) return 1;
return 0;
}
static void setup_compound_reference_mode(VP9_COMMON *cm) {
if (cm->ref_frame_sign_bias[LAST_FRAME] ==
- cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
+ cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
cm->comp_fixed_ref = ALTREF_FRAME;
cm->comp_var_ref[0] = LAST_FRAME;
cm->comp_var_ref[1] = GOLDEN_FRAME;
} else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
- cm->ref_frame_sign_bias[ALTREF_FRAME]) {
+ cm->ref_frame_sign_bias[ALTREF_FRAME]) {
cm->comp_fixed_ref = GOLDEN_FRAME;
cm->comp_var_ref[0] = LAST_FRAME;
cm->comp_var_ref[1] = ALTREF_FRAME;
@@ -83,8 +82,7 @@ static int decode_unsigned_max(struct vpx_read_bit_buffer *rb, int max) {
static TX_MODE read_tx_mode(vpx_reader *r) {
TX_MODE tx_mode = vpx_read_literal(r, 2);
- if (tx_mode == ALLOW_32X32)
- tx_mode += vpx_read_bit(r);
+ if (tx_mode == ALLOW_32X32) tx_mode += vpx_read_bit(r);
return tx_mode;
}
@@ -121,9 +119,9 @@ static void read_inter_mode_probs(FRAME_CONTEXT *fc, vpx_reader *r) {
static REFERENCE_MODE read_frame_reference_mode(const VP9_COMMON *cm,
vpx_reader *r) {
if (is_compound_reference_allowed(cm)) {
- return vpx_read_bit(r) ? (vpx_read_bit(r) ? REFERENCE_MODE_SELECT
- : COMPOUND_REFERENCE)
- : SINGLE_REFERENCE;
+ return vpx_read_bit(r)
+ ? (vpx_read_bit(r) ? REFERENCE_MODE_SELECT : COMPOUND_REFERENCE)
+ : SINGLE_REFERENCE;
} else {
return SINGLE_REFERENCE;
}
@@ -151,8 +149,7 @@ static void read_frame_reference_mode_probs(VP9_COMMON *cm, vpx_reader *r) {
static void update_mv_probs(vpx_prob *p, int n, vpx_reader *r) {
int i;
for (i = 0; i < n; ++i)
- if (vpx_read(r, MV_UPDATE_PROB))
- p[i] = (vpx_read_literal(r, 7) << 1) | 1;
+ if (vpx_read(r, MV_UPDATE_PROB)) p[i] = (vpx_read_literal(r, 7) << 1) | 1;
}
static void read_mv_probs(nmv_context *ctx, int allow_hp, vpx_reader *r) {
@@ -184,10 +181,9 @@ static void read_mv_probs(nmv_context *ctx, int allow_hp, vpx_reader *r) {
}
}
-static void inverse_transform_block_inter(MACROBLOCKD* xd, int plane,
- const TX_SIZE tx_size,
- uint8_t *dst, int stride,
- int eob) {
+static void inverse_transform_block_inter(MACROBLOCKD *xd, int plane,
+ const TX_SIZE tx_size, uint8_t *dst,
+ int stride, int eob) {
struct macroblockd_plane *const pd = &xd->plane[plane];
tran_low_t *const dqcoeff = pd->dqcoeff;
assert(eob > 0);
@@ -209,8 +205,7 @@ static void inverse_transform_block_inter(MACROBLOCKD* xd, int plane,
case TX_32X32:
vp9_highbd_idct32x32_add(dqcoeff, dst, stride, eob, xd->bd);
break;
- default:
- assert(0 && "Invalid transform size");
+ default: assert(0 && "Invalid transform size");
}
}
} else {
@@ -218,21 +213,11 @@ static void inverse_transform_block_inter(MACROBLOCKD* xd, int plane,
vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
} else {
switch (tx_size) {
- case TX_4X4:
- vp9_idct4x4_add(dqcoeff, dst, stride, eob);
- break;
- case TX_8X8:
- vp9_idct8x8_add(dqcoeff, dst, stride, eob);
- break;
- case TX_16X16:
- vp9_idct16x16_add(dqcoeff, dst, stride, eob);
- break;
- case TX_32X32:
- vp9_idct32x32_add(dqcoeff, dst, stride, eob);
- break;
- default:
- assert(0 && "Invalid transform size");
- return;
+ case TX_4X4: vp9_idct4x4_add(dqcoeff, dst, stride, eob); break;
+ case TX_8X8: vp9_idct8x8_add(dqcoeff, dst, stride, eob); break;
+ case TX_16X16: vp9_idct16x16_add(dqcoeff, dst, stride, eob); break;
+ case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break;
+ default: assert(0 && "Invalid transform size"); return;
}
}
}
@@ -241,21 +226,11 @@ static void inverse_transform_block_inter(MACROBLOCKD* xd, int plane,
vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
} else {
switch (tx_size) {
- case TX_4X4:
- vp9_idct4x4_add(dqcoeff, dst, stride, eob);
- break;
- case TX_8X8:
- vp9_idct8x8_add(dqcoeff, dst, stride, eob);
- break;
- case TX_16X16:
- vp9_idct16x16_add(dqcoeff, dst, stride, eob);
- break;
- case TX_32X32:
- vp9_idct32x32_add(dqcoeff, dst, stride, eob);
- break;
- default:
- assert(0 && "Invalid transform size");
- return;
+ case TX_4X4: vp9_idct4x4_add(dqcoeff, dst, stride, eob); break;
+ case TX_8X8: vp9_idct8x8_add(dqcoeff, dst, stride, eob); break;
+ case TX_16X16: vp9_idct16x16_add(dqcoeff, dst, stride, eob); break;
+ case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break;
+ default: assert(0 && "Invalid transform size"); return;
}
}
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -272,11 +247,10 @@ static void inverse_transform_block_inter(MACROBLOCKD* xd, int plane,
}
}
-static void inverse_transform_block_intra(MACROBLOCKD* xd, int plane,
+static void inverse_transform_block_intra(MACROBLOCKD *xd, int plane,
const TX_TYPE tx_type,
- const TX_SIZE tx_size,
- uint8_t *dst, int stride,
- int eob) {
+ const TX_SIZE tx_size, uint8_t *dst,
+ int stride, int eob) {
struct macroblockd_plane *const pd = &xd->plane[plane];
tran_low_t *const dqcoeff = pd->dqcoeff;
assert(eob > 0);
@@ -298,8 +272,7 @@ static void inverse_transform_block_intra(MACROBLOCKD* xd, int plane,
case TX_32X32:
vp9_highbd_idct32x32_add(dqcoeff, dst, stride, eob, xd->bd);
break;
- default:
- assert(0 && "Invalid transform size");
+ default: assert(0 && "Invalid transform size");
}
}
} else {
@@ -307,21 +280,13 @@ static void inverse_transform_block_intra(MACROBLOCKD* xd, int plane,
vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
} else {
switch (tx_size) {
- case TX_4X4:
- vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob);
- break;
- case TX_8X8:
- vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
- break;
+ case TX_4X4: vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob); break;
+ case TX_8X8: vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); break;
case TX_16X16:
vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
break;
- case TX_32X32:
- vp9_idct32x32_add(dqcoeff, dst, stride, eob);
- break;
- default:
- assert(0 && "Invalid transform size");
- return;
+ case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break;
+ default: assert(0 && "Invalid transform size"); return;
}
}
}
@@ -330,21 +295,13 @@ static void inverse_transform_block_intra(MACROBLOCKD* xd, int plane,
vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
} else {
switch (tx_size) {
- case TX_4X4:
- vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob);
- break;
- case TX_8X8:
- vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
- break;
+ case TX_4X4: vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob); break;
+ case TX_8X8: vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); break;
case TX_16X16:
vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
break;
- case TX_32X32:
- vp9_idct32x32_add(dqcoeff, dst, stride, eob);
- break;
- default:
- assert(0 && "Invalid transform size");
- return;
+ case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break;
+ default: assert(0 && "Invalid transform size"); return;
}
}
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -363,8 +320,7 @@ static void inverse_transform_block_intra(MACROBLOCKD* xd, int plane,
static void predict_and_reconstruct_intra_block(MACROBLOCKD *const xd,
vpx_reader *r,
- MODE_INFO *const mi,
- int plane,
+ MODE_INFO *const mi, int plane,
int row, int col,
TX_SIZE tx_size) {
struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -373,30 +329,29 @@ static void predict_and_reconstruct_intra_block(MACROBLOCKD *const xd,
dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col];
if (mi->sb_type < BLOCK_8X8)
- if (plane == 0)
- mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
+ if (plane == 0) mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
- vp9_predict_intra_block(xd, pd->n4_wl, tx_size, mode,
- dst, pd->dst.stride, dst, pd->dst.stride,
- col, row, plane);
+ vp9_predict_intra_block(xd, pd->n4_wl, tx_size, mode, dst, pd->dst.stride,
+ dst, pd->dst.stride, col, row, plane);
if (!mi->skip) {
- const TX_TYPE tx_type = (plane || xd->lossless) ?
- DCT_DCT : intra_mode_to_tx_type_lookup[mode];
- const scan_order *sc = (plane || xd->lossless) ?
- &vp9_default_scan_orders[tx_size] : &vp9_scan_orders[tx_size][tx_type];
- const int eob = vp9_decode_block_tokens(xd, plane, sc, col, row, tx_size,
- r, mi->segment_id);
+ const TX_TYPE tx_type =
+ (plane || xd->lossless) ? DCT_DCT : intra_mode_to_tx_type_lookup[mode];
+ const scan_order *sc = (plane || xd->lossless)
+ ? &vp9_default_scan_orders[tx_size]
+ : &vp9_scan_orders[tx_size][tx_type];
+ const int eob = vp9_decode_block_tokens(xd, plane, sc, col, row, tx_size, r,
+ mi->segment_id);
if (eob > 0) {
- inverse_transform_block_intra(xd, plane, tx_type, tx_size,
- dst, pd->dst.stride, eob);
+ inverse_transform_block_intra(xd, plane, tx_type, tx_size, dst,
+ pd->dst.stride, eob);
}
}
}
static int reconstruct_inter_block(MACROBLOCKD *const xd, vpx_reader *r,
- MODE_INFO *const mi, int plane,
- int row, int col, TX_SIZE tx_size) {
+ MODE_INFO *const mi, int plane, int row,
+ int col, TX_SIZE tx_size) {
struct macroblockd_plane *const pd = &xd->plane[plane];
const scan_order *sc = &vp9_default_scan_orders[tx_size];
const int eob = vp9_decode_block_tokens(xd, plane, sc, col, row, tx_size, r,
@@ -410,9 +365,9 @@ static int reconstruct_inter_block(MACROBLOCKD *const xd, vpx_reader *r,
return eob;
}
-static void build_mc_border(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- int x, int y, int b_w, int b_h, int w, int h) {
+static void build_mc_border(const uint8_t *src, int src_stride, uint8_t *dst,
+ int dst_stride, int x, int y, int b_w, int b_h,
+ int w, int h) {
// Get a pointer to the start of the real data for this row.
const uint8_t *ref_row = src - x - y * src_stride;
@@ -425,39 +380,31 @@ static void build_mc_border(const uint8_t *src, int src_stride,
int right = 0, copy;
int left = x < 0 ? -x : 0;
- if (left > b_w)
- left = b_w;
+ if (left > b_w) left = b_w;
- if (x + b_w > w)
- right = x + b_w - w;
+ if (x + b_w > w) right = x + b_w - w;
- if (right > b_w)
- right = b_w;
+ if (right > b_w) right = b_w;
copy = b_w - left - right;
- if (left)
- memset(dst, ref_row[0], left);
+ if (left) memset(dst, ref_row[0], left);
- if (copy)
- memcpy(dst + left, ref_row + x + left, copy);
+ if (copy) memcpy(dst + left, ref_row + x + left, copy);
- if (right)
- memset(dst + left + copy, ref_row[w - 1], right);
+ if (right) memset(dst + left + copy, ref_row[w - 1], right);
dst += dst_stride;
++y;
- if (y > 0 && y < h)
- ref_row += src_stride;
+ if (y > 0 && y < h) ref_row += src_stride;
} while (--b_h);
}
#if CONFIG_VP9_HIGHBITDEPTH
static void high_build_mc_border(const uint8_t *src8, int src_stride,
- uint16_t *dst, int dst_stride,
- int x, int y, int b_w, int b_h,
- int w, int h) {
+ uint16_t *dst, int dst_stride, int x, int y,
+ int b_w, int b_h, int w, int h) {
// Get a pointer to the start of the real data for this row.
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
const uint16_t *ref_row = src - x - y * src_stride;
@@ -471,31 +418,24 @@ static void high_build_mc_border(const uint8_t *src8, int src_stride,
int right = 0, copy;
int left = x < 0 ? -x : 0;
- if (left > b_w)
- left = b_w;
+ if (left > b_w) left = b_w;
- if (x + b_w > w)
- right = x + b_w - w;
+ if (x + b_w > w) right = x + b_w - w;
- if (right > b_w)
- right = b_w;
+ if (right > b_w) right = b_w;
copy = b_w - left - right;
- if (left)
- vpx_memset16(dst, ref_row[0], left);
+ if (left) vpx_memset16(dst, ref_row[0], left);
- if (copy)
- memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
+ if (copy) memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
- if (right)
- vpx_memset16(dst + left + copy, ref_row[w - 1], right);
+ if (right) vpx_memset16(dst + left + copy, ref_row[w - 1], right);
dst += dst_stride;
++y;
- if (y > 0 && y < h)
- ref_row += src_stride;
+ if (y > 0 && y < h) ref_row += src_stride;
} while (--b_h);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -504,23 +444,21 @@ static void high_build_mc_border(const uint8_t *src8, int src_stride,
static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
int x0, int y0, int b_w, int b_h,
int frame_width, int frame_height,
- int border_offset,
- uint8_t *const dst, int dst_buf_stride,
- int subpel_x, int subpel_y,
+ int border_offset, uint8_t *const dst,
+ int dst_buf_stride, int subpel_x, int subpel_y,
const InterpKernel *kernel,
- const struct scale_factors *sf,
- MACROBLOCKD *xd,
+ const struct scale_factors *sf, MACROBLOCKD *xd,
int w, int h, int ref, int xs, int ys) {
DECLARE_ALIGNED(16, uint16_t, mc_buf_high[80 * 2 * 80 * 2]);
const uint8_t *buf_ptr;
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- high_build_mc_border(buf_ptr1, pre_buf_stride, mc_buf_high, b_w,
- x0, y0, b_w, b_h, frame_width, frame_height);
+ high_build_mc_border(buf_ptr1, pre_buf_stride, mc_buf_high, b_w, x0, y0,
+ b_w, b_h, frame_width, frame_height);
buf_ptr = CONVERT_TO_BYTEPTR(mc_buf_high) + border_offset;
} else {
- build_mc_border(buf_ptr1, pre_buf_stride, (uint8_t *)mc_buf_high, b_w,
- x0, y0, b_w, b_h, frame_width, frame_height);
+ build_mc_border(buf_ptr1, pre_buf_stride, (uint8_t *)mc_buf_high, b_w, x0,
+ y0, b_w, b_h, frame_width, frame_height);
buf_ptr = ((uint8_t *)mc_buf_high) + border_offset;
}
@@ -528,46 +466,42 @@ static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
highbd_inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
} else {
- inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
- subpel_y, sf, w, h, ref, kernel, xs, ys);
+ inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, subpel_y, sf,
+ w, h, ref, kernel, xs, ys);
}
}
#else
static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
int x0, int y0, int b_w, int b_h,
int frame_width, int frame_height,
- int border_offset,
- uint8_t *const dst, int dst_buf_stride,
- int subpel_x, int subpel_y,
+ int border_offset, uint8_t *const dst,
+ int dst_buf_stride, int subpel_x, int subpel_y,
const InterpKernel *kernel,
- const struct scale_factors *sf,
- int w, int h, int ref, int xs, int ys) {
+ const struct scale_factors *sf, int w, int h,
+ int ref, int xs, int ys) {
DECLARE_ALIGNED(16, uint8_t, mc_buf[80 * 2 * 80 * 2]);
const uint8_t *buf_ptr;
- build_mc_border(buf_ptr1, pre_buf_stride, mc_buf, b_w,
- x0, y0, b_w, b_h, frame_width, frame_height);
+ build_mc_border(buf_ptr1, pre_buf_stride, mc_buf, b_w, x0, y0, b_w, b_h,
+ frame_width, frame_height);
buf_ptr = mc_buf + border_offset;
- inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
- subpel_y, sf, w, h, ref, kernel, xs, ys);
+ inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, subpel_y, sf, w,
+ h, ref, kernel, xs, ys);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
-static void dec_build_inter_predictors(VPxWorker *const worker, MACROBLOCKD *xd,
- int plane, int bw, int bh, int x,
- int y, int w, int h, int mi_x, int mi_y,
- const InterpKernel *kernel,
- const struct scale_factors *sf,
- struct buf_2d *pre_buf,
- struct buf_2d *dst_buf, const MV* mv,
- RefCntBuffer *ref_frame_buf,
- int is_scaled, int ref) {
+static void dec_build_inter_predictors(
+ VPxWorker *const worker, MACROBLOCKD *xd, int plane, int bw, int bh, int x,
+ int y, int w, int h, int mi_x, int mi_y, const InterpKernel *kernel,
+ const struct scale_factors *sf, struct buf_2d *pre_buf,
+ struct buf_2d *dst_buf, const MV *mv, RefCntBuffer *ref_frame_buf,
+ int is_scaled, int ref) {
struct macroblockd_plane *const pd = &xd->plane[plane];
uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
MV32 scaled_mv;
- int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height,
- buf_stride, subpel_x, subpel_y;
+ int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, buf_stride,
+ subpel_x, subpel_y;
uint8_t *ref_frame, *buf_ptr;
// Get reference frame pointer, width and height.
@@ -578,14 +512,13 @@ static void dec_build_inter_predictors(VPxWorker *const worker, MACROBLOCKD *xd,
} else {
frame_width = ref_frame_buf->buf.uv_crop_width;
frame_height = ref_frame_buf->buf.uv_crop_height;
- ref_frame = plane == 1 ? ref_frame_buf->buf.u_buffer
- : ref_frame_buf->buf.v_buffer;
+ ref_frame =
+ plane == 1 ? ref_frame_buf->buf.u_buffer : ref_frame_buf->buf.v_buffer;
}
if (is_scaled) {
- const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, mv, bw, bh,
- pd->subsampling_x,
- pd->subsampling_y);
+ const MV mv_q4 = clamp_mv_to_umv_border_sb(
+ xd, mv, bw, bh, pd->subsampling_x, pd->subsampling_y);
// Co-ordinate of containing block to pixel precision.
int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
@@ -642,8 +575,8 @@ static void dec_build_inter_predictors(VPxWorker *const worker, MACROBLOCKD *xd,
// Do border extension if there is motion or the
// width/height is not a multiple of 8 pixels.
- if (is_scaled || scaled_mv.col || scaled_mv.row ||
- (frame_width & 0x7) || (frame_height & 0x7)) {
+ if (is_scaled || scaled_mv.col || scaled_mv.row || (frame_width & 0x7) ||
+ (frame_height & 0x7)) {
int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
// Get reference block bottom right horizontal coordinate.
@@ -665,8 +598,8 @@ static void dec_build_inter_predictors(VPxWorker *const worker, MACROBLOCKD *xd,
// Wait until reference block is ready. Pad 7 more pixels as last 7
// pixels of each superblock row can be changed by next superblock row.
if (worker != NULL)
- vp9_frameworker_wait(worker, ref_frame_buf,
- VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1));
+ vp9_frameworker_wait(worker, ref_frame_buf, VPXMAX(0, (y1 + 7))
+ << (plane == 0 ? 0 : 1));
// Skip border extension if block is inside the frame.
if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
@@ -677,11 +610,9 @@ static void dec_build_inter_predictors(VPxWorker *const worker, MACROBLOCKD *xd,
const int b_h = y1 - y0 + 1;
const int border_offset = y_pad * 3 * b_w + x_pad * 3;
- extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h,
- frame_width, frame_height, border_offset,
- dst, dst_buf->stride,
- subpel_x, subpel_y,
- kernel, sf,
+ extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h, frame_width,
+ frame_height, border_offset, dst, dst_buf->stride,
+ subpel_x, subpel_y, kernel, sf,
#if CONFIG_VP9_HIGHBITDEPTH
xd,
#endif
@@ -693,8 +624,8 @@ static void dec_build_inter_predictors(VPxWorker *const worker, MACROBLOCKD *xd,
// pixels of each superblock row can be changed by next superblock row.
if (worker != NULL) {
const int y1 = (y0_16 + (h - 1) * ys) >> SUBPEL_BITS;
- vp9_frameworker_wait(worker, ref_frame_buf,
- VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1));
+ vp9_frameworker_wait(worker, ref_frame_buf, VPXMAX(0, (y1 + 7))
+ << (plane == 0 ? 0 : 1));
}
}
#if CONFIG_VP9_HIGHBITDEPTH
@@ -706,14 +637,14 @@ static void dec_build_inter_predictors(VPxWorker *const worker, MACROBLOCKD *xd,
subpel_y, sf, w, h, ref, kernel, xs, ys);
}
#else
- inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
- subpel_y, sf, w, h, ref, kernel, xs, ys);
+ inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, subpel_y,
+ sf, w, h, ref, kernel, xs, ys);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
static void dec_build_inter_predictors_sb(VP9Decoder *const pbi,
- MACROBLOCKD *xd,
- int mi_row, int mi_col) {
+ MACROBLOCKD *xd, int mi_row,
+ int mi_col) {
int plane;
const int mi_x = mi_col * MI_SIZE;
const int mi_y = mi_row * MI_SIZE;
@@ -723,8 +654,8 @@ static void dec_build_inter_predictors_sb(VP9Decoder *const pbi,
const int is_compound = has_second_ref(mi);
int ref;
int is_scaled;
- VPxWorker *const fwo = pbi->frame_parallel_decode ?
- pbi->frame_worker_owner : NULL;
+ VPxWorker *const fwo =
+ pbi->frame_parallel_decode ? pbi->frame_worker_owner : NULL;
for (ref = 0; ref < 1 + is_compound; ++ref) {
const MV_REFERENCE_FRAME frame = mi->ref_frame[ref];
@@ -756,10 +687,10 @@ static void dec_build_inter_predictors_sb(VP9Decoder *const pbi,
for (y = 0; y < num_4x4_h; ++y) {
for (x = 0; x < num_4x4_w; ++x) {
const MV mv = average_split_mvs(pd, mi, ref, i++);
- dec_build_inter_predictors(fwo, xd, plane, n4w_x4, n4h_x4,
- 4 * x, 4 * y, 4, 4, mi_x, mi_y, kernel,
- sf, pre_buf, dst_buf, &mv,
- ref_frame_buf, is_scaled, ref);
+ dec_build_inter_predictors(fwo, xd, plane, n4w_x4, n4h_x4, 4 * x,
+ 4 * y, 4, 4, mi_x, mi_y, kernel, sf,
+ pre_buf, dst_buf, &mv, ref_frame_buf,
+ is_scaled, ref);
}
}
}
@@ -773,20 +704,19 @@ static void dec_build_inter_predictors_sb(VP9Decoder *const pbi,
const int n4w_x4 = 4 * num_4x4_w;
const int n4h_x4 = 4 * num_4x4_h;
struct buf_2d *const pre_buf = &pd->pre[ref];
- dec_build_inter_predictors(fwo, xd, plane, n4w_x4, n4h_x4,
- 0, 0, n4w_x4, n4h_x4, mi_x, mi_y, kernel,
- sf, pre_buf, dst_buf, &mv,
- ref_frame_buf, is_scaled, ref);
+ dec_build_inter_predictors(fwo, xd, plane, n4w_x4, n4h_x4, 0, 0, n4w_x4,
+ n4h_x4, mi_x, mi_y, kernel, sf, pre_buf,
+ dst_buf, &mv, ref_frame_buf, is_scaled, ref);
}
}
}
}
-static INLINE TX_SIZE dec_get_uv_tx_size(const MODE_INFO *mi,
- int n4_wl, int n4_hl) {
+static INLINE TX_SIZE dec_get_uv_tx_size(const MODE_INFO *mi, int n4_wl,
+ int n4_hl) {
// get minimum log2 num4x4s dimension
const int x = VPXMIN(n4_wl, n4_hl);
- return VPXMIN(mi->tx_size, x);
+ return VPXMIN(mi->tx_size, x);
}
static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) {
@@ -810,9 +740,8 @@ static void set_plane_n4(MACROBLOCKD *const xd, int bw, int bh, int bwl,
}
static MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
- BLOCK_SIZE bsize, int mi_row, int mi_col,
- int bw, int bh, int x_mis, int y_mis,
- int bwl, int bhl) {
+ BLOCK_SIZE bsize, int mi_row, int mi_col, int bw,
+ int bh, int x_mis, int y_mis, int bwl, int bhl) {
const int offset = mi_row * cm->mi_stride + mi_col;
int x, y;
const TileInfo *const tile = &xd->tile;
@@ -840,9 +769,8 @@ static MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
}
static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd,
- int mi_row, int mi_col,
- vpx_reader *r, BLOCK_SIZE bsize,
- int bwl, int bhl) {
+ int mi_row, int mi_col, vpx_reader *r,
+ BLOCK_SIZE bsize, int bwl, int bhl) {
VP9_COMMON *const cm = &pbi->common;
const int less8x8 = bsize < BLOCK_8X8;
const int bw = 1 << (bwl - 1);
@@ -850,15 +778,15 @@ static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd,
const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
- MODE_INFO *mi = set_offsets(cm, xd, bsize, mi_row, mi_col,
- bw, bh, x_mis, y_mis, bwl, bhl);
+ MODE_INFO *mi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis,
+ y_mis, bwl, bhl);
if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) {
const BLOCK_SIZE uv_subsize =
ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y];
if (uv_subsize == BLOCK_INVALID)
- vpx_internal_error(xd->error_info,
- VPX_CODEC_CORRUPT_FRAME, "Invalid block size.");
+ vpx_internal_error(xd->error_info, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid block size.");
}
vp9_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
@@ -872,24 +800,27 @@ static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd,
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
const TX_SIZE tx_size =
- plane ? dec_get_uv_tx_size(mi, pd->n4_wl, pd->n4_hl)
- : mi->tx_size;
+ plane ? dec_get_uv_tx_size(mi, pd->n4_wl, pd->n4_hl) : mi->tx_size;
const int num_4x4_w = pd->n4_w;
const int num_4x4_h = pd->n4_h;
const int step = (1 << tx_size);
int row, col;
- const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ?
- 0 : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
- const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ?
- 0 : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+ const int max_blocks_wide =
+ num_4x4_w + (xd->mb_to_right_edge >= 0
+ ? 0
+ : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+ const int max_blocks_high =
+ num_4x4_h + (xd->mb_to_bottom_edge >= 0
+ ? 0
+ : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
xd->max_blocks_wide = xd->mb_to_right_edge >= 0 ? 0 : max_blocks_wide;
xd->max_blocks_high = xd->mb_to_bottom_edge >= 0 ? 0 : max_blocks_high;
for (row = 0; row < max_blocks_high; row += step)
for (col = 0; col < max_blocks_wide; col += step)
- predict_and_reconstruct_intra_block(xd, r, mi, plane,
- row, col, tx_size);
+ predict_and_reconstruct_intra_block(xd, r, mi, plane, row, col,
+ tx_size);
}
} else {
// Prediction
@@ -903,28 +834,30 @@ static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd,
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
const TX_SIZE tx_size =
- plane ? dec_get_uv_tx_size(mi, pd->n4_wl, pd->n4_hl)
- : mi->tx_size;
+ plane ? dec_get_uv_tx_size(mi, pd->n4_wl, pd->n4_hl) : mi->tx_size;
const int num_4x4_w = pd->n4_w;
const int num_4x4_h = pd->n4_h;
const int step = (1 << tx_size);
int row, col;
- const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ?
- 0 : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
- const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ?
- 0 : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+ const int max_blocks_wide =
+ num_4x4_w + (xd->mb_to_right_edge >= 0
+ ? 0
+ : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+ const int max_blocks_high =
+ num_4x4_h +
+ (xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >>
+ (5 + pd->subsampling_y));
xd->max_blocks_wide = xd->mb_to_right_edge >= 0 ? 0 : max_blocks_wide;
xd->max_blocks_high = xd->mb_to_bottom_edge >= 0 ? 0 : max_blocks_high;
for (row = 0; row < max_blocks_high; row += step)
for (col = 0; col < max_blocks_wide; col += step)
- eobtotal += reconstruct_inter_block(xd, r, mi, plane, row, col,
- tx_size);
+ eobtotal +=
+ reconstruct_inter_block(xd, r, mi, plane, row, col, tx_size);
}
- if (!less8x8 && eobtotal == 0)
- mi->skip = 1; // skip loopfilter
+ if (!less8x8 && eobtotal == 0) mi->skip = 1; // skip loopfilter
}
}
@@ -935,21 +868,19 @@ static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd,
}
}
-static INLINE int dec_partition_plane_context(const MACROBLOCKD *xd,
- int mi_row, int mi_col,
- int bsl) {
+static INLINE int dec_partition_plane_context(const MACROBLOCKD *xd, int mi_row,
+ int mi_col, int bsl) {
const PARTITION_CONTEXT *above_ctx = xd->above_seg_context + mi_col;
const PARTITION_CONTEXT *left_ctx = xd->left_seg_context + (mi_row & MI_MASK);
- int above = (*above_ctx >> bsl) & 1 , left = (*left_ctx >> bsl) & 1;
+ int above = (*above_ctx >> bsl) & 1, left = (*left_ctx >> bsl) & 1;
-// assert(bsl >= 0);
+ // assert(bsl >= 0);
return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
}
-static INLINE void dec_update_partition_context(MACROBLOCKD *xd,
- int mi_row, int mi_col,
- BLOCK_SIZE subsize,
+static INLINE void dec_update_partition_context(MACROBLOCKD *xd, int mi_row,
+ int mi_col, BLOCK_SIZE subsize,
int bw) {
PARTITION_CONTEXT *const above_ctx = xd->above_seg_context + mi_col;
PARTITION_CONTEXT *const left_ctx = xd->left_seg_context + (mi_row & MI_MASK);
@@ -962,8 +893,8 @@ static INLINE void dec_update_partition_context(MACROBLOCKD *xd,
}
static PARTITION_TYPE read_partition(MACROBLOCKD *xd, int mi_row, int mi_col,
- vpx_reader *r,
- int has_rows, int has_cols, int bsl) {
+ vpx_reader *r, int has_rows, int has_cols,
+ int bsl) {
const int ctx = dec_partition_plane_context(xd, mi_row, mi_col, bsl);
const vpx_prob *const probs = get_partition_probs(xd, ctx);
FRAME_COUNTS *counts = xd->counts;
@@ -978,16 +909,15 @@ static PARTITION_TYPE read_partition(MACROBLOCKD *xd, int mi_row, int mi_col,
else
p = PARTITION_SPLIT;
- if (counts)
- ++counts->partition[ctx][p];
+ if (counts) ++counts->partition[ctx][p];
return p;
}
// TODO(slavarnway): eliminate bsize and subsize in future commits
static void decode_partition(VP9Decoder *const pbi, MACROBLOCKD *const xd,
- int mi_row, int mi_col,
- vpx_reader* r, BLOCK_SIZE bsize, int n4x4_l2) {
+ int mi_row, int mi_col, vpx_reader *r,
+ BLOCK_SIZE bsize, int n4x4_l2) {
VP9_COMMON *const cm = &pbi->common;
const int n8x8_l2 = n4x4_l2 - 1;
const int num_8x8_wh = 1 << n8x8_l2;
@@ -997,11 +927,10 @@ static void decode_partition(VP9Decoder *const pbi, MACROBLOCKD *const xd,
const int has_rows = (mi_row + hbs) < cm->mi_rows;
const int has_cols = (mi_col + hbs) < cm->mi_cols;
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
- partition = read_partition(xd, mi_row, mi_col, r, has_rows, has_cols,
- n8x8_l2);
+ partition =
+ read_partition(xd, mi_row, mi_col, r, has_rows, has_cols, n8x8_l2);
subsize = subsize_lookup[partition][bsize]; // get_subsize(bsize, partition);
if (!hbs) {
// calculate bmode block dimensions (log 2)
@@ -1032,8 +961,7 @@ static void decode_partition(VP9Decoder *const pbi, MACROBLOCKD *const xd,
decode_partition(pbi, xd, mi_row + hbs, mi_col + hbs, r, subsize,
n8x8_l2);
break;
- default:
- assert(0 && "Invalid partition type");
+ default: assert(0 && "Invalid partition type");
}
}
@@ -1043,12 +971,10 @@ static void decode_partition(VP9Decoder *const pbi, MACROBLOCKD *const xd,
dec_update_partition_context(xd, mi_row, mi_col, subsize, num_8x8_wh);
}
-static void setup_token_decoder(const uint8_t *data,
- const uint8_t *data_end,
+static void setup_token_decoder(const uint8_t *data, const uint8_t *data_end,
size_t read_size,
struct vpx_internal_error_info *error_info,
- vpx_reader *r,
- vpx_decrypt_cb decrypt_cb,
+ vpx_reader *r, vpx_decrypt_cb decrypt_cb,
void *decrypt_state) {
// Validate the calculated partition length. If the buffer
// described by the partition can't be fully read, then restrict
@@ -1075,12 +1001,11 @@ static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs,
vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
}
-static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
- vpx_reader *r) {
- const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
- TX_SIZE tx_size;
- for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
- read_coef_probs_common(fc->coef_probs[tx_size], r);
+static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, vpx_reader *r) {
+ const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
+ TX_SIZE tx_size;
+ for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
+ read_coef_probs_common(fc->coef_probs[tx_size], r);
}
static void setup_segmentation(struct segmentation *seg,
@@ -1091,24 +1016,22 @@ static void setup_segmentation(struct segmentation *seg,
seg->update_data = 0;
seg->enabled = vpx_rb_read_bit(rb);
- if (!seg->enabled)
- return;
+ if (!seg->enabled) return;
// Segmentation map update
seg->update_map = vpx_rb_read_bit(rb);
if (seg->update_map) {
for (i = 0; i < SEG_TREE_PROBS; i++)
- seg->tree_probs[i] = vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8)
- : MAX_PROB;
+ seg->tree_probs[i] =
+ vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) : MAX_PROB;
seg->temporal_update = vpx_rb_read_bit(rb);
if (seg->temporal_update) {
for (i = 0; i < PREDICTION_PROBS; i++)
- seg->pred_probs[i] = vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8)
- : MAX_PROB;
+ seg->pred_probs[i] =
+ vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) : MAX_PROB;
} else {
- for (i = 0; i < PREDICTION_PROBS; i++)
- seg->pred_probs[i] = MAX_PROB;
+ for (i = 0; i < PREDICTION_PROBS; i++) seg->pred_probs[i] = MAX_PROB;
}
}
@@ -1172,10 +1095,8 @@ static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd,
cm->uv_dc_delta_q = read_delta_q(rb);
cm->uv_ac_delta_q = read_delta_q(rb);
cm->dequant_bit_depth = cm->bit_depth;
- xd->lossless = cm->base_qindex == 0 &&
- cm->y_dc_delta_q == 0 &&
- cm->uv_dc_delta_q == 0 &&
- cm->uv_ac_delta_q == 0;
+ xd->lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0 &&
+ cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
#if CONFIG_VP9_HIGHBITDEPTH
xd->bd = (int)cm->bit_depth;
@@ -1188,13 +1109,13 @@ static void setup_segmentation_dequant(VP9_COMMON *const cm) {
int i;
for (i = 0; i < MAX_SEGMENTS; ++i) {
const int qindex = vp9_get_qindex(&cm->seg, i, cm->base_qindex);
- cm->y_dequant[i][0] = vp9_dc_quant(qindex, cm->y_dc_delta_q,
- cm->bit_depth);
+ cm->y_dequant[i][0] =
+ vp9_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
cm->y_dequant[i][1] = vp9_ac_quant(qindex, 0, cm->bit_depth);
- cm->uv_dequant[i][0] = vp9_dc_quant(qindex, cm->uv_dc_delta_q,
- cm->bit_depth);
- cm->uv_dequant[i][1] = vp9_ac_quant(qindex, cm->uv_ac_delta_q,
- cm->bit_depth);
+ cm->uv_dequant[i][0] =
+ vp9_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+ cm->uv_dequant[i][1] =
+ vp9_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
}
} else {
const int qindex = cm->base_qindex;
@@ -1202,18 +1123,16 @@ static void setup_segmentation_dequant(VP9_COMMON *const cm) {
// remaining are don't cares.
cm->y_dequant[0][0] = vp9_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
cm->y_dequant[0][1] = vp9_ac_quant(qindex, 0, cm->bit_depth);
- cm->uv_dequant[0][0] = vp9_dc_quant(qindex, cm->uv_dc_delta_q,
- cm->bit_depth);
- cm->uv_dequant[0][1] = vp9_ac_quant(qindex, cm->uv_ac_delta_q,
- cm->bit_depth);
+ cm->uv_dequant[0][0] =
+ vp9_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+ cm->uv_dequant[0][1] =
+ vp9_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
}
}
static INTERP_FILTER read_interp_filter(struct vpx_read_bit_buffer *rb) {
- const INTERP_FILTER literal_to_filter[] = { EIGHTTAP_SMOOTH,
- EIGHTTAP,
- EIGHTTAP_SHARP,
- BILINEAR };
+ const INTERP_FILTER literal_to_filter[] = { EIGHTTAP_SMOOTH, EIGHTTAP,
+ EIGHTTAP_SHARP, BILINEAR };
return vpx_rb_read_bit(rb) ? SWITCHABLE
: literal_to_filter[vpx_rb_read_literal(rb, 2)];
}
@@ -1245,7 +1164,7 @@ static void resize_context_buffers(VP9_COMMON *cm, int width, int height) {
const int new_mi_rows =
ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
const int new_mi_cols =
- ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
+ ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
// Allocations in vp9_alloc_context_buffers() depend on individual
// dimensions as well as the overall size.
@@ -1275,13 +1194,12 @@ static void setup_frame_size(VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) {
lock_buffer_pool(pool);
if (vpx_realloc_frame_buffer(
- get_frame_new_buffer(cm), cm->width, cm->height,
- cm->subsampling_x, cm->subsampling_y,
+ get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
+ cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VP9_DEC_BORDER_IN_PIXELS,
- cm->byte_alignment,
+ VP9_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
&pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
pool->cb_priv)) {
unlock_buffer_pool(pool);
@@ -1295,7 +1213,7 @@ static void setup_frame_size(VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) {
pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range;
- pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
+ pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
}
@@ -1328,8 +1246,7 @@ static void setup_frame_size_with_refs(VP9_COMMON *cm,
}
}
- if (!found)
- vp9_read_frame_size(rb, &width, &height);
+ if (!found) vp9_read_frame_size(rb, &width, &height);
if (width <= 0 || height <= 0)
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
@@ -1339,10 +1256,10 @@ static void setup_frame_size_with_refs(VP9_COMMON *cm,
// has valid dimensions.
for (i = 0; i < REFS_PER_FRAME; ++i) {
RefBuffer *const ref_frame = &cm->frame_refs[i];
- has_valid_ref_frame |= (ref_frame->idx != INVALID_IDX &&
- valid_ref_frame_size(ref_frame->buf->y_crop_width,
- ref_frame->buf->y_crop_height,
- width, height));
+ has_valid_ref_frame |=
+ (ref_frame->idx != INVALID_IDX &&
+ valid_ref_frame_size(ref_frame->buf->y_crop_width,
+ ref_frame->buf->y_crop_height, width, height));
}
if (!has_valid_ref_frame)
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
@@ -1352,10 +1269,8 @@ static void setup_frame_size_with_refs(VP9_COMMON *cm,
if (ref_frame->idx == INVALID_IDX ||
!valid_ref_frame_img_fmt(ref_frame->buf->bit_depth,
ref_frame->buf->subsampling_x,
- ref_frame->buf->subsampling_y,
- cm->bit_depth,
- cm->subsampling_x,
- cm->subsampling_y))
+ ref_frame->buf->subsampling_y, cm->bit_depth,
+ cm->subsampling_x, cm->subsampling_y))
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
"Referenced frame has incompatible color format");
}
@@ -1365,13 +1280,12 @@ static void setup_frame_size_with_refs(VP9_COMMON *cm,
lock_buffer_pool(pool);
if (vpx_realloc_frame_buffer(
- get_frame_new_buffer(cm), cm->width, cm->height,
- cm->subsampling_x, cm->subsampling_y,
+ get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
+ cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VP9_DEC_BORDER_IN_PIXELS,
- cm->byte_alignment,
+ VP9_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
&pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
pool->cb_priv)) {
unlock_buffer_pool(pool);
@@ -1385,7 +1299,7 @@ static void setup_frame_size_with_refs(VP9_COMMON *cm,
pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range;
- pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
+ pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
}
@@ -1396,8 +1310,7 @@ static void setup_tile_info(VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) {
// columns
max_ones = max_log2_tile_cols - min_log2_tile_cols;
cm->log2_tile_cols = min_log2_tile_cols;
- while (max_ones-- && vpx_rb_read_bit(rb))
- cm->log2_tile_cols++;
+ while (max_ones-- && vpx_rb_read_bit(rb)) cm->log2_tile_cols++;
if (cm->log2_tile_cols > 6)
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
@@ -1405,18 +1318,15 @@ static void setup_tile_info(VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) {
// rows
cm->log2_tile_rows = vpx_rb_read_bit(rb);
- if (cm->log2_tile_rows)
- cm->log2_tile_rows += vpx_rb_read_bit(rb);
+ if (cm->log2_tile_rows) cm->log2_tile_rows += vpx_rb_read_bit(rb);
}
// Reads the next tile returning its size and adjusting '*data' accordingly
// based on 'is_last'.
-static void get_tile_buffer(const uint8_t *const data_end,
- int is_last,
+static void get_tile_buffer(const uint8_t *const data_end, int is_last,
struct vpx_internal_error_info *error_info,
- const uint8_t **data,
- vpx_decrypt_cb decrypt_cb, void *decrypt_state,
- TileBuffer *buf) {
+ const uint8_t **data, vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state, TileBuffer *buf) {
size_t size;
if (!is_last) {
@@ -1446,9 +1356,9 @@ static void get_tile_buffer(const uint8_t *const data_end,
*data += size;
}
-static void get_tile_buffers(VP9Decoder *pbi,
- const uint8_t *data, const uint8_t *data_end,
- int tile_cols, int tile_rows,
+static void get_tile_buffers(VP9Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end, int tile_cols,
+ int tile_rows,
TileBuffer (*tile_buffers)[1 << 6]) {
int r, c;
@@ -1463,8 +1373,7 @@ static void get_tile_buffers(VP9Decoder *pbi,
}
}
-static const uint8_t *decode_tiles(VP9Decoder *pbi,
- const uint8_t *data,
+static const uint8_t *decode_tiles(VP9Decoder *pbi, const uint8_t *data,
const uint8_t *data_end) {
VP9_COMMON *const cm = &pbi->common;
const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
@@ -1488,7 +1397,7 @@ static const uint8_t *decode_tiles(VP9Decoder *pbi,
}
if (cm->lf.filter_level && !cm->skip_loop_filter) {
- LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+ LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
// Be sure to sync as we might be resuming after a failed frame decode.
winterface->sync(&pbi->lf_worker);
vp9_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
@@ -1534,26 +1443,26 @@ static const uint8_t *decode_tiles(VP9Decoder *pbi,
for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end;
mi_row += MI_BLOCK_SIZE) {
for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
- const int col = pbi->inv_tile_order ?
- tile_cols - tile_col - 1 : tile_col;
+ const int col =
+ pbi->inv_tile_order ? tile_cols - tile_col - 1 : tile_col;
tile_data = pbi->tile_worker_data + tile_cols * tile_row + col;
vp9_tile_set_col(&tile, cm, col);
vp9_zero(tile_data->xd.left_context);
vp9_zero(tile_data->xd.left_seg_context);
for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
mi_col += MI_BLOCK_SIZE) {
- decode_partition(pbi, &tile_data->xd, mi_row,
- mi_col, &tile_data->bit_reader, BLOCK_64X64, 4);
+ decode_partition(pbi, &tile_data->xd, mi_row, mi_col,
+ &tile_data->bit_reader, BLOCK_64X64, 4);
}
pbi->mb.corrupted |= tile_data->xd.corrupted;
if (pbi->mb.corrupted)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
- "Failed to decode tile data");
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Failed to decode tile data");
}
// Loopfilter one row.
if (cm->lf.filter_level && !cm->skip_loop_filter) {
const int lf_start = mi_row - MI_BLOCK_SIZE;
- LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+ LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
// delay the loopfilter by 1 macroblock row.
if (lf_start < 0) continue;
@@ -1574,14 +1483,13 @@ static const uint8_t *decode_tiles(VP9Decoder *pbi,
// still be changed by the longest loopfilter of the next superblock
// row.
if (pbi->frame_parallel_decode)
- vp9_frameworker_broadcast(pbi->cur_buf,
- mi_row << MI_BLOCK_SIZE_LOG2);
+ vp9_frameworker_broadcast(pbi->cur_buf, mi_row << MI_BLOCK_SIZE_LOG2);
}
}
// Loopfilter remaining rows in the frame.
if (cm->lf.filter_level && !cm->skip_loop_filter) {
- LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+ LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
winterface->sync(&pbi->lf_worker);
lf_data->start = lf_data->stop;
lf_data->stop = cm->mi_rows;
@@ -1649,13 +1557,12 @@ static int tile_worker_hook(TileWorkerData *const tile_data,
// sorts in descending order
static int compare_tile_buffers(const void *a, const void *b) {
- const TileBuffer *const buf1 = (const TileBuffer*)a;
- const TileBuffer *const buf2 = (const TileBuffer*)b;
+ const TileBuffer *const buf1 = (const TileBuffer *)a;
+ const TileBuffer *const buf2 = (const TileBuffer *)b;
return (int)(buf2->size - buf1->size);
}
-static const uint8_t *decode_tiles_mt(VP9Decoder *pbi,
- const uint8_t *data,
+static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, const uint8_t *data,
const uint8_t *data_end) {
VP9_COMMON *const cm = &pbi->common;
const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
@@ -1745,7 +1652,7 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi,
if (!cm->frame_parallel_decoding_mode) {
for (n = 0; n < num_workers; ++n) {
TileWorkerData *const tile_data =
- (TileWorkerData*)pbi->tile_workers[n].data1;
+ (TileWorkerData *)pbi->tile_workers[n].data1;
vp9_zero(tile_data->counts);
}
}
@@ -1758,7 +1665,7 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi,
for (n = 0; n < num_workers; ++n) {
const int count = base + (remain + n) / num_workers;
VPxWorker *const worker = &pbi->tile_workers[n];
- TileWorkerData *const tile_data = (TileWorkerData*)worker->data1;
+ TileWorkerData *const tile_data = (TileWorkerData *)worker->data1;
tile_data->buf_start = buf_start;
tile_data->buf_end = buf_start + count - 1;
@@ -1776,7 +1683,7 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi,
for (; n > 0; --n) {
VPxWorker *const worker = &pbi->tile_workers[n - 1];
- TileWorkerData *const tile_data = (TileWorkerData*)worker->data1;
+ TileWorkerData *const tile_data = (TileWorkerData *)worker->data1;
// TODO(jzern): The tile may have specific error data associated with
// its vpx_internal_error_info which could be propagated to the main info
// in cm. Additionally once the threads have been synced and an error is
@@ -1790,7 +1697,7 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi,
if (!cm->frame_parallel_decoding_mode) {
for (n = 0; n < num_workers; ++n) {
TileWorkerData *const tile_data =
- (TileWorkerData*)pbi->tile_workers[n].data1;
+ (TileWorkerData *)pbi->tile_workers[n].data1;
vp9_accumulate_frame_counts(&cm->counts, &tile_data->counts, 1);
}
}
@@ -1804,8 +1711,8 @@ static void error_handler(void *data) {
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
}
-static void read_bitdepth_colorspace_sampling(
- VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void read_bitdepth_colorspace_sampling(VP9_COMMON *cm,
+ struct vpx_read_bit_buffer *rb) {
if (cm->profile >= PROFILE_2) {
cm->bit_depth = vpx_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10;
#if CONFIG_VP9_HIGHBITDEPTH
@@ -1860,8 +1767,8 @@ static size_t read_uncompressed_header(VP9Decoder *pbi,
cm->last_intra_only = cm->intra_only;
if (vpx_rb_read_literal(rb, 2) != VP9_FRAME_MARKER)
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
- "Invalid frame marker");
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid frame marker");
cm->profile = vp9_read_profile(rb);
#if CONFIG_VP9_HIGHBITDEPTH
@@ -1899,7 +1806,7 @@ static size_t read_uncompressed_header(VP9Decoder *pbi,
return 0;
}
- cm->frame_type = (FRAME_TYPE) vpx_rb_read_bit(rb);
+ cm->frame_type = (FRAME_TYPE)vpx_rb_read_bit(rb);
cm->show_frame = vpx_rb_read_bit(rb);
cm->error_resilient_mode = vpx_rb_read_bit(rb);
@@ -1924,8 +1831,8 @@ static size_t read_uncompressed_header(VP9Decoder *pbi,
} else {
cm->intra_only = cm->show_frame ? 0 : vpx_rb_read_bit(rb);
- cm->reset_frame_context = cm->error_resilient_mode ?
- 0 : vpx_rb_read_literal(rb, 2);
+ cm->reset_frame_context =
+ cm->error_resilient_mode ? 0 : vpx_rb_read_literal(rb, 2);
if (cm->intra_only) {
if (!vp9_read_sync_code(rb))
@@ -1953,7 +1860,7 @@ static size_t read_uncompressed_header(VP9Decoder *pbi,
memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
pbi->need_resync = 0;
}
- } else if (pbi->need_resync != 1) { /* Skip if need resync */
+ } else if (pbi->need_resync != 1) { /* Skip if need resync */
pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
for (i = 0; i < REFS_PER_FRAME; ++i) {
const int ref = vpx_rb_read_literal(rb, REF_FRAMES_LOG2);
@@ -1972,16 +1879,14 @@ static size_t read_uncompressed_header(VP9Decoder *pbi,
for (i = 0; i < REFS_PER_FRAME; ++i) {
RefBuffer *const ref_buf = &cm->frame_refs[i];
#if CONFIG_VP9_HIGHBITDEPTH
- vp9_setup_scale_factors_for_frame(&ref_buf->sf,
- ref_buf->buf->y_crop_width,
- ref_buf->buf->y_crop_height,
- cm->width, cm->height,
- cm->use_highbitdepth);
+ vp9_setup_scale_factors_for_frame(
+ &ref_buf->sf, ref_buf->buf->y_crop_width,
+ ref_buf->buf->y_crop_height, cm->width, cm->height,
+ cm->use_highbitdepth);
#else
- vp9_setup_scale_factors_for_frame(&ref_buf->sf,
- ref_buf->buf->y_crop_width,
- ref_buf->buf->y_crop_height,
- cm->width, cm->height);
+ vp9_setup_scale_factors_for_frame(
+ &ref_buf->sf, ref_buf->buf->y_crop_width,
+ ref_buf->buf->y_crop_height, cm->width, cm->height);
#endif
}
}
@@ -1991,7 +1896,7 @@ static size_t read_uncompressed_header(VP9Decoder *pbi,
#endif
get_frame_new_buffer(cm)->color_space = cm->color_space;
get_frame_new_buffer(cm)->color_range = cm->color_range;
- get_frame_new_buffer(cm)->render_width = cm->render_width;
+ get_frame_new_buffer(cm)->render_width = cm->render_width;
get_frame_new_buffer(cm)->render_height = cm->render_height;
if (pbi->need_resync) {
@@ -2003,8 +1908,7 @@ static size_t read_uncompressed_header(VP9Decoder *pbi,
if (!cm->error_resilient_mode) {
cm->refresh_frame_context = vpx_rb_read_bit(rb);
cm->frame_parallel_decoding_mode = vpx_rb_read_bit(rb);
- if (!cm->frame_parallel_decoding_mode)
- vp9_zero(cm->counts);
+ if (!cm->frame_parallel_decoding_mode) vp9_zero(cm->counts);
} else {
cm->refresh_frame_context = 0;
cm->frame_parallel_decoding_mode = 1;
@@ -2070,8 +1974,7 @@ static int read_compressed_header(VP9Decoder *pbi, const uint8_t *data,
"Failed to allocate bool decoder 0");
cm->tx_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(&r);
- if (cm->tx_mode == TX_MODE_SELECT)
- read_tx_mode_probs(&fc->tx_probs, &r);
+ if (cm->tx_mode == TX_MODE_SELECT) read_tx_mode_probs(&fc->tx_probs, &r);
read_coef_probs(fc, cm->tx_mode, &r);
for (k = 0; k < SKIP_CONTEXTS; ++k)
@@ -2083,8 +1986,7 @@ static int read_compressed_header(VP9Decoder *pbi, const uint8_t *data,
read_inter_mode_probs(fc, &r);
- if (cm->interp_filter == SWITCHABLE)
- read_switchable_interp_probs(fc, &r);
+ if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r);
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
vp9_diff_update_prob(&r, &fc->intra_inter_prob[i]);
@@ -2109,11 +2011,8 @@ static int read_compressed_header(VP9Decoder *pbi, const uint8_t *data,
}
static struct vpx_read_bit_buffer *init_read_bit_buffer(
- VP9Decoder *pbi,
- struct vpx_read_bit_buffer *rb,
- const uint8_t *data,
- const uint8_t *data_end,
- uint8_t clear_data[MAX_VP9_HEADER_SIZE]) {
+ VP9Decoder *pbi, struct vpx_read_bit_buffer *rb, const uint8_t *data,
+ const uint8_t *data_end, uint8_t clear_data[MAX_VP9_HEADER_SIZE]) {
rb->bit_offset = 0;
rb->error_handler = error_handler;
rb->error_handler_data = &pbi->common;
@@ -2137,8 +2036,8 @@ int vp9_read_sync_code(struct vpx_read_bit_buffer *const rb) {
vpx_rb_read_literal(rb, 8) == VP9_SYNC_CODE_2;
}
-void vp9_read_frame_size(struct vpx_read_bit_buffer *rb,
- int *width, int *height) {
+void vp9_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
+ int *height) {
*width = vpx_rb_read_literal(rb, 16) + 1;
*height = vpx_rb_read_literal(rb, 16) + 1;
}
@@ -2146,21 +2045,19 @@ void vp9_read_frame_size(struct vpx_read_bit_buffer *rb,
BITSTREAM_PROFILE vp9_read_profile(struct vpx_read_bit_buffer *rb) {
int profile = vpx_rb_read_bit(rb);
profile |= vpx_rb_read_bit(rb) << 1;
- if (profile > 2)
- profile += vpx_rb_read_bit(rb);
- return (BITSTREAM_PROFILE) profile;
+ if (profile > 2) profile += vpx_rb_read_bit(rb);
+ return (BITSTREAM_PROFILE)profile;
}
-void vp9_decode_frame(VP9Decoder *pbi,
- const uint8_t *data, const uint8_t *data_end,
- const uint8_t **p_data_end) {
+void vp9_decode_frame(VP9Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end, const uint8_t **p_data_end) {
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
struct vpx_read_bit_buffer rb;
int context_updated = 0;
uint8_t clear_data[MAX_VP9_HEADER_SIZE];
- const size_t first_partition_size = read_uncompressed_header(pbi,
- init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
+ const size_t first_partition_size = read_uncompressed_header(
+ pbi, init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
const int tile_rows = 1 << cm->log2_tile_rows;
const int tile_cols = 1 << cm->log2_tile_cols;
YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
@@ -2177,12 +2074,10 @@ void vp9_decode_frame(VP9Decoder *pbi,
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt header length");
- cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
- cm->width == cm->last_width &&
- cm->height == cm->last_height &&
- !cm->last_intra_only &&
- cm->last_show_frame &&
- (cm->last_frame_type != KEY_FRAME);
+ cm->use_prev_frame_mvs =
+ !cm->error_resilient_mode && cm->width == cm->last_width &&
+ cm->height == cm->last_height && !cm->last_intra_only &&
+ cm->last_show_frame && (cm->last_frame_type != KEY_FRAME);
vp9_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
@@ -2221,8 +2116,8 @@ void vp9_decode_frame(VP9Decoder *pbi,
if (pbi->tile_worker_data == NULL ||
(tile_cols * tile_rows) != pbi->total_tiles) {
- const int num_tile_workers = tile_cols * tile_rows +
- ((pbi->max_threads > 1) ? pbi->max_threads : 0);
+ const int num_tile_workers =
+ tile_cols * tile_rows + ((pbi->max_threads > 1) ? pbi->max_threads : 0);
const size_t twd_size = num_tile_workers * sizeof(*pbi->tile_worker_data);
// Ensure tile data offsets will be properly aligned. This may fail on
// platforms without DECLARE_ALIGNED().
@@ -2239,9 +2134,9 @@ void vp9_decode_frame(VP9Decoder *pbi,
if (!cm->skip_loop_filter) {
// If multiple threads are used to decode tiles, then we use those
// threads to do parallel loopfiltering.
- vp9_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane,
- cm->lf.filter_level, 0, 0, pbi->tile_workers,
- pbi->num_tile_workers, &pbi->lf_row_sync);
+ vp9_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, cm->lf.filter_level,
+ 0, 0, pbi->tile_workers, pbi->num_tile_workers,
+ &pbi->lf_row_sync);
}
} else {
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
diff --git a/vp9/decoder/vp9_decodeframe.h b/vp9/decoder/vp9_decodeframe.h
index ce33cbdbd..44717f546 100644
--- a/vp9/decoder/vp9_decodeframe.h
+++ b/vp9/decoder/vp9_decodeframe.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_DECODER_VP9_DECODEFRAME_H_
#define VP9_DECODER_VP9_DECODEFRAME_H_
@@ -22,13 +21,12 @@ struct VP9Decoder;
struct vpx_read_bit_buffer;
int vp9_read_sync_code(struct vpx_read_bit_buffer *const rb);
-void vp9_read_frame_size(struct vpx_read_bit_buffer *rb,
- int *width, int *height);
+void vp9_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
+ int *height);
BITSTREAM_PROFILE vp9_read_profile(struct vpx_read_bit_buffer *rb);
-void vp9_decode_frame(struct VP9Decoder *pbi,
- const uint8_t *data, const uint8_t *data_end,
- const uint8_t **p_data_end);
+void vp9_decode_frame(struct VP9Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end, const uint8_t **p_data_end);
#ifdef __cplusplus
} // extern "C"
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index f9bd6deb2..7358c9a39 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -33,29 +33,26 @@ static PREDICTION_MODE read_intra_mode_y(VP9_COMMON *cm, MACROBLOCKD *xd,
const PREDICTION_MODE y_mode =
read_intra_mode(r, cm->fc->y_mode_prob[size_group]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->y_mode[size_group][y_mode];
+ if (counts) ++counts->y_mode[size_group][y_mode];
return y_mode;
}
static PREDICTION_MODE read_intra_mode_uv(VP9_COMMON *cm, MACROBLOCKD *xd,
vpx_reader *r,
PREDICTION_MODE y_mode) {
- const PREDICTION_MODE uv_mode = read_intra_mode(r,
- cm->fc->uv_mode_prob[y_mode]);
+ const PREDICTION_MODE uv_mode =
+ read_intra_mode(r, cm->fc->uv_mode_prob[y_mode]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->uv_mode[y_mode][uv_mode];
+ if (counts) ++counts->uv_mode[y_mode][uv_mode];
return uv_mode;
}
static PREDICTION_MODE read_inter_mode(VP9_COMMON *cm, MACROBLOCKD *xd,
vpx_reader *r, int ctx) {
- const int mode = vpx_read_tree(r, vp9_inter_mode_tree,
- cm->fc->inter_mode_probs[ctx]);
+ const int mode =
+ vpx_read_tree(r, vp9_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->inter_mode[ctx][mode];
+ if (counts) ++counts->inter_mode[ctx][mode];
return NEARESTMV + mode;
}
@@ -76,8 +73,7 @@ static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd,
tx_size += vpx_read(r, tx_probs[2]);
}
- if (counts)
- ++get_tx_counts(max_tx_size, ctx, &counts->tx)[tx_size];
+ if (counts) ++get_tx_counts(max_tx_size, ctx, &counts->tx)[tx_size];
return (TX_SIZE)tx_size;
}
@@ -105,8 +101,8 @@ static int dec_get_segment_id(const VP9_COMMON *cm, const uint8_t *segment_ids,
return segment_id;
}
-static void set_segment_id(VP9_COMMON *cm, int mi_offset,
- int x_mis, int y_mis, int segment_id) {
+static void set_segment_id(VP9_COMMON *cm, int mi_offset, int x_mis, int y_mis,
+ int segment_id) {
int x, y;
assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
@@ -117,25 +113,24 @@ static void set_segment_id(VP9_COMMON *cm, int mi_offset,
}
static void copy_segment_id(const VP9_COMMON *cm,
- const uint8_t *last_segment_ids,
- uint8_t *current_segment_ids,
- int mi_offset, int x_mis, int y_mis) {
+ const uint8_t *last_segment_ids,
+ uint8_t *current_segment_ids, int mi_offset,
+ int x_mis, int y_mis) {
int x, y;
for (y = 0; y < y_mis; y++)
for (x = 0; x < x_mis; x++)
- current_segment_ids[mi_offset + y * cm->mi_cols + x] = last_segment_ids ?
- last_segment_ids[mi_offset + y * cm->mi_cols + x] : 0;
+ current_segment_ids[mi_offset + y * cm->mi_cols + x] =
+ last_segment_ids ? last_segment_ids[mi_offset + y * cm->mi_cols + x]
+ : 0;
}
-static int read_intra_segment_id(VP9_COMMON *const cm, int mi_offset,
- int x_mis, int y_mis,
- vpx_reader *r) {
+static int read_intra_segment_id(VP9_COMMON *const cm, int mi_offset, int x_mis,
+ int y_mis, vpx_reader *r) {
struct segmentation *const seg = &cm->seg;
int segment_id;
- if (!seg->enabled)
- return 0; // Default for disabled segmentation
+ if (!seg->enabled) return 0; // Default for disabled segmentation
if (!seg->update_map) {
copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
@@ -156,12 +151,12 @@ static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
int predicted_segment_id, segment_id;
const int mi_offset = mi_row * cm->mi_cols + mi_col;
- if (!seg->enabled)
- return 0; // Default for disabled segmentation
+ if (!seg->enabled) return 0; // Default for disabled segmentation
- predicted_segment_id = cm->last_frame_seg_map ?
- dec_get_segment_id(cm, cm->last_frame_seg_map, mi_offset, x_mis, y_mis) :
- 0;
+ predicted_segment_id = cm->last_frame_seg_map
+ ? dec_get_segment_id(cm, cm->last_frame_seg_map,
+ mi_offset, x_mis, y_mis)
+ : 0;
if (!seg->update_map) {
copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
@@ -172,8 +167,8 @@ static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
if (seg->temporal_update) {
const vpx_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
mi->seg_id_predicted = vpx_read(r, pred_prob);
- segment_id = mi->seg_id_predicted ? predicted_segment_id
- : read_segment_id(r, seg);
+ segment_id =
+ mi->seg_id_predicted ? predicted_segment_id : read_segment_id(r, seg);
} else {
segment_id = read_segment_id(r, seg);
}
@@ -181,27 +176,26 @@ static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
return segment_id;
}
-static int read_skip(VP9_COMMON *cm, const MACROBLOCKD *xd,
- int segment_id, vpx_reader *r) {
+static int read_skip(VP9_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
+ vpx_reader *r) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
return 1;
} else {
const int ctx = vp9_get_skip_context(xd);
const int skip = vpx_read(r, cm->fc->skip_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->skip[ctx][skip];
+ if (counts) ++counts->skip[ctx][skip];
return skip;
}
}
static void read_intra_frame_mode_info(VP9_COMMON *const cm,
- MACROBLOCKD *const xd,
- int mi_row, int mi_col, vpx_reader *r,
- int x_mis, int y_mis) {
+ MACROBLOCKD *const xd, int mi_row,
+ int mi_col, vpx_reader *r, int x_mis,
+ int y_mis) {
MODE_INFO *const mi = xd->mi[0];
const MODE_INFO *above_mi = xd->above_mi;
- const MODE_INFO *left_mi = xd->left_mi;
+ const MODE_INFO *left_mi = xd->left_mi;
const BLOCK_SIZE bsize = mi->sb_type;
int i;
const int mi_offset = mi_row * cm->mi_cols + mi_col;
@@ -232,15 +226,14 @@ static void read_intra_frame_mode_info(VP9_COMMON *const cm,
read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 2));
break;
default:
- mi->mode = read_intra_mode(r,
- get_y_mode_probs(mi, above_mi, left_mi, 0));
+ mi->mode = read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 0));
}
mi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mi->mode]);
}
-static int read_mv_component(vpx_reader *r,
- const nmv_component *mvcomp, int usehp) {
+static int read_mv_component(vpx_reader *r, const nmv_component *mvcomp,
+ int usehp) {
int mag, d, fr, hp;
const int sign = vpx_read(r, mvcomp->sign);
const int mv_class = vpx_read_tree(r, vp9_mv_class_tree, mvcomp->classes);
@@ -255,18 +248,16 @@ static int read_mv_component(vpx_reader *r,
const int n = mv_class + CLASS0_BITS - 1; // number of bits
d = 0;
- for (i = 0; i < n; ++i)
- d |= vpx_read(r, mvcomp->bits[i]) << i;
+ for (i = 0; i < n; ++i) d |= vpx_read(r, mvcomp->bits[i]) << i;
mag = CLASS0_SIZE << (mv_class + 2);
}
// Fractional part
- fr = vpx_read_tree(r, vp9_mv_fp_tree, class0 ? mvcomp->class0_fp[d]
- : mvcomp->fp);
+ fr = vpx_read_tree(r, vp9_mv_fp_tree,
+ class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
// High precision part (if hp is not used, the default value of the hp is 1)
- hp = usehp ? vpx_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp)
- : 1;
+ hp = usehp ? vpx_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) : 1;
// Result
mag += ((d << 3) | (fr << 1) | hp) + 1;
@@ -274,12 +265,12 @@ static int read_mv_component(vpx_reader *r,
}
static INLINE void read_mv(vpx_reader *r, MV *mv, const MV *ref,
- const nmv_context *ctx,
- nmv_context_counts *counts, int allow_hp) {
+ const nmv_context *ctx, nmv_context_counts *counts,
+ int allow_hp) {
const MV_JOINT_TYPE joint_type =
(MV_JOINT_TYPE)vpx_read_tree(r, vp9_mv_joint_tree, ctx->joints);
const int use_hp = allow_hp && use_mv_hp(ref);
- MV diff = {0, 0};
+ MV diff = { 0, 0 };
if (mv_joint_vertical(joint_type))
diff.row = read_mv_component(r, &ctx->comps[0], use_hp);
@@ -301,8 +292,7 @@ static REFERENCE_MODE read_block_reference_mode(VP9_COMMON *cm,
const REFERENCE_MODE mode =
(REFERENCE_MODE)vpx_read(r, cm->fc->comp_inter_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->comp_inter[ctx][mode];
+ if (counts) ++counts->comp_inter[ctx][mode];
return mode; // SINGLE_REFERENCE or COMPOUND_REFERENCE
} else {
return cm->reference_mode;
@@ -311,8 +301,8 @@ static REFERENCE_MODE read_block_reference_mode(VP9_COMMON *cm,
// Read the reference frame
static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
- vpx_reader *r,
- int segment_id, MV_REFERENCE_FRAME ref_frame[2]) {
+ vpx_reader *r, int segment_id,
+ MV_REFERENCE_FRAME ref_frame[2]) {
FRAME_CONTEXT *const fc = cm->fc;
FRAME_COUNTS *counts = xd->counts;
@@ -327,20 +317,17 @@ static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
const int ctx = vp9_get_pred_context_comp_ref_p(cm, xd);
const int bit = vpx_read(r, fc->comp_ref_prob[ctx]);
- if (counts)
- ++counts->comp_ref[ctx][bit];
+ if (counts) ++counts->comp_ref[ctx][bit];
ref_frame[idx] = cm->comp_fixed_ref;
ref_frame[!idx] = cm->comp_var_ref[bit];
} else if (mode == SINGLE_REFERENCE) {
const int ctx0 = vp9_get_pred_context_single_ref_p1(xd);
const int bit0 = vpx_read(r, fc->single_ref_prob[ctx0][0]);
- if (counts)
- ++counts->single_ref[ctx0][0][bit0];
+ if (counts) ++counts->single_ref[ctx0][0][bit0];
if (bit0) {
const int ctx1 = vp9_get_pred_context_single_ref_p2(xd);
const int bit1 = vpx_read(r, fc->single_ref_prob[ctx1][1]);
- if (counts)
- ++counts->single_ref[ctx1][1][bit1];
+ if (counts) ++counts->single_ref[ctx1][1][bit1];
ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
} else {
ref_frame[0] = LAST_FRAME;
@@ -353,16 +340,14 @@ static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
}
}
-static INLINE INTERP_FILTER read_switchable_interp_filter(
- VP9_COMMON *const cm, MACROBLOCKD *const xd,
- vpx_reader *r) {
+static INLINE INTERP_FILTER read_switchable_interp_filter(VP9_COMMON *const cm,
+ MACROBLOCKD *const xd,
+ vpx_reader *r) {
const int ctx = get_pred_context_switchable_interp(xd);
- const INTERP_FILTER type =
- (INTERP_FILTER)vpx_read_tree(r, vp9_switchable_interp_tree,
- cm->fc->switchable_interp_prob[ctx]);
+ const INTERP_FILTER type = (INTERP_FILTER)vpx_read_tree(
+ r, vp9_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->switchable_interp[ctx][type];
+ if (counts) ++counts->switchable_interp[ctx][type];
return type;
}
@@ -379,19 +364,16 @@ static void read_intra_block_mode_info(VP9_COMMON *const cm,
mi->mode = mi->bmi[3].as_mode;
break;
case BLOCK_4X8:
- mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, xd,
- r, 0);
+ mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, xd, r, 0);
mi->bmi[1].as_mode = mi->bmi[3].as_mode = mi->mode =
read_intra_mode_y(cm, xd, r, 0);
break;
case BLOCK_8X4:
- mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, xd,
- r, 0);
+ mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, xd, r, 0);
mi->bmi[2].as_mode = mi->bmi[3].as_mode = mi->mode =
read_intra_mode_y(cm, xd, r, 0);
break;
- default:
- mi->mode = read_intra_mode_y(cm, xd, r, size_group_lookup[bsize]);
+ default: mi->mode = read_intra_mode_y(cm, xd, r, size_group_lookup[bsize]);
}
mi->uv_mode = read_intra_mode_uv(cm, xd, r, mi->mode);
@@ -405,8 +387,8 @@ static void read_intra_block_mode_info(VP9_COMMON *const cm,
}
static INLINE int is_mv_valid(const MV *mv) {
- return mv->row > MV_LOW && mv->row < MV_UPP &&
- mv->col > MV_LOW && mv->col < MV_UPP;
+ return mv->row > MV_LOW && mv->row < MV_UPP && mv->col > MV_LOW &&
+ mv->col < MV_UPP;
}
static INLINE void copy_mv_pair(int_mv *dst, const int_mv *src) {
@@ -418,9 +400,8 @@ static INLINE void zero_mv_pair(int_mv *dst) {
}
static INLINE int assign_mv(VP9_COMMON *cm, MACROBLOCKD *xd,
- PREDICTION_MODE mode,
- int_mv mv[2], int_mv ref_mv[2],
- int_mv near_nearest_mv[2],
+ PREDICTION_MODE mode, int_mv mv[2],
+ int_mv ref_mv[2], int_mv near_nearest_mv[2],
int is_compound, int allow_hp, vpx_reader *r) {
int i;
int ret = 1;
@@ -445,9 +426,7 @@ static INLINE int assign_mv(VP9_COMMON *cm, MACROBLOCKD *xd,
zero_mv_pair(mv);
break;
}
- default: {
- return 0;
- }
+ default: { return 0; }
}
return ret;
}
@@ -460,8 +439,7 @@ static int read_is_inter_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
const int ctx = get_intra_inter_context(xd);
const int is_inter = vpx_read(r, cm->fc->intra_inter_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->intra_inter[ctx][is_inter];
+ if (counts) ++counts->intra_inter[ctx][is_inter];
return is_inter;
}
}
@@ -487,35 +465,33 @@ static void fpm_sync(void *const data, int mi_row) {
// already in the list. If it's the second motion vector or early_break
// it will also skip all additional processing and jump to Done!
#define ADD_MV_REF_LIST_EB(mv, refmv_count, mv_ref_list, Done) \
- do { \
- if (refmv_count) { \
- if ((mv).as_int != (mv_ref_list)[0].as_int) { \
- (mv_ref_list)[(refmv_count)] = (mv); \
- refmv_count++; \
- goto Done; \
- } \
- } else { \
- (mv_ref_list)[(refmv_count)++] = (mv); \
- if (early_break) \
- goto Done; \
- } \
+ do { \
+ if (refmv_count) { \
+ if ((mv).as_int != (mv_ref_list)[0].as_int) { \
+ (mv_ref_list)[(refmv_count)] = (mv); \
+ refmv_count++; \
+ goto Done; \
+ } \
+ } else { \
+ (mv_ref_list)[(refmv_count)++] = (mv); \
+ if (early_break) goto Done; \
+ } \
} while (0)
// If either reference frame is different, not INTRA, and they
// are different from each other scale and add the mv to our list.
-#define IF_DIFF_REF_FRAME_ADD_MV_EB(mbmi, ref_frame, ref_sign_bias, \
- refmv_count, mv_ref_list, Done) \
- do { \
- if (is_inter_block(mbmi)) { \
- if ((mbmi)->ref_frame[0] != ref_frame) \
+#define IF_DIFF_REF_FRAME_ADD_MV_EB(mbmi, ref_frame, ref_sign_bias, \
+ refmv_count, mv_ref_list, Done) \
+ do { \
+ if (is_inter_block(mbmi)) { \
+ if ((mbmi)->ref_frame[0] != ref_frame) \
ADD_MV_REF_LIST_EB(scale_mv((mbmi), 0, ref_frame, ref_sign_bias), \
- refmv_count, mv_ref_list, Done); \
- if (has_second_ref(mbmi) && \
- (mbmi)->ref_frame[1] != ref_frame && \
- (mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) \
+ refmv_count, mv_ref_list, Done); \
+ if (has_second_ref(mbmi) && (mbmi)->ref_frame[1] != ref_frame && \
+ (mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) \
ADD_MV_REF_LIST_EB(scale_mv((mbmi), 1, ref_frame, ref_sign_bias), \
- refmv_count, mv_ref_list, Done); \
- } \
+ refmv_count, mv_ref_list, Done); \
+ } \
} while (0)
// This function searches the neighborhood of a given MB/SB
@@ -523,14 +499,16 @@ static void fpm_sync(void *const data, int mi_row) {
static int dec_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
PREDICTION_MODE mode, MV_REFERENCE_FRAME ref_frame,
const POSITION *const mv_ref_search,
- int_mv *mv_ref_list,
- int mi_row, int mi_col, int block, int is_sub8x8,
- find_mv_refs_sync sync, void *const data) {
+ int_mv *mv_ref_list, int mi_row, int mi_col,
+ int block, int is_sub8x8, find_mv_refs_sync sync,
+ void *const data) {
const int *ref_sign_bias = cm->ref_frame_sign_bias;
int i, refmv_count = 0;
int different_ref_found = 0;
- const MV_REF *const prev_frame_mvs = cm->use_prev_frame_mvs ?
- cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col : NULL;
+ const MV_REF *const prev_frame_mvs =
+ cm->use_prev_frame_mvs
+ ? cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col
+ : NULL;
const TileInfo *const tile = &xd->tile;
// If mode is nearestmv or newmv (uses nearestmv as a reference) then stop
// searching after the first mv is found.
@@ -553,11 +531,11 @@ static int dec_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
if (candidate_mi->ref_frame[0] == ref_frame)
ADD_MV_REF_LIST_EB(
get_sub_block_mv(candidate_mi, 0, mv_ref->col, block),
- refmv_count, mv_ref_list, Done);
+ refmv_count, mv_ref_list, Done);
else if (candidate_mi->ref_frame[1] == ref_frame)
ADD_MV_REF_LIST_EB(
get_sub_block_mv(candidate_mi, 1, mv_ref->col, block),
- refmv_count, mv_ref_list, Done);
+ refmv_count, mv_ref_list, Done);
}
}
}
@@ -579,14 +557,14 @@ static int dec_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
}
}
- // TODO(hkuang): Remove this sync after fixing pthread_cond_broadcast
- // on windows platform. The sync here is unnecessary if use_prev_frame_mvs
- // is 0. But after removing it, there will be hang in the unit test on windows
- // due to several threads waiting for a thread's signal.
+// TODO(hkuang): Remove this sync after fixing pthread_cond_broadcast
+// on windows platform. The sync here is unnecessary if use_prev_frame_mvs
+// is 0. But after removing it, there will be hang in the unit test on windows
+// due to several threads waiting for a thread's signal.
#if defined(_WIN32) && !HAVE_PTHREAD_H
- if (cm->frame_parallel_decode && sync != NULL) {
- sync(data, mi_row);
- }
+ if (cm->frame_parallel_decode && sync != NULL) {
+ sync(data, mi_row);
+ }
#endif
// Check the last frame's mode and mv info.
@@ -652,10 +630,9 @@ static int dec_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
// we only care about the nearestmv for the remaining modes
refmv_count = 1;
- Done:
+Done:
// Clamp vectors
- for (i = 0; i < refmv_count; ++i)
- clamp_mv_ref(&mv_ref_list[i].as_mv, xd);
+ for (i = 0; i < refmv_count; ++i) clamp_mv_ref(&mv_ref_list[i].as_mv, xd);
return refmv_count;
}
@@ -673,14 +650,12 @@ static void append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
assert(MAX_MV_REF_CANDIDATES == 2);
- refmv_count = dec_find_mv_refs(cm, xd, b_mode, mi->ref_frame[ref],
- mv_ref_search, mv_list, mi_row, mi_col, block,
- 1, NULL, NULL);
+ refmv_count =
+ dec_find_mv_refs(cm, xd, b_mode, mi->ref_frame[ref], mv_ref_search,
+ mv_list, mi_row, mi_col, block, 1, NULL, NULL);
switch (block) {
- case 0:
- best_sub8x8->as_int = mv_list[refmv_count - 1].as_int;
- break;
+ case 0: best_sub8x8->as_int = mv_list[refmv_count - 1].as_int; break;
case 1:
case 2:
if (b_mode == NEARESTMV) {
@@ -711,14 +686,13 @@ static void append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
}
}
break;
- default:
- assert(0 && "Invalid block index.");
+ default: assert(0 && "Invalid block index.");
}
}
static uint8_t get_mode_context(const VP9_COMMON *cm, const MACROBLOCKD *xd,
- const POSITION *const mv_ref_search,
- int mi_row, int mi_col) {
+ const POSITION *const mv_ref_search, int mi_row,
+ int mi_col) {
int i;
int context_counter = 0;
const TileInfo *const tile = &xd->tile;
@@ -739,8 +713,8 @@ static uint8_t get_mode_context(const VP9_COMMON *cm, const MACROBLOCKD *xd,
static void read_inter_block_mode_info(VP9Decoder *const pbi,
MACROBLOCKD *const xd,
- MODE_INFO *const mi,
- int mi_row, int mi_col, vpx_reader *r) {
+ MODE_INFO *const mi, int mi_row,
+ int mi_col, vpx_reader *r) {
VP9_COMMON *const cm = &pbi->common;
const BLOCK_SIZE bsize = mi->sb_type;
const int allow_hp = cm->allow_high_precision_mv;
@@ -756,9 +730,9 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi,
if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP)) {
mi->mode = ZEROMV;
if (bsize < BLOCK_8X8) {
- vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
-                         "Invalid usage of segment feature on small blocks");
- return;
+ vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+                         "Invalid usage of segment feature on small blocks");
+ return;
}
} else {
if (bsize >= BLOCK_8X8)
@@ -776,9 +750,9 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi,
const MV_REFERENCE_FRAME frame = mi->ref_frame[ref];
int refmv_count;
- refmv_count = dec_find_mv_refs(cm, xd, mi->mode, frame, mv_ref_search,
- tmp_mvs, mi_row, mi_col, -1, 0,
- fpm_sync, (void *)pbi);
+ refmv_count =
+ dec_find_mv_refs(cm, xd, mi->mode, frame, mv_ref_search, tmp_mvs,
+ mi_row, mi_col, -1, 0, fpm_sync, (void *)pbi);
dec_find_best_ref_mvs(allow_hp, tmp_mvs, &best_ref_mvs[ref],
refmv_count);
@@ -787,8 +761,8 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi,
}
mi->interp_filter = (cm->interp_filter == SWITCHABLE)
- ? read_switchable_interp_filter(cm, xd, r)
- : cm->interp_filter;
+ ? read_switchable_interp_filter(cm, xd, r)
+ : cm->interp_filter;
if (bsize < BLOCK_8X8) {
const int num_4x4_w = 1 << xd->bmode_blocks_wl;
@@ -813,10 +787,8 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi,
break;
}
- if (num_4x4_h == 2)
- mi->bmi[j + 2] = mi->bmi[j];
- if (num_4x4_w == 2)
- mi->bmi[j + 1] = mi->bmi[j];
+ if (num_4x4_h == 2) mi->bmi[j + 2] = mi->bmi[j];
+ if (num_4x4_w == 2) mi->bmi[j + 1] = mi->bmi[j];
}
}
@@ -830,15 +802,15 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi,
}
static void read_inter_frame_mode_info(VP9Decoder *const pbi,
- MACROBLOCKD *const xd,
- int mi_row, int mi_col, vpx_reader *r,
- int x_mis, int y_mis) {
+ MACROBLOCKD *const xd, int mi_row,
+ int mi_col, vpx_reader *r, int x_mis,
+ int y_mis) {
VP9_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0];
int inter_block;
- mi->segment_id = read_inter_segment_id(cm, xd, mi_row, mi_col, r, x_mis,
- y_mis);
+ mi->segment_id =
+ read_inter_segment_id(cm, xd, mi_row, mi_col, r, x_mis, y_mis);
mi->skip = read_skip(cm, xd, mi->segment_id, r);
inter_block = read_is_inter_block(cm, xd, mi->segment_id, r);
mi->tx_size = read_tx_size(cm, xd, !mi->skip || !inter_block, r);
@@ -854,12 +826,11 @@ static INLINE void copy_ref_frame_pair(MV_REFERENCE_FRAME *dst,
memcpy(dst, src, sizeof(*dst) * 2);
}
-void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
- int mi_row, int mi_col, vpx_reader *r,
- int x_mis, int y_mis) {
+void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd, int mi_row,
+ int mi_col, vpx_reader *r, int x_mis, int y_mis) {
VP9_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0];
- MV_REF* frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
+ MV_REF *frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
int w, h;
if (frame_is_intra_only(cm)) {
@@ -876,7 +847,7 @@ void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
frame_mvs += cm->mi_cols;
}
}
-#if 0 // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
+#if 0 // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) &&
(xd->above_mi == NULL || xd->left_mi == NULL) &&
!is_inter_block(mi) && need_top_left[mi->uv_mode])
diff --git a/vp9/decoder/vp9_decodemv.h b/vp9/decoder/vp9_decodemv.h
index 45569ec81..4e11c2fc0 100644
--- a/vp9/decoder/vp9_decodemv.h
+++ b/vp9/decoder/vp9_decodemv.h
@@ -19,9 +19,8 @@
extern "C" {
#endif
-void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
- int mi_row, int mi_col, vpx_reader *r,
- int x_mis, int y_mis);
+void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd, int mi_row,
+ int mi_col, vpx_reader *r, int x_mis, int y_mis);
#ifdef __cplusplus
} // extern "C"
diff --git a/vp9/decoder/vp9_decoder.c b/vp9/decoder/vp9_decoder.c
index 9ed980081..37693f094 100644
--- a/vp9/decoder/vp9_decoder.c
+++ b/vp9/decoder/vp9_decoder.c
@@ -57,12 +57,10 @@ static void vp9_dec_setup_mi(VP9_COMMON *cm) {
static int vp9_dec_alloc_mi(VP9_COMMON *cm, int mi_size) {
cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
- if (!cm->mip)
- return 1;
+ if (!cm->mip) return 1;
cm->mi_alloc_size = mi_size;
- cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*));
- if (!cm->mi_grid_base)
- return 1;
+ cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+ if (!cm->mi_grid_base) return 1;
return 0;
}
@@ -77,8 +75,7 @@ VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
VP9Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
VP9_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
- if (!cm)
- return NULL;
+ if (!cm) return NULL;
vp9_zero(*pbi);
@@ -90,11 +87,10 @@ VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
cm->error.setjmp = 1;
- CHECK_MEM_ERROR(cm, cm->fc,
- (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
- CHECK_MEM_ERROR(cm, cm->frame_contexts,
- (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS,
- sizeof(*cm->frame_contexts)));
+ CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
+ CHECK_MEM_ERROR(
+ cm, cm->frame_contexts,
+ (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
pbi->need_resync = 1;
once(initialize_dec);
@@ -126,8 +122,7 @@ VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
void vp9_decoder_remove(VP9Decoder *pbi) {
int i;
- if (!pbi)
- return;
+ if (!pbi) return;
vpx_get_worker_interface()->end(&pbi->lf_worker);
vpx_free(pbi->lf_worker.data1);
@@ -149,8 +144,8 @@ void vp9_decoder_remove(VP9Decoder *pbi) {
static int equal_dimensions(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b) {
- return a->y_height == b->y_height && a->y_width == b->y_width &&
- a->uv_height == b->uv_height && a->uv_width == b->uv_width;
+ return a->y_height == b->y_height && a->y_width == b->y_width &&
+ a->uv_height == b->uv_height && a->uv_width == b->uv_width;
}
vpx_codec_err_t vp9_copy_reference_dec(VP9Decoder *pbi,
@@ -176,14 +171,12 @@ vpx_codec_err_t vp9_copy_reference_dec(VP9Decoder *pbi,
else
vp8_yv12_copy_frame(cfg, sd);
} else {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
- "Invalid reference frame");
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
}
return cm->error.error_code;
}
-
vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm,
VP9_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd) {
@@ -206,8 +199,7 @@ vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm,
} else if (ref_frame_flag == VP9_ALT_FLAG) {
idx = cm->ref_frame_map[2];
} else {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
- "Invalid reference frame");
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
return cm->error.error_code;
}
@@ -273,8 +265,8 @@ static void swap_frame_buffers(VP9Decoder *pbi) {
cm->frame_refs[ref_index].idx = -1;
}
-int vp9_receive_compressed_data(VP9Decoder *pbi,
- size_t size, const uint8_t **psource) {
+int vp9_receive_compressed_data(VP9Decoder *pbi, size_t size,
+ const uint8_t **psource) {
VP9_COMMON *volatile const cm = &pbi->common;
BufferPool *volatile const pool = cm->buffer_pool;
RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
@@ -301,8 +293,8 @@ int vp9_receive_compressed_data(VP9Decoder *pbi,
// Check if the previous frame was a frame without any references to it.
// Release frame buffer if not decoding in frame parallel mode.
- if (!pbi->frame_parallel_decode && cm->new_fb_idx >= 0
- && frame_bufs[cm->new_fb_idx].ref_count == 0)
+ if (!pbi->frame_parallel_decode && cm->new_fb_idx >= 0 &&
+ frame_bufs[cm->new_fb_idx].ref_count == 0)
pool->release_fb_cb(pool->cb_priv,
&frame_bufs[cm->new_fb_idx].raw_frame_buffer);
// Find a free frame buffer. Return error if can not find any.
@@ -330,7 +322,6 @@ int vp9_receive_compressed_data(VP9Decoder *pbi,
pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
}
-
if (setjmp(cm->error.jmp)) {
const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
int i;
@@ -425,14 +416,12 @@ int vp9_get_raw_frame(VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd,
(void)*flags;
#endif
- if (pbi->ready_for_new_data == 1)
- return ret;
+ if (pbi->ready_for_new_data == 1) return ret;
pbi->ready_for_new_data = 1;
/* no raw frame to show!!! */
- if (!cm->show_frame)
- return ret;
+ if (!cm->show_frame) return ret;
pbi->ready_for_new_data = 1;
@@ -451,8 +440,7 @@ int vp9_get_raw_frame(VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd,
return ret;
}
-vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data,
- size_t data_sz,
+vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, size_t data_sz,
uint32_t sizes[8], int *count,
vpx_decrypt_cb decrypt_cb,
void *decrypt_state) {
@@ -475,18 +463,16 @@ vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data,
// This chunk is marked as having a superframe index but doesn't have
// enough data for it, thus it's an invalid superframe index.
- if (data_sz < index_sz)
- return VPX_CODEC_CORRUPT_FRAME;
+ if (data_sz < index_sz) return VPX_CODEC_CORRUPT_FRAME;
{
- const uint8_t marker2 = read_marker(decrypt_cb, decrypt_state,
- data + data_sz - index_sz);
+ const uint8_t marker2 =
+ read_marker(decrypt_cb, decrypt_state, data + data_sz - index_sz);
// This chunk is marked as having a superframe index but doesn't have
// the matching marker byte at the front of the index therefore it's an
// invalid chunk.
- if (marker != marker2)
- return VPX_CODEC_CORRUPT_FRAME;
+ if (marker != marker2) return VPX_CODEC_CORRUPT_FRAME;
}
{
@@ -505,8 +491,7 @@ vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data,
for (i = 0; i < frames; ++i) {
uint32_t this_sz = 0;
- for (j = 0; j < mag; ++j)
- this_sz |= ((uint32_t)(*x++)) << (j * 8);
+ for (j = 0; j < mag; ++j) this_sz |= ((uint32_t)(*x++)) << (j * 8);
sizes[i] = this_sz;
}
*count = frames;
diff --git a/vp9/decoder/vp9_decoder.h b/vp9/decoder/vp9_decoder.h
index 7111a36d3..427baf1e0 100644
--- a/vp9/decoder/vp9_decoder.h
+++ b/vp9/decoder/vp9_decoder.h
@@ -57,9 +57,9 @@ typedef struct VP9Decoder {
// TODO(hkuang): Combine this with cur_buf in macroblockd as they are
// the same.
- RefCntBuffer *cur_buf; // Current decoding frame buffer.
+ RefCntBuffer *cur_buf; // Current decoding frame buffer.
- VPxWorker *frame_worker_owner; // frame_worker that owns this pbi.
+ VPxWorker *frame_worker_owner; // frame_worker that owns this pbi.
VPxWorker lf_worker;
VPxWorker *tile_workers;
TileWorkerData *tile_worker_data;
@@ -74,12 +74,12 @@ typedef struct VP9Decoder {
int max_threads;
int inv_tile_order;
- int need_resync; // wait for key/intra-only frame.
+ int need_resync; // wait for key/intra-only frame.
int hold_ref_buf; // hold the reference buffer.
} VP9Decoder;
-int vp9_receive_compressed_data(struct VP9Decoder *pbi,
- size_t size, const uint8_t **dest);
+int vp9_receive_compressed_data(struct VP9Decoder *pbi, size_t size,
+ const uint8_t **dest);
int vp9_get_raw_frame(struct VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd,
vp9_ppflags_t *flags);
@@ -93,8 +93,7 @@ vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm,
YV12_BUFFER_CONFIG *sd);
static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
- void *decrypt_state,
- const uint8_t *data) {
+ void *decrypt_state, const uint8_t *data) {
if (decrypt_cb) {
uint8_t marker;
decrypt_cb(decrypt_state, data, &marker, 1);
@@ -105,8 +104,7 @@ static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
// This function is exposed for use in tests, as well as the inlined function
// "read_marker".
-vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data,
- size_t data_sz,
+vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, size_t data_sz,
uint32_t sizes[8], int *count,
vpx_decrypt_cb decrypt_cb,
void *decrypt_state);
diff --git a/vp9/decoder/vp9_detokenize.c b/vp9/decoder/vp9_detokenize.c
index 47dc107fe..cc01909ff 100644
--- a/vp9/decoder/vp9_detokenize.c
+++ b/vp9/decoder/vp9_detokenize.c
@@ -20,25 +20,22 @@
#include "vp9/decoder/vp9_detokenize.h"
-#define EOB_CONTEXT_NODE 0
-#define ZERO_CONTEXT_NODE 1
-#define ONE_CONTEXT_NODE 2
-
-#define INCREMENT_COUNT(token) \
- do { \
- if (counts) \
- ++coef_counts[band][ctx][token]; \
+#define EOB_CONTEXT_NODE 0
+#define ZERO_CONTEXT_NODE 1
+#define ONE_CONTEXT_NODE 2
+
+#define INCREMENT_COUNT(token) \
+ do { \
+ if (counts) ++coef_counts[band][ctx][token]; \
} while (0)
static INLINE int read_coeff(const vpx_prob *probs, int n, vpx_reader *r) {
int i, val = 0;
- for (i = 0; i < n; ++i)
- val = (val << 1) | vpx_read(r, probs[i]);
+ for (i = 0; i < n; ++i) val = (val << 1) | vpx_read(r, probs[i]);
return val;
}
-static int decode_coefs(const MACROBLOCKD *xd,
- PLANE_TYPE type,
+static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq,
int ctx, const int16_t *scan, const int16_t *nb,
vpx_reader *r) {
@@ -47,11 +44,11 @@ static int decode_coefs(const MACROBLOCKD *xd,
const FRAME_CONTEXT *const fc = xd->fc;
const int ref = is_inter_block(xd->mi[0]);
int band, c = 0;
- const vpx_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+ const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size][type][ref];
const vpx_prob *prob;
- unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
- unsigned int (*eob_branch_count)[COEFF_CONTEXTS];
+ unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
+ unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
uint8_t token_cache[32 * 32];
const uint8_t *band_translate = get_band_translate(tx_size);
const int dq_shift = (tx_size == TX_32X32);
@@ -59,16 +56,16 @@ static int decode_coefs(const MACROBLOCKD *xd,
int16_t dqv = dq[0];
const uint8_t *const cat6_prob =
#if CONFIG_VP9_HIGHBITDEPTH
- (xd->bd == VPX_BITS_12) ? vp9_cat6_prob_high12 :
- (xd->bd == VPX_BITS_10) ? vp9_cat6_prob_high12 + 2 :
+ (xd->bd == VPX_BITS_12)
+ ? vp9_cat6_prob_high12
+ : (xd->bd == VPX_BITS_10) ? vp9_cat6_prob_high12 + 2 :
#endif // CONFIG_VP9_HIGHBITDEPTH
- vp9_cat6_prob;
+ vp9_cat6_prob;
const int cat6_bits =
#if CONFIG_VP9_HIGHBITDEPTH
- (xd->bd == VPX_BITS_12) ? 18 :
- (xd->bd == VPX_BITS_10) ? 16 :
+ (xd->bd == VPX_BITS_12) ? 18 : (xd->bd == VPX_BITS_10) ? 16 :
#endif // CONFIG_VP9_HIGHBITDEPTH
- 14;
+ 14;
if (counts) {
coef_counts = counts->coef[tx_size][type][ref];
@@ -79,8 +76,7 @@ static int decode_coefs(const MACROBLOCKD *xd,
int val = -1;
band = *band_translate++;
prob = coef_probs[band][ctx];
- if (counts)
- ++eob_branch_count[band][ctx];
+ if (counts) ++eob_branch_count[band][ctx];
if (!vpx_read(r, prob[EOB_CONTEXT_NODE])) {
INCREMENT_COUNT(EOB_MODEL_TOKEN);
break;
@@ -91,8 +87,7 @@ static int decode_coefs(const MACROBLOCKD *xd,
dqv = dq[1];
token_cache[scan[c]] = 0;
++c;
- if (c >= max_eob)
- return c; // zero tokens at the end (no eob token)
+ if (c >= max_eob) return c; // zero tokens at the end (no eob token)
ctx = get_coef_context(nb, token_cache, c);
band = *band_translate++;
prob = coef_probs[band][ctx];
@@ -109,9 +104,7 @@ static int decode_coefs(const MACROBLOCKD *xd,
switch (token) {
case TWO_TOKEN:
case THREE_TOKEN:
- case FOUR_TOKEN:
- val = token;
- break;
+ case FOUR_TOKEN: val = token; break;
case CATEGORY1_TOKEN:
val = CAT1_MIN_VAL + read_coeff(vp9_cat1_prob, 1, r);
break;
@@ -135,8 +128,7 @@ static int decode_coefs(const MACROBLOCKD *xd,
v = (val * dqv) >> dq_shift;
#if CONFIG_COEFFICIENT_RANGE_CHECKING
#if CONFIG_VP9_HIGHBITDEPTH
- dqcoeff[scan[c]] = highbd_check_range((vpx_read_bit(r) ? -v : v),
- xd->bd);
+ dqcoeff[scan[c]] = highbd_check_range((vpx_read_bit(r) ? -v : v), xd->bd);
#else
dqcoeff[scan[c]] = check_range(vpx_read_bit(r) ? -v : v);
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -178,7 +170,7 @@ int vp9_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
switch (tx_size) {
case TX_4X4:
- ctx = a[0] != 0;
+ ctx = a[0] != 0;
ctx += l[0] != 0;
eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size,
dequant, ctx, sc->scan, sc->neighbors, r);
@@ -186,7 +178,7 @@ int vp9_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
break;
case TX_8X8:
get_ctx_shift(xd, &ctx_shift_a, &ctx_shift_l, x, y, 1 << TX_8X8);
- ctx = !!*(const uint16_t *)a;
+ ctx = !!*(const uint16_t *)a;
ctx += !!*(const uint16_t *)l;
eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size,
dequant, ctx, sc->scan, sc->neighbors, r);
@@ -195,7 +187,7 @@ int vp9_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
break;
case TX_16X16:
get_ctx_shift(xd, &ctx_shift_a, &ctx_shift_l, x, y, 1 << TX_16X16);
- ctx = !!*(const uint32_t *)a;
+ ctx = !!*(const uint32_t *)a;
ctx += !!*(const uint32_t *)l;
eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size,
dequant, ctx, sc->scan, sc->neighbors, r);
@@ -207,7 +199,7 @@ int vp9_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
// NOTE: casting to uint64_t here is safe because the default memory
// alignment is at least 8 bytes and the TX_32X32 is aligned on 8 byte
// boundaries.
- ctx = !!*(const uint64_t *)a;
+ ctx = !!*(const uint64_t *)a;
ctx += !!*(const uint64_t *)l;
eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size,
dequant, ctx, sc->scan, sc->neighbors, r);
diff --git a/vp9/decoder/vp9_detokenize.h b/vp9/decoder/vp9_detokenize.h
index d242d4466..aa2afb16a 100644
--- a/vp9/decoder/vp9_detokenize.h
+++ b/vp9/decoder/vp9_detokenize.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_DECODER_VP9_DETOKENIZE_H_
#define VP9_DECODER_VP9_DETOKENIZE_H_
@@ -20,10 +19,8 @@
extern "C" {
#endif
-int vp9_decode_block_tokens(MACROBLOCKD *xd,
- int plane, const scan_order *sc,
- int x, int y,
- TX_SIZE tx_size, vpx_reader *r,
+int vp9_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
+ int x, int y, TX_SIZE tx_size, vpx_reader *r,
int seg_id);
#ifdef __cplusplus
diff --git a/vp9/decoder/vp9_dsubexp.c b/vp9/decoder/vp9_dsubexp.c
index 05b38538a..126ba0b96 100644
--- a/vp9/decoder/vp9_dsubexp.c
+++ b/vp9/decoder/vp9_dsubexp.c
@@ -15,8 +15,7 @@
#include "vp9/decoder/vp9_dsubexp.h"
static int inv_recenter_nonneg(int v, int m) {
- if (v > 2 * m)
- return v;
+ if (v > 2 * m) return v;
return (v & 1) ? m - ((v + 1) >> 1) : m + (v >> 1);
}
@@ -25,19 +24,19 @@ static int decode_uniform(vpx_reader *r) {
const int l = 8;
const int m = (1 << l) - 191;
const int v = vpx_read_literal(r, l - 1);
- return v < m ? v : (v << 1) - m + vpx_read_bit(r);
+ return v < m ? v : (v << 1) - m + vpx_read_bit(r);
}
static int inv_remap_prob(int v, int m) {
static uint8_t inv_map_table[MAX_PROB] = {
- 7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176, 189,
- 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27,
- 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
- 44, 45, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60,
- 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76,
- 77, 78, 79, 80, 81, 82, 83, 84, 86, 87, 88, 89, 90, 91, 92,
- 93, 94, 95, 96, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176, 189,
+ 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76,
+ 77, 78, 79, 80, 81, 82, 83, 84, 86, 87, 88, 89, 90, 91, 92,
+ 93, 94, 95, 96, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 125,
126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 138, 139, 140, 141,
142, 143, 144, 145, 146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157,
@@ -59,16 +58,13 @@ static int inv_remap_prob(int v, int m) {
}
static int decode_term_subexp(vpx_reader *r) {
- if (!vpx_read_bit(r))
- return vpx_read_literal(r, 4);
- if (!vpx_read_bit(r))
- return vpx_read_literal(r, 4) + 16;
- if (!vpx_read_bit(r))
- return vpx_read_literal(r, 5) + 32;
+ if (!vpx_read_bit(r)) return vpx_read_literal(r, 4);
+ if (!vpx_read_bit(r)) return vpx_read_literal(r, 4) + 16;
+ if (!vpx_read_bit(r)) return vpx_read_literal(r, 5) + 32;
return decode_uniform(r) + 64;
}
-void vp9_diff_update_prob(vpx_reader *r, vpx_prob* p) {
+void vp9_diff_update_prob(vpx_reader *r, vpx_prob *p) {
if (vpx_read(r, DIFF_UPDATE_PROB)) {
const int delp = decode_term_subexp(r);
*p = (vpx_prob)inv_remap_prob(delp, *p);
diff --git a/vp9/decoder/vp9_dsubexp.h b/vp9/decoder/vp9_dsubexp.h
index a8bcc70be..5a8ec8300 100644
--- a/vp9/decoder/vp9_dsubexp.h
+++ b/vp9/decoder/vp9_dsubexp.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_DECODER_VP9_DSUBEXP_H_
#define VP9_DECODER_VP9_DSUBEXP_H_
@@ -18,7 +17,7 @@
extern "C" {
#endif
-void vp9_diff_update_prob(vpx_reader *r, vpx_prob* p);
+void vp9_diff_update_prob(vpx_reader *r, vpx_prob *p);
#ifdef __cplusplus
} // extern "C"
diff --git a/vp9/decoder/vp9_dthread.c b/vp9/decoder/vp9_dthread.c
index 14a71448f..52bc2a0f6 100644
--- a/vp9/decoder/vp9_dthread.c
+++ b/vp9/decoder/vp9_dthread.c
@@ -62,8 +62,7 @@ void vp9_frameworker_signal_stats(VPxWorker *const worker) {
void vp9_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
int row) {
#if CONFIG_MULTITHREAD
- if (!ref_buf)
- return;
+ if (!ref_buf) return;
#ifndef BUILDING_WITH_TSAN
// The following line of code will get harmless tsan error but it is the key
@@ -147,11 +146,12 @@ void vp9_frameworker_copy_context(VPxWorker *const dst_worker,
vp9_frameworker_lock_stats(src_worker);
while (!src_worker_data->frame_context_ready) {
pthread_cond_wait(&src_worker_data->stats_cond,
- &src_worker_data->stats_mutex);
+ &src_worker_data->stats_mutex);
}
- dst_cm->last_frame_seg_map = src_cm->seg.enabled ?
- src_cm->current_frame_seg_map : src_cm->last_frame_seg_map;
+ dst_cm->last_frame_seg_map = src_cm->seg.enabled
+ ? src_cm->current_frame_seg_map
+ : src_cm->last_frame_seg_map;
dst_worker_data->pbi->need_resync = src_worker_data->pbi->need_resync;
vp9_frameworker_unlock_stats(src_worker);
@@ -159,17 +159,18 @@ void vp9_frameworker_copy_context(VPxWorker *const dst_worker,
#if CONFIG_VP9_HIGHBITDEPTH
dst_cm->use_highbitdepth = src_cm->use_highbitdepth;
#endif
- dst_cm->prev_frame = src_cm->show_existing_frame ?
- src_cm->prev_frame : src_cm->cur_frame;
- dst_cm->last_width = !src_cm->show_existing_frame ?
- src_cm->width : src_cm->last_width;
- dst_cm->last_height = !src_cm->show_existing_frame ?
- src_cm->height : src_cm->last_height;
+ dst_cm->prev_frame =
+ src_cm->show_existing_frame ? src_cm->prev_frame : src_cm->cur_frame;
+ dst_cm->last_width =
+ !src_cm->show_existing_frame ? src_cm->width : src_cm->last_width;
+ dst_cm->last_height =
+ !src_cm->show_existing_frame ? src_cm->height : src_cm->last_height;
dst_cm->subsampling_x = src_cm->subsampling_x;
dst_cm->subsampling_y = src_cm->subsampling_y;
dst_cm->frame_type = src_cm->frame_type;
- dst_cm->last_show_frame = !src_cm->show_existing_frame ?
- src_cm->show_frame : src_cm->last_show_frame;
+ dst_cm->last_show_frame = !src_cm->show_existing_frame
+ ? src_cm->show_frame
+ : src_cm->last_show_frame;
for (i = 0; i < REF_FRAMES; ++i)
dst_cm->ref_frame_map[i] = src_cm->next_ref_frame_map[i];
@@ -183,7 +184,7 @@ void vp9_frameworker_copy_context(VPxWorker *const dst_worker,
memcpy(dst_cm->frame_contexts, src_cm->frame_contexts,
FRAME_CONTEXTS * sizeof(dst_cm->frame_contexts[0]));
#else
- (void) dst_worker;
- (void) src_worker;
+ (void)dst_worker;
+ (void)src_worker;
#endif // CONFIG_MULTITHREAD
}
diff --git a/vp9/decoder/vp9_dthread.h b/vp9/decoder/vp9_dthread.h
index ba7c38a51..fce0fe7fe 100644
--- a/vp9/decoder/vp9_dthread.h
+++ b/vp9/decoder/vp9_dthread.h
@@ -68,7 +68,7 @@ void vp9_frameworker_copy_context(VPxWorker *const dst_worker,
VPxWorker *const src_worker);
#ifdef __cplusplus
-} // extern "C"
+} // extern "C"
#endif
#endif // VP9_DECODER_VP9_DTHREAD_H_
diff --git a/vp9/vp9_cx_iface.c b/vp9/vp9_cx_iface.c
index 8b7a9b9f4..46be1d28c 100644
--- a/vp9/vp9_cx_iface.c
+++ b/vp9/vp9_cx_iface.c
@@ -23,90 +23,90 @@
#include "vp9/vp9_iface_common.h"
struct vp9_extracfg {
- int cpu_used; // available cpu percentage in 1/16
- unsigned int enable_auto_alt_ref;
- unsigned int noise_sensitivity;
- unsigned int sharpness;
- unsigned int static_thresh;
- unsigned int tile_columns;
- unsigned int tile_rows;
- unsigned int arnr_max_frames;
- unsigned int arnr_strength;
- unsigned int min_gf_interval;
- unsigned int max_gf_interval;
- vp8e_tuning tuning;
- unsigned int cq_level; // constrained quality level
- unsigned int rc_max_intra_bitrate_pct;
- unsigned int rc_max_inter_bitrate_pct;
- unsigned int gf_cbr_boost_pct;
- unsigned int lossless;
- unsigned int target_level;
- unsigned int frame_parallel_decoding_mode;
- AQ_MODE aq_mode;
- unsigned int frame_periodic_boost;
- vpx_bit_depth_t bit_depth;
- vp9e_tune_content content;
- vpx_color_space_t color_space;
- vpx_color_range_t color_range;
- int render_width;
- int render_height;
+ int cpu_used; // available cpu percentage in 1/16
+ unsigned int enable_auto_alt_ref;
+ unsigned int noise_sensitivity;
+ unsigned int sharpness;
+ unsigned int static_thresh;
+ unsigned int tile_columns;
+ unsigned int tile_rows;
+ unsigned int arnr_max_frames;
+ unsigned int arnr_strength;
+ unsigned int min_gf_interval;
+ unsigned int max_gf_interval;
+ vp8e_tuning tuning;
+ unsigned int cq_level; // constrained quality level
+ unsigned int rc_max_intra_bitrate_pct;
+ unsigned int rc_max_inter_bitrate_pct;
+ unsigned int gf_cbr_boost_pct;
+ unsigned int lossless;
+ unsigned int target_level;
+ unsigned int frame_parallel_decoding_mode;
+ AQ_MODE aq_mode;
+ unsigned int frame_periodic_boost;
+ vpx_bit_depth_t bit_depth;
+ vp9e_tune_content content;
+ vpx_color_space_t color_space;
+ vpx_color_range_t color_range;
+ int render_width;
+ int render_height;
};
static struct vp9_extracfg default_extra_cfg = {
- 0, // cpu_used
- 1, // enable_auto_alt_ref
- 0, // noise_sensitivity
- 0, // sharpness
- 0, // static_thresh
- 6, // tile_columns
- 0, // tile_rows
- 7, // arnr_max_frames
- 5, // arnr_strength
- 0, // min_gf_interval; 0 -> default decision
- 0, // max_gf_interval; 0 -> default decision
- VP8_TUNE_PSNR, // tuning
- 10, // cq_level
- 0, // rc_max_intra_bitrate_pct
- 0, // rc_max_inter_bitrate_pct
- 0, // gf_cbr_boost_pct
- 0, // lossless
- 255, // target_level
- 1, // frame_parallel_decoding_mode
- NO_AQ, // aq_mode
- 0, // frame_periodic_delta_q
- VPX_BITS_8, // Bit depth
- VP9E_CONTENT_DEFAULT, // content
- VPX_CS_UNKNOWN, // color space
- 0, // color range
- 0, // render width
- 0, // render height
+ 0, // cpu_used
+ 1, // enable_auto_alt_ref
+ 0, // noise_sensitivity
+ 0, // sharpness
+ 0, // static_thresh
+ 6, // tile_columns
+ 0, // tile_rows
+ 7, // arnr_max_frames
+ 5, // arnr_strength
+ 0, // min_gf_interval; 0 -> default decision
+ 0, // max_gf_interval; 0 -> default decision
+ VP8_TUNE_PSNR, // tuning
+ 10, // cq_level
+ 0, // rc_max_intra_bitrate_pct
+ 0, // rc_max_inter_bitrate_pct
+ 0, // gf_cbr_boost_pct
+ 0, // lossless
+ 255, // target_level
+ 1, // frame_parallel_decoding_mode
+ NO_AQ, // aq_mode
+ 0, // frame_periodic_delta_q
+ VPX_BITS_8, // Bit depth
+ VP9E_CONTENT_DEFAULT, // content
+ VPX_CS_UNKNOWN, // color space
+ 0, // color range
+ 0, // render width
+ 0, // render height
};
struct vpx_codec_alg_priv {
- vpx_codec_priv_t base;
- vpx_codec_enc_cfg_t cfg;
- struct vp9_extracfg extra_cfg;
- VP9EncoderConfig oxcf;
- VP9_COMP *cpi;
- unsigned char *cx_data;
- size_t cx_data_sz;
- unsigned char *pending_cx_data;
- size_t pending_cx_data_sz;
- int pending_frame_count;
- size_t pending_frame_sizes[8];
- size_t pending_frame_magnitude;
- vpx_image_t preview_img;
- vpx_enc_frame_flags_t next_frame_flags;
- vp8_postproc_cfg_t preview_ppcfg;
+ vpx_codec_priv_t base;
+ vpx_codec_enc_cfg_t cfg;
+ struct vp9_extracfg extra_cfg;
+ VP9EncoderConfig oxcf;
+ VP9_COMP *cpi;
+ unsigned char *cx_data;
+ size_t cx_data_sz;
+ unsigned char *pending_cx_data;
+ size_t pending_cx_data_sz;
+ int pending_frame_count;
+ size_t pending_frame_sizes[8];
+ size_t pending_frame_magnitude;
+ vpx_image_t preview_img;
+ vpx_enc_frame_flags_t next_frame_flags;
+ vp8_postproc_cfg_t preview_ppcfg;
vpx_codec_pkt_list_decl(256) pkt_list;
- unsigned int fixed_kf_cntr;
+ unsigned int fixed_kf_cntr;
vpx_codec_priv_output_cx_pkt_cb_pair_t output_cx_pkt_cb;
// BufferPool that holds all reference frames.
- BufferPool *buffer_pool;
+ BufferPool *buffer_pool;
};
-static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx,
- const struct vpx_internal_error_info *error) {
+static vpx_codec_err_t update_error_state(
+ vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
const vpx_codec_err_t res = error->error_code;
if (res != VPX_CODEC_OK)
@@ -115,58 +115,60 @@ static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx,
return res;
}
-
#undef ERROR
-#define ERROR(str) do {\
- ctx->base.err_detail = str;\
- return VPX_CODEC_INVALID_PARAM;\
+#define ERROR(str) \
+ do { \
+ ctx->base.err_detail = str; \
+ return VPX_CODEC_INVALID_PARAM; \
} while (0)
-#define RANGE_CHECK(p, memb, lo, hi) do {\
+#define RANGE_CHECK(p, memb, lo, hi) \
+ do { \
if (!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \
- ERROR(#memb " out of range ["#lo".."#hi"]");\
+ ERROR(#memb " out of range [" #lo ".." #hi "]"); \
} while (0)
-#define RANGE_CHECK_HI(p, memb, hi) do {\
- if (!((p)->memb <= (hi))) \
- ERROR(#memb " out of range [.."#hi"]");\
+#define RANGE_CHECK_HI(p, memb, hi) \
+ do { \
+ if (!((p)->memb <= (hi))) ERROR(#memb " out of range [.." #hi "]"); \
} while (0)
-#define RANGE_CHECK_LO(p, memb, lo) do {\
- if (!((p)->memb >= (lo))) \
- ERROR(#memb " out of range ["#lo"..]");\
+#define RANGE_CHECK_LO(p, memb, lo) \
+ do { \
+ if (!((p)->memb >= (lo))) ERROR(#memb " out of range [" #lo "..]"); \
} while (0)
-#define RANGE_CHECK_BOOL(p, memb) do {\
- if (!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean");\
+#define RANGE_CHECK_BOOL(p, memb) \
+ do { \
+ if (!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean"); \
} while (0)
static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
const vpx_codec_enc_cfg_t *cfg,
const struct vp9_extracfg *extra_cfg) {
- RANGE_CHECK(cfg, g_w, 1, 65535); // 16 bits available
- RANGE_CHECK(cfg, g_h, 1, 65535); // 16 bits available
- RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000);
- RANGE_CHECK(cfg, g_timebase.num, 1, 1000000000);
- RANGE_CHECK_HI(cfg, g_profile, 3);
-
- RANGE_CHECK_HI(cfg, rc_max_quantizer, 63);
- RANGE_CHECK_HI(cfg, rc_min_quantizer, cfg->rc_max_quantizer);
+ RANGE_CHECK(cfg, g_w, 1, 65535); // 16 bits available
+ RANGE_CHECK(cfg, g_h, 1, 65535); // 16 bits available
+ RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000);
+ RANGE_CHECK(cfg, g_timebase.num, 1, 1000000000);
+ RANGE_CHECK_HI(cfg, g_profile, 3);
+
+ RANGE_CHECK_HI(cfg, rc_max_quantizer, 63);
+ RANGE_CHECK_HI(cfg, rc_min_quantizer, cfg->rc_max_quantizer);
RANGE_CHECK_BOOL(extra_cfg, lossless);
- RANGE_CHECK(extra_cfg, aq_mode, 0, AQ_MODE_COUNT - 1);
+ RANGE_CHECK(extra_cfg, aq_mode, 0, AQ_MODE_COUNT - 1);
RANGE_CHECK(extra_cfg, frame_periodic_boost, 0, 1);
- RANGE_CHECK_HI(cfg, g_threads, 64);
- RANGE_CHECK_HI(cfg, g_lag_in_frames, MAX_LAG_BUFFERS);
- RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_Q);
- RANGE_CHECK_HI(cfg, rc_undershoot_pct, 100);
- RANGE_CHECK_HI(cfg, rc_overshoot_pct, 100);
+ RANGE_CHECK_HI(cfg, g_threads, 64);
+ RANGE_CHECK_HI(cfg, g_lag_in_frames, MAX_LAG_BUFFERS);
+ RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_Q);
+ RANGE_CHECK_HI(cfg, rc_undershoot_pct, 100);
+ RANGE_CHECK_HI(cfg, rc_overshoot_pct, 100);
RANGE_CHECK_HI(cfg, rc_2pass_vbr_bias_pct, 100);
- RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO);
- RANGE_CHECK_BOOL(cfg, rc_resize_allowed);
- RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100);
- RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100);
+ RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO);
+ RANGE_CHECK_BOOL(cfg, rc_resize_allowed);
+ RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100);
+ RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100);
RANGE_CHECK_HI(cfg, rc_resize_down_thresh, 100);
- RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS);
+ RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS);
RANGE_CHECK(extra_cfg, min_gf_interval, 0, (MAX_LAG_BUFFERS - 1));
RANGE_CHECK(extra_cfg, max_gf_interval, 0, (MAX_LAG_BUFFERS - 1));
if (extra_cfg->max_gf_interval > 0) {
@@ -174,7 +176,7 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
}
if (extra_cfg->min_gf_interval > 0 && extra_cfg->max_gf_interval > 0) {
RANGE_CHECK(extra_cfg, max_gf_interval, extra_cfg->min_gf_interval,
- (MAX_LAG_BUFFERS - 1));
+ (MAX_LAG_BUFFERS - 1));
}
if (cfg->rc_resize_allowed == 1) {
@@ -191,9 +193,9 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
level != LEVEL_2_1 && level != LEVEL_3 && level != LEVEL_3_1 &&
level != LEVEL_4 && level != LEVEL_4_1 && level != LEVEL_5 &&
level != LEVEL_5_1 && level != LEVEL_5_2 && level != LEVEL_6 &&
- level != LEVEL_6_1 && level != LEVEL_6_2 &&
- level != LEVEL_UNKNOWN && level != LEVEL_MAX)
- ERROR("target_level is invalid");
+ level != LEVEL_6_1 && level != LEVEL_6_2 && level != LEVEL_UNKNOWN &&
+ level != LEVEL_MAX)
+ ERROR("target_level is invalid");
}
if (cfg->ss_number_layers * cfg->ts_number_layers > VPX_MAX_LAYERS)
@@ -202,11 +204,10 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
unsigned int sl, tl;
for (sl = 1; sl < cfg->ss_number_layers; ++sl) {
for (tl = 1; tl < cfg->ts_number_layers; ++tl) {
- const int layer =
- LAYER_IDS_TO_IDX(sl, tl, cfg->ts_number_layers);
+ const int layer = LAYER_IDS_TO_IDX(sl, tl, cfg->ts_number_layers);
if (cfg->layer_target_bitrate[layer] <
cfg->layer_target_bitrate[layer - 1])
- ERROR("ts_target_bitrate entries are not increasing");
+ ERROR("ts_target_bitrate entries are not increasing");
}
}
@@ -222,24 +223,23 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
cfg->g_pass == VPX_RC_LAST_PASS) {
unsigned int i, alt_ref_sum = 0;
for (i = 0; i < cfg->ss_number_layers; ++i) {
- if (cfg->ss_enable_auto_alt_ref[i])
- ++alt_ref_sum;
+ if (cfg->ss_enable_auto_alt_ref[i]) ++alt_ref_sum;
}
if (alt_ref_sum > REF_FRAMES - cfg->ss_number_layers)
ERROR("Not enough ref buffers for svc alt ref frames");
if (cfg->ss_number_layers * cfg->ts_number_layers > 3 &&
cfg->g_error_resilient == 0)
- ERROR("Multiple frame context are not supported for more than 3 layers");
+ ERROR("Multiple frame context are not supported for more than 3 layers");
}
#endif
// VP9 does not support a lower bound on the keyframe interval in
// automatic keyframe placement mode.
- if (cfg->kf_mode != VPX_KF_DISABLED &&
- cfg->kf_min_dist != cfg->kf_max_dist &&
+ if (cfg->kf_mode != VPX_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist &&
cfg->kf_min_dist > 0)
- ERROR("kf_min_dist not supported in auto mode, use 0 "
- "or kf_max_dist instead.");
+ ERROR(
+ "kf_min_dist not supported in auto mode, use 0 "
+ "or kf_max_dist instead.");
RANGE_CHECK(extra_cfg, enable_auto_alt_ref, 0, 2);
RANGE_CHECK(extra_cfg, cpu_used, -8, 8);
@@ -252,12 +252,12 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
RANGE_CHECK(extra_cfg, cq_level, 0, 63);
RANGE_CHECK(cfg, g_bit_depth, VPX_BITS_8, VPX_BITS_12);
RANGE_CHECK(cfg, g_input_bit_depth, 8, 12);
- RANGE_CHECK(extra_cfg, content,
- VP9E_CONTENT_DEFAULT, VP9E_CONTENT_INVALID - 1);
+ RANGE_CHECK(extra_cfg, content, VP9E_CONTENT_DEFAULT,
+ VP9E_CONTENT_INVALID - 1);
// TODO(yaowu): remove this when ssim tuning is implemented for vp9
if (extra_cfg->tuning == VP8_TUNE_SSIM)
- ERROR("Option --tune=ssim is not currently supported in VP9.");
+ ERROR("Option --tune=ssim is not currently supported in VP9.");
if (cfg->g_pass == VPX_RC_LAST_PASS) {
const size_t packet_sz = sizeof(FIRSTPASS_STATS);
@@ -272,7 +272,7 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
if (cfg->ss_number_layers > 1 || cfg->ts_number_layers > 1) {
int i;
- unsigned int n_packets_per_layer[VPX_SS_MAX_LAYERS] = {0};
+ unsigned int n_packets_per_layer[VPX_SS_MAX_LAYERS] = { 0 };
stats = cfg->rc_twopass_stats_in.buf;
for (i = 0; i < n_packets; ++i) {
@@ -285,17 +285,18 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
for (i = 0; i < (int)cfg->ss_number_layers; ++i) {
unsigned int layer_id;
if (n_packets_per_layer[i] < 2) {
- ERROR("rc_twopass_stats_in requires at least two packets for each "
- "layer.");
+ ERROR(
+ "rc_twopass_stats_in requires at least two packets for each "
+ "layer.");
}
stats = (const FIRSTPASS_STATS *)cfg->rc_twopass_stats_in.buf +
n_packets - cfg->ss_number_layers + i;
layer_id = (int)stats->spatial_layer_id;
- if (layer_id >= cfg->ss_number_layers
- ||(unsigned int)(stats->count + 0.5) !=
- n_packets_per_layer[layer_id] - 1)
+ if (layer_id >= cfg->ss_number_layers ||
+ (unsigned int)(stats->count + 0.5) !=
+ n_packets_per_layer[layer_id] - 1)
ERROR("rc_twopass_stats_in missing EOS stats packet");
}
} else {
@@ -319,8 +320,7 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
cfg->g_bit_depth > VPX_BITS_8) {
ERROR("Codec high bit-depth not supported in profile < 2");
}
- if (cfg->g_profile <= (unsigned int)PROFILE_1 &&
- cfg->g_input_bit_depth > 8) {
+ if (cfg->g_profile <= (unsigned int)PROFILE_1 && cfg->g_input_bit_depth > 8) {
ERROR("Source high bit-depth not supported in profile < 2");
}
if (cfg->g_profile > (unsigned int)PROFILE_1 &&
@@ -328,8 +328,7 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
ERROR("Codec bit-depth 8 not supported in profile > 1");
}
RANGE_CHECK(extra_cfg, color_space, VPX_CS_UNKNOWN, VPX_CS_SRGB);
- RANGE_CHECK(extra_cfg, color_range,
- VPX_CR_STUDIO_RANGE, VPX_CR_FULL_RANGE);
+ RANGE_CHECK(extra_cfg, color_range, VPX_CR_STUDIO_RANGE, VPX_CR_FULL_RANGE);
return VPX_CODEC_OK;
}
@@ -338,14 +337,14 @@ static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx,
switch (img->fmt) {
case VPX_IMG_FMT_YV12:
case VPX_IMG_FMT_I420:
- case VPX_IMG_FMT_I42016:
- break;
+ case VPX_IMG_FMT_I42016: break;
case VPX_IMG_FMT_I422:
case VPX_IMG_FMT_I444:
case VPX_IMG_FMT_I440:
if (ctx->cfg.g_profile != (unsigned int)PROFILE_1) {
- ERROR("Invalid image format. I422, I444, I440 images are "
- "not supported in profile.");
+ ERROR(
+ "Invalid image format. I422, I444, I440 images are "
+ "not supported in profile.");
}
break;
case VPX_IMG_FMT_I42216:
@@ -353,13 +352,15 @@ static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx,
case VPX_IMG_FMT_I44016:
if (ctx->cfg.g_profile != (unsigned int)PROFILE_1 &&
ctx->cfg.g_profile != (unsigned int)PROFILE_3) {
- ERROR("Invalid image format. 16-bit I422, I444, I440 images are "
- "not supported in profile.");
+ ERROR(
+ "Invalid image format. 16-bit I422, I444, I440 images are "
+ "not supported in profile.");
}
break;
default:
- ERROR("Invalid image format. Only YV12, I420, I422, I444 images are "
- "supported.");
+ ERROR(
+ "Invalid image format. Only YV12, I420, I422, I444 images are "
+ "supported.");
break;
}
@@ -386,38 +387,30 @@ static int get_image_bps(const vpx_image_t *img) {
}
static vpx_codec_err_t set_encoder_config(
- VP9EncoderConfig *oxcf,
- const vpx_codec_enc_cfg_t *cfg,
- const struct vp9_extracfg *extra_cfg) {
+ VP9EncoderConfig *oxcf, const vpx_codec_enc_cfg_t *cfg,
+ const struct vp9_extracfg *extra_cfg) {
const int is_vbr = cfg->rc_end_usage == VPX_VBR;
int sl, tl;
oxcf->profile = cfg->g_profile;
oxcf->max_threads = (int)cfg->g_threads;
- oxcf->width = cfg->g_w;
- oxcf->height = cfg->g_h;
+ oxcf->width = cfg->g_w;
+ oxcf->height = cfg->g_h;
oxcf->bit_depth = cfg->g_bit_depth;
oxcf->input_bit_depth = cfg->g_input_bit_depth;
// guess a frame rate if out of whack, use 30
oxcf->init_framerate = (double)cfg->g_timebase.den / cfg->g_timebase.num;
- if (oxcf->init_framerate > 180)
- oxcf->init_framerate = 30;
+ if (oxcf->init_framerate > 180) oxcf->init_framerate = 30;
oxcf->mode = GOOD;
switch (cfg->g_pass) {
- case VPX_RC_ONE_PASS:
- oxcf->pass = 0;
- break;
- case VPX_RC_FIRST_PASS:
- oxcf->pass = 1;
- break;
- case VPX_RC_LAST_PASS:
- oxcf->pass = 2;
- break;
+ case VPX_RC_ONE_PASS: oxcf->pass = 0; break;
+ case VPX_RC_FIRST_PASS: oxcf->pass = 1; break;
+ case VPX_RC_LAST_PASS: oxcf->pass = 2; break;
}
- oxcf->lag_in_frames = cfg->g_pass == VPX_RC_FIRST_PASS ? 0
- : cfg->g_lag_in_frames;
+ oxcf->lag_in_frames =
+ cfg->g_pass == VPX_RC_FIRST_PASS ? 0 : cfg->g_lag_in_frames;
oxcf->rc_mode = cfg->rc_end_usage;
// Convert target bandwidth from Kbit/s to Bit/s
@@ -430,55 +423,56 @@ static vpx_codec_err_t set_encoder_config(
extra_cfg->lossless ? 0 : vp9_quantizer_to_qindex(cfg->rc_min_quantizer);
oxcf->worst_allowed_q =
extra_cfg->lossless ? 0 : vp9_quantizer_to_qindex(cfg->rc_max_quantizer);
- oxcf->cq_level = vp9_quantizer_to_qindex(extra_cfg->cq_level);
+ oxcf->cq_level = vp9_quantizer_to_qindex(extra_cfg->cq_level);
oxcf->fixed_q = -1;
- oxcf->under_shoot_pct = cfg->rc_undershoot_pct;
- oxcf->over_shoot_pct = cfg->rc_overshoot_pct;
+ oxcf->under_shoot_pct = cfg->rc_undershoot_pct;
+ oxcf->over_shoot_pct = cfg->rc_overshoot_pct;
- oxcf->scaled_frame_width = cfg->rc_scaled_width;
+ oxcf->scaled_frame_width = cfg->rc_scaled_width;
oxcf->scaled_frame_height = cfg->rc_scaled_height;
if (cfg->rc_resize_allowed == 1) {
oxcf->resize_mode =
- (oxcf->scaled_frame_width == 0 || oxcf->scaled_frame_height == 0) ?
- RESIZE_DYNAMIC : RESIZE_FIXED;
+ (oxcf->scaled_frame_width == 0 || oxcf->scaled_frame_height == 0)
+ ? RESIZE_DYNAMIC
+ : RESIZE_FIXED;
} else {
oxcf->resize_mode = RESIZE_NONE;
}
- oxcf->maximum_buffer_size_ms = is_vbr ? 240000 : cfg->rc_buf_sz;
+ oxcf->maximum_buffer_size_ms = is_vbr ? 240000 : cfg->rc_buf_sz;
oxcf->starting_buffer_level_ms = is_vbr ? 60000 : cfg->rc_buf_initial_sz;
- oxcf->optimal_buffer_level_ms = is_vbr ? 60000 : cfg->rc_buf_optimal_sz;
+ oxcf->optimal_buffer_level_ms = is_vbr ? 60000 : cfg->rc_buf_optimal_sz;
- oxcf->drop_frames_water_mark = cfg->rc_dropframe_thresh;
+ oxcf->drop_frames_water_mark = cfg->rc_dropframe_thresh;
- oxcf->two_pass_vbrbias = cfg->rc_2pass_vbr_bias_pct;
- oxcf->two_pass_vbrmin_section = cfg->rc_2pass_vbr_minsection_pct;
- oxcf->two_pass_vbrmax_section = cfg->rc_2pass_vbr_maxsection_pct;
+ oxcf->two_pass_vbrbias = cfg->rc_2pass_vbr_bias_pct;
+ oxcf->two_pass_vbrmin_section = cfg->rc_2pass_vbr_minsection_pct;
+ oxcf->two_pass_vbrmax_section = cfg->rc_2pass_vbr_maxsection_pct;
- oxcf->auto_key = cfg->kf_mode == VPX_KF_AUTO &&
- cfg->kf_min_dist != cfg->kf_max_dist;
+ oxcf->auto_key =
+ cfg->kf_mode == VPX_KF_AUTO && cfg->kf_min_dist != cfg->kf_max_dist;
- oxcf->key_freq = cfg->kf_max_dist;
+ oxcf->key_freq = cfg->kf_max_dist;
- oxcf->speed = abs(extra_cfg->cpu_used);
- oxcf->encode_breakout = extra_cfg->static_thresh;
- oxcf->enable_auto_arf = extra_cfg->enable_auto_alt_ref;
- oxcf->noise_sensitivity = extra_cfg->noise_sensitivity;
- oxcf->sharpness = extra_cfg->sharpness;
+ oxcf->speed = abs(extra_cfg->cpu_used);
+ oxcf->encode_breakout = extra_cfg->static_thresh;
+ oxcf->enable_auto_arf = extra_cfg->enable_auto_alt_ref;
+ oxcf->noise_sensitivity = extra_cfg->noise_sensitivity;
+ oxcf->sharpness = extra_cfg->sharpness;
- oxcf->two_pass_stats_in = cfg->rc_twopass_stats_in;
+ oxcf->two_pass_stats_in = cfg->rc_twopass_stats_in;
#if CONFIG_FP_MB_STATS
- oxcf->firstpass_mb_stats_in = cfg->rc_firstpass_mb_stats_in;
+ oxcf->firstpass_mb_stats_in = cfg->rc_firstpass_mb_stats_in;
#endif
oxcf->color_space = extra_cfg->color_space;
oxcf->color_range = extra_cfg->color_range;
- oxcf->render_width = extra_cfg->render_width;
+ oxcf->render_width = extra_cfg->render_width;
oxcf->render_height = extra_cfg->render_height;
oxcf->arnr_max_frames = extra_cfg->arnr_max_frames;
- oxcf->arnr_strength = extra_cfg->arnr_strength;
+ oxcf->arnr_strength = extra_cfg->arnr_strength;
oxcf->min_gf_interval = extra_cfg->min_gf_interval;
oxcf->max_gf_interval = extra_cfg->max_gf_interval;
@@ -493,21 +487,21 @@ static vpx_codec_err_t set_encoder_config(
// is encoded. But this will hurt multi-threaded encoder performance. So,
// it is recommended to use tile-rows=0 while encoding with threads > 1.
if (oxcf->max_threads > 1 && oxcf->tile_columns > 0)
- oxcf->tile_rows = 0;
+ oxcf->tile_rows = 0;
else
- oxcf->tile_rows = extra_cfg->tile_rows;
+ oxcf->tile_rows = extra_cfg->tile_rows;
- oxcf->error_resilient_mode = cfg->g_error_resilient;
+ oxcf->error_resilient_mode = cfg->g_error_resilient;
oxcf->frame_parallel_decoding_mode = extra_cfg->frame_parallel_decoding_mode;
oxcf->aq_mode = extra_cfg->aq_mode;
- oxcf->frame_periodic_boost = extra_cfg->frame_periodic_boost;
+ oxcf->frame_periodic_boost = extra_cfg->frame_periodic_boost;
oxcf->ss_number_layers = cfg->ss_number_layers;
oxcf->ts_number_layers = cfg->ts_number_layers;
- oxcf->temporal_layering_mode = (enum vp9e_temporal_layering_mode)
- cfg->temporal_layering_mode;
+ oxcf->temporal_layering_mode =
+ (enum vp9e_temporal_layering_mode)cfg->temporal_layering_mode;
oxcf->target_level = extra_cfg->target_level;
@@ -528,8 +522,8 @@ static vpx_codec_err_t set_encoder_config(
}
if (oxcf->ts_number_layers > 1) {
for (tl = 0; tl < VPX_TS_MAX_LAYERS; ++tl) {
- oxcf->ts_rate_decimator[tl] = cfg->ts_rate_decimator[tl] ?
- cfg->ts_rate_decimator[tl] : 1;
+ oxcf->ts_rate_decimator[tl] =
+ cfg->ts_rate_decimator[tl] ? cfg->ts_rate_decimator[tl] : 1;
}
} else if (oxcf->ts_number_layers == 1) {
oxcf->ts_rate_decimator[0] = 1;
@@ -571,7 +565,7 @@ static vpx_codec_err_t set_encoder_config(
}
static vpx_codec_err_t encoder_set_config(vpx_codec_alg_priv_t *ctx,
- const vpx_codec_enc_cfg_t *cfg) {
+ const vpx_codec_enc_cfg_t *cfg) {
vpx_codec_err_t res;
int force_key = 0;
@@ -601,8 +595,7 @@ static vpx_codec_err_t encoder_set_config(vpx_codec_alg_priv_t *ctx,
vp9_change_config(ctx->cpi, &ctx->oxcf);
}
- if (force_key)
- ctx->next_frame_flags |= VPX_EFLAG_FORCE_KF;
+ if (force_key) ctx->next_frame_flags |= VPX_EFLAG_FORCE_KF;
return res;
}
@@ -610,8 +603,7 @@ static vpx_codec_err_t encoder_set_config(vpx_codec_alg_priv_t *ctx,
static vpx_codec_err_t ctrl_get_quantizer(vpx_codec_alg_priv_t *ctx,
va_list args) {
int *const arg = va_arg(args, int *);
- if (arg == NULL)
- return VPX_CODEC_INVALID_PARAM;
+ if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
*arg = vp9_get_quantizer(ctx->cpi);
return VPX_CODEC_OK;
}
@@ -619,8 +611,7 @@ static vpx_codec_err_t ctrl_get_quantizer(vpx_codec_alg_priv_t *ctx,
static vpx_codec_err_t ctrl_get_quantizer64(vpx_codec_alg_priv_t *ctx,
va_list args) {
int *const arg = va_arg(args, int *);
- if (arg == NULL)
- return VPX_CODEC_INVALID_PARAM;
+ if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
*arg = vp9_qindex_to_quantizer(vp9_get_quantizer(ctx->cpi));
return VPX_CODEC_OK;
}
@@ -736,11 +727,10 @@ static vpx_codec_err_t ctrl_set_rc_max_inter_bitrate_pct(
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_rc_gf_cbr_boost_pct(
- vpx_codec_alg_priv_t *ctx, va_list args) {
+static vpx_codec_err_t ctrl_set_rc_gf_cbr_boost_pct(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
struct vp9_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.gf_cbr_boost_pct =
- CAST(VP9E_SET_GF_CBR_BOOST_PCT, args);
+ extra_cfg.gf_cbr_boost_pct = CAST(VP9E_SET_GF_CBR_BOOST_PCT, args);
return update_extra_cfg(ctx, &extra_cfg);
}
@@ -808,16 +798,13 @@ static vpx_codec_err_t encoder_init(vpx_codec_ctx_t *ctx,
if (ctx->priv == NULL) {
vpx_codec_alg_priv_t *const priv = vpx_calloc(1, sizeof(*priv));
- if (priv == NULL)
- return VPX_CODEC_MEM_ERROR;
+ if (priv == NULL) return VPX_CODEC_MEM_ERROR;
ctx->priv = (vpx_codec_priv_t *)priv;
ctx->priv->init_flags = ctx->init_flags;
ctx->priv->enc.total_encoders = 1;
- priv->buffer_pool =
- (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
- if (priv->buffer_pool == NULL)
- return VPX_CODEC_MEM_ERROR;
+ priv->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
+ if (priv->buffer_pool == NULL) return VPX_CODEC_MEM_ERROR;
#if CONFIG_MULTITHREAD
if (pthread_mutex_init(&priv->buffer_pool->pool_mutex, NULL)) {
@@ -876,7 +863,8 @@ static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx,
// Convert duration parameter from stream timebase to microseconds.
const uint64_t duration_us = (uint64_t)duration * 1000000 *
- (uint64_t)cfg->g_timebase.num /(uint64_t)cfg->g_timebase.den;
+ (uint64_t)cfg->g_timebase.num /
+ (uint64_t)cfg->g_timebase.den;
// If the deadline is more that the duration this frame is to be shown,
// use good quality mode. Otherwise use realtime mode.
@@ -885,11 +873,8 @@ static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx,
new_mode = BEST;
}
break;
- case VPX_RC_FIRST_PASS:
- break;
- case VPX_RC_LAST_PASS:
- new_mode = deadline > 0 ? GOOD : BEST;
- break;
+ case VPX_RC_FIRST_PASS: break;
+ case VPX_RC_LAST_PASS: new_mode = deadline > 0 ? GOOD : BEST; break;
}
if (deadline == VPX_DL_REALTIME) {
@@ -918,8 +903,7 @@ static int write_superframe_index(vpx_codec_alg_priv_t *ctx) {
// Choose the magnitude
for (mag = 0, mask = 0xff; mag < 4; mag++) {
- if (ctx->pending_frame_magnitude < mask)
- break;
+ if (ctx->pending_frame_magnitude < mask) break;
mask <<= 8;
mask |= 0xff;
}
@@ -980,20 +964,19 @@ static vpx_codec_frame_flags_t get_frame_pkt_flags(const VP9_COMP *cpi,
if (lib_flags & FRAMEFLAGS_KEY ||
(cpi->use_svc &&
- cpi->svc.layer_context[cpi->svc.spatial_layer_id *
- cpi->svc.number_temporal_layers +
- cpi->svc.temporal_layer_id].is_key_frame)
- )
+ cpi->svc.layer_context[cpi->svc.spatial_layer_id *
+ cpi->svc.number_temporal_layers +
+ cpi->svc.temporal_layer_id]
+ .is_key_frame))
flags |= VPX_FRAME_IS_KEY;
- if (cpi->droppable)
- flags |= VPX_FRAME_IS_DROPPABLE;
+ if (cpi->droppable) flags |= VPX_FRAME_IS_DROPPABLE;
return flags;
}
const size_t kMinCompressedSize = 8192;
-static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
+static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
const vpx_image_t *img,
vpx_codec_pts_t pts,
unsigned long duration,
@@ -1014,12 +997,11 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
// instance for its status to determine the compressed data size.
data_sz = ctx->cfg.g_w * ctx->cfg.g_h * get_image_bps(img) / 8 *
(cpi->multi_arf_allowed ? 8 : 2);
- if (data_sz < kMinCompressedSize)
- data_sz = kMinCompressedSize;
+ if (data_sz < kMinCompressedSize) data_sz = kMinCompressedSize;
if (ctx->cx_data == NULL || ctx->cx_data_sz < data_sz) {
ctx->cx_data_sz = data_sz;
free(ctx->cx_data);
- ctx->cx_data = (unsigned char*)malloc(ctx->cx_data_sz);
+ ctx->cx_data = (unsigned char *)malloc(ctx->cx_data_sz);
if (ctx->cx_data == NULL) {
return VPX_CODEC_MEM_ERROR;
}
@@ -1032,7 +1014,7 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
// Handle Flags
if (((flags & VP8_EFLAG_NO_UPD_GF) && (flags & VP8_EFLAG_FORCE_GF)) ||
- ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) {
+ ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) {
ctx->base.err_detail = "Conflicting flags.";
return VPX_CODEC_INVALID_PARAM;
}
@@ -1066,16 +1048,15 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
unsigned char *cx_data;
// Set up internal flags
- if (ctx->base.init_flags & VPX_CODEC_USE_PSNR)
- cpi->b_calculate_psnr = 1;
+ if (ctx->base.init_flags & VPX_CODEC_USE_PSNR) cpi->b_calculate_psnr = 1;
if (img != NULL) {
res = image2yuvconfig(img, &sd);
// Store the original flags in to the frame buffer. Will extract the
// key frame flag when we actually encode this frame.
- if (vp9_receive_raw_frame(cpi, flags | ctx->next_frame_flags,
- &sd, dst_time_stamp, dst_end_time_stamp)) {
+ if (vp9_receive_raw_frame(cpi, flags | ctx->next_frame_flags, &sd,
+ dst_time_stamp, dst_end_time_stamp)) {
res = update_error_state(ctx, &cpi->common.error);
}
ctx->next_frame_flags = 0;
@@ -1102,25 +1083,24 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
}
while (cx_data_sz >= ctx->cx_data_sz / 2 &&
- -1 != vp9_get_compressed_data(cpi, &lib_flags, &size,
- cx_data, &dst_time_stamp,
- &dst_end_time_stamp, !img)) {
+ -1 != vp9_get_compressed_data(cpi, &lib_flags, &size, cx_data,
+ &dst_time_stamp, &dst_end_time_stamp,
+ !img)) {
if (size) {
vpx_codec_cx_pkt_t pkt;
#if CONFIG_SPATIAL_SVC
if (cpi->use_svc)
cpi->svc.layer_context[cpi->svc.spatial_layer_id *
- cpi->svc.number_temporal_layers].layer_size += size;
+ cpi->svc.number_temporal_layers]
+ .layer_size += size;
#endif
// Pack invisible frames with the next visible frame
if (!cpi->common.show_frame ||
(cpi->use_svc &&
- cpi->svc.spatial_layer_id < cpi->svc.number_spatial_layers - 1)
- ) {
- if (ctx->pending_cx_data == 0)
- ctx->pending_cx_data = cx_data;
+ cpi->svc.spatial_layer_id < cpi->svc.number_spatial_layers - 1)) {
+ if (ctx->pending_cx_data == 0) ctx->pending_cx_data = cx_data;
ctx->pending_cx_data_sz += size;
ctx->pending_frame_sizes[ctx->pending_frame_count++] = size;
ctx->pending_frame_magnitude |= size;
@@ -1129,14 +1109,13 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
if (ctx->output_cx_pkt_cb.output_cx_pkt) {
pkt.kind = VPX_CODEC_CX_FRAME_PKT;
- pkt.data.frame.pts = ticks_to_timebase_units(timebase,
- dst_time_stamp);
- pkt.data.frame.duration =
- (unsigned long)ticks_to_timebase_units(timebase,
- dst_end_time_stamp - dst_time_stamp);
+ pkt.data.frame.pts =
+ ticks_to_timebase_units(timebase, dst_time_stamp);
+ pkt.data.frame.duration = (unsigned long)ticks_to_timebase_units(
+ timebase, dst_end_time_stamp - dst_time_stamp);
pkt.data.frame.flags = get_frame_pkt_flags(cpi, lib_flags);
pkt.data.frame.buf = ctx->pending_cx_data;
- pkt.data.frame.sz = size;
+ pkt.data.frame.sz = size;
ctx->pending_cx_data = NULL;
ctx->pending_cx_data_sz = 0;
ctx->pending_frame_count = 0;
@@ -1150,9 +1129,8 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
// Add the frame packet to the list of returned packets.
pkt.kind = VPX_CODEC_CX_FRAME_PKT;
pkt.data.frame.pts = ticks_to_timebase_units(timebase, dst_time_stamp);
- pkt.data.frame.duration =
- (unsigned long)ticks_to_timebase_units(timebase,
- dst_end_time_stamp - dst_time_stamp);
+ pkt.data.frame.duration = (unsigned long)ticks_to_timebase_units(
+ timebase, dst_end_time_stamp - dst_time_stamp);
pkt.data.frame.flags = get_frame_pkt_flags(cpi, lib_flags);
if (ctx->pending_cx_data) {
@@ -1163,18 +1141,18 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
if (!ctx->output_cx_pkt_cb.output_cx_pkt)
size += write_superframe_index(ctx);
pkt.data.frame.buf = ctx->pending_cx_data;
- pkt.data.frame.sz = ctx->pending_cx_data_sz;
+ pkt.data.frame.sz = ctx->pending_cx_data_sz;
ctx->pending_cx_data = NULL;
ctx->pending_cx_data_sz = 0;
ctx->pending_frame_count = 0;
ctx->pending_frame_magnitude = 0;
} else {
pkt.data.frame.buf = cx_data;
- pkt.data.frame.sz = size;
+ pkt.data.frame.sz = size;
}
pkt.data.frame.partition_id = -1;
- if(ctx->output_cx_pkt_cb.output_cx_pkt)
+ if (ctx->output_cx_pkt_cb.output_cx_pkt)
ctx->output_cx_pkt_cb.output_cx_pkt(&pkt,
ctx->output_cx_pkt_cb.user_priv);
else
@@ -1287,16 +1265,15 @@ static vpx_codec_err_t ctrl_set_previewpp(vpx_codec_alg_priv_t *ctx,
#endif
}
-
static vpx_image_t *encoder_get_preview(vpx_codec_alg_priv_t *ctx) {
YV12_BUFFER_CONFIG sd;
vp9_ppflags_t flags;
vp9_zero(flags);
if (ctx->preview_ppcfg.post_proc_flag) {
- flags.post_proc_flag = ctx->preview_ppcfg.post_proc_flag;
+ flags.post_proc_flag = ctx->preview_ppcfg.post_proc_flag;
flags.deblocking_level = ctx->preview_ppcfg.deblocking_level;
- flags.noise_level = ctx->preview_ppcfg.noise_level;
+ flags.noise_level = ctx->preview_ppcfg.noise_level;
}
if (vp9_get_preview_raw_frame(ctx->cpi, &sd, &flags) == 0) {
@@ -1316,14 +1293,13 @@ static vpx_codec_err_t ctrl_set_roi_map(vpx_codec_alg_priv_t *ctx,
return VPX_CODEC_INVALID_PARAM;
}
-
static vpx_codec_err_t ctrl_set_active_map(vpx_codec_alg_priv_t *ctx,
va_list args) {
vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *);
if (map) {
- if (!vp9_set_active_map(ctx->cpi, map->active_map,
- (int)map->rows, (int)map->cols))
+ if (!vp9_set_active_map(ctx->cpi, map->active_map, (int)map->rows,
+ (int)map->cols))
return VPX_CODEC_OK;
else
return VPX_CODEC_INVALID_PARAM;
@@ -1337,8 +1313,8 @@ static vpx_codec_err_t ctrl_get_active_map(vpx_codec_alg_priv_t *ctx,
vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *);
if (map) {
- if (!vp9_get_active_map(ctx->cpi, map->active_map,
- (int)map->rows, (int)map->cols))
+ if (!vp9_get_active_map(ctx->cpi, map->active_map, (int)map->rows,
+ (int)map->cols))
return VPX_CODEC_OK;
else
return VPX_CODEC_INVALID_PARAM;
@@ -1352,9 +1328,9 @@ static vpx_codec_err_t ctrl_set_scale_mode(vpx_codec_alg_priv_t *ctx,
vpx_scaling_mode_t *const mode = va_arg(args, vpx_scaling_mode_t *);
if (mode) {
- const int res = vp9_set_internal_size(ctx->cpi,
- (VPX_SCALING)mode->h_scaling_mode,
- (VPX_SCALING)mode->v_scaling_mode);
+ const int res =
+ vp9_set_internal_size(ctx->cpi, (VPX_SCALING)mode->h_scaling_mode,
+ (VPX_SCALING)mode->v_scaling_mode);
return (res == 0) ? VPX_CODEC_OK : VPX_CODEC_INVALID_PARAM;
} else {
return VPX_CODEC_INVALID_PARAM;
@@ -1374,10 +1350,8 @@ static vpx_codec_err_t ctrl_set_svc(vpx_codec_alg_priv_t *ctx, va_list args) {
vp9_set_svc(ctx->cpi, data);
if (data == 1 &&
- (cfg->g_pass == VPX_RC_FIRST_PASS ||
- cfg->g_pass == VPX_RC_LAST_PASS) &&
- cfg->ss_number_layers > 1 &&
- cfg->ts_number_layers > 1) {
+ (cfg->g_pass == VPX_RC_FIRST_PASS || cfg->g_pass == VPX_RC_LAST_PASS) &&
+ cfg->ss_number_layers > 1 && cfg->ts_number_layers > 1) {
return VPX_CODEC_INVALID_PARAM;
}
return VPX_CODEC_OK;
@@ -1431,8 +1405,7 @@ static vpx_codec_err_t ctrl_set_svc_parameters(vpx_codec_alg_priv_t *ctx,
for (tl = 0; tl < cpi->svc.number_temporal_layers; ++tl) {
const int layer =
LAYER_IDS_TO_IDX(sl, tl, cpi->svc.number_temporal_layers);
- LAYER_CONTEXT *lc =
- &cpi->svc.layer_context[layer];
+ LAYER_CONTEXT *lc = &cpi->svc.layer_context[layer];
lc->max_q = params->max_quantizers[layer];
lc->min_q = params->min_quantizers[layer];
lc->scaling_factor_num = params->scaling_factor_num[sl];
@@ -1493,125 +1466,124 @@ static vpx_codec_err_t ctrl_set_render_size(vpx_codec_alg_priv_t *ctx,
va_list args) {
struct vp9_extracfg extra_cfg = ctx->extra_cfg;
int *const render_size = va_arg(args, int *);
- extra_cfg.render_width = render_size[0];
+ extra_cfg.render_width = render_size[0];
extra_cfg.render_height = render_size[1];
return update_extra_cfg(ctx, &extra_cfg);
}
static vpx_codec_ctrl_fn_map_t encoder_ctrl_maps[] = {
- {VP8_COPY_REFERENCE, ctrl_copy_reference},
+ { VP8_COPY_REFERENCE, ctrl_copy_reference },
// Setters
- {VP8_SET_REFERENCE, ctrl_set_reference},
- {VP8_SET_POSTPROC, ctrl_set_previewpp},
- {VP8E_SET_ROI_MAP, ctrl_set_roi_map},
- {VP8E_SET_ACTIVEMAP, ctrl_set_active_map},
- {VP8E_SET_SCALEMODE, ctrl_set_scale_mode},
- {VP8E_SET_CPUUSED, ctrl_set_cpuused},
- {VP8E_SET_ENABLEAUTOALTREF, ctrl_set_enable_auto_alt_ref},
- {VP8E_SET_SHARPNESS, ctrl_set_sharpness},
- {VP8E_SET_STATIC_THRESHOLD, ctrl_set_static_thresh},
- {VP9E_SET_TILE_COLUMNS, ctrl_set_tile_columns},
- {VP9E_SET_TILE_ROWS, ctrl_set_tile_rows},
- {VP8E_SET_ARNR_MAXFRAMES, ctrl_set_arnr_max_frames},
- {VP8E_SET_ARNR_STRENGTH, ctrl_set_arnr_strength},
- {VP8E_SET_ARNR_TYPE, ctrl_set_arnr_type},
- {VP8E_SET_TUNING, ctrl_set_tuning},
- {VP8E_SET_CQ_LEVEL, ctrl_set_cq_level},
- {VP8E_SET_MAX_INTRA_BITRATE_PCT, ctrl_set_rc_max_intra_bitrate_pct},
- {VP9E_SET_MAX_INTER_BITRATE_PCT, ctrl_set_rc_max_inter_bitrate_pct},
- {VP9E_SET_GF_CBR_BOOST_PCT, ctrl_set_rc_gf_cbr_boost_pct},
- {VP9E_SET_LOSSLESS, ctrl_set_lossless},
- {VP9E_SET_FRAME_PARALLEL_DECODING, ctrl_set_frame_parallel_decoding_mode},
- {VP9E_SET_AQ_MODE, ctrl_set_aq_mode},
- {VP9E_SET_FRAME_PERIODIC_BOOST, ctrl_set_frame_periodic_boost},
- {VP9E_SET_SVC, ctrl_set_svc},
- {VP9E_SET_SVC_PARAMETERS, ctrl_set_svc_parameters},
- {VP9E_REGISTER_CX_CALLBACK, ctrl_register_cx_callback},
- {VP9E_SET_SVC_LAYER_ID, ctrl_set_svc_layer_id},
- {VP9E_SET_TUNE_CONTENT, ctrl_set_tune_content},
- {VP9E_SET_COLOR_SPACE, ctrl_set_color_space},
- {VP9E_SET_COLOR_RANGE, ctrl_set_color_range},
- {VP9E_SET_NOISE_SENSITIVITY, ctrl_set_noise_sensitivity},
- {VP9E_SET_MIN_GF_INTERVAL, ctrl_set_min_gf_interval},
- {VP9E_SET_MAX_GF_INTERVAL, ctrl_set_max_gf_interval},
- {VP9E_SET_SVC_REF_FRAME_CONFIG, ctrl_set_svc_ref_frame_config},
- {VP9E_SET_RENDER_SIZE, ctrl_set_render_size},
- {VP9E_SET_TARGET_LEVEL, ctrl_set_target_level},
+ { VP8_SET_REFERENCE, ctrl_set_reference },
+ { VP8_SET_POSTPROC, ctrl_set_previewpp },
+ { VP8E_SET_ROI_MAP, ctrl_set_roi_map },
+ { VP8E_SET_ACTIVEMAP, ctrl_set_active_map },
+ { VP8E_SET_SCALEMODE, ctrl_set_scale_mode },
+ { VP8E_SET_CPUUSED, ctrl_set_cpuused },
+ { VP8E_SET_ENABLEAUTOALTREF, ctrl_set_enable_auto_alt_ref },
+ { VP8E_SET_SHARPNESS, ctrl_set_sharpness },
+ { VP8E_SET_STATIC_THRESHOLD, ctrl_set_static_thresh },
+ { VP9E_SET_TILE_COLUMNS, ctrl_set_tile_columns },
+ { VP9E_SET_TILE_ROWS, ctrl_set_tile_rows },
+ { VP8E_SET_ARNR_MAXFRAMES, ctrl_set_arnr_max_frames },
+ { VP8E_SET_ARNR_STRENGTH, ctrl_set_arnr_strength },
+ { VP8E_SET_ARNR_TYPE, ctrl_set_arnr_type },
+ { VP8E_SET_TUNING, ctrl_set_tuning },
+ { VP8E_SET_CQ_LEVEL, ctrl_set_cq_level },
+ { VP8E_SET_MAX_INTRA_BITRATE_PCT, ctrl_set_rc_max_intra_bitrate_pct },
+ { VP9E_SET_MAX_INTER_BITRATE_PCT, ctrl_set_rc_max_inter_bitrate_pct },
+ { VP9E_SET_GF_CBR_BOOST_PCT, ctrl_set_rc_gf_cbr_boost_pct },
+ { VP9E_SET_LOSSLESS, ctrl_set_lossless },
+ { VP9E_SET_FRAME_PARALLEL_DECODING, ctrl_set_frame_parallel_decoding_mode },
+ { VP9E_SET_AQ_MODE, ctrl_set_aq_mode },
+ { VP9E_SET_FRAME_PERIODIC_BOOST, ctrl_set_frame_periodic_boost },
+ { VP9E_SET_SVC, ctrl_set_svc },
+ { VP9E_SET_SVC_PARAMETERS, ctrl_set_svc_parameters },
+ { VP9E_REGISTER_CX_CALLBACK, ctrl_register_cx_callback },
+ { VP9E_SET_SVC_LAYER_ID, ctrl_set_svc_layer_id },
+ { VP9E_SET_TUNE_CONTENT, ctrl_set_tune_content },
+ { VP9E_SET_COLOR_SPACE, ctrl_set_color_space },
+ { VP9E_SET_COLOR_RANGE, ctrl_set_color_range },
+ { VP9E_SET_NOISE_SENSITIVITY, ctrl_set_noise_sensitivity },
+ { VP9E_SET_MIN_GF_INTERVAL, ctrl_set_min_gf_interval },
+ { VP9E_SET_MAX_GF_INTERVAL, ctrl_set_max_gf_interval },
+ { VP9E_SET_SVC_REF_FRAME_CONFIG, ctrl_set_svc_ref_frame_config },
+ { VP9E_SET_RENDER_SIZE, ctrl_set_render_size },
+ { VP9E_SET_TARGET_LEVEL, ctrl_set_target_level },
// Getters
- {VP8E_GET_LAST_QUANTIZER, ctrl_get_quantizer},
- {VP8E_GET_LAST_QUANTIZER_64, ctrl_get_quantizer64},
- {VP9_GET_REFERENCE, ctrl_get_reference},
- {VP9E_GET_SVC_LAYER_ID, ctrl_get_svc_layer_id},
- {VP9E_GET_ACTIVEMAP, ctrl_get_active_map},
- {VP9E_GET_LEVEL, ctrl_get_level},
-
- { -1, NULL},
+ { VP8E_GET_LAST_QUANTIZER, ctrl_get_quantizer },
+ { VP8E_GET_LAST_QUANTIZER_64, ctrl_get_quantizer64 },
+ { VP9_GET_REFERENCE, ctrl_get_reference },
+ { VP9E_GET_SVC_LAYER_ID, ctrl_get_svc_layer_id },
+ { VP9E_GET_ACTIVEMAP, ctrl_get_active_map },
+ { VP9E_GET_LEVEL, ctrl_get_level },
+
+ { -1, NULL },
};
static vpx_codec_enc_cfg_map_t encoder_usage_cfg_map[] = {
- {
- 0,
- { // NOLINT
- 0, // g_usage
- 8, // g_threads
- 0, // g_profile
-
- 320, // g_width
- 240, // g_height
- VPX_BITS_8, // g_bit_depth
- 8, // g_input_bit_depth
-
- {1, 30}, // g_timebase
-
- 0, // g_error_resilient
-
- VPX_RC_ONE_PASS, // g_pass
-
- 25, // g_lag_in_frames
-
- 0, // rc_dropframe_thresh
- 0, // rc_resize_allowed
- 0, // rc_scaled_width
- 0, // rc_scaled_height
- 60, // rc_resize_down_thresold
- 30, // rc_resize_up_thresold
-
- VPX_VBR, // rc_end_usage
- {NULL, 0}, // rc_twopass_stats_in
- {NULL, 0}, // rc_firstpass_mb_stats_in
- 256, // rc_target_bandwidth
- 0, // rc_min_quantizer
- 63, // rc_max_quantizer
- 25, // rc_undershoot_pct
- 25, // rc_overshoot_pct
-
- 6000, // rc_max_buffer_size
- 4000, // rc_buffer_initial_size
- 5000, // rc_buffer_optimal_size
-
- 50, // rc_two_pass_vbrbias
- 0, // rc_two_pass_vbrmin_section
- 2000, // rc_two_pass_vbrmax_section
-
- // keyframing settings (kf)
- VPX_KF_AUTO, // g_kfmode
- 0, // kf_min_dist
- 128, // kf_max_dist
-
- VPX_SS_DEFAULT_LAYERS, // ss_number_layers
- {0},
- {0}, // ss_target_bitrate
- 1, // ts_number_layers
- {0}, // ts_target_bitrate
- {0}, // ts_rate_decimator
- 0, // ts_periodicity
- {0}, // ts_layer_id
- {0}, // layer_taget_bitrate
- 0 // temporal_layering_mode
- }
- },
+ { 0,
+ {
+ // NOLINT
+ 0, // g_usage
+ 8, // g_threads
+ 0, // g_profile
+
+ 320, // g_width
+ 240, // g_height
+ VPX_BITS_8, // g_bit_depth
+ 8, // g_input_bit_depth
+
+ { 1, 30 }, // g_timebase
+
+ 0, // g_error_resilient
+
+ VPX_RC_ONE_PASS, // g_pass
+
+ 25, // g_lag_in_frames
+
+ 0, // rc_dropframe_thresh
+ 0, // rc_resize_allowed
+ 0, // rc_scaled_width
+ 0, // rc_scaled_height
+ 60, // rc_resize_down_thresold
+ 30, // rc_resize_up_thresold
+
+ VPX_VBR, // rc_end_usage
+ { NULL, 0 }, // rc_twopass_stats_in
+ { NULL, 0 }, // rc_firstpass_mb_stats_in
+ 256, // rc_target_bandwidth
+ 0, // rc_min_quantizer
+ 63, // rc_max_quantizer
+ 25, // rc_undershoot_pct
+ 25, // rc_overshoot_pct
+
+ 6000, // rc_max_buffer_size
+ 4000, // rc_buffer_initial_size
+ 5000, // rc_buffer_optimal_size
+
+ 50, // rc_two_pass_vbrbias
+ 0, // rc_two_pass_vbrmin_section
+ 2000, // rc_two_pass_vbrmax_section
+
+ // keyframing settings (kf)
+ VPX_KF_AUTO, // g_kfmode
+ 0, // kf_min_dist
+ 128, // kf_max_dist
+
+ VPX_SS_DEFAULT_LAYERS, // ss_number_layers
+ { 0 },
+ { 0 }, // ss_target_bitrate
+ 1, // ts_number_layers
+ { 0 }, // ts_target_bitrate
+ { 0 }, // ts_rate_decimator
+ 0, // ts_periodicity
+ { 0 }, // ts_layer_id
+ { 0 }, // layer_taget_bitrate
+ 0 // temporal_layering_mode
+ } },
};
#ifndef VERSION_STRING
@@ -1623,25 +1595,27 @@ CODEC_INTERFACE(vpx_codec_vp9_cx) = {
#if CONFIG_VP9_HIGHBITDEPTH
VPX_CODEC_CAP_HIGHBITDEPTH |
#endif
- VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR, // vpx_codec_caps_t
- encoder_init, // vpx_codec_init_fn_t
- encoder_destroy, // vpx_codec_destroy_fn_t
- encoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t
- { // NOLINT
- NULL, // vpx_codec_peek_si_fn_t
- NULL, // vpx_codec_get_si_fn_t
- NULL, // vpx_codec_decode_fn_t
- NULL, // vpx_codec_frame_get_fn_t
- NULL // vpx_codec_set_fb_fn_t
+ VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR, // vpx_codec_caps_t
+ encoder_init, // vpx_codec_init_fn_t
+ encoder_destroy, // vpx_codec_destroy_fn_t
+ encoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t
+ {
+ // NOLINT
+ NULL, // vpx_codec_peek_si_fn_t
+ NULL, // vpx_codec_get_si_fn_t
+ NULL, // vpx_codec_decode_fn_t
+ NULL, // vpx_codec_frame_get_fn_t
+ NULL // vpx_codec_set_fb_fn_t
},
- { // NOLINT
- 1, // 1 cfg map
- encoder_usage_cfg_map, // vpx_codec_enc_cfg_map_t
- encoder_encode, // vpx_codec_encode_fn_t
- encoder_get_cxdata, // vpx_codec_get_cx_data_fn_t
- encoder_set_config, // vpx_codec_enc_config_set_fn_t
- NULL, // vpx_codec_get_global_headers_fn_t
- encoder_get_preview, // vpx_codec_get_preview_frame_fn_t
- NULL // vpx_codec_enc_mr_get_mem_loc_fn_t
+ {
+ // NOLINT
+ 1, // 1 cfg map
+ encoder_usage_cfg_map, // vpx_codec_enc_cfg_map_t
+ encoder_encode, // vpx_codec_encode_fn_t
+ encoder_get_cxdata, // vpx_codec_get_cx_data_fn_t
+ encoder_set_config, // vpx_codec_enc_config_set_fn_t
+ NULL, // vpx_codec_get_global_headers_fn_t
+ encoder_get_preview, // vpx_codec_get_preview_frame_fn_t
+ NULL // vpx_codec_enc_mr_get_mem_loc_fn_t
}
};
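For reference, a minimal sketch of how an application might drive the encoder interface declared above -- configuration defaults, encoder init, and a couple of the controls listed in encoder_ctrl_maps -- assuming only the public vpx/vpx_encoder.h and vpx/vp8cx.h headers; this is illustrative and not taken from the change itself.

#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"

static int init_vp9_encoder(vpx_codec_ctx_t *codec, unsigned int w,
                            unsigned int h) {
  vpx_codec_enc_cfg_t cfg;
  // Start from the defaults in encoder_usage_cfg_map, then override.
  if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), &cfg, 0)) return -1;
  cfg.g_w = w;                  // replaces the 320x240 defaults above
  cfg.g_h = h;
  cfg.rc_target_bitrate = 800;  // kbit/s; converted to bit/s internally
  if (vpx_codec_enc_init(codec, vpx_codec_vp9_cx(), &cfg, 0)) return -1;
  // Dispatched through encoder_ctrl_maps to ctrl_set_cpuused and
  // ctrl_set_tile_columns respectively.
  vpx_codec_control(codec, VP8E_SET_CPUUSED, 4);
  vpx_codec_control(codec, VP9E_SET_TILE_COLUMNS, 2);
  return 0;
}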
diff --git a/vp9/vp9_dx_iface.c b/vp9/vp9_dx_iface.c
index 841187de6..04b1dca29 100644
--- a/vp9/vp9_dx_iface.c
+++ b/vp9/vp9_dx_iface.c
@@ -41,8 +41,7 @@ static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx,
if (!ctx->priv) {
vpx_codec_alg_priv_t *const priv =
(vpx_codec_alg_priv_t *)vpx_calloc(1, sizeof(*priv));
- if (priv == NULL)
- return VPX_CODEC_MEM_ERROR;
+ if (priv == NULL) return VPX_CODEC_MEM_ERROR;
ctx->priv = (vpx_codec_priv_t *)priv;
ctx->priv->init_flags = ctx->init_flags;
@@ -51,7 +50,9 @@ static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx,
// Only do frame parallel decode when threads > 1.
priv->frame_parallel_decode =
(ctx->config.dec && (ctx->config.dec->threads > 1) &&
- (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING)) ? 1 : 0;
+ (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING))
+ ? 1
+ : 0;
if (ctx->config.dec) {
priv->cfg = *ctx->config.dec;
ctx->config.dec = &priv->cfg;
@@ -102,11 +103,10 @@ static vpx_codec_err_t decoder_destroy(vpx_codec_alg_priv_t *ctx) {
return VPX_CODEC_OK;
}
-static int parse_bitdepth_colorspace_sampling(
- BITSTREAM_PROFILE profile, struct vpx_read_bit_buffer *rb) {
+static int parse_bitdepth_colorspace_sampling(BITSTREAM_PROFILE profile,
+ struct vpx_read_bit_buffer *rb) {
vpx_color_space_t color_space;
- if (profile >= PROFILE_2)
- rb->bit_offset += 1; // Bit-depth 10 or 12.
+ if (profile >= PROFILE_2) rb->bit_offset += 1; // Bit-depth 10 or 12.
color_space = (vpx_color_space_t)vpx_rb_read_literal(rb, 3);
if (color_space != VPX_CS_SRGB) {
rb->bit_offset += 1; // [16,235] (including xvycc) vs [0,255] range.
@@ -125,17 +125,13 @@ static int parse_bitdepth_colorspace_sampling(
return 1;
}
-static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
- unsigned int data_sz,
- vpx_codec_stream_info_t *si,
- int *is_intra_only,
- vpx_decrypt_cb decrypt_cb,
- void *decrypt_state) {
+static vpx_codec_err_t decoder_peek_si_internal(
+ const uint8_t *data, unsigned int data_sz, vpx_codec_stream_info_t *si,
+ int *is_intra_only, vpx_decrypt_cb decrypt_cb, void *decrypt_state) {
int intra_only_flag = 0;
uint8_t clear_buffer[10];
- if (data + data_sz <= data)
- return VPX_CODEC_INVALID_PARAM;
+ if (data + data_sz <= data) return VPX_CODEC_INVALID_PARAM;
si->is_kf = 0;
si->w = si->h = 0;
@@ -148,8 +144,7 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
// A maximum of 6 bits are needed to read the frame marker, profile and
// show_existing_frame.
- if (data_sz < 1)
- return VPX_CODEC_UNSUP_BITSTREAM;
+ if (data_sz < 1) return VPX_CODEC_UNSUP_BITSTREAM;
{
int show_frame;
@@ -158,17 +153,14 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
const int frame_marker = vpx_rb_read_literal(&rb, 2);
const BITSTREAM_PROFILE profile = vp9_read_profile(&rb);
- if (frame_marker != VP9_FRAME_MARKER)
- return VPX_CODEC_UNSUP_BITSTREAM;
+ if (frame_marker != VP9_FRAME_MARKER) return VPX_CODEC_UNSUP_BITSTREAM;
- if (profile >= MAX_PROFILES)
- return VPX_CODEC_UNSUP_BITSTREAM;
+ if (profile >= MAX_PROFILES) return VPX_CODEC_UNSUP_BITSTREAM;
if (vpx_rb_read_bit(&rb)) { // show an existing frame
// If profile is > 2 and show_existing_frame is true, then at least 1 more
// byte (6+3=9 bits) is needed.
- if (profile > 2 && data_sz < 2)
- return VPX_CODEC_UNSUP_BITSTREAM;
+ if (profile > 2 && data_sz < 2) return VPX_CODEC_UNSUP_BITSTREAM;
vpx_rb_read_literal(&rb, 3); // Frame buffer to show.
return VPX_CODEC_OK;
}
@@ -176,16 +168,14 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
// For the rest of the function, a maximum of 9 more bytes are needed
// (computed by taking the maximum possible bits needed in each case). Note
// that this has to be updated if we read any more bits in this function.
- if (data_sz < 10)
- return VPX_CODEC_UNSUP_BITSTREAM;
+ if (data_sz < 10) return VPX_CODEC_UNSUP_BITSTREAM;
si->is_kf = !vpx_rb_read_bit(&rb);
show_frame = vpx_rb_read_bit(&rb);
error_resilient = vpx_rb_read_bit(&rb);
if (si->is_kf) {
- if (!vp9_read_sync_code(&rb))
- return VPX_CODEC_UNSUP_BITSTREAM;
+ if (!vp9_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
if (!parse_bitdepth_colorspace_sampling(profile, &rb))
return VPX_CODEC_UNSUP_BITSTREAM;
@@ -196,8 +186,7 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
rb.bit_offset += error_resilient ? 0 : 2; // reset_frame_context
if (intra_only_flag) {
- if (!vp9_read_sync_code(&rb))
- return VPX_CODEC_UNSUP_BITSTREAM;
+ if (!vp9_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
if (profile > PROFILE_0) {
if (!parse_bitdepth_colorspace_sampling(profile, &rb))
return VPX_CODEC_UNSUP_BITSTREAM;
@@ -207,8 +196,7 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
}
}
}
- if (is_intra_only != NULL)
- *is_intra_only = intra_only_flag;
+ if (is_intra_only != NULL) *is_intra_only = intra_only_flag;
return VPX_CODEC_OK;
}
@@ -221,8 +209,8 @@ static vpx_codec_err_t decoder_peek_si(const uint8_t *data,
static vpx_codec_err_t decoder_get_si(vpx_codec_alg_priv_t *ctx,
vpx_codec_stream_info_t *si) {
const size_t sz = (si->sz >= sizeof(vp9_stream_info_t))
- ? sizeof(vp9_stream_info_t)
- : sizeof(vpx_codec_stream_info_t);
+ ? sizeof(vp9_stream_info_t)
+ : sizeof(vpx_codec_stream_info_t);
memcpy(si, &ctx->si, sz);
si->sz = (unsigned int)sz;
@@ -234,8 +222,8 @@ static void set_error_detail(vpx_codec_alg_priv_t *ctx,
ctx->base.err_detail = error;
}
-static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx,
- const struct vpx_internal_error_info *error) {
+static vpx_codec_err_t update_error_state(
+ vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
if (error->error_code)
set_error_detail(ctx, error->has_detail ? error->detail : NULL);
@@ -278,10 +266,8 @@ static void set_default_ppflags(vp8_postproc_cfg_t *cfg) {
cfg->noise_level = 0;
}
-static void set_ppflags(const vpx_codec_alg_priv_t *ctx,
- vp9_ppflags_t *flags) {
- flags->post_proc_flag =
- ctx->postproc_cfg.post_proc_flag;
+static void set_ppflags(const vpx_codec_alg_priv_t *ctx, vp9_ppflags_t *flags) {
+ flags->post_proc_flag = ctx->postproc_cfg.post_proc_flag;
flags->deblocking_level = ctx->postproc_cfg.deblocking_level;
flags->noise_level = ctx->postproc_cfg.noise_level;
@@ -292,10 +278,8 @@ static int frame_worker_hook(void *arg1, void *arg2) {
const uint8_t *data = frame_worker_data->data;
(void)arg2;
- frame_worker_data->result =
- vp9_receive_compressed_data(frame_worker_data->pbi,
- frame_worker_data->data_size,
- &data);
+ frame_worker_data->result = vp9_receive_compressed_data(
+ frame_worker_data->pbi, frame_worker_data->data_size, &data);
frame_worker_data->data_end = data;
if (frame_worker_data->pbi->frame_parallel_decode) {
@@ -337,25 +321,24 @@ static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
ctx->num_cache_frames = 0;
ctx->need_resync = 1;
ctx->num_frame_workers =
- (ctx->frame_parallel_decode == 1) ? ctx->cfg.threads: 1;
+ (ctx->frame_parallel_decode == 1) ? ctx->cfg.threads : 1;
if (ctx->num_frame_workers > MAX_DECODE_THREADS)
ctx->num_frame_workers = MAX_DECODE_THREADS;
ctx->available_threads = ctx->num_frame_workers;
ctx->flushed = 0;
ctx->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
- if (ctx->buffer_pool == NULL)
- return VPX_CODEC_MEM_ERROR;
+ if (ctx->buffer_pool == NULL) return VPX_CODEC_MEM_ERROR;
#if CONFIG_MULTITHREAD
- if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) {
- set_error_detail(ctx, "Failed to allocate buffer pool mutex");
- return VPX_CODEC_MEM_ERROR;
- }
+ if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) {
+ set_error_detail(ctx, "Failed to allocate buffer pool mutex");
+ return VPX_CODEC_MEM_ERROR;
+ }
#endif
- ctx->frame_workers = (VPxWorker *)
- vpx_malloc(ctx->num_frame_workers * sizeof(*ctx->frame_workers));
+ ctx->frame_workers = (VPxWorker *)vpx_malloc(ctx->num_frame_workers *
+ sizeof(*ctx->frame_workers));
if (ctx->frame_workers == NULL) {
set_error_detail(ctx, "Failed to allocate frame_workers");
return VPX_CODEC_MEM_ERROR;
@@ -411,8 +394,7 @@ static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
// If postprocessing was enabled by the application and a
// configuration has not been provided, default it.
- if (!ctx->postproc_cfg_set &&
- (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC))
+ if (!ctx->postproc_cfg_set && (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC))
set_default_ppflags(&ctx->postproc_cfg);
init_buffer_callbacks(ctx);
@@ -442,11 +424,9 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
const vpx_codec_err_t res =
decoder_peek_si_internal(*data, data_sz, &ctx->si, &is_intra_only,
ctx->decrypt_cb, ctx->decrypt_state);
- if (res != VPX_CODEC_OK)
- return res;
+ if (res != VPX_CODEC_OK) return res;
- if (!ctx->si.is_kf && !is_intra_only)
- return VPX_CODEC_ERROR;
+ if (!ctx->si.is_kf && !is_intra_only) return VPX_CODEC_ERROR;
}
if (!ctx->frame_parallel_decode) {
@@ -520,7 +500,7 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
static void wait_worker_and_cache_frame(vpx_codec_alg_priv_t *ctx) {
YV12_BUFFER_CONFIG sd;
- vp9_ppflags_t flags = {0, 0, 0};
+ vp9_ppflags_t flags = { 0, 0, 0 };
const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
@@ -541,8 +521,7 @@ static void wait_worker_and_cache_frame(vpx_codec_alg_priv_t *ctx) {
frame_worker_data->user_priv);
ctx->frame_cache[ctx->frame_cache_write].img.fb_priv =
frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
- ctx->frame_cache_write =
- (ctx->frame_cache_write + 1) % FRAME_CACHE_SIZE;
+ ctx->frame_cache_write = (ctx->frame_cache_write + 1) % FRAME_CACHE_SIZE;
++ctx->num_cache_frames;
}
}
@@ -551,7 +530,7 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
const uint8_t *data, unsigned int data_sz,
void *user_priv, long deadline) {
const uint8_t *data_start = data;
- const uint8_t * const data_end = data + data_sz;
+ const uint8_t *const data_end = data + data_sz;
vpx_codec_err_t res;
uint32_t frame_sizes[8];
int frame_count;
@@ -567,14 +546,12 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
// Initialize the decoder workers on the first frame.
if (ctx->frame_workers == NULL) {
const vpx_codec_err_t res = init_decoder(ctx);
- if (res != VPX_CODEC_OK)
- return res;
+ if (res != VPX_CODEC_OK) return res;
}
res = vp9_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
ctx->decrypt_cb, ctx->decrypt_state);
- if (res != VPX_CODEC_OK)
- return res;
+ if (res != VPX_CODEC_OK) return res;
if (ctx->frame_parallel_decode) {
// Decode in frame parallel mode. When decoding in this mode, the frame
@@ -587,8 +564,8 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
for (i = 0; i < frame_count; ++i) {
const uint8_t *data_start_copy = data_start;
const uint32_t frame_size = frame_sizes[i];
- if (data_start < data
- || frame_size > (uint32_t) (data_end - data_start)) {
+ if (data_start < data ||
+ frame_size > (uint32_t)(data_end - data_start)) {
set_error_detail(ctx, "Invalid frame size in index");
return VPX_CODEC_CORRUPT_FRAME;
}
@@ -605,10 +582,9 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
}
}
- res = decode_one(ctx, &data_start_copy, frame_size, user_priv,
- deadline);
- if (res != VPX_CODEC_OK)
- return res;
+ res =
+ decode_one(ctx, &data_start_copy, frame_size, user_priv, deadline);
+ if (res != VPX_CODEC_OK) return res;
data_start += frame_size;
}
} else {
@@ -625,8 +601,7 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
}
res = decode_one(ctx, &data, data_sz, user_priv, deadline);
- if (res != VPX_CODEC_OK)
- return res;
+ if (res != VPX_CODEC_OK) return res;
}
} else {
// Decode in serial mode.
@@ -637,33 +612,30 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
const uint8_t *data_start_copy = data_start;
const uint32_t frame_size = frame_sizes[i];
vpx_codec_err_t res;
- if (data_start < data
- || frame_size > (uint32_t) (data_end - data_start)) {
+ if (data_start < data ||
+ frame_size > (uint32_t)(data_end - data_start)) {
set_error_detail(ctx, "Invalid frame size in index");
return VPX_CODEC_CORRUPT_FRAME;
}
- res = decode_one(ctx, &data_start_copy, frame_size, user_priv,
- deadline);
- if (res != VPX_CODEC_OK)
- return res;
+ res =
+ decode_one(ctx, &data_start_copy, frame_size, user_priv, deadline);
+ if (res != VPX_CODEC_OK) return res;
data_start += frame_size;
}
} else {
while (data_start < data_end) {
- const uint32_t frame_size = (uint32_t) (data_end - data_start);
- const vpx_codec_err_t res = decode_one(ctx, &data_start, frame_size,
- user_priv, deadline);
- if (res != VPX_CODEC_OK)
- return res;
+ const uint32_t frame_size = (uint32_t)(data_end - data_start);
+ const vpx_codec_err_t res =
+ decode_one(ctx, &data_start, frame_size, user_priv, deadline);
+ if (res != VPX_CODEC_OK) return res;
// Account for suboptimal termination by the encoder.
while (data_start < data_end) {
- const uint8_t marker = read_marker(ctx->decrypt_cb,
- ctx->decrypt_state, data_start);
- if (marker)
- break;
+ const uint8_t marker =
+ read_marker(ctx->decrypt_cb, ctx->decrypt_state, data_start);
+ if (marker) break;
++data_start;
}
}
@@ -698,9 +670,8 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
// Output the frames in the cache first.
if (ctx->num_cache_frames > 0) {
release_last_output_frame(ctx);
- ctx->last_show_frame = ctx->frame_cache[ctx->frame_cache_read].fb_idx;
- if (ctx->need_resync)
- return NULL;
+ ctx->last_show_frame = ctx->frame_cache[ctx->frame_cache_read].fb_idx;
+ if (ctx->need_resync) return NULL;
img = &ctx->frame_cache[ctx->frame_cache_read].img;
ctx->frame_cache_read = (ctx->frame_cache_read + 1) % FRAME_CACHE_SIZE;
--ctx->num_cache_frames;
@@ -712,10 +683,9 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
if (*iter == NULL && ctx->frame_workers != NULL) {
do {
YV12_BUFFER_CONFIG sd;
- vp9_ppflags_t flags = {0, 0, 0};
+ vp9_ppflags_t flags = { 0, 0, 0 };
const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
- VPxWorker *const worker =
- &ctx->frame_workers[ctx->next_output_worker_id];
+ VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
ctx->next_output_worker_id =
@@ -735,8 +705,7 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
release_last_output_frame(ctx);
ctx->last_show_frame = frame_worker_data->pbi->common.new_fb_idx;
- if (ctx->need_resync)
- return NULL;
+ if (ctx->need_resync) return NULL;
yuvconfig2image(&ctx->img, &sd, frame_worker_data->user_priv);
ctx->img.fb_priv = frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
img = &ctx->img;
@@ -747,8 +716,7 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
frame_worker_data->received_frame = 0;
++ctx->available_threads;
ctx->need_resync = 1;
- if (ctx->flushed != 1)
- return NULL;
+ if (ctx->flushed != 1) return NULL;
}
} while (ctx->next_output_worker_id != ctx->next_submit_worker_id);
}
@@ -756,8 +724,7 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
}
static vpx_codec_err_t decoder_set_fb_fn(
- vpx_codec_alg_priv_t *ctx,
- vpx_get_frame_buffer_cb_fn_t cb_get,
+ vpx_codec_alg_priv_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get,
vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) {
if (cb_get == NULL || cb_release == NULL) {
return VPX_CODEC_INVALID_PARAM;
@@ -808,7 +775,7 @@ static vpx_codec_err_t ctrl_copy_reference(vpx_codec_alg_priv_t *ctx,
}
if (data) {
- vpx_ref_frame_t *frame = (vpx_ref_frame_t *) data;
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
YV12_BUFFER_CONFIG sd;
VPxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
@@ -831,7 +798,7 @@ static vpx_codec_err_t ctrl_get_reference(vpx_codec_alg_priv_t *ctx,
}
if (data) {
- YV12_BUFFER_CONFIG* fb;
+ YV12_BUFFER_CONFIG *fb;
VPxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
fb = get_ref_frame(&frame_worker_data->pbi->common, data->idx);
@@ -1022,8 +989,7 @@ static vpx_codec_err_t ctrl_set_byte_alignment(vpx_codec_alg_priv_t *ctx,
ctx->byte_alignment = byte_alignment;
if (ctx->frame_workers) {
VPxWorker *const worker = ctx->frame_workers;
- FrameWorkerData *const frame_worker_data =
- (FrameWorkerData *)worker->data1;
+ FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
frame_worker_data->pbi->common.byte_alignment = byte_alignment;
}
return VPX_CODEC_OK;
@@ -1043,29 +1009,29 @@ static vpx_codec_err_t ctrl_set_skip_loop_filter(vpx_codec_alg_priv_t *ctx,
}
static vpx_codec_ctrl_fn_map_t decoder_ctrl_maps[] = {
- {VP8_COPY_REFERENCE, ctrl_copy_reference},
+ { VP8_COPY_REFERENCE, ctrl_copy_reference },
// Setters
- {VP8_SET_REFERENCE, ctrl_set_reference},
- {VP8_SET_POSTPROC, ctrl_set_postproc},
- {VP8_SET_DBG_COLOR_REF_FRAME, ctrl_set_dbg_options},
- {VP8_SET_DBG_COLOR_MB_MODES, ctrl_set_dbg_options},
- {VP8_SET_DBG_COLOR_B_MODES, ctrl_set_dbg_options},
- {VP8_SET_DBG_DISPLAY_MV, ctrl_set_dbg_options},
- {VP9_INVERT_TILE_DECODE_ORDER, ctrl_set_invert_tile_order},
- {VPXD_SET_DECRYPTOR, ctrl_set_decryptor},
- {VP9_SET_BYTE_ALIGNMENT, ctrl_set_byte_alignment},
- {VP9_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter},
+ { VP8_SET_REFERENCE, ctrl_set_reference },
+ { VP8_SET_POSTPROC, ctrl_set_postproc },
+ { VP8_SET_DBG_COLOR_REF_FRAME, ctrl_set_dbg_options },
+ { VP8_SET_DBG_COLOR_MB_MODES, ctrl_set_dbg_options },
+ { VP8_SET_DBG_COLOR_B_MODES, ctrl_set_dbg_options },
+ { VP8_SET_DBG_DISPLAY_MV, ctrl_set_dbg_options },
+ { VP9_INVERT_TILE_DECODE_ORDER, ctrl_set_invert_tile_order },
+ { VPXD_SET_DECRYPTOR, ctrl_set_decryptor },
+ { VP9_SET_BYTE_ALIGNMENT, ctrl_set_byte_alignment },
+ { VP9_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter },
// Getters
- {VP8D_GET_LAST_REF_UPDATES, ctrl_get_last_ref_updates},
- {VP8D_GET_FRAME_CORRUPTED, ctrl_get_frame_corrupted},
- {VP9_GET_REFERENCE, ctrl_get_reference},
- {VP9D_GET_DISPLAY_SIZE, ctrl_get_render_size},
- {VP9D_GET_BIT_DEPTH, ctrl_get_bit_depth},
- {VP9D_GET_FRAME_SIZE, ctrl_get_frame_size},
-
- { -1, NULL},
+ { VP8D_GET_LAST_REF_UPDATES, ctrl_get_last_ref_updates },
+ { VP8D_GET_FRAME_CORRUPTED, ctrl_get_frame_corrupted },
+ { VP9_GET_REFERENCE, ctrl_get_reference },
+ { VP9D_GET_DISPLAY_SIZE, ctrl_get_render_size },
+ { VP9D_GET_BIT_DEPTH, ctrl_get_bit_depth },
+ { VP9D_GET_FRAME_SIZE, ctrl_get_frame_size },
+
+ { -1, NULL },
};
#ifndef VERSION_STRING
@@ -1076,24 +1042,26 @@ CODEC_INTERFACE(vpx_codec_vp9_dx) = {
VPX_CODEC_INTERNAL_ABI_VERSION,
VPX_CODEC_CAP_DECODER | VP9_CAP_POSTPROC |
VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER, // vpx_codec_caps_t
- decoder_init, // vpx_codec_init_fn_t
- decoder_destroy, // vpx_codec_destroy_fn_t
- decoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t
- { // NOLINT
- decoder_peek_si, // vpx_codec_peek_si_fn_t
- decoder_get_si, // vpx_codec_get_si_fn_t
- decoder_decode, // vpx_codec_decode_fn_t
- decoder_get_frame, // vpx_codec_frame_get_fn_t
- decoder_set_fb_fn, // vpx_codec_set_fb_fn_t
+ decoder_init, // vpx_codec_init_fn_t
+ decoder_destroy, // vpx_codec_destroy_fn_t
+ decoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t
+ {
+ // NOLINT
+ decoder_peek_si, // vpx_codec_peek_si_fn_t
+ decoder_get_si, // vpx_codec_get_si_fn_t
+ decoder_decode, // vpx_codec_decode_fn_t
+ decoder_get_frame, // vpx_codec_frame_get_fn_t
+ decoder_set_fb_fn, // vpx_codec_set_fb_fn_t
},
- { // NOLINT
- 0,
- NULL, // vpx_codec_enc_cfg_map_t
- NULL, // vpx_codec_encode_fn_t
- NULL, // vpx_codec_get_cx_data_fn_t
- NULL, // vpx_codec_enc_config_set_fn_t
- NULL, // vpx_codec_get_global_headers_fn_t
- NULL, // vpx_codec_get_preview_frame_fn_t
- NULL // vpx_codec_enc_mr_get_mem_loc_fn_t
+ {
+ // NOLINT
+ 0,
+ NULL, // vpx_codec_enc_cfg_map_t
+ NULL, // vpx_codec_encode_fn_t
+ NULL, // vpx_codec_get_cx_data_fn_t
+ NULL, // vpx_codec_enc_config_set_fn_t
+ NULL, // vpx_codec_get_global_headers_fn_t
+ NULL, // vpx_codec_get_preview_frame_fn_t
+ NULL // vpx_codec_enc_mr_get_mem_loc_fn_t
}
};
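Likewise, a rough sketch of the decode-side calling sequence these entry points serve (decoder_init, decoder_decode, decoder_get_frame), assuming vpx/vpx_decoder.h and vpx/vp8dx.h; the threads/flag combination mirrors the frame_parallel_decode check in decoder_init and is only an illustration.

#include "vpx/vpx_decoder.h"
#include "vpx/vp8dx.h"

static vpx_codec_err_t decode_buffer(const uint8_t *buf, unsigned int buf_sz) {
  vpx_codec_ctx_t codec;
  vpx_codec_dec_cfg_t cfg = { 4, 0, 0 };  // threads, w, h
  vpx_codec_err_t res;
  // threads > 1 together with the flag below selects frame-parallel decode.
  res = vpx_codec_dec_init(&codec, vpx_codec_vp9_dx(), &cfg,
                           VPX_CODEC_USE_FRAME_THREADING);
  if (res != VPX_CODEC_OK) return res;
  res = vpx_codec_decode(&codec, buf, buf_sz, NULL, 0);
  if (res == VPX_CODEC_OK) {
    vpx_codec_iter_t iter = NULL;
    vpx_image_t *img;
    while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL) {
      // Consume img->planes[] / img->stride[] here.
    }
  }
  vpx_codec_destroy(&codec);
  return res;
}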
diff --git a/vp9/vp9_dx_iface.h b/vp9/vp9_dx_iface.h
index e0e948e16..cc3d51842 100644
--- a/vp9/vp9_dx_iface.h
+++ b/vp9/vp9_dx_iface.h
@@ -17,7 +17,7 @@ typedef vpx_codec_stream_info_t vp9_stream_info_t;
// This limit is due to framebuffer numbers.
// TODO(hkuang): Remove this limit after implementing ondemand framebuffers.
-#define FRAME_CACHE_SIZE 6 // Cache maximum 6 decoded frames.
+#define FRAME_CACHE_SIZE 6 // Cache maximum 6 decoded frames.
typedef struct cache_frame {
int fb_idx;
@@ -25,36 +25,36 @@ typedef struct cache_frame {
} cache_frame;
struct vpx_codec_alg_priv {
- vpx_codec_priv_t base;
- vpx_codec_dec_cfg_t cfg;
- vp9_stream_info_t si;
- int postproc_cfg_set;
- vp8_postproc_cfg_t postproc_cfg;
- vpx_decrypt_cb decrypt_cb;
- void *decrypt_state;
- vpx_image_t img;
- int img_avail;
- int flushed;
- int invert_tile_order;
- int last_show_frame; // Index of last output frame.
- int byte_alignment;
- int skip_loop_filter;
+ vpx_codec_priv_t base;
+ vpx_codec_dec_cfg_t cfg;
+ vp9_stream_info_t si;
+ int postproc_cfg_set;
+ vp8_postproc_cfg_t postproc_cfg;
+ vpx_decrypt_cb decrypt_cb;
+ void *decrypt_state;
+ vpx_image_t img;
+ int img_avail;
+ int flushed;
+ int invert_tile_order;
+ int last_show_frame; // Index of last output frame.
+ int byte_alignment;
+ int skip_loop_filter;
// Frame parallel related.
- int frame_parallel_decode; // frame-based threading.
- VPxWorker *frame_workers;
- int num_frame_workers;
- int next_submit_worker_id;
- int last_submit_worker_id;
- int next_output_worker_id;
- int available_threads;
- cache_frame frame_cache[FRAME_CACHE_SIZE];
- int frame_cache_write;
- int frame_cache_read;
- int num_cache_frames;
- int need_resync; // wait for key/intra-only frame
+ int frame_parallel_decode; // frame-based threading.
+ VPxWorker *frame_workers;
+ int num_frame_workers;
+ int next_submit_worker_id;
+ int last_submit_worker_id;
+ int next_output_worker_id;
+ int available_threads;
+ cache_frame frame_cache[FRAME_CACHE_SIZE];
+ int frame_cache_write;
+ int frame_cache_read;
+ int num_cache_frames;
+ int need_resync; // wait for key/intra-only frame
// BufferPool that holds all reference frames. Shared by all the FrameWorkers.
- BufferPool *buffer_pool;
+ BufferPool *buffer_pool;
// External frame buffer info to save for VP9 common.
void *ext_priv; // Private data associated with the external frame buffers.
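The ext_priv field and decoder_set_fb_fn above exist to support externally supplied frame buffers (the interface advertises VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER). Below is a deliberately naive sketch of the callback pair an application could register, assuming vpx/vpx_decoder.h and vpx/vpx_frame_buffer.h; a real allocator would pool and reuse buffers.

#include <stdlib.h>
#include "vpx/vpx_decoder.h"
#include "vpx/vpx_frame_buffer.h"

static int get_fb(void *priv, size_t min_size, vpx_codec_frame_buffer_t *fb) {
  (void)priv;
  fb->data = (uint8_t *)malloc(min_size);
  if (fb->data == NULL) return -1;
  fb->size = min_size;
  fb->priv = fb->data;  // remembered so release_fb() can free it later
  return 0;
}

static int release_fb(void *priv, vpx_codec_frame_buffer_t *fb) {
  (void)priv;
  free(fb->priv);
  return 0;
}

// Registered once after vpx_codec_dec_init() and before the first decode:
//   vpx_codec_set_frame_buffer_functions(&codec, get_fb, release_fb, NULL);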
diff --git a/vp9/vp9_iface_common.h b/vp9/vp9_iface_common.h
index 44a5e8157..d68872750 100644
--- a/vp9/vp9_iface_common.h
+++ b/vp9/vp9_iface_common.h
@@ -12,7 +12,7 @@
#include "vpx_ports/mem.h"
-static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
+static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
void *user_priv) {
/** vpx_img_wrap() doesn't allow specifying independent strides for
* the Y, U, and V planes, nor other alignment adjustments that
@@ -61,9 +61,9 @@ static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
// of the image.
img->fmt = (vpx_img_fmt_t)(img->fmt | VPX_IMG_FMT_HIGHBITDEPTH);
img->bit_depth = yv12->bit_depth;
- img->planes[VPX_PLANE_Y] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->y_buffer);
- img->planes[VPX_PLANE_U] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->u_buffer);
- img->planes[VPX_PLANE_V] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->v_buffer);
+ img->planes[VPX_PLANE_Y] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->y_buffer);
+ img->planes[VPX_PLANE_U] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->u_buffer);
+ img->planes[VPX_PLANE_V] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->v_buffer);
img->planes[VPX_PLANE_ALPHA] = NULL;
img->stride[VPX_PLANE_Y] = 2 * yv12->y_stride;
img->stride[VPX_PLANE_U] = 2 * yv12->uv_stride;
@@ -84,17 +84,17 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
yv12->u_buffer = img->planes[VPX_PLANE_U];
yv12->v_buffer = img->planes[VPX_PLANE_V];
- yv12->y_crop_width = img->d_w;
+ yv12->y_crop_width = img->d_w;
yv12->y_crop_height = img->d_h;
- yv12->render_width = img->r_w;
+ yv12->render_width = img->r_w;
yv12->render_height = img->r_h;
- yv12->y_width = img->d_w;
+ yv12->y_width = img->d_w;
yv12->y_height = img->d_h;
- yv12->uv_width = img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2
- : yv12->y_width;
- yv12->uv_height = img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2
- : yv12->y_height;
+ yv12->uv_width =
+ img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2 : yv12->y_width;
+ yv12->uv_height =
+ img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2 : yv12->y_height;
yv12->uv_crop_width = yv12->uv_width;
yv12->uv_crop_height = yv12->uv_height;
@@ -124,9 +124,9 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
} else {
yv12->flags = 0;
}
- yv12->border = (yv12->y_stride - img->w) / 2;
+ yv12->border = (yv12->y_stride - img->w) / 2;
#else
- yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
+ yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
#endif // CONFIG_VP9_HIGHBITDEPTH
yv12->subsampling_x = img->x_chroma_shift;
yv12->subsampling_y = img->y_chroma_shift;
@@ -135,12 +135,9 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
static VP9_REFFRAME ref_frame_to_vp9_reframe(vpx_ref_frame_type_t frame) {
switch (frame) {
- case VP8_LAST_FRAME:
- return VP9_LAST_FLAG;
- case VP8_GOLD_FRAME:
- return VP9_GOLD_FLAG;
- case VP8_ALTR_FRAME:
- return VP9_ALT_FLAG;
+ case VP8_LAST_FRAME: return VP9_LAST_FLAG;
+ case VP8_GOLD_FRAME: return VP9_GOLD_FLAG;
+ case VP8_ALTR_FRAME: return VP9_ALT_FLAG;
}
assert(0 && "Invalid Reference Frame");
return VP9_LAST_FLAG;