summaryrefslogtreecommitdiff
path: root/vp8/encoder
diff options
context:
space:
mode:
Diffstat (limited to 'vp8/encoder')
-rw-r--r--vp8/encoder/bitstream.c113
-rw-r--r--vp8/encoder/block.h20
-rw-r--r--vp8/encoder/boolhuff.h2
-rw-r--r--vp8/encoder/denoising.c74
-rw-r--r--vp8/encoder/denoising.h4
-rw-r--r--vp8/encoder/encodeframe.c250
-rw-r--r--vp8/encoder/encodemb.c12
-rw-r--r--vp8/encoder/encodemv.c77
-rw-r--r--vp8/encoder/ethreading.c77
-rw-r--r--vp8/encoder/firstpass.c1145
-rw-r--r--vp8/encoder/lookahead.c19
-rw-r--r--vp8/encoder/mcomp.c231
-rw-r--r--vp8/encoder/mcomp.h13
-rw-r--r--vp8/encoder/onyx_if.c1028
-rw-r--r--vp8/encoder/onyx_int.h136
-rw-r--r--vp8/encoder/pickinter.c59
-rw-r--r--vp8/encoder/picklpf.c84
-rw-r--r--vp8/encoder/psnr.c2
-rw-r--r--vp8/encoder/quantize.c111
-rw-r--r--vp8/encoder/ratectrl.c514
-rw-r--r--vp8/encoder/ratectrl.h2
-rw-r--r--vp8/encoder/rdopt.c299
-rw-r--r--vp8/encoder/segmentation.c20
-rw-r--r--vp8/encoder/temporal_filter.c108
-rw-r--r--vp8/encoder/x86/denoising_sse2.c31
25 files changed, 2350 insertions, 2081 deletions
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index d7cd5a9b0..92a7e067b 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -172,7 +172,7 @@ void vp8_pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount)
while (p < stop)
{
const int t = p->Token;
- const vp8_token *a = vp8_coef_encodings + t;
+ vp8_token *a = vp8_coef_encodings + t;
const vp8_extra_bit_struct *b = vp8_extra_bits + t;
int i = 0;
const unsigned char *pp = p->context_tree;
@@ -461,7 +461,7 @@ static void write_mv
static void write_mb_features(vp8_writer *w, const MB_MODE_INFO *mi, const MACROBLOCKD *x)
{
- // Encode the MB segment id.
+ /* Encode the MB segment id. */
if (x->segmentation_enabled && x->update_mb_segmentation_map)
{
switch (mi->segment_id)
@@ -483,7 +483,7 @@ static void write_mb_features(vp8_writer *w, const MB_MODE_INFO *mi, const MACRO
vp8_write(w, 1, x->mb_segment_tree_probs[2]);
break;
- // TRAP.. This should not happen
+ /* TRAP.. This should not happen */
default:
vp8_write(w, 0, x->mb_segment_tree_probs[0]);
vp8_write(w, 0, x->mb_segment_tree_probs[1]);
@@ -497,7 +497,7 @@ void vp8_convert_rfct_to_prob(VP8_COMP *const cpi)
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
- // Calculate the probabilities used to code the ref frame based on useage
+ /* Calculate the probabilities used to code the ref frame based on usage */
if (!(cpi->prob_intra_coded = rf_intra * 255 / (rf_intra + rf_inter)))
cpi->prob_intra_coded = 1;
@@ -571,8 +571,10 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
MACROBLOCKD *xd = &cpi->mb.e_mbd;
- // Distance of Mb to the various image edges.
- // These specified to 8th pel as they are always compared to MV values that are in 1/8th pel units
+ /* Distance of Mb to the various image edges.
+ * These specified to 8th pel as they are always compared to MV
+ * values that are in 1/8th pel units
+ */
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
xd->mb_to_top_edge = -((mb_row * 16)) << 3;
@@ -779,7 +781,7 @@ static void write_kfmodes(VP8_COMP *cpi)
write_uv_mode(bc, (m++)->mbmi.uv_mode, vp8_kf_uv_mode_prob);
}
- m++; // skip L prediction border
+ m++; /* skip L prediction border */
}
}
@@ -878,9 +880,6 @@ static int independent_coef_context_savings(VP8_COMP *cpi)
/* at every context */
/* calc probs and branch cts for this frame only */
- //vp8_prob new_p [ENTROPY_NODES];
- //unsigned int branch_ct [ENTROPY_NODES] [2];
-
int t = 0; /* token/prob index */
vp8_tree_probs_from_distribution(
@@ -940,9 +939,6 @@ static int default_coef_context_savings(VP8_COMP *cpi)
/* at every context */
/* calc probs and branch cts for this frame only */
- //vp8_prob new_p [ENTROPY_NODES];
- //unsigned int branch_ct [ENTROPY_NODES] [2];
-
int t = 0; /* token/prob index */
vp8_tree_probs_from_distribution(
@@ -1004,7 +1000,7 @@ int vp8_estimate_entropy_savings(VP8_COMP *cpi)
int new_intra, new_last, new_garf, oldtotal, newtotal;
int ref_frame_cost[MAX_REF_FRAMES];
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state();
if (cpi->common.frame_type != KEY_FRAME)
{
@@ -1026,7 +1022,7 @@ int vp8_estimate_entropy_savings(VP8_COMP *cpi)
rfct[ALTREF_FRAME] * ref_frame_cost[ALTREF_FRAME];
- // old costs
+ /* old costs */
vp8_calc_ref_frame_costs(ref_frame_cost,cpi->prob_intra_coded,
cpi->prob_last_coded,cpi->prob_gf_coded);
@@ -1078,7 +1074,7 @@ void vp8_update_coef_probs(VP8_COMP *cpi)
#endif
int savings = 0;
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state();
do
{
@@ -1110,21 +1106,15 @@ void vp8_update_coef_probs(VP8_COMP *cpi)
}
do
{
- //note: use result from vp8_estimate_entropy_savings, so no need to call vp8_tree_probs_from_distribution here.
+ /* note: use result from vp8_estimate_entropy_savings, so no
+ * need to call vp8_tree_probs_from_distribution here.
+ */
+
/* at every context */
/* calc probs and branch cts for this frame only */
- //vp8_prob new_p [ENTROPY_NODES];
- //unsigned int branch_ct [ENTROPY_NODES] [2];
-
int t = 0; /* token/prob index */
- //vp8_tree_probs_from_distribution(
- // MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
- // new_p, branch_ct, (unsigned int *)cpi->coef_counts [i][j][k],
- // 256, 1
- // );
-
do
{
const vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
@@ -1295,14 +1285,16 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
Sectionbits[active_section = 1] += sizeof(VP8_HEADER) * 8 * 256;
#endif
- // every keyframe send startcode, width, height, scale factor, clamp and color type
+ /* every keyframe send startcode, width, height, scale factor, clamp
+ * and color type
+ */
if (oh.type == KEY_FRAME)
{
int v;
validate_buffer(cx_data, 7, cx_data_end, &cpi->common.error);
- // Start / synch code
+ /* Start / synch code */
cx_data[0] = 0x9D;
cx_data[1] = 0x01;
cx_data[2] = 0x2a;
@@ -1321,7 +1313,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
vp8_start_encode(bc, cx_data, cx_data_end);
- // signal clr type
+ /* signal clr type */
vp8_write_bit(bc, pc->clr_type);
vp8_write_bit(bc, pc->clamp_type);
@@ -1330,13 +1322,13 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
vp8_start_encode(bc, cx_data, cx_data_end);
- // Signal whether or not Segmentation is enabled
+ /* Signal whether or not Segmentation is enabled */
vp8_write_bit(bc, xd->segmentation_enabled);
- // Indicate which features are enabled
+ /* Indicate which features are enabled */
if (xd->segmentation_enabled)
{
- // Signal whether or not the segmentation map is being updated.
+ /* Signal whether or not the segmentation map is being updated. */
vp8_write_bit(bc, xd->update_mb_segmentation_map);
vp8_write_bit(bc, xd->update_mb_segmentation_data);
@@ -1346,15 +1338,15 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
vp8_write_bit(bc, xd->mb_segement_abs_delta);
- // For each segmentation feature (Quant and loop filter level)
+ /* For each segmentation feature (Quant and loop filter level) */
for (i = 0; i < MB_LVL_MAX; i++)
{
- // For each of the segments
+ /* For each of the segments */
for (j = 0; j < MAX_MB_SEGMENTS; j++)
{
Data = xd->segment_feature_data[i][j];
- // Frame level data
+ /* Frame level data */
if (Data)
{
vp8_write_bit(bc, 1);
@@ -1379,7 +1371,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
if (xd->update_mb_segmentation_map)
{
- // Write the probs used to decode the segment id for each macro block.
+ /* Write the probs used to decode the segment id for each mb */
for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
{
int Data = xd->mb_segment_tree_probs[i];
@@ -1395,17 +1387,18 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
}
}
- // Code to determine whether or not to update the scan order.
vp8_write_bit(bc, pc->filter_type);
vp8_write_literal(bc, pc->filter_level, 6);
vp8_write_literal(bc, pc->sharpness_level, 3);
- // Write out loop filter deltas applied at the MB level based on mode or ref frame (if they are enabled).
+ /* Write out loop filter deltas applied at the MB level based on mode
+ * or ref frame (if they are enabled).
+ */
vp8_write_bit(bc, xd->mode_ref_lf_delta_enabled);
if (xd->mode_ref_lf_delta_enabled)
{
- // Do the deltas need to be updated
+ /* Do the deltas need to be updated */
int send_update = xd->mode_ref_lf_delta_update
|| cpi->oxcf.error_resilient_mode;
@@ -1414,12 +1407,12 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
{
int Data;
- // Send update
+ /* Send update */
for (i = 0; i < MAX_REF_LF_DELTAS; i++)
{
Data = xd->ref_lf_deltas[i];
- // Frame level data
+ /* Frame level data */
if (xd->ref_lf_deltas[i] != xd->last_ref_lf_deltas[i]
|| cpi->oxcf.error_resilient_mode)
{
@@ -1429,20 +1422,20 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
if (Data > 0)
{
vp8_write_literal(bc, (Data & 0x3F), 6);
- vp8_write_bit(bc, 0); // sign
+ vp8_write_bit(bc, 0); /* sign */
}
else
{
Data = -Data;
vp8_write_literal(bc, (Data & 0x3F), 6);
- vp8_write_bit(bc, 1); // sign
+ vp8_write_bit(bc, 1); /* sign */
}
}
else
vp8_write_bit(bc, 0);
}
- // Send update
+ /* Send update */
for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
{
Data = xd->mode_lf_deltas[i];
@@ -1456,13 +1449,13 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
if (Data > 0)
{
vp8_write_literal(bc, (Data & 0x3F), 6);
- vp8_write_bit(bc, 0); // sign
+ vp8_write_bit(bc, 0); /* sign */
}
else
{
Data = -Data;
vp8_write_literal(bc, (Data & 0x3F), 6);
- vp8_write_bit(bc, 1); // sign
+ vp8_write_bit(bc, 1); /* sign */
}
}
else
@@ -1471,34 +1464,42 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
}
}
- //signal here is multi token partition is enabled
+ /* signal here is multi token partition is enabled */
vp8_write_literal(bc, pc->multi_token_partition, 2);
- // Frame Qbaseline quantizer index
+ /* Frame Qbaseline quantizer index */
vp8_write_literal(bc, pc->base_qindex, 7);
- // Transmit Dc, Second order and Uv quantizer delta information
+ /* Transmit Dc, Second order and Uv quantizer delta information */
put_delta_q(bc, pc->y1dc_delta_q);
put_delta_q(bc, pc->y2dc_delta_q);
put_delta_q(bc, pc->y2ac_delta_q);
put_delta_q(bc, pc->uvdc_delta_q);
put_delta_q(bc, pc->uvac_delta_q);
- // When there is a key frame all reference buffers are updated using the new key frame
+ /* When there is a key frame all reference buffers are updated using
+ * the new key frame
+ */
if (pc->frame_type != KEY_FRAME)
{
- // Should the GF or ARF be updated using the transmitted frame or buffer
+ /* Should the GF or ARF be updated using the transmitted frame
+ * or buffer
+ */
vp8_write_bit(bc, pc->refresh_golden_frame);
vp8_write_bit(bc, pc->refresh_alt_ref_frame);
- // If not being updated from current frame should either GF or ARF be updated from another buffer
+ /* If not being updated from current frame should either GF or ARF
+ * be updated from another buffer
+ */
if (!pc->refresh_golden_frame)
vp8_write_literal(bc, pc->copy_buffer_to_gf, 2);
if (!pc->refresh_alt_ref_frame)
vp8_write_literal(bc, pc->copy_buffer_to_arf, 2);
- // Indicate reference frame sign bias for Golden and ARF frames (always 0 for last frame buffer)
+ /* Indicate reference frame sign bias for Golden and ARF frames
+ * (always 0 for last frame buffer)
+ */
vp8_write_bit(bc, pc->ref_frame_sign_bias[GOLDEN_FRAME]);
vp8_write_bit(bc, pc->ref_frame_sign_bias[ALTREF_FRAME]);
}
@@ -1527,14 +1528,14 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
#endif
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state();
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
pack_coef_probs(cpi);
#else
if (pc->refresh_entropy_probs == 0)
{
- // save a copy for later refresh
+ /* save a copy for later refresh */
vpx_memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc));
}
@@ -1545,7 +1546,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
active_section = 2;
#endif
- // Write out the mb_no_coeff_skip flag
+ /* Write out the mb_no_coeff_skip flag */
vp8_write_bit(bc, pc->mb_no_coeff_skip);
if (pc->frame_type == KEY_FRAME)
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index 9756acc7d..0b0a2346a 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -18,7 +18,7 @@
#include "vp8/common/entropy.h"
#include "vpx_ports/mem.h"
-// motion search site
+/* motion search site */
typedef struct
{
MV mv;
@@ -27,11 +27,11 @@ typedef struct
typedef struct block
{
- // 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries
+ /* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */
short *src_diff;
short *coeff;
- // 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries
+ /* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */
short *quant;
short *quant_fast;
unsigned char *quant_shift;
@@ -39,7 +39,7 @@ typedef struct block
short *zrun_zbin_boost;
short *round;
- // Zbin Over Quant value
+ /* Zbin Over Quant value */
short zbin_extra;
unsigned char **base_src;
@@ -59,12 +59,12 @@ typedef struct
typedef struct macroblock
{
- DECLARE_ALIGNED(16, short, src_diff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
- DECLARE_ALIGNED(16, short, coeff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
+ DECLARE_ALIGNED(16, short, src_diff[400]); /* 25 blocks Y,U,V,Y2 */
+ DECLARE_ALIGNED(16, short, coeff[400]); /* 25 blocks Y,U,V,Y2 */
DECLARE_ALIGNED(16, unsigned char, thismb[256]);
unsigned char *thismb_ptr;
- // 16 Y blocks, 4 U blocks, 4 V blocks, 1 DC 2nd order block each with 16 entries
+ /* 16 Y, 4 U, 4 V, 1 DC 2nd order block */
BLOCK block[25];
YV12_BUFFER_CONFIG src;
@@ -99,8 +99,9 @@ typedef struct macroblock
int (*token_costs)[COEF_BANDS][PREV_COEF_CONTEXTS]
[MAX_ENTROPY_TOKENS];
- // These define limits to motion vector components to prevent
- // them from extending outside the UMV borders
+ /* These define limits to motion vector components to prevent
+ * them from extending outside the UMV borders.
+ */
int mv_col_min;
int mv_col_max;
int mv_row_min;
@@ -110,7 +111,6 @@ typedef struct macroblock
unsigned int encode_breakout;
- //char * gf_active_ptr;
signed char *gf_active_ptr;
unsigned char *active_ptr;
diff --git a/vp8/encoder/boolhuff.h b/vp8/encoder/boolhuff.h
index fb6cbafd3..830906306 100644
--- a/vp8/encoder/boolhuff.h
+++ b/vp8/encoder/boolhuff.h
@@ -32,7 +32,7 @@ typedef struct
unsigned char *buffer_end;
struct vpx_internal_error_info *error;
- // Variables used to track bit costs without outputing to the bitstream
+ /* Variables used to track bit costs without outputing to the bitstream */
unsigned int measure_cost;
unsigned long bit_counter;
} BOOL_CODER;
diff --git a/vp8/encoder/denoising.c b/vp8/encoder/denoising.c
index f39239656..6bdd5c26e 100644
--- a/vp8/encoder/denoising.c
+++ b/vp8/encoder/denoising.c
@@ -16,22 +16,26 @@
#include "vpx_rtcd.h"
static const unsigned int NOISE_MOTION_THRESHOLD = 25 * 25;
-// SSE_DIFF_THRESHOLD is selected as ~95% confidence assuming var(noise) ~= 100.
+/* SSE_DIFF_THRESHOLD is selected as ~95% confidence assuming
+ * var(noise) ~= 100.
+ */
static const unsigned int SSE_DIFF_THRESHOLD = 16 * 16 * 20;
static const unsigned int SSE_THRESHOLD = 16 * 16 * 40;
-// The filtering coefficients used for denoizing are adjusted for static
-// blocks, or blocks with very small motion vectors. This is done through
-// the motion magnitude parameter.
-//
-// There are currently 2048 possible mapping from absolute difference to
-// filter coefficient depending on the motion magnitude. Each mapping is
-// in a LUT table. All these tables are staticly allocated but they are only
-// filled on their first use.
-//
-// Each entry is a pair of 16b values, the coefficient and its complement
-// to 256. Each of these value should only be 8b but they are 16b wide to
-// avoid slow partial register manipulations.
+/*
+ * The filtering coefficients used for denoizing are adjusted for static
+ * blocks, or blocks with very small motion vectors. This is done through
+ * the motion magnitude parameter.
+ *
+ * There are currently 2048 possible mapping from absolute difference to
+ * filter coefficient depending on the motion magnitude. Each mapping is
+ * in a LUT table. All these tables are staticly allocated but they are only
+ * filled on their first use.
+ *
+ * Each entry is a pair of 16b values, the coefficient and its complement
+ * to 256. Each of these value should only be 8b but they are 16b wide to
+ * avoid slow partial register manipulations.
+ */
enum {num_motion_magnitude_adjustments = 2048};
static union coeff_pair filter_coeff_LUT[num_motion_magnitude_adjustments][256];
@@ -100,7 +104,7 @@ int vp8_denoiser_filter_c(YV12_BUFFER_CONFIG *mc_running_avg,
for (r = 0; r < 16; ++r)
{
- // Calculate absolute differences
+ /* Calculate absolute differences */
unsigned char abs_diff[16];
union coeff_pair filter_coefficient[16];
@@ -112,13 +116,13 @@ int vp8_denoiser_filter_c(YV12_BUFFER_CONFIG *mc_running_avg,
abs_diff[c] = absdiff;
}
- // Use LUT to get filter coefficients (two 16b value; f and 256-f)
+ /* Use LUT to get filter coefficients (two 16b value; f and 256-f) */
for (c = 0; c < 16; ++c)
{
filter_coefficient[c] = LUT[abs_diff[c]];
}
- // Filtering...
+ /* Filtering... */
for (c = 0; c < 16; ++c)
{
const uint16_t state = (uint16_t)(mc_running_avg_y[c]);
@@ -128,10 +132,11 @@ int vp8_denoiser_filter_c(YV12_BUFFER_CONFIG *mc_running_avg,
filter_coefficient[c].as_short[1] * sample + 128) >> 8;
}
- // Depending on the magnitude of the difference between the signal and
- // filtered version, either replace the signal by the filtered one or
- // update the filter state with the signal when the change in a pixel
- // isn't classified as noise.
+ /* Depending on the magnitude of the difference between the signal and
+ * filtered version, either replace the signal by the filtered one or
+ * update the filter state with the signal when the change in a pixel
+ * isn't classified as noise.
+ */
for (c = 0; c < 16; ++c)
{
const int diff = sig[c] - running_avg_y[c];
@@ -148,7 +153,7 @@ int vp8_denoiser_filter_c(YV12_BUFFER_CONFIG *mc_running_avg,
}
}
- // Update pointers for next iteration.
+ /* Update pointers for next iteration. */
sig += sig_stride;
filtered += 16;
mc_running_avg_y += mc_avg_y_stride;
@@ -228,7 +233,6 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
enum vp8_denoiser_decision decision = FILTER_BLOCK;
- // Motion compensate the running average.
if (zero_frame)
{
YV12_BUFFER_CONFIG *src = &denoiser->yv12_running_avg[frame];
@@ -243,7 +247,7 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
saved_mbmi = *mbmi;
- // Use the best MV for the compensation.
+ /* Use the best MV for the compensation. */
mbmi->ref_frame = x->best_reference_frame;
mbmi->mode = x->best_sse_inter_mode;
mbmi->mv = x->best_sse_mv;
@@ -255,11 +259,14 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
(mv_row *mv_row + mv_col *mv_col <= NOISE_MOTION_THRESHOLD &&
sse_diff < SSE_DIFF_THRESHOLD))
{
- // Handle intra blocks as referring to last frame with zero motion
- // and let the absolute pixel difference affect the filter factor.
- // Also consider small amount of motion as being random walk due to
- // noise, if it doesn't mean that we get a much bigger error.
- // Note that any changes to the mode info only affects the denoising.
+ /*
+ * Handle intra blocks as referring to last frame with zero motion
+ * and let the absolute pixel difference affect the filter factor.
+ * Also consider small amount of motion as being random walk due
+ * to noise, if it doesn't mean that we get a much bigger error.
+ * Note that any changes to the mode info only affects the
+ * denoising.
+ */
mbmi->ref_frame =
x->best_zeromv_reference_frame;
@@ -275,11 +282,11 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
saved_pre = filter_xd->pre;
saved_dst = filter_xd->dst;
- // Compensate the running average.
+ /* Compensate the running average. */
filter_xd->pre.y_buffer = src->y_buffer + recon_yoffset;
filter_xd->pre.u_buffer = src->u_buffer + recon_uvoffset;
filter_xd->pre.v_buffer = src->v_buffer + recon_uvoffset;
- // Write the compensated running average to the destination buffer.
+ /* Write the compensated running average to the destination buffer. */
filter_xd->dst.y_buffer = dst->y_buffer + recon_yoffset;
filter_xd->dst.u_buffer = dst->u_buffer + recon_uvoffset;
filter_xd->dst.v_buffer = dst->v_buffer + recon_uvoffset;
@@ -314,7 +321,7 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
if (decision == FILTER_BLOCK)
{
- // Filter.
+ /* Filter. */
decision = vp8_denoiser_filter(&denoiser->yv12_mc_running_avg,
&denoiser->yv12_running_avg[LAST_FRAME],
x,
@@ -323,8 +330,9 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
}
if (decision == COPY_BLOCK)
{
- // No filtering of this block; it differs too much from the predictor,
- // or the motion vector magnitude is considered too big.
+ /* No filtering of this block; it differs too much from the predictor,
+ * or the motion vector magnitude is considered too big.
+ */
vp8_copy_mem16x16(
x->thismb, 16,
denoiser->yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset,
diff --git a/vp8/encoder/denoising.h b/vp8/encoder/denoising.h
index dc78e659d..2f5fbff70 100644
--- a/vp8/encoder/denoising.h
+++ b/vp8/encoder/denoising.h
@@ -19,7 +19,7 @@
enum vp8_denoiser_decision
{
COPY_BLOCK,
- FILTER_BLOCK,
+ FILTER_BLOCK
};
typedef struct vp8_denoiser
@@ -47,4 +47,4 @@ union coeff_pair
union coeff_pair *vp8_get_filter_coeff_LUT(unsigned int motion_magnitude);
-#endif // VP8_ENCODER_DENOISING_H_
+#endif /* VP8_ENCODER_DENOISING_H_ */
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index dce37a7ee..d9e2822bf 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -77,7 +77,7 @@ static const unsigned char VP8_VAR_OFFS[16]=
};
-// Original activity measure from Tim T's code.
+/* Original activity measure from Tim T's code. */
static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
{
unsigned int act;
@@ -100,7 +100,7 @@ static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
return act;
}
-// Stub for alternative experimental activity measures.
+/* Stub for alternative experimental activity measures. */
static unsigned int alt_activity_measure( VP8_COMP *cpi,
MACROBLOCK *x, int use_dc_pred )
{
@@ -108,8 +108,9 @@ static unsigned int alt_activity_measure( VP8_COMP *cpi,
}
-// Measure the activity of the current macroblock
-// What we measure here is TBD so abstracted to this function
+/* Measure the activity of the current macroblock
+ * What we measure here is TBD so abstracted to this function
+ */
#define ALT_ACT_MEASURE 1
static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x,
int mb_row, int mb_col)
@@ -120,12 +121,12 @@ static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x,
{
int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
- // Or use and alternative.
+ /* Or use and alternative. */
mb_activity = alt_activity_measure( cpi, x, use_dc_pred );
}
else
{
- // Original activity measure from Tim T's code.
+ /* Original activity measure from Tim T's code. */
mb_activity = tt_activity_measure( cpi, x );
}
@@ -135,36 +136,36 @@ static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x,
return mb_activity;
}
-// Calculate an "average" mb activity value for the frame
+/* Calculate an "average" mb activity value for the frame */
#define ACT_MEDIAN 0
static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
{
#if ACT_MEDIAN
- // Find median: Simple n^2 algorithm for experimentation
+ /* Find median: Simple n^2 algorithm for experimentation */
{
unsigned int median;
unsigned int i,j;
unsigned int * sortlist;
unsigned int tmp;
- // Create a list to sort to
+ /* Create a list to sort to */
CHECK_MEM_ERROR(sortlist,
vpx_calloc(sizeof(unsigned int),
cpi->common.MBs));
- // Copy map to sort list
+ /* Copy map to sort list */
vpx_memcpy( sortlist, cpi->mb_activity_map,
sizeof(unsigned int) * cpi->common.MBs );
- // Ripple each value down to its correct position
+ /* Ripple each value down to its correct position */
for ( i = 1; i < cpi->common.MBs; i ++ )
{
for ( j = i; j > 0; j -- )
{
if ( sortlist[j] < sortlist[j-1] )
{
- // Swap values
+ /* Swap values */
tmp = sortlist[j-1];
sortlist[j-1] = sortlist[j];
sortlist[j] = tmp;
@@ -174,7 +175,7 @@ static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
}
}
- // Even number MBs so estimate median as mean of two either side.
+ /* Even number MBs so estimate median as mean of two either side. */
median = ( 1 + sortlist[cpi->common.MBs >> 1] +
sortlist[(cpi->common.MBs >> 1) + 1] ) >> 1;
@@ -183,14 +184,14 @@ static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
vpx_free(sortlist);
}
#else
- // Simple mean for now
+ /* Simple mean for now */
cpi->activity_avg = (unsigned int)(activity_sum/cpi->common.MBs);
#endif
if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN)
cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
- // Experimental code: return fixed value normalized for several clips
+ /* Experimental code: return fixed value normalized for several clips */
if ( ALT_ACT_MEASURE )
cpi->activity_avg = 100000;
}
@@ -199,7 +200,7 @@ static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
#define OUTPUT_NORM_ACT_STATS 0
#if USE_ACT_INDEX
-// Calculate and activity index for each mb
+/* Calculate and activity index for each mb */
static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
{
VP8_COMMON *const cm = & cpi->common;
@@ -214,19 +215,19 @@ static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
fprintf(f, "\n%12d\n", cpi->activity_avg );
#endif
- // Reset pointers to start of activity map
+ /* Reset pointers to start of activity map */
x->mb_activity_ptr = cpi->mb_activity_map;
- // Calculate normalized mb activity number.
+ /* Calculate normalized mb activity number. */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
- // for each macroblock col in image
+ /* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
- // Read activity from the map
+ /* Read activity from the map */
act = *(x->mb_activity_ptr);
- // Calculate a normalized activity number
+ /* Calculate a normalized activity number */
a = act + 4*cpi->activity_avg;
b = 4*act + cpi->activity_avg;
@@ -238,7 +239,7 @@ static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
#if OUTPUT_NORM_ACT_STATS
fprintf(f, " %6d", *(x->mb_activity_ptr));
#endif
- // Increment activity map pointers
+ /* Increment activity map pointers */
x->mb_activity_ptr++;
}
@@ -255,8 +256,9 @@ static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
}
#endif
-// Loop through all MBs. Note activity of each, average activity and
-// calculate a normalized activity for each
+/* Loop through all MBs. Note activity of each, average activity and
+ * calculate a normalized activity for each
+ */
static void build_activity_map( VP8_COMP *cpi )
{
MACROBLOCK *const x = & cpi->mb;
@@ -273,15 +275,15 @@ static void build_activity_map( VP8_COMP *cpi )
unsigned int mb_activity;
int64_t activity_sum = 0;
- // for each macroblock row in image
+ /* for each macroblock row in image */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
#if ALT_ACT_MEASURE
- // reset above block coeffs
+ /* reset above block coeffs */
xd->up_available = (mb_row != 0);
recon_yoffset = (mb_row * recon_y_stride * 16);
#endif
- // for each macroblock col in image
+ /* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
#if ALT_ACT_MEASURE
@@ -289,48 +291,48 @@ static void build_activity_map( VP8_COMP *cpi )
xd->left_available = (mb_col != 0);
recon_yoffset += 16;
#endif
- //Copy current mb to a buffer
+ /* Copy current mb to a buffer */
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
- // measure activity
+ /* measure activity */
mb_activity = mb_activity_measure( cpi, x, mb_row, mb_col );
- // Keep frame sum
+ /* Keep frame sum */
activity_sum += mb_activity;
- // Store MB level activity details.
+ /* Store MB level activity details. */
*x->mb_activity_ptr = mb_activity;
- // Increment activity map pointer
+ /* Increment activity map pointer */
x->mb_activity_ptr++;
- // adjust to the next column of source macroblocks
+ /* adjust to the next column of source macroblocks */
x->src.y_buffer += 16;
}
- // adjust to the next row of mbs
+ /* adjust to the next row of mbs */
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
#if ALT_ACT_MEASURE
- //extend the recon for intra prediction
+ /* extend the recon for intra prediction */
vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
#endif
}
- // Calculate an "average" MB activity
+ /* Calculate an "average" MB activity */
calc_av_activity(cpi, activity_sum);
#if USE_ACT_INDEX
- // Calculate an activity index number of each mb
+ /* Calculate an activity index number of each mb */
calc_activity_index( cpi, x );
#endif
}
-// Macroblock activity masking
+/* Macroblock activity masking */
void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
{
#if USE_ACT_INDEX
@@ -342,7 +344,7 @@ void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
int64_t b;
int64_t act = *(x->mb_activity_ptr);
- // Apply the masking to the RD multiplier.
+ /* Apply the masking to the RD multiplier. */
a = act + (2*cpi->activity_avg);
b = (2*act) + cpi->activity_avg;
@@ -351,7 +353,7 @@ void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
x->errorperbit += (x->errorperbit==0);
#endif
- // Activity based Zbin adjustment
+ /* Activity based Zbin adjustment */
adjust_act_zbin(cpi, x);
}
@@ -398,7 +400,7 @@ void encode_mb_row(VP8_COMP *cpi,
w = &cpi->bc[1];
#endif
- // reset above block coeffs
+ /* reset above block coeffs */
xd->above_context = cm->above_context;
xd->up_available = (mb_row != 0);
@@ -406,37 +408,41 @@ void encode_mb_row(VP8_COMP *cpi,
recon_uvoffset = (mb_row * recon_uv_stride * 8);
cpi->tplist[mb_row].start = *tp;
- //printf("Main mb_row = %d\n", mb_row);
+ /* printf("Main mb_row = %d\n", mb_row); */
- // Distance of Mb to the top & bottom edges, specified in 1/8th pel
- // units as they are always compared to values that are in 1/8th pel units
+ /* Distance of Mb to the top & bottom edges, specified in 1/8th pel
+ * units as they are always compared to values that are in 1/8th pel
+ */
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
- // Set up limit values for vertical motion vector components
- // to prevent them extending beyond the UMV borders
+ /* Set up limit values for vertical motion vector components
+ * to prevent them extending beyond the UMV borders
+ */
x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
+ (VP8BORDERINPIXELS - 16);
- // Set the mb activity pointer to the start of the row.
+ /* Set the mb activity pointer to the start of the row. */
x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
- // for each macroblock col in image
+ /* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
*tp = cpi->tok;
#endif
- // Distance of Mb to the left & right edges, specified in
- // 1/8th pel units as they are always compared to values
- // that are in 1/8th pel units
+ /* Distance of Mb to the left & right edges, specified in
+ * 1/8th pel units as they are always compared to values
+ * that are in 1/8th pel units
+ */
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
- // Set up limit values for horizontal motion vector components
- // to prevent them extending beyond the UMV borders
+ /* Set up limit values for horizontal motion vector components
+ * to prevent them extending beyond the UMV borders
+ */
x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
+ (VP8BORDERINPIXELS - 16);
@@ -449,13 +455,13 @@ void encode_mb_row(VP8_COMP *cpi,
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
- //Copy current mb to a buffer
+ /* Copy current mb to a buffer */
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
#if CONFIG_MULTITHREAD
if (cpi->b_multi_threaded != 0)
{
- *current_mb_col = mb_col - 1; // set previous MB done
+ *current_mb_col = mb_col - 1; /* set previous MB done */
if ((mb_col & (nsync - 1)) == 0)
{
@@ -471,11 +477,13 @@ void encode_mb_row(VP8_COMP *cpi,
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp8_activity_masking(cpi, x);
- // Is segmentation enabled
- // MB level adjustment to quantizer
+ /* Is segmentation enabled */
+ /* MB level adjustment to quantizer */
if (xd->segmentation_enabled)
{
- // Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
+ /* Code to set segment id in xd->mbmi.segment_id for current MB
+ * (with range checking)
+ */
if (cpi->segmentation_map[map_index+mb_col] <= 3)
xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index+mb_col];
else
@@ -484,7 +492,8 @@ void encode_mb_row(VP8_COMP *cpi,
vp8cx_mb_init_quantizer(cpi, x, 1);
}
else
- xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
+ /* Set to Segment 0 by default */
+ xd->mode_info_context->mbmi.segment_id = 0;
x->active_ptr = cpi->active_map + map_index + mb_col;
@@ -514,21 +523,28 @@ void encode_mb_row(VP8_COMP *cpi,
#endif
- // Count of last ref frame 0,0 usage
+ /* Count of last ref frame 0,0 usage */
if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
cpi->inter_zz_count ++;
- // Special case code for cyclic refresh
- // If cyclic update enabled then copy xd->mbmi.segment_id; (which may have been updated based on mode
- // during vp8cx_encode_inter_macroblock()) back into the global segmentation map
+ /* Special case code for cyclic refresh
+ * If cyclic update enabled then copy xd->mbmi.segment_id; (which
+ * may have been updated based on mode during
+ * vp8cx_encode_inter_macroblock()) back into the global
+ * segmentation map
+ */
if ((cpi->current_layer == 0) &&
(cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled))
{
cpi->segmentation_map[map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
- // If the block has been refreshed mark it as clean (the magnitude of the -ve influences how long it will be before we consider another refresh):
- // Else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0)
- // else mark it as dirty (1).
+ /* If the block has been refreshed mark it as clean (the
+ * magnitude of the -ve influences how long it will be before
+ * we consider another refresh):
+ * Else if it was coded (last frame 0,0) and has not already
+ * been refreshed then mark it as a candidate for cleanup
+ * next time (marked 0) else mark it as dirty (1).
+ */
if (xd->mode_info_context->mbmi.segment_id)
cpi->cyclic_refresh_map[map_index+mb_col] = -1;
else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
@@ -551,13 +567,13 @@ void encode_mb_row(VP8_COMP *cpi,
pack_tokens(w, tp_start, tok_count);
}
#endif
- // Increment pointer into gf usage flags structure.
+ /* Increment pointer into gf usage flags structure. */
x->gf_active_ptr++;
- // Increment the activity mask pointers.
+ /* Increment the activity mask pointers. */
x->mb_activity_ptr++;
- // adjust to the next column of macroblocks
+ /* adjust to the next column of macroblocks */
x->src.y_buffer += 16;
x->src.u_buffer += 8;
x->src.v_buffer += 8;
@@ -565,16 +581,16 @@ void encode_mb_row(VP8_COMP *cpi,
recon_yoffset += 16;
recon_uvoffset += 8;
- // Keep track of segment usage
+ /* Keep track of segment usage */
segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
- // skip to next mb
+ /* skip to next mb */
xd->mode_info_context++;
x->partition_info++;
xd->above_context++;
}
- //extend the recon for intra prediction
+ /* extend the recon for intra prediction */
vp8_extend_mb_row( &cm->yv12_fb[dst_fb_idx],
xd->dst.y_buffer + 16,
xd->dst.u_buffer + 8,
@@ -585,7 +601,7 @@ void encode_mb_row(VP8_COMP *cpi,
*current_mb_col = rightmost_col;
#endif
- // this is to account for the border
+ /* this is to account for the border */
xd->mode_info_context++;
x->partition_info++;
}
@@ -596,10 +612,10 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi)
VP8_COMMON *const cm = & cpi->common;
MACROBLOCKD *const xd = & x->e_mbd;
- // GF active flags data structure
+ /* GF active flags data structure */
x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
- // Activity map pointer
+ /* Activity map pointer */
x->mb_activity_ptr = cpi->mb_activity_map;
x->act_zbin_adj = 0;
@@ -611,16 +627,16 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi)
xd->frame_type = cm->frame_type;
- // reset intra mode contexts
+ /* reset intra mode contexts */
if (cm->frame_type == KEY_FRAME)
vp8_init_mbmode_probs(cm);
- // Copy data over into macro block data structures.
+ /* Copy data over into macro block data structures. */
x->src = * cpi->Source;
xd->pre = cm->yv12_fb[cm->lst_fb_idx];
xd->dst = cm->yv12_fb[cm->new_fb_idx];
- // set up frame for intra coded blocks
+ /* set up frame for intra coded blocks */
vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
vp8_build_block_offsets(x);
@@ -643,7 +659,9 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi)
vpx_memset(cm->above_context, 0,
sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
- // Special case treatment when GF and ARF are not sensible options for reference
+ /* Special case treatment when GF and ARF are not sensible options
+ * for reference
+ */
if (cpi->ref_frame_flags == VP8_LAST_FRAME)
vp8_calc_ref_frame_costs(x->ref_frame_cost,
cpi->prob_intra_coded,255,128);
@@ -676,7 +694,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
int segment_counts[MAX_MB_SEGMENTS];
int totalrate;
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
- BOOL_CODER * bc = &cpi->bc[1]; // bc[0] is for control partition
+ BOOL_CODER * bc = &cpi->bc[1]; /* bc[0] is for control partition */
const int num_part = (1 << cm->multi_token_partition);
#endif
@@ -691,7 +709,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
vp8_auto_select_speed(cpi);
}
- // Functions setup for all frame types so we can use MC in AltRef
+ /* Functions setup for all frame types so we can use MC in AltRef */
if(!cm->use_bilinear_mc_filter)
{
xd->subpixel_predict = vp8_sixtap_predict4x4;
@@ -707,7 +725,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
xd->subpixel_predict16x16 = vp8_bilinear_predict16x16;
}
- // Reset frame count of inter 0,0 motion vector usage.
+ /* Reset frame count of inter 0,0 motion vector usage. */
cpi->inter_zz_count = 0;
cpi->prediction_error = 0;
@@ -716,7 +734,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
cpi->tok_count = 0;
#if 0
- // Experimental code
+ /* Experimental code */
cpi->frame_distortion = 0;
cpi->last_mb_distortion = 0;
#endif
@@ -736,14 +754,14 @@ void vp8_encode_frame(VP8_COMP *cpi)
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
{
- // Initialize encode frame context.
+ /* Initialize encode frame context. */
init_encode_frame_mb_context(cpi);
- // Build a frame level activity map
+ /* Build a frame level activity map */
build_activity_map(cpi);
}
- // re-init encode frame context.
+ /* re-init encode frame context. */
init_encode_frame_mb_context(cpi);
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
@@ -790,7 +808,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
- // adjust to the next row of mbs
+ /* adjust to the next row of mbs */
x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols;
x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
@@ -836,7 +854,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
else
#endif
{
- // for each macroblock row in image
+ /* for each macroblock row in image */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
vp8_zero(cm->left_context)
@@ -847,7 +865,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
- // adjust to the next row of mbs
+ /* adjust to the next row of mbs */
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
@@ -873,12 +891,13 @@ void vp8_encode_frame(VP8_COMP *cpi)
// Work out the segment probabilities if segmentation is enabled
- if (xd->segmentation_enabled)
+ // and needs to be updated
+ if (xd->segmentation_enabled && xd->update_mb_segmentation_map)
{
int tot_count;
int i;
- // Set to defaults
+ /* Set to defaults */
vpx_memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs));
tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];
@@ -899,7 +918,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
if (tot_count > 0)
xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
- // Zero probabilities not allowed
+ /* Zero probabilities not allowed */
for (i = 0; i < MB_FEATURE_TREE_PROBS; i ++)
{
if (xd->mb_segment_tree_probs[i] == 0)
@@ -908,10 +927,10 @@ void vp8_encode_frame(VP8_COMP *cpi)
}
}
- // 256 rate units to the bit
- cpi->projected_frame_size = totalrate >> 8; // projected_frame_size in units of BYTES
+ /* projected_frame_size in units of BYTES */
+ cpi->projected_frame_size = totalrate >> 8;
- // Make a note of the percentage MBs coded Intra.
+ /* Make a note of the percentage MBs coded Intra. */
if (cm->frame_type == KEY_FRAME)
{
cpi->this_frame_percent_intra = 100;
@@ -961,9 +980,11 @@ void vp8_encode_frame(VP8_COMP *cpi)
#endif
#if ! CONFIG_REALTIME_ONLY
- // Adjust the projected reference frame usage probability numbers to reflect
- // what we have just seen. This may be useful when we make multiple iterations
- // of the recode loop rather than continuing to use values from the previous frame.
+ /* Adjust the projected reference frame usage probability numbers to
+ * reflect what we have just seen. This may be useful when we make
+ * multiple iterations of the recode loop rather than continuing to use
+ * values from the previous frame.
+ */
if ((cm->frame_type != KEY_FRAME) && ((cpi->oxcf.number_of_layers > 1) ||
(!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame)))
{
@@ -1017,16 +1038,13 @@ void vp8_build_block_offsets(MACROBLOCK *x)
vp8_build_block_doffsets(&x->e_mbd);
- // y blocks
+ /* y blocks */
x->thismb_ptr = &x->thismb[0];
for (br = 0; br < 4; br++)
{
for (bc = 0; bc < 4; bc++)
{
BLOCK *this_block = &x->block[block];
- //this_block->base_src = &x->src.y_buffer;
- //this_block->src_stride = x->src.y_stride;
- //this_block->src = 4 * br * this_block->src_stride + 4 * bc;
this_block->base_src = &x->thismb_ptr;
this_block->src_stride = 16;
this_block->src = 4 * br * 16 + 4 * bc;
@@ -1034,7 +1052,7 @@ void vp8_build_block_offsets(MACROBLOCK *x)
}
}
- // u blocks
+ /* u blocks */
for (br = 0; br < 2; br++)
{
for (bc = 0; bc < 2; bc++)
@@ -1047,7 +1065,7 @@ void vp8_build_block_offsets(MACROBLOCK *x)
}
}
- // v blocks
+ /* v blocks */
for (br = 0; br < 2; br++)
{
for (bc = 0; bc < 2; bc++)
@@ -1092,8 +1110,9 @@ static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
}
-// Experimental stub function to create a per MB zbin adjustment based on
-// some previously calculated measure of MB activity.
+/* Experimental stub function to create a per MB zbin adjustment based on
+ * some previously calculated measure of MB activity.
+ */
static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
{
#if USE_ACT_INDEX
@@ -1103,7 +1122,7 @@ static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
int64_t b;
int64_t act = *(x->mb_activity_ptr);
- // Apply the masking to the RD multiplier.
+ /* Apply the masking to the RD multiplier. */
a = act + 4*cpi->activity_avg;
b = 4*act + cpi->activity_avg;
@@ -1176,7 +1195,7 @@ int vp8cx_encode_inter_macroblock
x->encode_breakout = cpi->oxcf.encode_breakout;
#if CONFIG_TEMPORAL_DENOISING
- // Reset the best sse mode/mv for each macroblock.
+ /* Reset the best sse mode/mv for each macroblock. */
x->best_reference_frame = INTRA_FRAME;
x->best_zeromv_reference_frame = INTRA_FRAME;
x->best_sse_inter_mode = 0;
@@ -1223,23 +1242,23 @@ int vp8cx_encode_inter_macroblock
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
{
- // Adjust the zbin based on this MB rate.
+ /* Adjust the zbin based on this MB rate. */
adjust_act_zbin( cpi, x );
}
#if 0
- // Experimental RD code
+ /* Experimental RD code */
cpi->frame_distortion += distortion;
cpi->last_mb_distortion = distortion;
#endif
- // MB level adjutment to quantizer setup
+ /* MB level adjustment to quantizer setup */
if (xd->segmentation_enabled)
{
- // If cyclic update enabled
+ /* If cyclic update enabled */
if (cpi->current_layer == 0 && cpi->cyclic_refresh_mode_enabled)
{
- // Clear segment_id back to 0 if not coded (last frame 0,0)
+ /* Clear segment_id back to 0 if not coded (last frame 0,0) */
if ((xd->mode_info_context->mbmi.segment_id == 1) &&
((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
{
@@ -1252,8 +1271,9 @@ int vp8cx_encode_inter_macroblock
}
{
- // Experimental code. Special case for gf and arf zeromv modes.
- // Increase zbin size to supress noise
+ /* Experimental code. Special case for gf and arf zeromv modes.
+ * Increase zbin size to suppress noise
+ */
cpi->zbin_mode_boost = 0;
if (cpi->zbin_mode_boost_enabled)
{
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index f89e4f7a4..7d494f2c6 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -137,10 +137,10 @@ void vp8_transform_intra_mby(MACROBLOCK *x)
&x->block[i].coeff[0], 32);
}
- // build dc block from 16 y dc values
+ /* build dc block from 16 y dc values */
build_dcblock(x);
- // do 2nd order transform on the dc block
+ /* do 2nd order transform on the dc block */
x->short_walsh4x4(&x->block[24].src_diff[0],
&x->block[24].coeff[0], 8);
@@ -157,7 +157,7 @@ static void transform_mb(MACROBLOCK *x)
&x->block[i].coeff[0], 32);
}
- // build dc block from 16 y dc values
+ /* build dc block from 16 y dc values */
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
build_dcblock(x);
@@ -167,7 +167,7 @@ static void transform_mb(MACROBLOCK *x)
&x->block[i].coeff[0], 16);
}
- // do 2nd order transform on the dc block
+ /* do 2nd order transform on the dc block */
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
x->short_walsh4x4(&x->block[24].src_diff[0],
&x->block[24].coeff[0], 8);
@@ -185,7 +185,7 @@ static void transform_mby(MACROBLOCK *x)
&x->block[i].coeff[0], 32);
}
- // build dc block from 16 y dc values
+ /* build dc block from 16 y dc values */
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
{
build_dcblock(x);
@@ -208,7 +208,7 @@ struct vp8_token_state{
short qc;
};
-// TODO: experiments to find optimal multiple numbers
+/* TODO: experiments to find optimal multiple numbers */
#define Y1_RD_MULT 4
#define UV_RD_MULT 2
#define Y2_RD_MULT 16
diff --git a/vp8/encoder/encodemv.c b/vp8/encoder/encodemv.c
index 0145f6d20..7d8c84dd3 100644
--- a/vp8/encoder/encodemv.c
+++ b/vp8/encoder/encodemv.c
@@ -29,15 +29,15 @@ static void encode_mvcomponent(
const vp8_prob *p = mvc->prob;
const int x = v < 0 ? -v : v;
- if (x < mvnum_short) // Small
+ if (x < mvnum_short) /* Small */
{
vp8_write(w, 0, p [mvpis_short]);
vp8_treed_write(w, vp8_small_mvtree, p + MVPshort, x, 3);
if (!x)
- return; // no sign bit
+ return; /* no sign bit */
}
- else // Large
+ else /* Large */
{
int i = 0;
@@ -100,7 +100,7 @@ void vp8_encode_motion_vector(vp8_writer *w, const MV *mv, const MV_CONTEXT *mvc
static unsigned int cost_mvcomponent(const int v, const struct mv_context *mvc)
{
const vp8_prob *p = mvc->prob;
- const int x = v; //v<0? -v:v;
+ const int x = v;
unsigned int cost;
if (x < mvnum_short)
@@ -132,12 +132,12 @@ static unsigned int cost_mvcomponent(const int v, const struct mv_context *mvc)
cost += vp8_cost_bit(p [MVPbits + 3], (x >> 3) & 1);
}
- return cost; // + vp8_cost_bit( p [MVPsign], v < 0);
+ return cost; /* + vp8_cost_bit( p [MVPsign], v < 0); */
}
void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int mvc_flag[2])
{
- int i = 1; //-mv_max;
+ int i = 1;
unsigned int cost0 = 0;
unsigned int cost1 = 0;
@@ -151,7 +151,6 @@ void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int m
do
{
- //mvcost [0] [i] = cost_mvcomponent( i, &mvc[0]);
cost0 = cost_mvcomponent(i, &mvc[0]);
mvcost [0] [i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign]);
@@ -168,7 +167,6 @@ void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int m
do
{
- //mvcost [1] [i] = cost_mvcomponent( i, mvc[1]);
cost1 = cost_mvcomponent(i, &mvc[1]);
mvcost [1] [i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign]);
@@ -179,10 +177,10 @@ void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int m
}
-// Motion vector probability table update depends on benefit.
-// Small correction allows for the fact that an update to an MV probability
-// may have benefit in subsequent frames as well as the current one.
-
+/* Motion vector probability table update depends on benefit.
+ * Small correction allows for the fact that an update to an MV probability
+ * may have benefit in subsequent frames as well as the current one.
+ */
#define MV_PROB_UPDATE_CORRECTION -1
@@ -254,22 +252,22 @@ static void write_component_probs(
vp8_zero(short_bct)
- //j=0
+ /* j=0 */
{
const int c = events [mv_max];
- is_short_ct [0] += c; // Short vector
- short_ct [0] += c; // Magnitude distribution
+ is_short_ct [0] += c; /* Short vector */
+ short_ct [0] += c; /* Magnitude distribution */
}
- //j: 1 ~ mv_max (1023)
+ /* j: 1 ~ mv_max (1023) */
{
int j = 1;
do
{
- const int c1 = events [mv_max + j]; //positive
- const int c2 = events [mv_max - j]; //negative
+ const int c1 = events [mv_max + j]; /* positive */
+ const int c2 = events [mv_max - j]; /* negative */
const int c = c1 + c2;
int a = j;
@@ -278,13 +276,13 @@ static void write_component_probs(
if (a < mvnum_short)
{
- is_short_ct [0] += c; // Short vector
- short_ct [a] += c; // Magnitude distribution
+ is_short_ct [0] += c; /* Short vector */
+ short_ct [a] += c; /* Magnitude distribution */
}
else
{
int k = mvlong_width - 1;
- is_short_ct [1] += c; // Long vector
+ is_short_ct [1] += c; /* Long vector */
/* bit 3 not always encoded. */
do
@@ -296,43 +294,6 @@ static void write_component_probs(
while (++j <= mv_max);
}
- /*
- {
- int j = -mv_max;
- do
- {
-
- const int c = events [mv_max + j];
- int a = j;
-
- if( j < 0)
- {
- sign_ct [1] += c;
- a = -j;
- }
- else if( j)
- sign_ct [0] += c;
-
- if( a < mvnum_short)
- {
- is_short_ct [0] += c; // Short vector
- short_ct [a] += c; // Magnitude distribution
- }
- else
- {
- int k = mvlong_width - 1;
- is_short_ct [1] += c; // Long vector
-
- // bit 3 not always encoded.
-
- do
- bit_ct [k] [(a >> k) & 1] += c;
- while( --k >= 0);
- }
- } while( ++j <= mv_max);
- }
- */
-
calc_prob(Pnew + mvpis_short, is_short_ct);
calc_prob(Pnew + MVPsign, sign_ct);
diff --git a/vp8/encoder/ethreading.c b/vp8/encoder/ethreading.c
index 57d578346..40adc3514 100644
--- a/vp8/encoder/ethreading.c
+++ b/vp8/encoder/ethreading.c
@@ -39,7 +39,7 @@ static THREAD_FUNCTION thread_loopfilter(void *p_data)
if (sem_wait(&cpi->h_event_start_lpf) == 0)
{
- if (cpi->b_multi_threaded == 0) // we're shutting down
+ if (cpi->b_multi_threaded == 0) /* we're shutting down */
break;
vp8_loopfilter_frame(cpi, cm);
@@ -60,14 +60,12 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
ENTROPY_CONTEXT_PLANES mb_row_left_context;
const int nsync = cpi->mt_sync_range;
- //printf("Started thread %d\n", ithread);
while (1)
{
if (cpi->b_multi_threaded == 0)
break;
- //if(WaitForSingleObject(cpi->h_event_mbrencoding[ithread], INFINITE) == WAIT_OBJECT_0)
if (sem_wait(&cpi->h_event_start_encoding[ithread]) == 0)
{
VP8_COMMON *cm = &cpi->common;
@@ -83,7 +81,7 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
int *segment_counts = mbri->segment_counts;
int *totalrate = &mbri->totalrate;
- if (cpi->b_multi_threaded == 0) // we're shutting down
+ if (cpi->b_multi_threaded == 0) /* we're shutting down */
break;
for (mb_row = ithread + 1; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1))
@@ -108,7 +106,7 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
- // reset above block coeffs
+ /* reset above block coeffs */
xd->above_context = cm->above_context;
xd->left_context = &mb_row_left_context;
@@ -118,10 +116,10 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
recon_yoffset = (mb_row * recon_y_stride * 16);
recon_uvoffset = (mb_row * recon_uv_stride * 8);
- // Set the mb activity pointer to the start of the row.
+ /* Set the mb activity pointer to the start of the row. */
x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
- // for each macroblock col in image
+ /* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
*current_mb_col = mb_col - 1;
@@ -139,14 +137,18 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
tp = tp_start;
#endif
- // Distance of Mb to the various image edges.
- // These specified to 8th pel as they are always compared to values that are in 1/8th pel units
+ /* Distance of Mb to the various image edges.
+ * These specified to 8th pel as they are always compared
+ * to values that are in 1/8th pel units
+ */
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
- // Set up limit values for motion vectors used to prevent them extending outside the UMV borders
+ /* Set up limit values for motion vectors used to prevent
+ * them extending outside the UMV borders
+ */
x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
@@ -160,17 +162,19 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
- //Copy current mb to a buffer
+ /* Copy current mb to a buffer */
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp8_activity_masking(cpi, x);
- // Is segmentation enabled
- // MB level adjustment to quantizer
+ /* Is segmentation enabled */
+ /* MB level adjustment to quantizer */
if (xd->segmentation_enabled)
{
- // Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
+ /* Code to set segment id in xd->mbmi.segment_id for
+ * current MB (with range checking)
+ */
if (cpi->segmentation_map[map_index + mb_col] <= 3)
xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index + mb_col];
else
@@ -179,7 +183,8 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
vp8cx_mb_init_quantizer(cpi, x, 1);
}
else
- xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
+ /* Set to Segment 0 by default */
+ xd->mode_info_context->mbmi.segment_id = 0;
x->active_ptr = cpi->active_map + map_index + mb_col;
@@ -209,21 +214,30 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
#endif
- // Count of last ref frame 0,0 usage
+ /* Count of last ref frame 0,0 usage */
if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
cpi->inter_zz_count++;
- // Special case code for cyclic refresh
- // If cyclic update enabled then copy xd->mbmi.segment_id; (which may have been updated based on mode
- // during vp8cx_encode_inter_macroblock()) back into the global segmentation map
+ /* Special case code for cyclic refresh
+ * If cyclic update enabled then copy
+ * xd->mbmi.segment_id; (which may have been updated
+ * based on mode during
+ * vp8cx_encode_inter_macroblock()) back into the
+ * global segmentation map
+ */
if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
{
const MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
cpi->segmentation_map[map_index + mb_col] = mbmi->segment_id;
- // If the block has been refreshed mark it as clean (the magnitude of the -ve influences how long it will be before we consider another refresh):
- // Else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0)
- // else mark it as dirty (1).
+ /* If the block has been refreshed mark it as clean
+ * (the magnitude of the -ve influences how long it
+ * will be before we consider another refresh):
+ * Else if it was coded (last frame 0,0) and has
+ * not already been refreshed then mark it as a
+ * candidate for cleanup next time (marked 0) else
+ * mark it as dirty (1).
+ */
if (mbmi->segment_id)
cpi->cyclic_refresh_map[map_index + mb_col] = -1;
else if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame == LAST_FRAME))
@@ -246,13 +260,13 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
#else
cpi->tplist[mb_row].stop = tp;
#endif
- // Increment pointer into gf usage flags structure.
+ /* Increment pointer into gf usage flags structure. */
x->gf_active_ptr++;
- // Increment the activity mask pointers.
+ /* Increment the activity mask pointers. */
x->mb_activity_ptr++;
- // adjust to the next column of macroblocks
+ /* adjust to the next column of macroblocks */
x->src.y_buffer += 16;
x->src.u_buffer += 8;
x->src.v_buffer += 8;
@@ -260,10 +274,10 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
recon_yoffset += 16;
recon_uvoffset += 8;
- // Keep track of segment usage
+ /* Keep track of segment usage */
segment_counts[xd->mode_info_context->mbmi.segment_id]++;
- // skip to next mb
+ /* skip to next mb */
xd->mode_info_context++;
x->partition_info++;
xd->above_context++;
@@ -276,7 +290,7 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
*current_mb_col = mb_col + nsync;
- // this is to account for the border
+ /* this is to account for the border */
xd->mode_info_context++;
x->partition_info++;
@@ -296,7 +310,7 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
}
}
- //printf("exit thread %d\n", ithread);
+ /* printf("exit thread %d\n", ithread); */
return 0;
}
@@ -550,14 +564,13 @@ void vp8cx_remove_encoder_threads(VP8_COMP *cpi)
{
if (cpi->b_multi_threaded)
{
- //shutdown other threads
+ /* shutdown other threads */
cpi->b_multi_threaded = 0;
{
int i;
for (i = 0; i < cpi->encoding_thread_count; i++)
{
- //SetEvent(cpi->h_event_mbrencoding[i]);
sem_post(&cpi->h_event_start_encoding[i]);
pthread_join(cpi->h_encoding_thread[i], 0);
@@ -572,7 +585,7 @@ void vp8cx_remove_encoder_threads(VP8_COMP *cpi)
sem_destroy(&cpi->h_event_end_lpf);
sem_destroy(&cpi->h_event_start_lpf);
- //free thread related resources
+ /* free thread related resources */
vpx_free(cpi->h_event_start_encoding);
vpx_free(cpi->h_encoding_thread);
vpx_free(cpi->mb_row_ei);
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 36483d606..a6db5af28 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -30,14 +30,12 @@
#include "encodemv.h"
#include "encodeframe.h"
-//#define OUTPUT_FPF 1
+/* #define OUTPUT_FPF 1 */
extern void vp8cx_frame_init_quantizer(VP8_COMP *cpi);
extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv);
extern void vp8_alloc_compressor_data(VP8_COMP *cpi);
-//#define GFQ_ADJUSTMENT (40 + ((15*Q)/10))
-//#define GFQ_ADJUSTMENT (80 + ((15*Q)/10))
#define GFQ_ADJUSTMENT vp8_gf_boost_qadjustment[Q]
extern int vp8_kf_boost_qadjustment[QINDEX_RANGE];
@@ -77,7 +75,9 @@ static const int cq_level[QINDEX_RANGE] =
static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame);
-// Resets the first pass file to the given position using a relative seek from the current position
+/* Resets the first pass file to the given position using a relative seek
+ * from the current position
+ */
static void reset_fpf_position(VP8_COMP *cpi, FIRSTPASS_STATS *Position)
{
cpi->twopass.stats_in = Position;
@@ -92,14 +92,14 @@ static int lookup_next_frame_stats(VP8_COMP *cpi, FIRSTPASS_STATS *next_frame)
return 1;
}
-// Read frame stats at an offset from the current position
+/* Read frame stats at an offset from the current position */
static int read_frame_stats( VP8_COMP *cpi,
FIRSTPASS_STATS *frame_stats,
int offset )
{
FIRSTPASS_STATS * fps_ptr = cpi->twopass.stats_in;
- // Check legality of offset
+ /* Check legality of offset */
if ( offset >= 0 )
{
if ( &fps_ptr[offset] >= cpi->twopass.stats_in_end )
@@ -136,7 +136,7 @@ static void output_stats(const VP8_COMP *cpi,
pkt.data.twopass_stats.sz = sizeof(FIRSTPASS_STATS);
vpx_codec_pkt_list_add(pktlist, &pkt);
-// TEMP debug code
+/* TEMP debug code */
#if OUTPUT_FPF
{
@@ -257,7 +257,9 @@ static void avg_stats(FIRSTPASS_STATS *section)
section->duration /= section->count;
}
-// Calculate a modified Error used in distributing bits between easier and harder frames
+/* Calculate a modified Error used in distributing bits between easier
+ * and harder frames
+ */
static double calculate_modified_err(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
{
double av_err = ( cpi->twopass.total_stats.ssim_weighted_pred_err /
@@ -315,7 +317,9 @@ static double simple_weight(YV12_BUFFER_CONFIG *source)
unsigned char *src = source->y_buffer;
double sum_weights = 0.0;
- // Loop throught the Y plane raw examining levels and creating a weight for the image
+ /* Loop through the Y plane raw examining levels and creating a weight
+ * for the image
+ */
i = source->y_height;
do
{
@@ -335,41 +339,52 @@ static double simple_weight(YV12_BUFFER_CONFIG *source)
}
-// This function returns the current per frame maximum bitrate target
+/* This function returns the current per frame maximum bitrate target */
static int frame_max_bits(VP8_COMP *cpi)
{
- // Max allocation for a single frame based on the max section guidelines passed in and how many bits are left
+ /* Max allocation for a single frame based on the max section guidelines
+ * passed in and how many bits are left
+ */
int max_bits;
- // For CBR we need to also consider buffer fullness.
- // If we are running below the optimal level then we need to gradually tighten up on max_bits.
+ /* For CBR we need to also consider buffer fullness.
+ * If we are running below the optimal level then we need to gradually
+ * tighten up on max_bits.
+ */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
{
double buffer_fullness_ratio = (double)cpi->buffer_level / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.optimal_buffer_level);
- // For CBR base this on the target average bits per frame plus the maximum sedction rate passed in by the user
+ /* For CBR base this on the target average bits per frame plus the
+ * maximum section rate passed in by the user
+ */
max_bits = (int)(cpi->av_per_frame_bandwidth * ((double)cpi->oxcf.two_pass_vbrmax_section / 100.0));
- // If our buffer is below the optimum level
+ /* If our buffer is below the optimum level */
if (buffer_fullness_ratio < 1.0)
{
- // The lower of max_bits / 4 or cpi->av_per_frame_bandwidth / 4.
+ /* The lower of max_bits / 4 or cpi->av_per_frame_bandwidth / 4. */
int min_max_bits = ((cpi->av_per_frame_bandwidth >> 2) < (max_bits >> 2)) ? cpi->av_per_frame_bandwidth >> 2 : max_bits >> 2;
max_bits = (int)(max_bits * buffer_fullness_ratio);
+ /* Lowest value we will set ... which should allow the buffer to
+ * refill.
+ */
if (max_bits < min_max_bits)
- max_bits = min_max_bits; // Lowest value we will set ... which should allow the buffer to refil.
+ max_bits = min_max_bits;
}
}
- // VBR
+ /* VBR */
else
{
- // For VBR base this on the bits and frames left plus the two_pass_vbrmax_section rate passed in by the user
+ /* For VBR base this on the bits and frames left plus the
+ * two_pass_vbrmax_section rate passed in by the user
+ */
max_bits = (int)(((double)cpi->twopass.bits_left / (cpi->twopass.total_stats.count - (double)cpi->common.current_video_frame)) * ((double)cpi->oxcf.two_pass_vbrmax_section / 100.0));
}
- // Trap case where we are out of bits
+ /* Trap case where we are out of bits */
if (max_bits < 0)
max_bits = 0;
@@ -403,13 +418,13 @@ static void zz_motion_search( VP8_COMP *cpi, MACROBLOCK * x,
unsigned char *ref_ptr;
int ref_stride = x->e_mbd.pre.y_stride;
- // Set up pointers for this macro block raw buffer
+ /* Set up pointers for this macro block raw buffer */
raw_ptr = (unsigned char *)(raw_buffer->y_buffer + recon_yoffset
+ d->offset);
vp8_mse16x16 ( src_ptr, src_stride, raw_ptr, raw_stride,
(unsigned int *)(raw_motion_err));
- // Set up pointers for this macro block recon buffer
+ /* Set up pointers for this macro block recon buffer */
xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
ref_ptr = (unsigned char *)(xd->pre.y_buffer + d->offset );
vp8_mse16x16 ( src_ptr, src_stride, ref_ptr, ref_stride,
@@ -430,19 +445,19 @@ static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
int_mv ref_mv_full;
int tmp_err;
- int step_param = 3; //3; // Dont search over full range for first pass
- int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param; //3;
+ int step_param = 3; /* Dont search over full range for first pass */
+ int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
int n;
vp8_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
int new_mv_mode_penalty = 256;
- // override the default variance function to use MSE
+ /* override the default variance function to use MSE */
v_fn_ptr.vf = vp8_mse16x16;
- // Set up pointers for this macro block recon buffer
+ /* Set up pointers for this macro block recon buffer */
xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
- // Initial step/diamond search centred on best mv
+ /* Initial step/diamond search centred on best mv */
tmp_mv.as_int = 0;
ref_mv_full.as_mv.col = ref_mv->as_mv.col>>3;
ref_mv_full.as_mv.row = ref_mv->as_mv.row>>3;
@@ -459,7 +474,7 @@ static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
best_mv->col = tmp_mv.as_mv.col;
}
- // Further step/diamond searches as necessary
+ /* Further step/diamond searches as necessary */
n = num00;
num00 = 0;
@@ -520,7 +535,7 @@ void vp8_first_pass(VP8_COMP *cpi)
zero_ref_mv.as_int = 0;
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state();
x->src = * cpi->Source;
xd->pre = *lst_yv12;
@@ -536,13 +551,11 @@ void vp8_first_pass(VP8_COMP *cpi)
vp8_setup_block_ptrs(x);
- // set up frame new frame for intra coded blocks
+ /* set up frame new frame for intra coded blocks */
vp8_setup_intra_recon(new_yv12);
vp8cx_frame_init_quantizer(cpi);
- // Initialise the MV cost table to the defaults
- //if( cm->current_video_frame == 0)
- //if ( 0 )
+ /* Initialise the MV cost table to the defaults */
{
int flag[2] = {1, 1};
vp8_initialize_rd_consts(cpi, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
@@ -550,24 +563,26 @@ void vp8_first_pass(VP8_COMP *cpi)
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
}
- // for each macroblock row in image
+ /* for each macroblock row in image */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
int_mv best_ref_mv;
best_ref_mv.as_int = 0;
- // reset above block coeffs
+ /* reset above block coeffs */
xd->up_available = (mb_row != 0);
recon_yoffset = (mb_row * recon_y_stride * 16);
recon_uvoffset = (mb_row * recon_uv_stride * 8);
- // Set up limit values for motion vectors to prevent them extending outside the UMV borders
+ /* Set up limit values for motion vectors to prevent them extending
+ * outside the UMV borders
+ */
x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);
- // for each macroblock col in image
+ /* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
int this_error;
@@ -579,26 +594,33 @@ void vp8_first_pass(VP8_COMP *cpi)
xd->dst.v_buffer = new_yv12->v_buffer + recon_uvoffset;
xd->left_available = (mb_col != 0);
- //Copy current mb to a buffer
+ /* Copy current mb to a buffer */
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
- // do intra 16x16 prediction
+ /* do intra 16x16 prediction */
this_error = vp8_encode_intra(cpi, x, use_dc_pred);
- // "intrapenalty" below deals with situations where the intra and inter error scores are very low (eg a plain black frame)
- // We do not have special cases in first pass for 0,0 and nearest etc so all inter modes carry an overhead cost estimate fot the mv.
- // When the error score is very low this causes us to pick all or lots of INTRA modes and throw lots of key frames.
- // This penalty adds a cost matching that of a 0,0 mv to the intra case.
+ /* "intrapenalty" below deals with situations where the intra
+ * and inter error scores are very low (eg a plain black frame)
+ * We do not have special cases in first pass for 0,0 and
+ * nearest etc so all inter modes carry an overhead cost
+ * estimate fot the mv. When the error score is very low this
+ * causes us to pick all or lots of INTRA modes and throw lots
+ * of key frames. This penalty adds a cost matching that of a
+ * 0,0 mv to the intra case.
+ */
this_error += intrapenalty;
- // Cumulative intra error total
+ /* Cumulative intra error total */
intra_error += (int64_t)this_error;
- // Set up limit values for motion vectors to prevent them extending outside the UMV borders
+ /* Set up limit values for motion vectors to prevent them
+ * extending outside the UMV borders
+ */
x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
- // Other than for the first frame do a motion search
+ /* Other than for the first frame do a motion search */
if (cm->current_video_frame > 0)
{
BLOCKD *d = &x->e_mbd.block[0];
@@ -607,7 +629,7 @@ void vp8_first_pass(VP8_COMP *cpi)
int motion_error = INT_MAX;
int raw_motion_error = INT_MAX;
- // Simple 0,0 motion with no mv overhead
+ /* Simple 0,0 motion with no mv overhead */
zz_motion_search( cpi, x, cpi->last_frame_unscaled_source,
&raw_motion_error, lst_yv12, &motion_error,
recon_yoffset );
@@ -617,13 +639,16 @@ void vp8_first_pass(VP8_COMP *cpi)
if (raw_motion_error < cpi->oxcf.encode_breakout)
goto skip_motion_search;
- // Test last reference frame using the previous best mv as the
- // starting point (best reference) for the search
+ /* Test last reference frame using the previous best mv as the
+ * starting point (best reference) for the search
+ */
first_pass_motion_search(cpi, x, &best_ref_mv,
&d->bmi.mv.as_mv, lst_yv12,
&motion_error, recon_yoffset);
- // If the current best reference mv is not centred on 0,0 then do a 0,0 based search as well
+ /* If the current best reference mv is not centred on 0,0
+ * then do a 0,0 based search as well
+ */
if (best_ref_mv.as_int)
{
tmp_err = INT_MAX;
@@ -638,7 +663,9 @@ void vp8_first_pass(VP8_COMP *cpi)
}
}
- // Experimental search in a second reference frame ((0,0) based only)
+ /* Experimental search in a second reference frame ((0,0)
+ * based only)
+ */
if (cm->current_video_frame > 1)
{
first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv, gld_yv12, &gf_motion_error, recon_yoffset);
@@ -646,19 +673,9 @@ void vp8_first_pass(VP8_COMP *cpi)
if ((gf_motion_error < motion_error) && (gf_motion_error < this_error))
{
second_ref_count++;
- //motion_error = gf_motion_error;
- //d->bmi.mv.as_mv.row = tmp_mv.row;
- //d->bmi.mv.as_mv.col = tmp_mv.col;
}
- /*else
- {
- xd->pre.y_buffer = cm->last_frame.y_buffer + recon_yoffset;
- xd->pre.u_buffer = cm->last_frame.u_buffer + recon_uvoffset;
- xd->pre.v_buffer = cm->last_frame.v_buffer + recon_uvoffset;
- }*/
-
- // Reset to last frame as reference buffer
+ /* Reset to last frame as reference buffer */
xd->pre.y_buffer = lst_yv12->y_buffer + recon_yoffset;
xd->pre.u_buffer = lst_yv12->u_buffer + recon_uvoffset;
xd->pre.v_buffer = lst_yv12->v_buffer + recon_uvoffset;
@@ -670,10 +687,11 @@ skip_motion_search:
if (motion_error <= this_error)
{
- // Keep a count of cases where the inter and intra were
- // very close and very low. This helps with scene cut
- // detection for example in cropped clips with black bars
- // at the sides or top and bottom.
+ /* Keep a count of cases where the inter and intra were
+ * very close and very low. This helps with scene cut
+ * detection for example in cropped clips with black bars
+ * at the sides or top and bottom.
+ */
if( (((this_error-intrapenalty) * 9) <=
(motion_error*10)) &&
(this_error < (2*intrapenalty)) )
@@ -696,17 +714,17 @@ skip_motion_search:
best_ref_mv.as_int = d->bmi.mv.as_int;
- // Was the vector non-zero
+ /* Was the vector non-zero */
if (d->bmi.mv.as_int)
{
mvcount++;
- // Was it different from the last non zero vector
+ /* Was it different from the last non zero vector */
if ( d->bmi.mv.as_int != lastmv_as_int )
new_mv_count++;
lastmv_as_int = d->bmi.mv.as_int;
- // Does the Row vector point inwards or outwards
+ /* Does the Row vector point inwards or outwards */
if (mb_row < cm->mb_rows / 2)
{
if (d->bmi.mv.as_mv.row > 0)
@@ -722,7 +740,7 @@ skip_motion_search:
sum_in_vectors--;
}
- // Does the Row vector point inwards or outwards
+ /* Does the Row vector point inwards or outwards */
if (mb_col < cm->mb_cols / 2)
{
if (d->bmi.mv.as_mv.col > 0)
@@ -743,7 +761,7 @@ skip_motion_search:
coded_error += (int64_t)this_error;
- // adjust to the next column of macroblocks
+ /* adjust to the next column of macroblocks */
x->src.y_buffer += 16;
x->src.u_buffer += 8;
x->src.v_buffer += 8;
@@ -752,17 +770,17 @@ skip_motion_search:
recon_uvoffset += 8;
}
- // adjust to the next row of mbs
+ /* adjust to the next row of mbs */
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
- //extend the recon for intra prediction
+ /* extend the recon for intra prediction */
vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state();
}
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state();
{
double weight = 0.0;
@@ -809,12 +827,13 @@ skip_motion_search:
fps.pcnt_motion = 1.0 * (double)mvcount / cpi->common.MBs;
}
- // TODO: handle the case when duration is set to 0, or something less
- // than the full time between subsequent cpi->source_time_stamp s .
+ /* TODO: handle the case when duration is set to 0, or something less
+ * than the full time between subsequent cpi->source_time_stamps
+ */
fps.duration = cpi->source->ts_end
- cpi->source->ts_start;
- // don't want to do output stats with a stack variable!
+ /* don't want to do output stats with a stack variable! */
memcpy(&cpi->twopass.this_frame_stats,
&fps,
sizeof(FIRSTPASS_STATS));
@@ -822,7 +841,9 @@ skip_motion_search:
accumulate_stats(&cpi->twopass.total_stats, &fps);
}
- // Copy the previous Last Frame into the GF buffer if specific conditions for doing so are met
+ /* Copy the previous Last Frame into the GF buffer if specific
+ * conditions for doing so are met
+ */
if ((cm->current_video_frame > 0) &&
(cpi->twopass.this_frame_stats.pcnt_inter > 0.20) &&
((cpi->twopass.this_frame_stats.intra_error / cpi->twopass.this_frame_stats.coded_error) > 2.0))
@@ -830,18 +851,22 @@ skip_motion_search:
vp8_yv12_copy_frame(lst_yv12, gld_yv12);
}
- // swap frame pointers so last frame refers to the frame we just compressed
+ /* swap frame pointers so last frame refers to the frame we just
+ * compressed
+ */
vp8_swap_yv12_buffer(lst_yv12, new_yv12);
vp8_yv12_extend_frame_borders(lst_yv12);
- // Special case for the first frame. Copy into the GF buffer as a second reference.
+ /* Special case for the first frame. Copy into the GF buffer as a
+ * second reference.
+ */
if (cm->current_video_frame == 0)
{
vp8_yv12_copy_frame(lst_yv12, gld_yv12);
}
- // use this to see what the first pass reconstruction looks like
+ /* use this to see what the first pass reconstruction looks like */
if (0)
{
char filename[512];
@@ -863,11 +888,10 @@ skip_motion_search:
}
extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
-// Estimate a cost per mb attributable to overheads such as the coding of
-// modes and motion vectors.
-// Currently simplistic in its assumptions for testing.
-//
-
+/* Estimate a cost per mb attributable to overheads such as the coding of
+ * modes and motion vectors.
+ * Currently simplistic in its assumptions for testing.
+ */
static double bitcost( double prob )
{
@@ -891,12 +915,14 @@ static int64_t estimate_modemvcost(VP8_COMP *cpi,
motion_cost = bitcost(av_pct_motion);
intra_cost = bitcost(av_intra);
- // Estimate of extra bits per mv overhead for mbs
- // << 9 is the normalization to the (bits * 512) used in vp8_bits_per_mb
+ /* Estimate of extra bits per mv overhead for mbs
+ * << 9 is the normalization to the (bits * 512) used in vp8_bits_per_mb
+ */
mv_cost = ((int)(fpstats->new_mv_count / fpstats->count) * 8) << 9;
- // Crude estimate of overhead cost from modes
- // << 9 is the normalization to (bits * 512) used in vp8_bits_per_mb
+ /* Crude estimate of overhead cost from modes
+ * << 9 is the normalization to (bits * 512) used in vp8_bits_per_mb
+ */
mode_cost =
(int)( ( ((av_pct_inter - av_pct_motion) * zz_cost) +
(av_pct_motion * motion_cost) +
@@ -915,17 +941,17 @@ static double calc_correction_factor( double err_per_mb,
double error_term = err_per_mb / err_devisor;
double correction_factor;
- // Adjustment based on Q to power term.
+ /* Adjustment based on Q to power term. */
power_term = pt_low + (Q * 0.01);
power_term = (power_term > pt_high) ? pt_high : power_term;
- // Adjustments to error term
- // TBD
+ /* Adjustments to error term */
+ /* TBD */
- // Calculate correction factor
+ /* Calculate correction factor */
correction_factor = pow(error_term, power_term);
- // Clip range
+ /* Clip range */
correction_factor =
(correction_factor < 0.05)
? 0.05 : (correction_factor > 5.0) ? 5.0 : correction_factor;
@@ -949,15 +975,16 @@ static int estimate_max_q(VP8_COMP *cpi,
int overhead_bits_per_mb;
if (section_target_bandwitdh <= 0)
- return cpi->twopass.maxq_max_limit; // Highest value allowed
+ return cpi->twopass.maxq_max_limit; /* Highest value allowed */
target_norm_bits_per_mb =
(section_target_bandwitdh < (1 << 20))
? (512 * section_target_bandwitdh) / num_mbs
: 512 * (section_target_bandwitdh / num_mbs);
- // Calculate a corrective factor based on a rolling ratio of bits spent
- // vs target bits
+ /* Calculate a corrective factor based on a rolling ratio of bits spent
+ * vs target bits
+ */
if ((cpi->rolling_target_bits > 0) &&
(cpi->active_worst_quality < cpi->worst_quality))
{
@@ -978,8 +1005,9 @@ static int estimate_max_q(VP8_COMP *cpi,
? 10.0 : cpi->twopass.est_max_qcorrection_factor;
}
- // Corrections for higher compression speed settings
- // (reduced compression expected)
+ /* Corrections for higher compression speed settings
+ * (reduced compression expected)
+ */
if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1))
{
if (cpi->oxcf.cpu_used <= 5)
@@ -988,18 +1016,19 @@ static int estimate_max_q(VP8_COMP *cpi,
speed_correction = 1.25;
}
- // Estimate of overhead bits per mb
- // Correction to overhead bits for min allowed Q.
+ /* Estimate of overhead bits per mb */
+ /* Correction to overhead bits for min allowed Q. */
overhead_bits_per_mb = overhead_bits / num_mbs;
overhead_bits_per_mb *= pow( 0.98, (double)cpi->twopass.maxq_min_limit );
- // Try and pick a max Q that will be high enough to encode the
- // content at the given rate.
+ /* Try and pick a max Q that will be high enough to encode the
+ * content at the given rate.
+ */
for (Q = cpi->twopass.maxq_min_limit; Q < cpi->twopass.maxq_max_limit; Q++)
{
int bits_per_mb_at_this_q;
- // Error per MB based correction factor
+ /* Error per MB based correction factor */
err_correction_factor =
calc_correction_factor(err_per_mb, 150.0, 0.40, 0.90, Q);
@@ -1011,25 +1040,27 @@ static int estimate_max_q(VP8_COMP *cpi,
* cpi->twopass.section_max_qfactor
* (double)bits_per_mb_at_this_q);
- // Mode and motion overhead
- // As Q rises in real encode loop rd code will force overhead down
- // We make a crude adjustment for this here as *.98 per Q step.
+ /* Mode and motion overhead */
+ /* As Q rises in real encode loop rd code will force overhead down
+ * We make a crude adjustment for this here as *.98 per Q step.
+ */
overhead_bits_per_mb = (int)((double)overhead_bits_per_mb * 0.98);
if (bits_per_mb_at_this_q <= target_norm_bits_per_mb)
break;
}
- // Restriction on active max q for constrained quality mode.
+ /* Restriction on active max q for constrained quality mode. */
if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
(Q < cpi->cq_target_quality) )
{
Q = cpi->cq_target_quality;
}
- // Adjust maxq_min_limit and maxq_max_limit limits based on
- // averaga q observed in clip for non kf/gf.arf frames
- // Give average a chance to settle though.
+ /* Adjust maxq_min_limit and maxq_max_limit limits based on
+ * average q observed in clip for non kf/gf.arf frames
+ * Give average a chance to settle though.
+ */
if ( (cpi->ni_frames >
((unsigned int)cpi->twopass.total_stats.count >> 8)) &&
(cpi->ni_frames > 150) )
@@ -1043,8 +1074,9 @@ static int estimate_max_q(VP8_COMP *cpi,
return Q;
}
-// For cq mode estimate a cq level that matches the observed
-// complexity and data rate.
+/* For cq mode estimate a cq level that matches the observed
+ * complexity and data rate.
+ */
static int estimate_cq( VP8_COMP *cpi,
FIRSTPASS_STATS * fpstats,
int section_target_bandwitdh,
@@ -1073,11 +1105,12 @@ static int estimate_cq( VP8_COMP *cpi,
? (512 * section_target_bandwitdh) / num_mbs
: 512 * (section_target_bandwitdh / num_mbs);
- // Estimate of overhead bits per mb
+ /* Estimate of overhead bits per mb */
overhead_bits_per_mb = overhead_bits / num_mbs;
- // Corrections for higher compression speed settings
- // (reduced compression expected)
+ /* Corrections for higher compression speed settings
+ * (reduced compression expected)
+ */
if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1))
{
if (cpi->oxcf.cpu_used <= 5)
@@ -1086,19 +1119,19 @@ static int estimate_cq( VP8_COMP *cpi,
speed_correction = 1.25;
}
- // II ratio correction factor for clip as a whole
+ /* II ratio correction factor for clip as a whole */
clip_iiratio = cpi->twopass.total_stats.intra_error /
DOUBLE_DIVIDE_CHECK(cpi->twopass.total_stats.coded_error);
clip_iifactor = 1.0 - ((clip_iiratio - 10.0) * 0.025);
if (clip_iifactor < 0.80)
clip_iifactor = 0.80;
- // Try and pick a Q that can encode the content at the given rate.
+ /* Try and pick a Q that can encode the content at the given rate. */
for (Q = 0; Q < MAXQ; Q++)
{
int bits_per_mb_at_this_q;
- // Error per MB based correction factor
+ /* Error per MB based correction factor */
err_correction_factor =
calc_correction_factor(err_per_mb, 100.0, 0.40, 0.90, Q);
@@ -1111,16 +1144,17 @@ static int estimate_cq( VP8_COMP *cpi,
clip_iifactor *
(double)bits_per_mb_at_this_q);
- // Mode and motion overhead
- // As Q rises in real encode loop rd code will force overhead down
- // We make a crude adjustment for this here as *.98 per Q step.
+ /* Mode and motion overhead */
+ /* As Q rises in real encode loop rd code will force overhead down
+ * We make a crude adjustment for this here as *.98 per Q step.
+ */
overhead_bits_per_mb = (int)((double)overhead_bits_per_mb * 0.98);
if (bits_per_mb_at_this_q <= target_norm_bits_per_mb)
break;
}
- // Clip value to range "best allowed to (worst allowed - 1)"
+ /* Clip value to range "best allowed to (worst allowed - 1)" */
Q = cq_level[Q];
if ( Q >= cpi->worst_quality )
Q = cpi->worst_quality - 1;
@@ -1142,7 +1176,9 @@ static int estimate_q(VP8_COMP *cpi, double section_err, int section_target_band
target_norm_bits_per_mb = (section_target_bandwitdh < (1 << 20)) ? (512 * section_target_bandwitdh) / num_mbs : 512 * (section_target_bandwitdh / num_mbs);
- // Corrections for higher compression speed settings (reduced compression expected)
+ /* Corrections for higher compression speed settings
+ * (reduced compression expected)
+ */
if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1))
{
if (cpi->oxcf.cpu_used <= 5)
@@ -1151,12 +1187,12 @@ static int estimate_q(VP8_COMP *cpi, double section_err, int section_target_band
speed_correction = 1.25;
}
- // Try and pick a Q that can encode the content at the given rate.
+ /* Try and pick a Q that can encode the content at the given rate. */
for (Q = 0; Q < MAXQ; Q++)
{
int bits_per_mb_at_this_q;
- // Error per MB based correction factor
+ /* Error per MB based correction factor */
err_correction_factor =
calc_correction_factor(err_per_mb, 150.0, 0.40, 0.90, Q);
@@ -1173,7 +1209,7 @@ static int estimate_q(VP8_COMP *cpi, double section_err, int section_target_band
return Q;
}
-// Estimate a worst case Q for a KF group
+/* Estimate a worst case Q for a KF group */
static int estimate_kf_group_q(VP8_COMP *cpi, double section_err, int section_target_bandwitdh, double group_iiratio)
{
int Q;
@@ -1193,12 +1229,14 @@ static int estimate_kf_group_q(VP8_COMP *cpi, double section_err, int section_ta
double combined_correction_factor;
- // Trap special case where the target is <= 0
+ /* Trap special case where the target is <= 0 */
if (target_norm_bits_per_mb <= 0)
return MAXQ * 2;
- // Calculate a corrective factor based on a rolling ratio of bits spent vs target bits
- // This is clamped to the range 0.1 to 10.0
+ /* Calculate a corrective factor based on a rolling ratio of bits spent
+ * vs target bits
+ * This is clamped to the range 0.1 to 10.0
+ */
if (cpi->long_rolling_target_bits <= 0)
current_spend_ratio = 10.0;
else
@@ -1207,14 +1245,19 @@ static int estimate_kf_group_q(VP8_COMP *cpi, double section_err, int section_ta
current_spend_ratio = (current_spend_ratio > 10.0) ? 10.0 : (current_spend_ratio < 0.1) ? 0.1 : current_spend_ratio;
}
- // Calculate a correction factor based on the quality of prediction in the sequence as indicated by intra_inter error score ratio (IIRatio)
- // The idea here is to favour subsampling in the hardest sections vs the easyest.
+ /* Calculate a correction factor based on the quality of prediction in
+ * the sequence as indicated by intra_inter error score ratio (IIRatio)
+ * The idea here is to favour subsampling in the hardest sections vs
+ * the easyest.
+ */
iiratio_correction_factor = 1.0 - ((group_iiratio - 6.0) * 0.1);
if (iiratio_correction_factor < 0.5)
iiratio_correction_factor = 0.5;
- // Corrections for higher compression speed settings (reduced compression expected)
+ /* Corrections for higher compression speed settings
+ * (reduced compression expected)
+ */
if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1))
{
if (cpi->oxcf.cpu_used <= 5)
@@ -1223,13 +1266,15 @@ static int estimate_kf_group_q(VP8_COMP *cpi, double section_err, int section_ta
speed_correction = 1.25;
}
- // Combine the various factors calculated above
+ /* Combine the various factors calculated above */
combined_correction_factor = speed_correction * iiratio_correction_factor * current_spend_ratio;
- // Try and pick a Q that should be high enough to encode the content at the given rate.
+ /* Try and pick a Q that should be high enough to encode the content at
+ * the given rate.
+ */
for (Q = 0; Q < MAXQ; Q++)
{
- // Error per MB based correction factor
+ /* Error per MB based correction factor */
err_correction_factor =
calc_correction_factor(err_per_mb, 150.0, pow_lowq, pow_highq, Q);
@@ -1242,7 +1287,9 @@ static int estimate_kf_group_q(VP8_COMP *cpi, double section_err, int section_ta
break;
}
- // If we could not hit the target even at Max Q then estimate what Q would have bee required
+ /* If we could not hit the target even at Max Q then estimate what Q
+ * would have been required
+ */
while ((bits_per_mb_at_this_q > target_norm_bits_per_mb) && (Q < (MAXQ * 2)))
{
@@ -1281,30 +1328,34 @@ void vp8_init_second_pass(VP8_COMP *cpi)
cpi->twopass.total_stats = *cpi->twopass.stats_in_end;
cpi->twopass.total_left_stats = cpi->twopass.total_stats;
- // each frame can have a different duration, as the frame rate in the source
- // isn't guaranteed to be constant. The frame rate prior to the first frame
- // encoded in the second pass is a guess. However the sum duration is not.
- // Its calculated based on the actual durations of all frames from the first
- // pass.
+ /* each frame can have a different duration, as the frame rate in the
+ * source isn't guaranteed to be constant. The frame rate prior to
+ * the first frame encoded in the second pass is a guess. However the
+ * sum duration is not. Its calculated based on the actual durations of
+ * all frames from the first pass.
+ */
vp8_new_frame_rate(cpi, 10000000.0 * cpi->twopass.total_stats.count / cpi->twopass.total_stats.duration);
cpi->output_frame_rate = cpi->frame_rate;
cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats.duration * cpi->oxcf.target_bandwidth / 10000000.0) ;
cpi->twopass.bits_left -= (int64_t)(cpi->twopass.total_stats.duration * two_pass_min_rate / 10000000.0);
- // Calculate a minimum intra value to be used in determining the IIratio
- // scores used in the second pass. We have this minimum to make sure
- // that clips that are static but "low complexity" in the intra domain
- // are still boosted appropriately for KF/GF/ARF
+ /* Calculate a minimum intra value to be used in determining the IIratio
+ * scores used in the second pass. We have this minimum to make sure
+ * that clips that are static but "low complexity" in the intra domain
+ * are still boosted appropriately for KF/GF/ARF
+ */
cpi->twopass.kf_intra_err_min = KF_MB_INTRA_MIN * cpi->common.MBs;
cpi->twopass.gf_intra_err_min = GF_MB_INTRA_MIN * cpi->common.MBs;
- // Scan the first pass file and calculate an average Intra / Inter error score ratio for the sequence
+ /* Scan the first pass file and calculate an average Intra / Inter error
+ * score ratio for the sequence
+ */
{
double sum_iiratio = 0.0;
double IIRatio;
- start_pos = cpi->twopass.stats_in; // Note starting "file" position
+ start_pos = cpi->twopass.stats_in; /* Note starting "file" position */
while (input_stats(cpi, &this_frame) != EOF)
{
@@ -1315,14 +1366,15 @@ void vp8_init_second_pass(VP8_COMP *cpi)
cpi->twopass.avg_iiratio = sum_iiratio / DOUBLE_DIVIDE_CHECK((double)cpi->twopass.total_stats.count);
- // Reset file position
+ /* Reset file position */
reset_fpf_position(cpi, start_pos);
}
- // Scan the first pass file and calculate a modified total error based upon the bias/power function
- // used to allocate bits
+ /* Scan the first pass file and calculate a modified total error based
+ * upon the bias/power function used to allocate bits
+ */
{
- start_pos = cpi->twopass.stats_in; // Note starting "file" position
+ start_pos = cpi->twopass.stats_in; /* Note starting "file" position */
cpi->twopass.modified_error_total = 0.0;
cpi->twopass.modified_error_used = 0.0;
@@ -1333,7 +1385,7 @@ void vp8_init_second_pass(VP8_COMP *cpi)
}
cpi->twopass.modified_error_left = cpi->twopass.modified_error_total;
- reset_fpf_position(cpi, start_pos); // Reset file position
+ reset_fpf_position(cpi, start_pos); /* Reset file position */
}
}
@@ -1342,23 +1394,24 @@ void vp8_end_second_pass(VP8_COMP *cpi)
{
}
-// This function gives and estimate of how badly we believe
-// the prediction quality is decaying from frame to frame.
+/* This function gives and estimate of how badly we believe the prediction
+ * quality is decaying from frame to frame.
+ */
static double get_prediction_decay_rate(VP8_COMP *cpi, FIRSTPASS_STATS *next_frame)
{
double prediction_decay_rate;
double motion_decay;
double motion_pct = next_frame->pcnt_motion;
- // Initial basis is the % mbs inter coded
+ /* Initial basis is the % mbs inter coded */
prediction_decay_rate = next_frame->pcnt_inter;
- // High % motion -> somewhat higher decay rate
+ /* High % motion -> somewhat higher decay rate */
motion_decay = (1.0 - (motion_pct / 20.0));
if (motion_decay < prediction_decay_rate)
prediction_decay_rate = motion_decay;
- // Adjustment to decay rate based on speed of motion
+ /* Adjustment to decay rate based on speed of motion */
{
double this_mv_rabs;
double this_mv_cabs;
@@ -1378,9 +1431,10 @@ static double get_prediction_decay_rate(VP8_COMP *cpi, FIRSTPASS_STATS *next_fra
return prediction_decay_rate;
}
-// Function to test for a condition where a complex transition is followed
-// by a static section. For example in slide shows where there is a fade
-// between slides. This is to help with more optimal kf and gf positioning.
+/* Function to test for a condition where a complex transition is followed
+ * by a static section. For example in slide shows where there is a fade
+ * between slides. This is to help with more optimal kf and gf positioning.
+ */
static int detect_transition_to_still(
VP8_COMP *cpi,
int frame_interval,
@@ -1390,9 +1444,10 @@ static int detect_transition_to_still(
{
int trans_to_still = 0;
- // Break clause to detect very still sections after motion
- // For example a static image after a fade or other transition
- // instead of a clean scene cut.
+ /* Break clause to detect very still sections after motion
+ * For example a static image after a fade or other transition
+ * instead of a clean scene cut.
+ */
if ( (frame_interval > MIN_GF_INTERVAL) &&
(loop_decay_rate >= 0.999) &&
(decay_accumulator < 0.9) )
@@ -1402,8 +1457,7 @@ static int detect_transition_to_still(
FIRSTPASS_STATS tmp_next_frame;
double decay_rate;
- // Look ahead a few frames to see if static condition
- // persists...
+ /* Look ahead a few frames to see if static condition persists... */
for ( j = 0; j < still_interval; j++ )
{
if (EOF == input_stats(cpi, &tmp_next_frame))
@@ -1413,10 +1467,10 @@ static int detect_transition_to_still(
if ( decay_rate < 0.999 )
break;
}
- // Reset file position
+ /* Reset file position */
reset_fpf_position(cpi, position);
- // Only if it does do we signal a transition to still
+ /* Only if it does do we signal a transition to still */
if ( j == still_interval )
trans_to_still = 1;
}
@@ -1424,24 +1478,26 @@ static int detect_transition_to_still(
return trans_to_still;
}
-// This function detects a flash through the high relative pcnt_second_ref
-// score in the frame following a flash frame. The offset passed in should
-// reflect this
+/* This function detects a flash through the high relative pcnt_second_ref
+ * score in the frame following a flash frame. The offset passed in should
+ * reflect this
+ */
static int detect_flash( VP8_COMP *cpi, int offset )
{
FIRSTPASS_STATS next_frame;
int flash_detected = 0;
- // Read the frame data.
- // The return is 0 (no flash detected) if not a valid frame
+ /* Read the frame data. */
+ /* The return is 0 (no flash detected) if not a valid frame */
if ( read_frame_stats(cpi, &next_frame, offset) != EOF )
{
- // What we are looking for here is a situation where there is a
- // brief break in prediction (such as a flash) but subsequent frames
- // are reasonably well predicted by an earlier (pre flash) frame.
- // The recovery after a flash is indicated by a high pcnt_second_ref
- // comapred to pcnt_inter.
+ /* What we are looking for here is a situation where there is a
+ * brief break in prediction (such as a flash) but subsequent frames
+ * are reasonably well predicted by an earlier (pre flash) frame.
+ * The recovery after a flash is indicated by a high pcnt_second_ref
+ * comapred to pcnt_inter.
+ */
if ( (next_frame.pcnt_second_ref > next_frame.pcnt_inter) &&
(next_frame.pcnt_second_ref >= 0.5 ) )
{
@@ -1462,7 +1518,7 @@ static int detect_flash( VP8_COMP *cpi, int offset )
return flash_detected;
}
-// Update the motion related elements to the GF arf boost calculation
+/* Update the motion related elements to the GF arf boost calculation */
static void accumulate_frame_motion_stats(
VP8_COMP *cpi,
FIRSTPASS_STATS * this_frame,
@@ -1471,22 +1527,22 @@ static void accumulate_frame_motion_stats(
double * abs_mv_in_out_accumulator,
double * mv_ratio_accumulator )
{
- //double this_frame_mv_in_out;
double this_frame_mvr_ratio;
double this_frame_mvc_ratio;
double motion_pct;
- // Accumulate motion stats.
+ /* Accumulate motion stats. */
motion_pct = this_frame->pcnt_motion;
- // Accumulate Motion In/Out of frame stats
+ /* Accumulate Motion In/Out of frame stats */
*this_frame_mv_in_out = this_frame->mv_in_out_count * motion_pct;
*mv_in_out_accumulator += this_frame->mv_in_out_count * motion_pct;
*abs_mv_in_out_accumulator +=
fabs(this_frame->mv_in_out_count * motion_pct);
- // Accumulate a measure of how uniform (or conversely how random)
- // the motion field is. (A ratio of absmv / mv)
+ /* Accumulate a measure of how uniform (or conversely how random)
+ * the motion field is. (A ratio of absmv / mv)
+ */
if (motion_pct > 0.05)
{
this_frame_mvr_ratio = fabs(this_frame->mvr_abs) /
@@ -1508,7 +1564,7 @@ static void accumulate_frame_motion_stats(
}
}
-// Calculate a baseline boost number for the current frame.
+/* Calculate a baseline boost number for the current frame. */
static double calc_frame_boost(
VP8_COMP *cpi,
FIRSTPASS_STATS * this_frame,
@@ -1516,7 +1572,7 @@ static double calc_frame_boost(
{
double frame_boost;
- // Underlying boost factor is based on inter intra error ratio
+ /* Underlying boost factor is based on inter intra error ratio */
if (this_frame->intra_error > cpi->twopass.gf_intra_err_min)
frame_boost = (IIFACTOR * this_frame->intra_error /
DOUBLE_DIVIDE_CHECK(this_frame->coded_error));
@@ -1524,17 +1580,18 @@ static double calc_frame_boost(
frame_boost = (IIFACTOR * cpi->twopass.gf_intra_err_min /
DOUBLE_DIVIDE_CHECK(this_frame->coded_error));
- // Increase boost for frames where new data coming into frame
- // (eg zoom out). Slightly reduce boost if there is a net balance
- // of motion out of the frame (zoom in).
- // The range for this_frame_mv_in_out is -1.0 to +1.0
+ /* Increase boost for frames where new data coming into frame
+ * (eg zoom out). Slightly reduce boost if there is a net balance
+ * of motion out of the frame (zoom in).
+ * The range for this_frame_mv_in_out is -1.0 to +1.0
+ */
if (this_frame_mv_in_out > 0.0)
frame_boost += frame_boost * (this_frame_mv_in_out * 2.0);
- // In extreme case boost is halved
+ /* In extreme case boost is halved */
else
frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);
- // Clip to maximum
+ /* Clip to maximum */
if (frame_boost > GF_RMAX)
frame_boost = GF_RMAX;
@@ -1562,26 +1619,27 @@ static int calc_arf_boost(
double r;
int flash_detected = 0;
- // Search forward from the proposed arf/next gf position
+ /* Search forward from the proposed arf/next gf position */
for ( i = 0; i < f_frames; i++ )
{
if ( read_frame_stats(cpi, &this_frame, (i+offset)) == EOF )
break;
- // Update the motion related elements to the boost calculation
+ /* Update the motion related elements to the boost calculation */
accumulate_frame_motion_stats( cpi, &this_frame,
&this_frame_mv_in_out, &mv_in_out_accumulator,
&abs_mv_in_out_accumulator, &mv_ratio_accumulator );
- // Calculate the baseline boost number for this frame
+ /* Calculate the baseline boost number for this frame */
r = calc_frame_boost( cpi, &this_frame, this_frame_mv_in_out );
- // We want to discount the the flash frame itself and the recovery
- // frame that follows as both will have poor scores.
+ /* We want to discount the flash frame itself and the recovery
+ * frame that follows as both will have poor scores.
+ */
flash_detected = detect_flash(cpi, (i+offset)) ||
detect_flash(cpi, (i+offset+1));
- // Cumulative effect of prediction quality decay
+ /* Cumulative effect of prediction quality decay */
if ( !flash_detected )
{
decay_accumulator =
@@ -1592,7 +1650,7 @@ static int calc_arf_boost(
}
boost_score += (decay_accumulator * r);
- // Break out conditions.
+ /* Break out conditions. */
if ( (!flash_detected) &&
((mv_ratio_accumulator > 100.0) ||
(abs_mv_in_out_accumulator > 3.0) ||
@@ -1604,7 +1662,7 @@ static int calc_arf_boost(
*f_boost = (int)(boost_score * 100.0) >> 4;
- // Reset for backward looking loop
+ /* Reset for backward looking loop */
boost_score = 0.0;
mv_ratio_accumulator = 0.0;
decay_accumulator = 1.0;
@@ -1612,26 +1670,27 @@ static int calc_arf_boost(
mv_in_out_accumulator = 0.0;
abs_mv_in_out_accumulator = 0.0;
- // Search forward from the proposed arf/next gf position
+ /* Search forward from the proposed arf/next gf position */
for ( i = -1; i >= -b_frames; i-- )
{
if ( read_frame_stats(cpi, &this_frame, (i+offset)) == EOF )
break;
- // Update the motion related elements to the boost calculation
+ /* Update the motion related elements to the boost calculation */
accumulate_frame_motion_stats( cpi, &this_frame,
&this_frame_mv_in_out, &mv_in_out_accumulator,
&abs_mv_in_out_accumulator, &mv_ratio_accumulator );
- // Calculate the baseline boost number for this frame
+ /* Calculate the baseline boost number for this frame */
r = calc_frame_boost( cpi, &this_frame, this_frame_mv_in_out );
- // We want to discount the the flash frame itself and the recovery
- // frame that follows as both will have poor scores.
+ /* We want to discount the flash frame itself and the recovery
+ * frame that follows as both will have poor scores.
+ */
flash_detected = detect_flash(cpi, (i+offset)) ||
detect_flash(cpi, (i+offset+1));
- // Cumulative effect of prediction quality decay
+ /* Cumulative effect of prediction quality decay */
if ( !flash_detected )
{
decay_accumulator =
@@ -1643,7 +1702,7 @@ static int calc_arf_boost(
boost_score += (decay_accumulator * r);
- // Break out conditions.
+ /* Break out conditions. */
if ( (!flash_detected) &&
((mv_ratio_accumulator > 100.0) ||
(abs_mv_in_out_accumulator > 3.0) ||
@@ -1658,7 +1717,7 @@ static int calc_arf_boost(
}
#endif
-// Analyse and define a gf/arf group .
+/* Analyse and define a gf/arf group . */
static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
{
FIRSTPASS_STATS next_frame;
@@ -1674,14 +1733,14 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
double mv_ratio_accumulator = 0.0;
double decay_accumulator = 1.0;
- double loop_decay_rate = 1.00; // Starting decay rate
+ double loop_decay_rate = 1.00; /* Starting decay rate */
double this_frame_mv_in_out = 0.0;
double mv_in_out_accumulator = 0.0;
double abs_mv_in_out_accumulator = 0.0;
double mod_err_per_mb_accumulator = 0.0;
- int max_bits = frame_max_bits(cpi); // Max for a single frame
+ int max_bits = frame_max_bits(cpi); /* Max for a single frame */
unsigned int allow_alt_ref =
cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames;
@@ -1694,37 +1753,40 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->twopass.gf_group_bits = 0;
cpi->twopass.gf_decay_rate = 0;
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state();
start_pos = cpi->twopass.stats_in;
- vpx_memset(&next_frame, 0, sizeof(next_frame)); // assure clean
+ vpx_memset(&next_frame, 0, sizeof(next_frame)); /* assure clean */
- // Load stats for the current frame.
+ /* Load stats for the current frame. */
mod_frame_err = calculate_modified_err(cpi, this_frame);
- // Note the error of the frame at the start of the group (this will be
- // the GF frame error if we code a normal gf
+ /* Note the error of the frame at the start of the group (this will be
+ * the GF frame error if we code a normal gf
+ */
gf_first_frame_err = mod_frame_err;
- // Special treatment if the current frame is a key frame (which is also
- // a gf). If it is then its error score (and hence bit allocation) need
- // to be subtracted out from the calculation for the GF group
+ /* Special treatment if the current frame is a key frame (which is also
+ * a gf). If it is then its error score (and hence bit allocation) need
+ * to be subtracted out from the calculation for the GF group
+ */
if (cpi->common.frame_type == KEY_FRAME)
gf_group_err -= gf_first_frame_err;
- // Scan forward to try and work out how many frames the next gf group
- // should contain and what level of boost is appropriate for the GF
- // or ARF that will be coded with the group
+ /* Scan forward to try and work out how many frames the next gf group
+ * should contain and what level of boost is appropriate for the GF
+ * or ARF that will be coded with the group
+ */
i = 0;
while (((i < cpi->twopass.static_scene_max_gf_interval) ||
((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL)) &&
(i < cpi->twopass.frames_to_key))
{
- i++; // Increment the loop counter
+ i++;
- // Accumulate error score of frames in this gf group
+ /* Accumulate error score of frames in this gf group */
mod_frame_err = calculate_modified_err(cpi, this_frame);
gf_group_err += mod_frame_err;
@@ -1735,19 +1797,20 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
if (EOF == input_stats(cpi, &next_frame))
break;
- // Test for the case where there is a brief flash but the prediction
- // quality back to an earlier frame is then restored.
+ /* Test for the case where there is a brief flash but the prediction
+ * quality back to an earlier frame is then restored.
+ */
flash_detected = detect_flash(cpi, 0);
- // Update the motion related elements to the boost calculation
+ /* Update the motion related elements to the boost calculation */
accumulate_frame_motion_stats( cpi, &next_frame,
&this_frame_mv_in_out, &mv_in_out_accumulator,
&abs_mv_in_out_accumulator, &mv_ratio_accumulator );
- // Calculate a baseline boost number for this frame
+ /* Calculate a baseline boost number for this frame */
r = calc_frame_boost( cpi, &next_frame, this_frame_mv_in_out );
- // Cumulative effect of prediction quality decay
+ /* Cumulative effect of prediction quality decay */
if ( !flash_detected )
{
loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
@@ -1757,8 +1820,9 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
}
boost_score += (decay_accumulator * r);
- // Break clause to detect very still sections after motion
- // For example a staic image after a fade or other transition.
+ /* Break clause to detect very still sections after motion
+ * For example a static image after a fade or other transition.
+ */
if ( detect_transition_to_still( cpi, i, 5,
loop_decay_rate,
decay_accumulator ) )
@@ -1768,14 +1832,14 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
break;
}
- // Break out conditions.
+ /* Break out conditions. */
if (
- // Break at cpi->max_gf_interval unless almost totally static
+ /* Break at cpi->max_gf_interval unless almost totally static */
(i >= cpi->max_gf_interval && (decay_accumulator < 0.995)) ||
(
- // Dont break out with a very short interval
+ /* Dont break out with a very short interval */
(i > MIN_GF_INTERVAL) &&
- // Dont break out very close to a key frame
+ /* Dont break out very close to a key frame */
((cpi->twopass.frames_to_key - i) >= MIN_GF_INTERVAL) &&
((boost_score > 20.0) || (next_frame.pcnt_inter < 0.75)) &&
(!flash_detected) &&
@@ -1797,12 +1861,12 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->twopass.gf_decay_rate =
(i > 0) ? (int)(100.0 * (1.0 - decay_accumulator)) / i : 0;
- // When using CBR apply additional buffer related upper limits
+ /* When using CBR apply additional buffer related upper limits */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
{
double max_boost;
- // For cbr apply buffer related limits
+ /* For cbr apply buffer related limits */
if (cpi->drop_frames_allowed)
{
int df_buffer_level = cpi->oxcf.drop_frames_water_mark *
@@ -1826,7 +1890,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
boost_score = max_boost;
}
- // Dont allow conventional gf too near the next kf
+ /* Dont allow conventional gf too near the next kf */
if ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL)
{
while (i < cpi->twopass.frames_to_key)
@@ -1847,14 +1911,14 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->gfu_boost = (int)(boost_score * 100.0) >> 4;
#if NEW_BOOST
- // Alterrnative boost calculation for alt ref
+ /* Alternative boost calculation for alt ref */
alt_boost = calc_arf_boost( cpi, 0, (i-1), (i-1), &f_boost, &b_boost );
#endif
- // Should we use the alternate refernce frame
+ /* Should we use the alternate reference frame */
if (allow_alt_ref &&
(i >= MIN_GF_INTERVAL) &&
- // dont use ARF very near next kf
+ /* dont use ARF very near next kf */
(i <= (cpi->twopass.frames_to_key - MIN_GF_INTERVAL)) &&
#if NEW_BOOST
((next_frame.pcnt_inter > 0.75) ||
@@ -1884,7 +1948,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->gfu_boost = alt_boost;
#endif
- // Estimate the bits to be allocated to the group as a whole
+ /* Estimate the bits to be allocated to the group as a whole */
if ((cpi->twopass.kf_group_bits > 0) &&
(cpi->twopass.kf_group_error_left > 0))
{
@@ -1894,7 +1958,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
else
group_bits = 0;
- // Boost for arf frame
+ /* Boost for arf frame */
#if NEW_BOOST
Boost = (alt_boost * GFQ_ADJUSTMENT) / 100;
#else
@@ -1902,7 +1966,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
#endif
Boost += (i * 50);
- // Set max and minimum boost and hence minimum allocation
+ /* Set max and minimum boost and hence minimum allocation */
if (Boost > ((cpi->baseline_gf_interval + 1) * 200))
Boost = ((cpi->baseline_gf_interval + 1) * 200);
else if (Boost < 125)
@@ -1910,24 +1974,27 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
allocation_chunks = (i * 100) + Boost;
- // Normalize Altboost and allocations chunck down to prevent overflow
+ /* Normalize Altboost and allocation chunks down to prevent overflow */
while (Boost > 1000)
{
Boost /= 2;
allocation_chunks /= 2;
}
- // Calculate the number of bits to be spent on the arf based on the
- // boost number
+ /* Calculate the number of bits to be spent on the arf based on the
+ * boost number
+ */
arf_frame_bits = (int)((double)Boost * (group_bits /
(double)allocation_chunks));
- // Estimate if there are enough bits available to make worthwhile use
- // of an arf.
+ /* Estimate if there are enough bits available to make worthwhile use
+ * of an arf.
+ */
tmp_q = estimate_q(cpi, mod_frame_err, (int)arf_frame_bits);
- // Only use an arf if it is likely we will be able to code
- // it at a lower Q than the surrounding frames.
+ /* Only use an arf if it is likely we will be able to code
+ * it at a lower Q than the surrounding frames.
+ */
if (tmp_q < cpi->worst_quality)
{
int half_gf_int;
@@ -1937,42 +2004,46 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->source_alt_ref_pending = 1;
- // For alt ref frames the error score for the end frame of the
- // group (the alt ref frame) should not contribute to the group
- // total and hence the number of bit allocated to the group.
- // Rather it forms part of the next group (it is the GF at the
- // start of the next group)
- // gf_group_err -= mod_frame_err;
-
- // For alt ref frames alt ref frame is technically part of the
- // GF frame for the next group but we always base the error
- // calculation and bit allocation on the current group of frames.
-
- // Set the interval till the next gf or arf.
- // For ARFs this is the number of frames to be coded before the
- // future frame that is coded as an ARF.
- // The future frame itself is part of the next group
+ /*
+ * For alt ref frames the error score for the end frame of the
+ * group (the alt ref frame) should not contribute to the group
+ * total and hence the number of bit allocated to the group.
+ * Rather it forms part of the next group (it is the GF at the
+ * start of the next group)
+ * gf_group_err -= mod_frame_err;
+ *
+ * For alt ref frames alt ref frame is technically part of the
+ * GF frame for the next group but we always base the error
+ * calculation and bit allocation on the current group of frames.
+ *
+ * Set the interval till the next gf or arf.
+ * For ARFs this is the number of frames to be coded before the
+ * future frame that is coded as an ARF.
+ * The future frame itself is part of the next group
+ */
cpi->baseline_gf_interval = i;
- // Define the arnr filter width for this group of frames:
- // We only filter frames that lie within a distance of half
- // the GF interval from the ARF frame. We also have to trap
- // cases where the filter extends beyond the end of clip.
- // Note: this_frame->frame has been updated in the loop
- // so it now points at the ARF frame.
+ /*
+ * Define the arnr filter width for this group of frames:
+ * We only filter frames that lie within a distance of half
+ * the GF interval from the ARF frame. We also have to trap
+ * cases where the filter extends beyond the end of clip.
+ * Note: this_frame->frame has been updated in the loop
+ * so it now points at the ARF frame.
+ */
half_gf_int = cpi->baseline_gf_interval >> 1;
frames_after_arf = cpi->twopass.total_stats.count -
this_frame->frame - 1;
switch (cpi->oxcf.arnr_type)
{
- case 1: // Backward filter
+ case 1: /* Backward filter */
frames_fwd = 0;
if (frames_bwd > half_gf_int)
frames_bwd = half_gf_int;
break;
- case 2: // Forward filter
+ case 2: /* Forward filter */
if (frames_fwd > half_gf_int)
frames_fwd = half_gf_int;
if (frames_fwd > frames_after_arf)
@@ -1980,7 +2051,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
frames_bwd = 0;
break;
- case 3: // Centered filter
+ case 3: /* Centered filter */
default:
frames_fwd >>= 1;
if (frames_fwd > frames_after_arf)
@@ -1990,8 +2061,9 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
frames_bwd = frames_fwd;
- // For even length filter there is one more frame backward
- // than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
+ /* For even length filter there is one more frame backward
+ * than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
+ */
if (frames_bwd < half_gf_int)
frames_bwd += (cpi->oxcf.arnr_max_frames+1) & 0x1;
break;
@@ -2011,12 +2083,14 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->baseline_gf_interval = i;
}
- // Now decide how many bits should be allocated to the GF group as a
- // proportion of those remaining in the kf group.
- // The final key frame group in the clip is treated as a special case
- // where cpi->twopass.kf_group_bits is tied to cpi->twopass.bits_left.
- // This is also important for short clips where there may only be one
- // key frame.
+ /*
+ * Now decide how many bits should be allocated to the GF group as a
+ * proportion of those remaining in the kf group.
+ * The final key frame group in the clip is treated as a special case
+ * where cpi->twopass.kf_group_bits is tied to cpi->twopass.bits_left.
+ * This is also important for short clips where there may only be one
+ * key frame.
+ */
if (cpi->twopass.frames_to_key >= (int)(cpi->twopass.total_stats.count -
cpi->common.current_video_frame))
{
@@ -2024,7 +2098,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
(cpi->twopass.bits_left > 0) ? cpi->twopass.bits_left : 0;
}
- // Calculate the bits to be allocated to the group as a whole
+ /* Calculate the bits to be allocated to the group as a whole */
if ((cpi->twopass.kf_group_bits > 0) &&
(cpi->twopass.kf_group_error_left > 0))
{
@@ -2041,25 +2115,26 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
: (cpi->twopass.gf_group_bits > cpi->twopass.kf_group_bits)
? cpi->twopass.kf_group_bits : cpi->twopass.gf_group_bits;
- // Clip cpi->twopass.gf_group_bits based on user supplied data rate
- // variability limit (cpi->oxcf.two_pass_vbrmax_section)
+ /* Clip cpi->twopass.gf_group_bits based on user supplied data rate
+ * variability limit (cpi->oxcf.two_pass_vbrmax_section)
+ */
if (cpi->twopass.gf_group_bits > max_bits * cpi->baseline_gf_interval)
cpi->twopass.gf_group_bits = max_bits * cpi->baseline_gf_interval;
- // Reset the file position
+ /* Reset the file position */
reset_fpf_position(cpi, start_pos);
- // Update the record of error used so far (only done once per gf group)
+ /* Update the record of error used so far (only done once per gf group) */
cpi->twopass.modified_error_used += gf_group_err;
- // Assign bits to the arf or gf.
+ /* Assign bits to the arf or gf. */
for (i = 0; i <= (cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME); i++) {
int Boost;
int allocation_chunks;
int Q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
int gf_bits;
- // For ARF frames
+ /* For ARF frames */
if (cpi->source_alt_ref_pending && i == 0)
{
#if NEW_BOOST
@@ -2069,7 +2144,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
#endif
Boost += (cpi->baseline_gf_interval * 50);
- // Set max and minimum boost and hence minimum allocation
+ /* Set max and minimum boost and hence minimum allocation */
if (Boost > ((cpi->baseline_gf_interval + 1) * 200))
Boost = ((cpi->baseline_gf_interval + 1) * 200);
else if (Boost < 125)
@@ -2078,13 +2153,13 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
allocation_chunks =
((cpi->baseline_gf_interval + 1) * 100) + Boost;
}
- // Else for standard golden frames
+ /* Else for standard golden frames */
else
{
- // boost based on inter / intra ratio of subsequent frames
+ /* boost based on inter / intra ratio of subsequent frames */
Boost = (cpi->gfu_boost * GFQ_ADJUSTMENT) / 100;
- // Set max and minimum boost and hence minimum allocation
+ /* Set max and minimum boost and hence minimum allocation */
if (Boost > (cpi->baseline_gf_interval * 150))
Boost = (cpi->baseline_gf_interval * 150);
else if (Boost < 125)
@@ -2094,22 +2169,24 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
(cpi->baseline_gf_interval * 100) + (Boost - 100);
}
- // Normalize Altboost and allocations chunck down to prevent overflow
+ /* Normalize Altboost and allocation chunks down to prevent overflow */
while (Boost > 1000)
{
Boost /= 2;
allocation_chunks /= 2;
}
- // Calculate the number of bits to be spent on the gf or arf based on
- // the boost number
+ /* Calculate the number of bits to be spent on the gf or arf based on
+ * the boost number
+ */
gf_bits = (int)((double)Boost *
(cpi->twopass.gf_group_bits /
(double)allocation_chunks));
- // If the frame that is to be boosted is simpler than the average for
- // the gf/arf group then use an alternative calculation
- // based on the error score of the frame itself
+ /* If the frame that is to be boosted is simpler than the average for
+ * the gf/arf group then use an alternative calculation
+ * based on the error score of the frame itself
+ */
if (mod_frame_err < gf_group_err / (double)cpi->baseline_gf_interval)
{
double alt_gf_grp_bits;
@@ -2128,9 +2205,10 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
gf_bits = alt_gf_bits;
}
}
- // Else if it is harder than other frames in the group make sure it at
- // least receives an allocation in keeping with its relative error
- // score, otherwise it may be worse off than an "un-boosted" frame
+ /* Else if it is harder than other frames in the group make sure it at
+ * least receives an allocation in keeping with its relative error
+ * score, otherwise it may be worse off than an "un-boosted" frame
+ */
else
{
int alt_gf_bits =
@@ -2144,18 +2222,19 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
}
}
- // Apply an additional limit for CBR
+ /* Apply an additional limit for CBR */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
{
if (cpi->twopass.gf_bits > (cpi->buffer_level >> 1))
cpi->twopass.gf_bits = cpi->buffer_level >> 1;
}
- // Dont allow a negative value for gf_bits
+ /* Dont allow a negative value for gf_bits */
if (gf_bits < 0)
gf_bits = 0;
- gf_bits += cpi->min_frame_bandwidth; // Add in minimum for a frame
+ /* Add in minimum for a frame */
+ gf_bits += cpi->min_frame_bandwidth;
if (i == 0)
{
@@ -2163,20 +2242,24 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
}
if (i == 1 || (!cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME)))
{
- cpi->per_frame_bandwidth = gf_bits; // Per frame bit target for this frame
+ /* Per frame bit target for this frame */
+ cpi->per_frame_bandwidth = gf_bits;
}
}
{
- // Adjust KF group bits and error remainin
+ /* Adjust KF group bits and error remaining */
cpi->twopass.kf_group_error_left -= gf_group_err;
cpi->twopass.kf_group_bits -= cpi->twopass.gf_group_bits;
if (cpi->twopass.kf_group_bits < 0)
cpi->twopass.kf_group_bits = 0;
- // Note the error score left in the remaining frames of the group.
- // For normal GFs we want to remove the error score for the first frame of the group (except in Key frame case where this has already happened)
+ /* Note the error score left in the remaining frames of the group.
+ * For normal GFs we want to remove the error score for the first
+ * frame of the group (except in Key frame case where this has
+ * already happened)
+ */
if (!cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME)
cpi->twopass.gf_group_error_left = gf_group_err - gf_first_frame_err;
else
@@ -2187,9 +2270,10 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
if (cpi->twopass.gf_group_bits < 0)
cpi->twopass.gf_group_bits = 0;
- // This condition could fail if there are two kfs very close together
- // despite (MIN_GF_INTERVAL) and would cause a devide by 0 in the
- // calculation of cpi->twopass.alt_extra_bits.
+ /* This condition could fail if there are two kfs very close together
+ * despite (MIN_GF_INTERVAL) and would cause a divide by 0 in the
+ * calculation of cpi->twopass.alt_extra_bits.
+ */
if ( cpi->baseline_gf_interval >= 3 )
{
#if NEW_BOOST
@@ -2218,7 +2302,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->twopass.alt_extra_bits = 0;
}
- // Adjustments based on a measure of complexity of the section
+ /* Adjustments based on a measure of complexity of the section */
if (cpi->common.frame_type != KEY_FRAME)
{
FIRSTPASS_STATS sectionstats;
@@ -2240,42 +2324,40 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
Ratio = sectionstats.intra_error / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
- //if( (Ratio > 11) ) //&& (sectionstats.pcnt_second_ref < .20) )
- //{
cpi->twopass.section_max_qfactor = 1.0 - ((Ratio - 10.0) * 0.025);
if (cpi->twopass.section_max_qfactor < 0.80)
cpi->twopass.section_max_qfactor = 0.80;
- //}
- //else
- // cpi->twopass.section_max_qfactor = 1.0;
-
reset_fpf_position(cpi, start_pos);
}
}
-// Allocate bits to a normal frame that is neither a gf an arf or a key frame.
+/* Allocate bits to a normal frame that is neither a gf, an arf, nor a key frame. */
static void assign_std_frame_bits(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
{
- int target_frame_size; // gf_group_error_left
+ int target_frame_size;
double modified_err;
- double err_fraction; // What portion of the remaining GF group error is used by this frame
+ double err_fraction;
- int max_bits = frame_max_bits(cpi); // Max for a single frame
+ int max_bits = frame_max_bits(cpi); /* Max for a single frame */
- // Calculate modified prediction error used in bit allocation
+ /* Calculate modified prediction error used in bit allocation */
modified_err = calculate_modified_err(cpi, this_frame);
+ /* What portion of the remaining GF group error is used by this frame */
if (cpi->twopass.gf_group_error_left > 0)
- err_fraction = modified_err / cpi->twopass.gf_group_error_left; // What portion of the remaining GF group error is used by this frame
+ err_fraction = modified_err / cpi->twopass.gf_group_error_left;
else
err_fraction = 0.0;
- target_frame_size = (int)((double)cpi->twopass.gf_group_bits * err_fraction); // How many of those bits available for allocation should we give it?
+ /* How many of those bits available for allocation should we give it? */
+ target_frame_size = (int)((double)cpi->twopass.gf_group_bits * err_fraction);
- // Clip to target size to 0 - max_bits (or cpi->twopass.gf_group_bits) at the top end.
+ /* Clip target size to 0 - max_bits (or cpi->twopass.gf_group_bits)
+ * at the top end.
+ */
if (target_frame_size < 0)
target_frame_size = 0;
else
@@ -2287,22 +2369,25 @@ static void assign_std_frame_bits(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
target_frame_size = cpi->twopass.gf_group_bits;
}
- cpi->twopass.gf_group_error_left -= modified_err; // Adjust error remaining
- cpi->twopass.gf_group_bits -= target_frame_size; // Adjust bits remaining
+ /* Adjust error and bits remaining */
+ cpi->twopass.gf_group_error_left -= modified_err;
+ cpi->twopass.gf_group_bits -= target_frame_size;
if (cpi->twopass.gf_group_bits < 0)
cpi->twopass.gf_group_bits = 0;
- target_frame_size += cpi->min_frame_bandwidth; // Add in the minimum number of bits that is set aside for every frame.
+ /* Add in the minimum number of bits that is set aside for every frame. */
+ target_frame_size += cpi->min_frame_bandwidth;
- // Every other frame gets a few extra bits
+ /* Every other frame gets a few extra bits */
if ( (cpi->common.frames_since_golden & 0x01) &&
(cpi->frames_till_gf_update_due > 0) )
{
target_frame_size += cpi->twopass.alt_extra_bits;
}
- cpi->per_frame_bandwidth = target_frame_size; // Per frame bit target for this frame
+ /* Per frame bit target for this frame */
+ cpi->per_frame_bandwidth = target_frame_size;
}
void vp8_second_pass(VP8_COMP *cpi)
@@ -2331,16 +2416,20 @@ void vp8_second_pass(VP8_COMP *cpi)
this_frame_intra_error = this_frame.intra_error;
this_frame_coded_error = this_frame.coded_error;
- // keyframe and section processing !
+ /* keyframe and section processing ! */
if (cpi->twopass.frames_to_key == 0)
{
- // Define next KF group and assign bits to it
+ /* Define next KF group and assign bits to it */
vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
find_next_key_frame(cpi, &this_frame_copy);
- // Special case: Error error_resilient_mode mode does not make much sense for two pass but with its current meaning but this code is designed to stop
- // outlandish behaviour if someone does set it when using two pass. It effectively disables GF groups.
- // This is temporary code till we decide what should really happen in this case.
+ /* Special case: error_resilient_mode does not make much sense in
+ * two-pass mode given its current meaning, but this code is
+ * designed to stop outlandish behaviour if someone does set it when
+ * using two pass. It effectively disables GF groups. This is
+ * temporary code until we decide what should really happen in this
+ * case.
+ */
if (cpi->oxcf.error_resilient_mode)
{
cpi->twopass.gf_group_bits = cpi->twopass.kf_group_bits;
@@ -2352,19 +2441,25 @@ void vp8_second_pass(VP8_COMP *cpi)
}
- // Is this a GF / ARF (Note that a KF is always also a GF)
+ /* Is this a GF / ARF (Note that a KF is always also a GF) */
if (cpi->frames_till_gf_update_due == 0)
{
- // Define next gf group and assign bits to it
+ /* Define next gf group and assign bits to it */
vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
define_gf_group(cpi, &this_frame_copy);
- // If we are going to code an altref frame at the end of the group and the current frame is not a key frame....
- // If the previous group used an arf this frame has already benefited from that arf boost and it should not be given extra bits
- // If the previous group was NOT coded using arf we may want to apply some boost to this GF as well
+ /* If we are going to code an altref frame at the end of the group
+ * and the current frame is not a key frame: if the previous
+ * group used an arf, this frame has already benefited from that arf
+ * boost and it should not be given extra bits; if the previous
+ * group was NOT coded using an arf we may want to apply some boost
+ * to this GF as well
+ */
if (cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME))
{
- // Assign a standard frames worth of bits from those allocated to the GF group
+ /* Assign a standard frames worth of bits from those allocated
+ * to the GF group
+ */
int bak = cpi->per_frame_bandwidth;
vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
assign_std_frame_bits(cpi, &this_frame_copy);
@@ -2372,32 +2467,36 @@ void vp8_second_pass(VP8_COMP *cpi)
}
}
- // Otherwise this is an ordinary frame
+ /* Otherwise this is an ordinary frame */
else
{
- // Special case: Error error_resilient_mode mode does not make much sense for two pass but with its current meaning but this code is designed to stop
- // outlandish behaviour if someone does set it when using two pass. It effectively disables GF groups.
- // This is temporary code till we decide what should really happen in this case.
+            /* Special case: error_resilient_mode does not make much
+             * sense for two pass with its current meaning, but this code is
+ * designed to stop outlandish behaviour if someone does set it
+ * when using two pass. It effectively disables GF groups. This is
+ * temporary code till we decide what should really happen in this
+ * case.
+ */
if (cpi->oxcf.error_resilient_mode)
{
cpi->frames_till_gf_update_due = cpi->twopass.frames_to_key;
if (cpi->common.frame_type != KEY_FRAME)
{
- // Assign bits from those allocated to the GF group
+ /* Assign bits from those allocated to the GF group */
vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
assign_std_frame_bits(cpi, &this_frame_copy);
}
}
else
{
- // Assign bits from those allocated to the GF group
+ /* Assign bits from those allocated to the GF group */
vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
assign_std_frame_bits(cpi, &this_frame_copy);
}
}
- // Keep a globally available copy of this and the next frame's iiratio.
+ /* Keep a globally available copy of this and the next frame's iiratio. */
cpi->twopass.this_iiratio = this_frame_intra_error /
DOUBLE_DIVIDE_CHECK(this_frame_coded_error);
{
@@ -2409,22 +2508,22 @@ void vp8_second_pass(VP8_COMP *cpi)
}
}
- // Set nominal per second bandwidth for this frame
+ /* Set nominal per second bandwidth for this frame */
cpi->target_bandwidth = cpi->per_frame_bandwidth * cpi->output_frame_rate;
if (cpi->target_bandwidth < 0)
cpi->target_bandwidth = 0;
- // Account for mv, mode and other overheads.
+ /* Account for mv, mode and other overheads. */
overhead_bits = estimate_modemvcost(
cpi, &cpi->twopass.total_left_stats );
- // Special case code for first frame.
+ /* Special case code for first frame. */
if (cpi->common.current_video_frame == 0)
{
cpi->twopass.est_max_qcorrection_factor = 1.0;
- // Set a cq_level in constrained quality mode.
+ /* Set a cq_level in constrained quality mode. */
if ( cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY )
{
int est_cq;
@@ -2440,7 +2539,7 @@ void vp8_second_pass(VP8_COMP *cpi)
cpi->cq_target_quality = est_cq;
}
- // guess at maxq needed in 2nd pass
+ /* guess at maxq needed in 2nd pass */
cpi->twopass.maxq_max_limit = cpi->worst_quality;
cpi->twopass.maxq_min_limit = cpi->best_quality;
@@ -2450,11 +2549,12 @@ void vp8_second_pass(VP8_COMP *cpi)
(int)(cpi->twopass.bits_left / frames_left),
overhead_bits );
- // Limit the maxq value returned subsequently.
- // This increases the risk of overspend or underspend if the initial
- // estimate for the clip is bad, but helps prevent excessive
- // variation in Q, especially near the end of a clip
- // where for example a small overspend may cause Q to crash
+ /* Limit the maxq value returned subsequently.
+ * This increases the risk of overspend or underspend if the initial
+ * estimate for the clip is bad, but helps prevent excessive
+ * variation in Q, especially near the end of a clip
+ * where for example a small overspend may cause Q to crash
+ */
cpi->twopass.maxq_max_limit = ((tmp_q + 32) < cpi->worst_quality)
? (tmp_q + 32) : cpi->worst_quality;
cpi->twopass.maxq_min_limit = ((tmp_q - 32) > cpi->best_quality)
@@ -2464,10 +2564,11 @@ void vp8_second_pass(VP8_COMP *cpi)
cpi->ni_av_qi = tmp_q;
}
- // The last few frames of a clip almost always have to few or too many
- // bits and for the sake of over exact rate control we dont want to make
- // radical adjustments to the allowed quantizer range just to use up a
- // few surplus bits or get beneath the target rate.
+    /* The last few frames of a clip almost always have too few or too many
+ * bits and for the sake of over exact rate control we dont want to make
+ * radical adjustments to the allowed quantizer range just to use up a
+ * few surplus bits or get beneath the target rate.
+ */
else if ( (cpi->common.current_video_frame <
(((unsigned int)cpi->twopass.total_stats.count * 255)>>8)) &&
((cpi->common.current_video_frame + cpi->baseline_gf_interval) <
@@ -2482,7 +2583,7 @@ void vp8_second_pass(VP8_COMP *cpi)
(int)(cpi->twopass.bits_left / frames_left),
overhead_bits );
- // Move active_worst_quality but in a damped way
+ /* Move active_worst_quality but in a damped way */
if (tmp_q > cpi->active_worst_quality)
cpi->active_worst_quality ++;
else if (tmp_q < cpi->active_worst_quality)
@@ -2494,7 +2595,7 @@ void vp8_second_pass(VP8_COMP *cpi)
cpi->twopass.frames_to_key --;
- // Update the total stats remaining sturcture
+    /* Update the total stats remaining structure */
subtract_stats(&cpi->twopass.total_left_stats, &this_frame );
}
@@ -2503,8 +2604,9 @@ static int test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame, FIRSTP
{
int is_viable_kf = 0;
- // Does the frame satisfy the primary criteria of a key frame
- // If so, then examine how well it predicts subsequent frames
+ /* Does the frame satisfy the primary criteria of a key frame
+ * If so, then examine how well it predicts subsequent frames
+ */
if ((this_frame->pcnt_second_ref < 0.10) &&
(next_frame->pcnt_second_ref < 0.10) &&
((this_frame->pcnt_inter < 0.05) ||
@@ -2531,10 +2633,10 @@ static int test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame, FIRSTP
vpx_memcpy(&local_next_frame, next_frame, sizeof(*next_frame));
- // Note the starting file position so we can reset to it
+ /* Note the starting file position so we can reset to it */
start_pos = cpi->twopass.stats_in;
- // Examine how well the key frame predicts subsequent frames
+ /* Examine how well the key frame predicts subsequent frames */
for (i = 0 ; i < 16; i++)
{
next_iiratio = (IIKFACTOR1 * local_next_frame.intra_error / DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error)) ;
@@ -2542,18 +2644,16 @@ static int test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame, FIRSTP
if (next_iiratio > RMAX)
next_iiratio = RMAX;
- // Cumulative effect of decay in prediction quality
+ /* Cumulative effect of decay in prediction quality */
if (local_next_frame.pcnt_inter > 0.85)
decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter;
else
decay_accumulator = decay_accumulator * ((0.85 + local_next_frame.pcnt_inter) / 2.0);
- //decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter;
-
- // Keep a running total
+ /* Keep a running total */
boost_score += (decay_accumulator * next_iiratio);
- // Test various breakout clauses
+ /* Test various breakout clauses */
if ((local_next_frame.pcnt_inter < 0.05) ||
(next_iiratio < 1.5) ||
(((local_next_frame.pcnt_inter -
@@ -2568,17 +2668,19 @@ static int test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame, FIRSTP
old_boost_score = boost_score;
- // Get the next frame details
+ /* Get the next frame details */
if (EOF == input_stats(cpi, &local_next_frame))
break;
}
- // If there is tolerable prediction for at least the next 3 frames then break out else discard this pottential key frame and move on
+ /* If there is tolerable prediction for at least the next 3 frames
+     * then break out else discard this potential key frame and move on
+ */
if (boost_score > 5.0 && (i > 3))
is_viable_kf = 1;
else
{
- // Reset the file position
+ /* Reset the file position */
reset_fpf_position(cpi, start_pos);
is_viable_kf = 0;
@@ -2606,65 +2708,71 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
double kf_group_coded_err = 0.0;
double recent_loop_decay[8] = {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0};
- vpx_memset(&next_frame, 0, sizeof(next_frame)); // assure clean
+ vpx_memset(&next_frame, 0, sizeof(next_frame));
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state();
start_position = cpi->twopass.stats_in;
cpi->common.frame_type = KEY_FRAME;
- // is this a forced key frame by interval
+ /* is this a forced key frame by interval */
cpi->this_key_frame_forced = cpi->next_key_frame_forced;
- // Clear the alt ref active flag as this can never be active on a key frame
+ /* Clear the alt ref active flag as this can never be active on a key
+ * frame
+ */
cpi->source_alt_ref_active = 0;
- // Kf is always a gf so clear frames till next gf counter
+ /* Kf is always a gf so clear frames till next gf counter */
cpi->frames_till_gf_update_due = 0;
cpi->twopass.frames_to_key = 1;
- // Take a copy of the initial frame details
+ /* Take a copy of the initial frame details */
vpx_memcpy(&first_frame, this_frame, sizeof(*this_frame));
- cpi->twopass.kf_group_bits = 0; // Total bits avaialable to kf group
- cpi->twopass.kf_group_error_left = 0; // Group modified error score.
+ cpi->twopass.kf_group_bits = 0;
+ cpi->twopass.kf_group_error_left = 0;
kf_mod_err = calculate_modified_err(cpi, this_frame);
- // find the next keyframe
+ /* find the next keyframe */
i = 0;
while (cpi->twopass.stats_in < cpi->twopass.stats_in_end)
{
- // Accumulate kf group error
+ /* Accumulate kf group error */
kf_group_err += calculate_modified_err(cpi, this_frame);
- // These figures keep intra and coded error counts for all frames including key frames in the group.
- // The effect of the key frame itself can be subtracted out using the first_frame data collected above
+ /* These figures keep intra and coded error counts for all frames
+ * including key frames in the group. The effect of the key frame
+ * itself can be subtracted out using the first_frame data
+ * collected above
+ */
kf_group_intra_err += this_frame->intra_error;
kf_group_coded_err += this_frame->coded_error;
- // load a the next frame's stats
+        /* load the next frame's stats */
vpx_memcpy(&last_frame, this_frame, sizeof(*this_frame));
input_stats(cpi, this_frame);
- // Provided that we are not at the end of the file...
+ /* Provided that we are not at the end of the file... */
if (cpi->oxcf.auto_key
&& lookup_next_frame_stats(cpi, &next_frame) != EOF)
{
- // Normal scene cut check
+ /* Normal scene cut check */
if ( ( i >= MIN_GF_INTERVAL ) &&
test_candidate_kf(cpi, &last_frame, this_frame, &next_frame) )
{
break;
}
- // How fast is prediction quality decaying
+ /* How fast is prediction quality decaying */
loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
- // We want to know something about the recent past... rather than
- // as used elsewhere where we are concened with decay in prediction
- // quality since the last GF or KF.
+ /* We want to know something about the recent past... rather than
+             * as used elsewhere where we are concerned with decay in prediction
+ * quality since the last GF or KF.
+ */
recent_loop_decay[i%8] = loop_decay_rate;
decay_accumulator = 1.0;
for (j = 0; j < 8; j++)
@@ -2672,8 +2780,9 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
decay_accumulator = decay_accumulator * recent_loop_decay[j];
}
- // Special check for transition or high motion followed by a
- // to a static scene.
+ /* Special check for transition or high motion followed by a
+ * static scene.
+ */
if ( detect_transition_to_still( cpi, i,
(cpi->key_frame_frequency-i),
loop_decay_rate,
@@ -2683,11 +2792,12 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
}
- // Step on to the next frame
+ /* Step on to the next frame */
cpi->twopass.frames_to_key ++;
- // If we don't have a real key frame within the next two
- // forcekeyframeevery intervals then break out of the loop.
+ /* If we don't have a real key frame within the next two
+ * forcekeyframeevery intervals then break out of the loop.
+ */
if (cpi->twopass.frames_to_key >= 2 *(int)cpi->key_frame_frequency)
break;
} else
@@ -2696,10 +2806,11 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
i++;
}
- // If there is a max kf interval set by the user we must obey it.
- // We already breakout of the loop above at 2x max.
- // This code centers the extra kf if the actual natural
- // interval is between 1x and 2x
+ /* If there is a max kf interval set by the user we must obey it.
+ * We already breakout of the loop above at 2x max.
+ * This code centers the extra kf if the actual natural
+ * interval is between 1x and 2x
+ */
if (cpi->oxcf.auto_key
&& cpi->twopass.frames_to_key > (int)cpi->key_frame_frequency )
{
@@ -2708,29 +2819,29 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->twopass.frames_to_key /= 2;
- // Copy first frame details
+ /* Copy first frame details */
vpx_memcpy(&tmp_frame, &first_frame, sizeof(first_frame));
- // Reset to the start of the group
+ /* Reset to the start of the group */
reset_fpf_position(cpi, start_position);
kf_group_err = 0;
kf_group_intra_err = 0;
kf_group_coded_err = 0;
- // Rescan to get the correct error data for the forced kf group
+ /* Rescan to get the correct error data for the forced kf group */
for( i = 0; i < cpi->twopass.frames_to_key; i++ )
{
- // Accumulate kf group errors
+ /* Accumulate kf group errors */
kf_group_err += calculate_modified_err(cpi, &tmp_frame);
kf_group_intra_err += tmp_frame.intra_error;
kf_group_coded_err += tmp_frame.coded_error;
- // Load a the next frame's stats
+        /* Load the next frame's stats */
input_stats(cpi, &tmp_frame);
}
- // Reset to the start of the group
+ /* Reset to the start of the group */
reset_fpf_position(cpi, current_pos);
cpi->next_key_frame_forced = 1;
@@ -2738,46 +2849,51 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
else
cpi->next_key_frame_forced = 0;
- // Special case for the last frame of the file
+ /* Special case for the last frame of the file */
if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end)
{
- // Accumulate kf group error
+ /* Accumulate kf group error */
kf_group_err += calculate_modified_err(cpi, this_frame);
- // These figures keep intra and coded error counts for all frames including key frames in the group.
- // The effect of the key frame itself can be subtracted out using the first_frame data collected above
+ /* These figures keep intra and coded error counts for all frames
+ * including key frames in the group. The effect of the key frame
+ * itself can be subtracted out using the first_frame data
+ * collected above
+ */
kf_group_intra_err += this_frame->intra_error;
kf_group_coded_err += this_frame->coded_error;
}
- // Calculate the number of bits that should be assigned to the kf group.
+ /* Calculate the number of bits that should be assigned to the kf group. */
if ((cpi->twopass.bits_left > 0) && (cpi->twopass.modified_error_left > 0.0))
{
- // Max for a single normal frame (not key frame)
+ /* Max for a single normal frame (not key frame) */
int max_bits = frame_max_bits(cpi);
- // Maximum bits for the kf group
+ /* Maximum bits for the kf group */
int64_t max_grp_bits;
- // Default allocation based on bits left and relative
- // complexity of the section
+ /* Default allocation based on bits left and relative
+ * complexity of the section
+ */
cpi->twopass.kf_group_bits = (int64_t)( cpi->twopass.bits_left *
( kf_group_err /
cpi->twopass.modified_error_left ));
- // Clip based on maximum per frame rate defined by the user.
+ /* Clip based on maximum per frame rate defined by the user. */
max_grp_bits = (int64_t)max_bits * (int64_t)cpi->twopass.frames_to_key;
if (cpi->twopass.kf_group_bits > max_grp_bits)
cpi->twopass.kf_group_bits = max_grp_bits;
- // Additional special case for CBR if buffer is getting full.
+ /* Additional special case for CBR if buffer is getting full. */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
{
int opt_buffer_lvl = cpi->oxcf.optimal_buffer_level;
int buffer_lvl = cpi->buffer_level;
- // If the buffer is near or above the optimal and this kf group is
- // not being allocated much then increase the allocation a bit.
+ /* If the buffer is near or above the optimal and this kf group is
+ * not being allocated much then increase the allocation a bit.
+ */
if (buffer_lvl >= opt_buffer_lvl)
{
int high_water_mark = (opt_buffer_lvl +
@@ -2785,11 +2901,11 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
int64_t av_group_bits;
- // Av bits per frame * number of frames
+ /* Av bits per frame * number of frames */
av_group_bits = (int64_t)cpi->av_per_frame_bandwidth *
(int64_t)cpi->twopass.frames_to_key;
- // We are at or above the maximum.
+ /* We are at or above the maximum. */
if (cpi->buffer_level >= high_water_mark)
{
int64_t min_group_bits;
@@ -2801,7 +2917,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
if (cpi->twopass.kf_group_bits < min_group_bits)
cpi->twopass.kf_group_bits = min_group_bits;
}
- // We are above optimal but below the maximum
+ /* We are above optimal but below the maximum */
else if (cpi->twopass.kf_group_bits < av_group_bits)
{
int64_t bits_below_av = av_group_bits -
@@ -2818,13 +2934,15 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
else
cpi->twopass.kf_group_bits = 0;
- // Reset the first pass file position
+ /* Reset the first pass file position */
reset_fpf_position(cpi, start_position);
- // determine how big to make this keyframe based on how well the subsequent frames use inter blocks
+ /* determine how big to make this keyframe based on how well the
+ * subsequent frames use inter blocks
+ */
decay_accumulator = 1.0;
boost_score = 0.0;
- loop_decay_rate = 1.00; // Starting decay rate
+ loop_decay_rate = 1.00; /* Starting decay rate */
for (i = 0 ; i < cpi->twopass.frames_to_key ; i++)
{
@@ -2843,7 +2961,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
if (r > RMAX)
r = RMAX;
- // How fast is prediction quality decaying
+ /* How fast is prediction quality decaying */
loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
decay_accumulator = decay_accumulator * loop_decay_rate;
@@ -2881,19 +2999,13 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
/ DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
Ratio = sectionstats.intra_error / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
- // if( (Ratio > 11) ) //&& (sectionstats.pcnt_second_ref < .20) )
- //{
cpi->twopass.section_max_qfactor = 1.0 - ((Ratio - 10.0) * 0.025);
if (cpi->twopass.section_max_qfactor < 0.80)
cpi->twopass.section_max_qfactor = 0.80;
-
- //}
- //else
- // cpi->twopass.section_max_qfactor = 1.0;
}
- // When using CBR apply additional buffer fullness related upper limits
+ /* When using CBR apply additional buffer fullness related upper limits */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
{
double max_boost;
@@ -2920,10 +3032,10 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
boost_score = max_boost;
}
- // Reset the first pass file position
+ /* Reset the first pass file position */
reset_fpf_position(cpi, start_position);
- // Work out how many bits to allocate for the key frame itself
+ /* Work out how many bits to allocate for the key frame itself */
if (1)
{
int kf_boost = boost_score;
@@ -2931,7 +3043,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
int Counter = cpi->twopass.frames_to_key;
int alt_kf_bits;
YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
- // Min boost based on kf interval
+ /* Min boost based on kf interval */
#if 0
while ((kf_boost < 48) && (Counter > 0))
@@ -2949,32 +3061,33 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
if (kf_boost > 48) kf_boost = 48;
}
- // bigger frame sizes need larger kf boosts, smaller frames smaller boosts...
+ /* bigger frame sizes need larger kf boosts, smaller frames smaller
+ * boosts...
+ */
if ((lst_yv12->y_width * lst_yv12->y_height) > (320 * 240))
kf_boost += 2 * (lst_yv12->y_width * lst_yv12->y_height) / (320 * 240);
else if ((lst_yv12->y_width * lst_yv12->y_height) < (320 * 240))
kf_boost -= 4 * (320 * 240) / (lst_yv12->y_width * lst_yv12->y_height);
- kf_boost = (int)((double)kf_boost * 100.0) >> 4; // Scale 16 to 100
-
- // Adjustment to boost based on recent average q
- //kf_boost = kf_boost * vp8_kf_boost_qadjustment[cpi->ni_av_qi] / 100;
-
- if (kf_boost < 250) // Min KF boost
+ /* Min KF boost */
+ kf_boost = (int)((double)kf_boost * 100.0) >> 4; /* Scale 16 to 100 */
+ if (kf_boost < 250)
kf_boost = 250;
- // We do three calculations for kf size.
- // The first is based on the error score for the whole kf group.
- // The second (optionaly) on the key frames own error if this is
- // smaller than the average for the group.
- // The final one insures that the frame receives at least the
- // allocation it would have received based on its own error score vs
- // the error score remaining
- // Special case if the sequence appears almost totaly static
- // as measured by the decay accumulator. In this case we want to
- // spend almost all of the bits on the key frame.
- // cpi->twopass.frames_to_key-1 because key frame itself is taken
- // care of by kf_boost.
+ /*
+ * We do three calculations for kf size.
+ * The first is based on the error score for the whole kf group.
+         * The second (optionally) on the key frames own error if this is
+         * smaller than the average for the group.
+         * The final one ensures that the frame receives at least the
+         * allocation it would have received based on its own error score vs
+         * the error score remaining
+         * Special case if the sequence appears almost totally static
+ * as measured by the decay accumulator. In this case we want to
+ * spend almost all of the bits on the key frame.
+ * cpi->twopass.frames_to_key-1 because key frame itself is taken
+ * care of by kf_boost.
+ */
if ( decay_accumulator >= 0.99 )
{
allocation_chunks =
@@ -2986,7 +3099,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
((cpi->twopass.frames_to_key - 1) * 100) + kf_boost;
}
- // Normalize Altboost and allocations chunck down to prevent overflow
+        /* Normalize Altboost and allocations chunk down to prevent overflow */
while (kf_boost > 1000)
{
kf_boost /= 2;
@@ -2995,20 +3108,21 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->twopass.kf_group_bits = (cpi->twopass.kf_group_bits < 0) ? 0 : cpi->twopass.kf_group_bits;
- // Calculate the number of bits to be spent on the key frame
+ /* Calculate the number of bits to be spent on the key frame */
cpi->twopass.kf_bits = (int)((double)kf_boost * ((double)cpi->twopass.kf_group_bits / (double)allocation_chunks));
- // Apply an additional limit for CBR
+ /* Apply an additional limit for CBR */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
{
if (cpi->twopass.kf_bits > ((3 * cpi->buffer_level) >> 2))
cpi->twopass.kf_bits = (3 * cpi->buffer_level) >> 2;
}
- // If the key frame is actually easier than the average for the
- // kf group (which does sometimes happen... eg a blank intro frame)
- // Then use an alternate calculation based on the kf error score
- // which should give a smaller key frame.
+ /* If the key frame is actually easier than the average for the
+ * kf group (which does sometimes happen... eg a blank intro frame)
+ * Then use an alternate calculation based on the kf error score
+ * which should give a smaller key frame.
+ */
if (kf_mod_err < kf_group_err / cpi->twopass.frames_to_key)
{
double alt_kf_grp_bits =
@@ -3024,9 +3138,10 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->twopass.kf_bits = alt_kf_bits;
}
}
- // Else if it is much harder than other frames in the group make sure
- // it at least receives an allocation in keeping with its relative
- // error score
+ /* Else if it is much harder than other frames in the group make sure
+ * it at least receives an allocation in keeping with its relative
+ * error score
+ */
else
{
alt_kf_bits =
@@ -3041,17 +3156,22 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
}
cpi->twopass.kf_group_bits -= cpi->twopass.kf_bits;
- cpi->twopass.kf_bits += cpi->min_frame_bandwidth; // Add in the minimum frame allowance
+ /* Add in the minimum frame allowance */
+ cpi->twopass.kf_bits += cpi->min_frame_bandwidth;
+
+    /* Per frame bit target for this frame */
+ cpi->per_frame_bandwidth = cpi->twopass.kf_bits;
- cpi->per_frame_bandwidth = cpi->twopass.kf_bits; // Peer frame bit target for this frame
- cpi->target_bandwidth = cpi->twopass.kf_bits * cpi->output_frame_rate; // Convert to a per second bitrate
+ /* Convert to a per second bitrate */
+ cpi->target_bandwidth = cpi->twopass.kf_bits * cpi->output_frame_rate;
}
- // Note the total error score of the kf group minus the key frame itself
+ /* Note the total error score of the kf group minus the key frame itself */
cpi->twopass.kf_group_error_left = (int)(kf_group_err - kf_mod_err);
- // Adjust the count of total modified error left.
- // The count of bits left is adjusted elsewhere based on real coded frame sizes
+ /* Adjust the count of total modified error left. The count of bits left
+ * is adjusted elsewhere based on real coded frame sizes
+ */
cpi->twopass.modified_error_left -= kf_group_err;
if (cpi->oxcf.allow_spatial_resampling)
@@ -3077,40 +3197,45 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
if ((cpi->common.Width != cpi->oxcf.Width) || (cpi->common.Height != cpi->oxcf.Height))
last_kf_resampled = 1;
- // Set back to unscaled by defaults
+ /* Set back to unscaled by defaults */
cpi->common.horiz_scale = NORMAL;
cpi->common.vert_scale = NORMAL;
- // Calculate Average bits per frame.
- //av_bits_per_frame = cpi->twopass.bits_left/(double)(cpi->twopass.total_stats.count - cpi->common.current_video_frame);
+ /* Calculate Average bits per frame. */
av_bits_per_frame = cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->frame_rate);
- //if ( av_bits_per_frame < 0.0 )
- // av_bits_per_frame = 0.0
- // CBR... Use the clip average as the target for deciding resample
+ /* CBR... Use the clip average as the target for deciding resample */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
{
bits_per_frame = av_bits_per_frame;
}
- // In VBR we want to avoid downsampling in easy section unless we are under extreme pressure
- // So use the larger of target bitrate for this sectoion or average bitrate for sequence
+ /* In VBR we want to avoid downsampling in easy section unless we
+             * are under extreme pressure. So use the larger of target bitrate
+ * for this section or average bitrate for sequence
+ */
else
{
- bits_per_frame = cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key; // This accounts for how hard the section is...
+ /* This accounts for how hard the section is... */
+ bits_per_frame = cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key;
- if (bits_per_frame < av_bits_per_frame) // Dont turn to resampling in easy sections just because they have been assigned a small number of bits
+ /* Dont turn to resampling in easy sections just because they
+ * have been assigned a small number of bits
+ */
+ if (bits_per_frame < av_bits_per_frame)
bits_per_frame = av_bits_per_frame;
}
- // bits_per_frame should comply with our minimum
+ /* bits_per_frame should comply with our minimum */
if (bits_per_frame < (cpi->oxcf.target_bandwidth * cpi->oxcf.two_pass_vbrmin_section / 100))
bits_per_frame = (cpi->oxcf.target_bandwidth * cpi->oxcf.two_pass_vbrmin_section / 100);
- // Work out if spatial resampling is necessary
+ /* Work out if spatial resampling is necessary */
kf_q = estimate_kf_group_q(cpi, err_per_frame, bits_per_frame, group_iiratio);
- // If we project a required Q higher than the maximum allowed Q then make a guess at the actual size of frames in this section
+ /* If we project a required Q higher than the maximum allowed Q then
+ * make a guess at the actual size of frames in this section
+ */
projected_bits_perframe = bits_per_frame;
tmp_q = kf_q;
@@ -3120,7 +3245,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
tmp_q--;
}
- // Guess at buffer level at the end of the section
+ /* Guess at buffer level at the end of the section */
projected_buffer_level = cpi->buffer_level - (int)((projected_bits_perframe - av_bits_per_frame) * cpi->twopass.frames_to_key);
if (0)
@@ -3130,15 +3255,17 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
fclose(f);
}
- // The trigger for spatial resampling depends on the various parameters such as whether we are streaming (CBR) or VBR.
+ /* The trigger for spatial resampling depends on the various
+ * parameters such as whether we are streaming (CBR) or VBR.
+ */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
{
- // Trigger resample if we are projected to fall below down sample level or
- // resampled last time and are projected to remain below the up sample level
+ /* Trigger resample if we are projected to fall below down
+ * sample level or resampled last time and are projected to
+ * remain below the up sample level
+ */
if ((projected_buffer_level < (cpi->oxcf.resample_down_water_mark * cpi->oxcf.optimal_buffer_level / 100)) ||
(last_kf_resampled && (projected_buffer_level < (cpi->oxcf.resample_up_water_mark * cpi->oxcf.optimal_buffer_level / 100))))
- //( ((cpi->buffer_level < (cpi->oxcf.resample_down_water_mark * cpi->oxcf.optimal_buffer_level / 100))) &&
- // ((projected_buffer_level < (cpi->oxcf.resample_up_water_mark * cpi->oxcf.optimal_buffer_level / 100))) ))
resample_trigger = 1;
else
resample_trigger = 0;
@@ -3148,9 +3275,15 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
int64_t clip_bits = (int64_t)(cpi->twopass.total_stats.count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->frame_rate));
int64_t over_spend = cpi->oxcf.starting_buffer_level - cpi->buffer_level;
- if ((last_kf_resampled && (kf_q > cpi->worst_quality)) || // If triggered last time the threshold for triggering again is reduced
- ((kf_q > cpi->worst_quality) && // Projected Q higher than allowed and ...
- (over_spend > clip_bits / 20))) // ... Overspend > 5% of total bits
+ /* If triggered last time the threshold for triggering again is
+ * reduced:
+ *
+ * Projected Q higher than allowed and Overspend > 5% of total
+ * bits
+ */
+ if ((last_kf_resampled && (kf_q > cpi->worst_quality)) ||
+ ((kf_q > cpi->worst_quality) &&
+ (over_spend > clip_bits / 20)))
resample_trigger = 1;
else
resample_trigger = 0;
@@ -3172,12 +3305,16 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
- // Reducing the area to 1/4 does not reduce the complexity (err_per_frame) to 1/4...
- // effective_sizeratio attempts to provide a crude correction for this
+ /* Reducing the area to 1/4 does not reduce the complexity
+ * (err_per_frame) to 1/4... effective_sizeratio attempts
+ * to provide a crude correction for this
+ */
effective_size_ratio = (double)(new_width * new_height) / (double)(cpi->oxcf.Width * cpi->oxcf.Height);
effective_size_ratio = (1.0 + (3.0 * effective_size_ratio)) / 4.0;
- // Now try again and see what Q we get with the smaller image size
+ /* Now try again and see what Q we get with the smaller
+ * image size
+ */
kf_q = estimate_kf_group_q(cpi, err_per_frame * effective_size_ratio, bits_per_frame, group_iiratio);
if (0)
diff --git a/vp8/encoder/lookahead.c b/vp8/encoder/lookahead.c
index 4c9228186..ce2ce08c1 100644
--- a/vp8/encoder/lookahead.c
+++ b/vp8/encoder/lookahead.c
@@ -118,10 +118,11 @@ vp8_lookahead_push(struct lookahead_ctx *ctx,
ctx->sz++;
buf = pop(ctx, &ctx->write_idx);
- // Only do this partial copy if the following conditions are all met:
- // 1. Lookahead queue has has size of 1.
- // 2. Active map is provided.
- // 3. This is not a key frame, golden nor altref frame.
+ /* Only do this partial copy if the following conditions are all met:
+ * 1. Lookahead queue has size of 1.
+ * 2. Active map is provided.
+ * 3. This is not a key frame, golden nor altref frame.
+ */
if (ctx->max_sz == 1 && active_map && !flags)
{
for (row = 0; row < mb_rows; ++row)
@@ -130,18 +131,18 @@ vp8_lookahead_push(struct lookahead_ctx *ctx,
while (1)
{
- // Find the first active macroblock in this row.
+ /* Find the first active macroblock in this row. */
for (; col < mb_cols; ++col)
{
if (active_map[col])
break;
}
- // No more active macroblock in this row.
+ /* No more active macroblock in this row. */
if (col == mb_cols)
break;
- // Find the end of active region in this row.
+ /* Find the end of active region in this row. */
active_end = col;
for (; active_end < mb_cols; ++active_end)
@@ -150,13 +151,13 @@ vp8_lookahead_push(struct lookahead_ctx *ctx,
break;
}
- // Only copy this active region.
+ /* Only copy this active region. */
vp8_copy_and_extend_frame_with_rect(src, &buf->img,
row << 4,
col << 4, 16,
(active_end - col) << 4);
- // Start again from the end of this active region.
+ /* Start again from the end of this active region. */
col = active_end;
}
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index dc0edfbc1..8ae6bc749 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -25,16 +25,19 @@ static int mv_mode_cts [4] [2];
int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
{
- // MV costing is based on the distribution of vectors in the previous frame and as such will tend to
- // over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
- // cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
- // The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
+ /* MV costing is based on the distribution of vectors in the previous
+ * frame and as such will tend to over state the cost of vectors. In
+ * addition coding a new vector can have a knock on effect on the cost
+ * of subsequent vectors and the quality of prediction from NEAR and
+ * NEAREST for subsequent blocks. The "Weight" parameter allows, to a
+ * limited extent, for some account to be taken of these factors.
+ */
return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
}
static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit)
{
- // Ignore mv costing if mvcost is NULL
+ /* Ignore mv costing if mvcost is NULL */
if (mvcost)
return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +
mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1])
@@ -44,8 +47,8 @@ static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bi
static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], int error_per_bit)
{
- // Calculate sad error cost on full pixel basis.
- // Ignore mv costing if mvsadcost is NULL
+ /* Calculate sad error cost on full pixel basis. */
+ /* Ignore mv costing if mvsadcost is NULL */
if (mvsadcost)
return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +
mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)])
@@ -59,7 +62,7 @@ void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
int search_site_count = 0;
- // Generate offsets for 4 search sites per step.
+ /* Generate offsets for 4 search sites per step. */
Len = MAX_FIRST_STEP;
x->ss[search_site_count].mv.col = 0;
x->ss[search_site_count].mv.row = 0;
@@ -69,31 +72,31 @@ void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
while (Len > 0)
{
- // Compute offsets for search sites.
+ /* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = 0;
x->ss[search_site_count].mv.row = -Len;
x->ss[search_site_count].offset = -Len * stride;
search_site_count++;
- // Compute offsets for search sites.
+ /* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = 0;
x->ss[search_site_count].mv.row = Len;
x->ss[search_site_count].offset = Len * stride;
search_site_count++;
- // Compute offsets for search sites.
+ /* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = -Len;
x->ss[search_site_count].mv.row = 0;
x->ss[search_site_count].offset = -Len;
search_site_count++;
- // Compute offsets for search sites.
+ /* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = Len;
x->ss[search_site_count].mv.row = 0;
x->ss[search_site_count].offset = Len;
search_site_count++;
- // Contract.
+ /* Contract. */
Len /= 2;
}
@@ -106,7 +109,7 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
int Len;
int search_site_count = 0;
- // Generate offsets for 8 search sites per step.
+ /* Generate offsets for 8 search sites per step. */
Len = MAX_FIRST_STEP;
x->ss[search_site_count].mv.col = 0;
x->ss[search_site_count].mv.row = 0;
@@ -116,56 +119,56 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
while (Len > 0)
{
- // Compute offsets for search sites.
+ /* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = 0;
x->ss[search_site_count].mv.row = -Len;
x->ss[search_site_count].offset = -Len * stride;
search_site_count++;
- // Compute offsets for search sites.
+ /* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = 0;
x->ss[search_site_count].mv.row = Len;
x->ss[search_site_count].offset = Len * stride;
search_site_count++;
- // Compute offsets for search sites.
+ /* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = -Len;
x->ss[search_site_count].mv.row = 0;
x->ss[search_site_count].offset = -Len;
search_site_count++;
- // Compute offsets for search sites.
+ /* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = Len;
x->ss[search_site_count].mv.row = 0;
x->ss[search_site_count].offset = Len;
search_site_count++;
- // Compute offsets for search sites.
+ /* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = -Len;
x->ss[search_site_count].mv.row = -Len;
x->ss[search_site_count].offset = -Len * stride - Len;
search_site_count++;
- // Compute offsets for search sites.
+ /* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = Len;
x->ss[search_site_count].mv.row = -Len;
x->ss[search_site_count].offset = -Len * stride + Len;
search_site_count++;
- // Compute offsets for search sites.
+ /* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = -Len;
x->ss[search_site_count].mv.row = Len;
x->ss[search_site_count].offset = Len * stride - Len;
search_site_count++;
- // Compute offsets for search sites.
+ /* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = Len;
x->ss[search_site_count].mv.row = Len;
x->ss[search_site_count].offset = Len * stride + Len;
search_site_count++;
- // Contract.
+ /* Contract. */
Len /= 2;
}
@@ -182,13 +185,20 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
* 32 cols area that is enough for 16x16 macroblock. Later, for SPLITMV, we
* could reduce the area.
*/
-#define MVC(r,c) (mvcost ? ((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 : 0) // estimated cost of a motion vector (r,c)
-#define PRE(r,c) (y + (((r)>>2) * y_stride + ((c)>>2) -(offset))) // pointer to predictor base of a motionvector
-#define SP(x) (((x)&3)<<1) // convert motion vector component to offset for svf calc
-#define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
+
+/* estimated cost of a motion vector (r,c) */
+#define MVC(r,c) (mvcost ? ((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 : 0)
+/* pointer to predictor base of a motionvector */
+#define PRE(r,c) (y + (((r)>>2) * y_stride + ((c)>>2) -(offset)))
+/* convert motion vector component to offset for svf calc */
+#define SP(x) (((x)&3)<<1)
+/* returns subpixel variance error function. */
+#define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse)
#define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
-#define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
-#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
+/* returns distortion + motion vector cost */
+#define ERR(r,c) (MVC(r,c)+DIST(r,c))
+/* checks if (r,c) has better score than previous best */
+#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)
int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
@@ -227,7 +237,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
unsigned char *y;
int buf_r1, buf_r2, buf_c1, buf_c2;
- // Clamping to avoid out-of-range data access
+ /* Clamping to avoid out-of-range data access */
buf_r1 = ((bestmv->as_mv.row - 3) < x->mv_row_min)?(bestmv->as_mv.row - x->mv_row_min):3;
buf_r2 = ((bestmv->as_mv.row + 3) > x->mv_row_max)?(x->mv_row_max - bestmv->as_mv.row):3;
buf_c1 = ((bestmv->as_mv.col - 3) < x->mv_col_min)?(bestmv->as_mv.col - x->mv_col_min):3;
@@ -244,19 +254,21 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
- // central mv
+ /* central mv */
bestmv->as_mv.row <<= 3;
bestmv->as_mv.col <<= 3;
- // calculate central point error
+ /* calculate central point error */
besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = besterr;
besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
- // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
+ /* TODO: Each subsequent iteration checks at least one point in common
+ * with the last iteration could be 2 ( if diag selected)
+ */
while (--halfiters)
{
- // 1/2 pel
+ /* 1/2 pel */
CHECK_BETTER(left, tr, tc - 2);
CHECK_BETTER(right, tr, tc + 2);
CHECK_BETTER(up, tr - 2, tc);
@@ -280,7 +292,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
break;
}
- // no reason to check the same one again.
+ /* no reason to check the same one again. */
if (tr == br && tc == bc)
break;
@@ -288,8 +300,11 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
tc = bc;
}
- // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
- // 1/4 pel
+ /* TODO: Each subsequent iteration checks at least one point in common
+ * with the last iteration could be 2 ( if diag selected)
+ */
+
+ /* 1/4 pel */
while (--quarteriters)
{
CHECK_BETTER(left, tr, tc - 1);
@@ -315,7 +330,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
break;
}
- // no reason to check the same one again.
+ /* no reason to check the same one again. */
if (tr == br && tc == bc)
break;
@@ -373,17 +388,17 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
y_stride = pre_stride;
#endif
- // central mv
+ /* central mv */
bestmv->as_mv.row <<= 3;
bestmv->as_mv.col <<= 3;
startmv = *bestmv;
- // calculate central point error
+ /* calculate central point error */
bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = bestmse;
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
- // go left then right and check error
+ /* go left then right and check error */
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
@@ -409,7 +424,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
- // go up then down and check error
+ /* go up then down and check error */
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
@@ -436,10 +451,8 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
- // now check 1 more diagonal
+ /* now check 1 more diagonal */
whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
- //for(whichdir =0;whichdir<4;whichdir++)
- //{
this_mv = startmv;
switch (whichdir)
@@ -477,10 +490,8 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
-// }
-
- // time to check quarter pels.
+ /* time to check quarter pels. */
if (bestmv->as_mv.row < startmv.as_mv.row)
y -= y_stride;
@@ -491,7 +502,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
- // go left then right and check error
+ /* go left then right and check error */
this_mv.as_mv.row = startmv.as_mv.row;
if (startmv.as_mv.col & 7)
@@ -527,7 +538,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
- // go up then down and check error
+ /* go up then down and check error */
this_mv.as_mv.col = startmv.as_mv.col;
if (startmv.as_mv.row & 7)
@@ -564,11 +575,9 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
- // now check 1 more diagonal
+ /* now check 1 more diagonal */
whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
-// for(whichdir=0;whichdir<4;whichdir++)
-// {
this_mv = startmv;
switch (whichdir)
@@ -690,17 +699,17 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
y_stride = pre_stride;
#endif
- // central mv
+ /* central mv */
bestmv->as_mv.row <<= 3;
bestmv->as_mv.col <<= 3;
startmv = *bestmv;
- // calculate central point error
+ /* calculate central point error */
bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = bestmse;
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
- // go left then right and check error
+ /* go left then right and check error */
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
@@ -726,7 +735,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
- // go up then down and check error
+ /* go up then down and check error */
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
@@ -752,7 +761,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
- // now check 1 more diagonal -
+ /* now check 1 more diagonal - */
whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
this_mv = startmv;
@@ -875,12 +884,12 @@ int vp8_hex_search
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
- // adjust ref_mv to make sure it is within MV range
+ /* adjust ref_mv to make sure it is within MV range */
vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
br = ref_mv->as_mv.row;
bc = ref_mv->as_mv.col;
- // Work out the start point for the search
+ /* Work out the start point for the search */
base_offset = (unsigned char *)(base_pre + d->offset);
this_offset = base_offset + (br * (pre_stride)) + bc;
this_mv.as_mv.row = br;
@@ -901,8 +910,7 @@ int vp8_hex_search
dia_range = 8;
#endif
- // hex search
- //j=0
+ /* hex search */
CHECK_BOUNDS(2)
if(all_in)
@@ -977,7 +985,7 @@ int vp8_hex_search
}
}
- // check 4 1-away neighbors
+ /* check 4 1-away neighbors */
cal_neighbors:
for (j = 0; j < dia_range; j++)
{
@@ -1066,8 +1074,11 @@ int vp8_diamond_search_sad_c
unsigned char *check_here;
int thissad;
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
+ int *mvsadcost[2];
int_mv fcenter_mv;
+
+ mvsadcost[0] = x->mvsadcost[0];
+ mvsadcost[1] = x->mvsadcost[1];
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1078,17 +1089,19 @@ int vp8_diamond_search_sad_c
best_mv->as_mv.row = ref_row;
best_mv->as_mv.col = ref_col;
- // Work out the start point for the search
+ /* Work out the start point for the search */
in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) + ref_col);
best_address = in_what;
- // Check the starting position
+ /* Check the starting position */
bestsad = fn_ptr->sdf(what, what_stride, in_what,
in_what_stride, 0x7fffffff)
+ mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
- // search_param determines the length of the initial step and hence the number of iterations
- // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
+ /* search_param determines the length of the initial step and hence
+ * the number of iterations 0 = initial step (MAX_FIRST_STEP) pel :
+ * 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
+ */
ss = &x->ss[search_param * x->searches_per_step];
tot_steps = (x->ss_count / x->searches_per_step) - search_param;
@@ -1098,7 +1111,7 @@ int vp8_diamond_search_sad_c
{
for (j = 0 ; j < x->searches_per_step ; j++)
{
- // Trap illegal vectors
+ /* Trap illegal vectors */
this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
@@ -1189,8 +1202,11 @@ int vp8_diamond_search_sadx4
unsigned char *check_here;
unsigned int thissad;
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
+ int *mvsadcost[2];
int_mv fcenter_mv;
+
+ mvsadcost[0] = x->mvsadcost[0];
+ mvsadcost[1] = x->mvsadcost[1];
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1201,17 +1217,19 @@ int vp8_diamond_search_sadx4
best_mv->as_mv.row = ref_row;
best_mv->as_mv.col = ref_col;
- // Work out the start point for the search
+ /* Work out the start point for the search */
in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) + ref_col);
best_address = in_what;
- // Check the starting position
+ /* Check the starting position */
bestsad = fn_ptr->sdf(what, what_stride,
in_what, in_what_stride, 0x7fffffff)
+ mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
- // search_param determines the length of the initial step and hence the number of iterations
- // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
+ /* search_param determines the length of the initial step and hence the
+ * number of iterations 0 = initial step (MAX_FIRST_STEP) pel : 1 =
+ * (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
+ */
ss = &x->ss[search_param * x->searches_per_step];
tot_steps = (x->ss_count / x->searches_per_step) - search_param;
@@ -1221,8 +1239,10 @@ int vp8_diamond_search_sadx4
{
int all_in = 1, t;
- // To know if all neighbor points are within the bounds, 4 bounds checking are enough instead of
- // checking 4 bounds for each points.
+ /* To know if all neighbor points are within the bounds, 4 bounds
+ * checking are enough instead of checking 4 bounds for each
+ * points.
+ */
all_in &= ((best_mv->as_mv.row + ss[i].mv.row)> x->mv_row_min);
all_in &= ((best_mv->as_mv.row + ss[i+1].mv.row) < x->mv_row_max);
all_in &= ((best_mv->as_mv.col + ss[i+2].mv.col) > x->mv_col_min);
@@ -1263,7 +1283,7 @@ int vp8_diamond_search_sadx4
{
for (j = 0 ; j < x->searches_per_step ; j++)
{
- // Trap illegal vectors
+ /* Trap illegal vectors */
this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
@@ -1341,24 +1361,29 @@ int vp8_full_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int col_min = ref_col - distance;
int col_max = ref_col + distance;
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
+ int *mvsadcost[2];
int_mv fcenter_mv;
+
+ mvsadcost[0] = x->mvsadcost[0];
+ mvsadcost[1] = x->mvsadcost[1];
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
- // Work out the mid point for the search
+ /* Work out the mid point for the search */
in_what = base_pre + d->offset;
bestaddress = in_what + (ref_row * pre_stride) + ref_col;
best_mv->as_mv.row = ref_row;
best_mv->as_mv.col = ref_col;
- // Baseline value at the centre
+ /* Baseline value at the centre */
bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
in_what_stride, 0x7fffffff)
+ mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
- // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
+ /* Apply further limits to prevent us looking using vectors that
+ * stretch beyond the UMV border
+ */
if (col_min < x->mv_col_min)
col_min = x->mv_col_min;
@@ -1437,24 +1462,29 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
unsigned int sad_array[3];
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
+ int *mvsadcost[2];
int_mv fcenter_mv;
+
+ mvsadcost[0] = x->mvsadcost[0];
+ mvsadcost[1] = x->mvsadcost[1];
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
- // Work out the mid point for the search
+ /* Work out the mid point for the search */
in_what = base_pre + d->offset;
bestaddress = in_what + (ref_row * pre_stride) + ref_col;
best_mv->as_mv.row = ref_row;
best_mv->as_mv.col = ref_col;
- // Baseline value at the centre
+ /* Baseline value at the centre */
bestsad = fn_ptr->sdf(what, what_stride,
bestaddress, in_what_stride, 0x7fffffff)
+ mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
- // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
+ /* Apply further limits to prevent us looking using vectors that stretch
+ * beyond the UMV border
+ */
if (col_min < x->mv_col_min)
col_min = x->mv_col_min;
@@ -1570,24 +1600,29 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
DECLARE_ALIGNED_ARRAY(16, unsigned short, sad_array8, 8);
unsigned int sad_array[3];
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
+ int *mvsadcost[2];
int_mv fcenter_mv;
+
+ mvsadcost[0] = x->mvsadcost[0];
+ mvsadcost[1] = x->mvsadcost[1];
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
- // Work out the mid point for the search
+ /* Work out the mid point for the search */
in_what = base_pre + d->offset;
bestaddress = in_what + (ref_row * pre_stride) + ref_col;
best_mv->as_mv.row = ref_row;
best_mv->as_mv.col = ref_col;
- // Baseline value at the centre
+ /* Baseline value at the centre */
bestsad = fn_ptr->sdf(what, what_stride,
bestaddress, in_what_stride, 0x7fffffff)
+ mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
- // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
+ /* Apply further limits to prevent us looking using vectors that stretch
+ * beyond the UMV border
+ */
if (col_min < x->mv_col_min)
col_min = x->mv_col_min;
@@ -1721,9 +1756,11 @@ int vp8_refining_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv
int_mv this_mv;
unsigned int bestsad = INT_MAX;
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
+ int *mvsadcost[2];
int_mv fcenter_mv;
+ mvsadcost[0] = x->mvsadcost[0];
+ mvsadcost[1] = x->mvsadcost[1];
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1800,9 +1837,11 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv this_mv;
unsigned int bestsad = INT_MAX;
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
+ int *mvsadcost[2];
int_mv fcenter_mv;
+ mvsadcost[0] = x->mvsadcost[0];
+ mvsadcost[1] = x->mvsadcost[1];
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1906,16 +1945,16 @@ void print_mode_context(void)
for (j = 0; j < 6; j++)
{
- fprintf(f, " { // %d \n", j);
+ fprintf(f, " { /* %d */\n", j);
fprintf(f, " ");
for (i = 0; i < 4; i++)
{
int overal_prob;
int this_prob;
- int count; // = mv_ref_ct[j][i][0]+mv_ref_ct[j][i][1];
+ int count;
- // Overall probs
+ /* Overall probs */
count = mv_mode_cts[i][0] + mv_mode_cts[i][1];
if (count)
@@ -1926,7 +1965,7 @@ void print_mode_context(void)
if (overal_prob == 0)
overal_prob = 1;
- // context probs
+ /* context probs */
count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1];
if (count)
@@ -1938,8 +1977,6 @@ void print_mode_context(void)
this_prob = 1;
fprintf(f, "%5d, ", this_prob);
- //fprintf(f,"%5d, %5d, %8d,", this_prob, overal_prob, (this_prob << 10)/overal_prob);
- //fprintf(f,"%8d, ", (this_prob << 10)/overal_prob);
}
fprintf(f, " },\n");
diff --git a/vp8/encoder/mcomp.h b/vp8/encoder/mcomp.h
index cdb0cb63c..890113f9a 100644
--- a/vp8/encoder/mcomp.h
+++ b/vp8/encoder/mcomp.h
@@ -21,9 +21,16 @@ extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]);
#endif
-#define MAX_MVSEARCH_STEPS 8 // The maximum number of steps in a step search given the largest allowed initial step
-#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS)) - 1) // Max full pel mv specified in 1 pel units
-#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1)) // Maximum size of the first step in full pel units
+/* The maximum number of steps in a step search given the largest allowed
+ * initial step
+ */
+#define MAX_MVSEARCH_STEPS 8
+
+/* Max full pel mv specified in 1 pel units */
+#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS)) - 1)
+
+/* Maximum size of the first step in full pel units */
+#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1))
extern void print_mode_context(void);
extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight);
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index fb01ea732..4a57d3b27 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -57,10 +57,6 @@ extern unsigned int vp8_get_processor_freq();
extern void print_tree_update_probs();
extern void vp8cx_create_encoder_threads(VP8_COMP *cpi);
extern void vp8cx_remove_encoder_threads(VP8_COMP *cpi);
-#if HAVE_NEON
-extern void vp8_yv12_copy_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
-extern void vp8_yv12_copy_src_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
-#endif
int vp8_estimate_entropy_savings(VP8_COMP *cpi);
@@ -143,7 +139,7 @@ extern const int qzbin_factors[129];
extern void vp8cx_init_quantizer(VP8_COMP *cpi);
extern const int vp8cx_base_skip_false_prob[128];
-// Tables relating active max Q to active min Q
+/* Tables relating active max Q to active min Q */
static const unsigned char kf_low_motion_minq[QINDEX_RANGE] =
{
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
@@ -219,9 +215,8 @@ static void save_layer_context(VP8_COMP *cpi)
{
LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
- // Save layer dependent coding state
+ /* Save layer dependent coding state */
lc->target_bandwidth = cpi->target_bandwidth;
- //lc->target_bandwidth = cpi->oxcf.target_bandwidth;
lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
@@ -258,7 +253,7 @@ static void restore_layer_context(VP8_COMP *cpi, const int layer)
{
LAYER_CONTEXT *lc = &cpi->layer_context[layer];
- // Restore layer dependent coding state
+ /* Restore layer dependent coding state */
cpi->current_layer = layer;
cpi->target_bandwidth = lc->target_bandwidth;
cpi->oxcf.target_bandwidth = lc->target_bandwidth;
@@ -271,9 +266,7 @@ static void restore_layer_context(VP8_COMP *cpi, const int layer)
cpi->buffer_level = lc->buffer_level;
cpi->bits_off_target = lc->bits_off_target;
cpi->total_actual_bits = lc->total_actual_bits;
- //cpi->worst_quality = lc->worst_quality;
cpi->active_worst_quality = lc->active_worst_quality;
- //cpi->best_quality = lc->best_quality;
cpi->active_best_quality = lc->active_best_quality;
cpi->ni_av_qi = lc->ni_av_qi;
cpi->ni_tot_qi = lc->ni_tot_qi;
@@ -296,12 +289,17 @@ static void restore_layer_context(VP8_COMP *cpi, const int layer)
static void setup_features(VP8_COMP *cpi)
{
- // Set up default state for MB feature flags
- cpi->mb.e_mbd.segmentation_enabled = 0;
- cpi->mb.e_mbd.update_mb_segmentation_map = 0;
- cpi->mb.e_mbd.update_mb_segmentation_data = 0;
- vpx_memset(cpi->mb.e_mbd.mb_segment_tree_probs, 255, sizeof(cpi->mb.e_mbd.mb_segment_tree_probs));
- vpx_memset(cpi->mb.e_mbd.segment_feature_data, 0, sizeof(cpi->mb.e_mbd.segment_feature_data));
+ /* If segmentation enabled set the update flags */
+ if ( cpi->mb.e_mbd.segmentation_enabled )
+ {
+ cpi->mb.e_mbd.update_mb_segmentation_map = 1;
+ cpi->mb.e_mbd.update_mb_segmentation_data = 1;
+ }
+ else
+ {
+ cpi->mb.e_mbd.update_mb_segmentation_map = 0;
+ cpi->mb.e_mbd.update_mb_segmentation_data = 0;
+ }
cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
@@ -323,7 +321,7 @@ static void dealloc_compressor_data(VP8_COMP *cpi)
vpx_free(cpi->tplist);
cpi->tplist = NULL;
- // Delete last frame MV storage buffers
+ /* Delete last frame MV storage buffers */
vpx_free(cpi->lfmv);
cpi->lfmv = 0;
@@ -333,7 +331,7 @@ static void dealloc_compressor_data(VP8_COMP *cpi)
vpx_free(cpi->lf_ref_frame);
cpi->lf_ref_frame = 0;
- // Delete sementation map
+ /* Delete segmentation map */
vpx_free(cpi->segmentation_map);
cpi->segmentation_map = 0;
@@ -349,11 +347,11 @@ static void dealloc_compressor_data(VP8_COMP *cpi)
vpx_free(cpi->tok);
cpi->tok = 0;
- // Structure used to monitor GF usage
+ /* Structure used to monitor GF usage */
vpx_free(cpi->gf_active_flags);
cpi->gf_active_flags = 0;
- // Activity mask based per mb zbin adjustments
+ /* Activity mask based per mb zbin adjustments */
vpx_free(cpi->mb_activity_map);
cpi->mb_activity_map = 0;
vpx_free(cpi->mb_norm_activity_map);
@@ -365,37 +363,42 @@ static void dealloc_compressor_data(VP8_COMP *cpi)
static void enable_segmentation(VP8_COMP *cpi)
{
- // Set the appropriate feature bit
+ /* Set the appropriate feature bit */
cpi->mb.e_mbd.segmentation_enabled = 1;
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
static void disable_segmentation(VP8_COMP *cpi)
{
- // Clear the appropriate feature bit
+ /* Clear the appropriate feature bit */
cpi->mb.e_mbd.segmentation_enabled = 0;
}
-// Valid values for a segment are 0 to 3
-// Segmentation map is arrange as [Rows][Columns]
+/* Valid values for a segment are 0 to 3
+ * Segmentation map is arranged as [Rows][Columns]
+ */
static void set_segmentation_map(VP8_COMP *cpi, unsigned char *segmentation_map)
{
- // Copy in the new segmentation map
+ /* Copy in the new segmentation map */
vpx_memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols));
- // Signal that the map should be updated.
+ /* Signal that the map should be updated. */
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
-// The values given for each segment can be either deltas (from the default value chosen for the frame) or absolute values.
-//
-// Valid range for abs values is (0-127 for MB_LVL_ALT_Q) , (0-63 for SEGMENT_ALT_LF)
-// Valid range for delta values are (+/-127 for MB_LVL_ALT_Q) , (+/-63 for SEGMENT_ALT_LF)
-//
-// abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use the absolute values given).
-//
-//
+/* The values given for each segment can be either deltas (from the default
+ * value chosen for the frame) or absolute values.
+ *
+ * Valid range for abs values is:
+ * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
+ * Valid range for delta values are:
+ * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
+ *
+ * abs_delta = SEGMENT_DELTADATA (deltas)
+ * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
+ *
+ */
static void set_segment_data(VP8_COMP *cpi, signed char *feature_data, unsigned char abs_delta)
{
cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
@@ -411,26 +414,6 @@ static void segmentation_test_function(VP8_COMP *cpi)
// Create a temporary map for segmentation data.
CHECK_MEM_ERROR(seg_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
- // MB loop to set local segmentation map
- /*for ( i = 0; i < cpi->common.mb_rows; i++ )
- {
- for ( j = 0; j < cpi->common.mb_cols; j++ )
- {
- //seg_map[(i*cpi->common.mb_cols) + j] = (j % 2) + ((i%2)* 2);
- //if ( j < cpi->common.mb_cols/2 )
-
- // Segment 1 around the edge else 0
- if ( (i == 0) || (j == 0) || (i == (cpi->common.mb_rows-1)) || (j == (cpi->common.mb_cols-1)) )
- seg_map[(i*cpi->common.mb_cols) + j] = 1;
- //else if ( (i < 2) || (j < 2) || (i > (cpi->common.mb_rows-3)) || (j > (cpi->common.mb_cols-3)) )
- // seg_map[(i*cpi->common.mb_cols) + j] = 2;
- //else if ( (i < 5) || (j < 5) || (i > (cpi->common.mb_rows-6)) || (j > (cpi->common.mb_cols-6)) )
- // seg_map[(i*cpi->common.mb_cols) + j] = 3;
- else
- seg_map[(i*cpi->common.mb_cols) + j] = 0;
- }
- }*/
-
// Set the segmentation Map
set_segmentation_map(cpi, seg_map);
@@ -453,13 +436,12 @@ static void segmentation_test_function(VP8_COMP *cpi)
set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
// Delete sementation map
- vpx_free(seg_map);
+ vpx_free(seg_map);
seg_map = 0;
-
}
-// A simple function to cyclically refresh the background at a lower Q
+/* A simple function to cyclically refresh the background at a lower Q */
static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
{
unsigned char *seg_map;
@@ -468,7 +450,7 @@ static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
- // Create a temporary map for segmentation data.
+ /* Create a temporary map for segmentation data. */
CHECK_MEM_ERROR(seg_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
cpi->cyclic_refresh_q = Q;
@@ -476,7 +458,6 @@ static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
for (i = Q; i > 0; i--)
{
if (vp8_bits_per_mb[cpi->common.frame_type][i] >= ((vp8_bits_per_mb[cpi->common.frame_type][Q]*(Q + 128)) / 64))
- //if ( vp8_bits_per_mb[cpi->common.frame_type][i] >= ((vp8_bits_per_mb[cpi->common.frame_type][Q]*((2*Q)+96))/64) )
{
break;
}
@@ -484,16 +465,19 @@ static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
cpi->cyclic_refresh_q = i;
- // Only update for inter frames
+ /* Only update for inter frames */
if (cpi->common.frame_type != KEY_FRAME)
{
- // Cycle through the macro_block rows
- // MB loop to set local segmentation map
+ /* Cycle through the macro_block rows */
+ /* MB loop to set local segmentation map */
for (i = cpi->cyclic_refresh_mode_index; i < mbs_in_frame; i++)
{
- // If the MB is as a candidate for clean up then mark it for possible boost/refresh (segment 1)
- // The segment id may get reset to 0 later if the MB gets coded anything other than last frame 0,0
- // as only (last frame 0,0) MBs are eligable for refresh : that is to say Mbs likely to be background blocks.
+ /* If the MB is as a candidate for clean up then mark it for
+ * possible boost/refresh (segment 1) The segment id may get
+ * reset to 0 later if the MB gets coded anything other than
+ * last frame 0,0 as only (last frame 0,0) MBs are eligable for
+ * refresh : that is to say Mbs likely to be background blocks.
+ */
if (cpi->cyclic_refresh_map[i] == 0)
{
seg_map[i] = 1;
@@ -502,9 +486,8 @@ static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
{
seg_map[i] = 0;
- // Skip blocks that have been refreshed recently anyway.
+ /* Skip blocks that have been refreshed recently anyway. */
if (cpi->cyclic_refresh_map[i] < 0)
- //cpi->cyclic_refresh_map[i] = cpi->cyclic_refresh_map[i] / 16;
cpi->cyclic_refresh_map[i]++;
}
@@ -516,36 +499,35 @@ static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
}
- // If we have gone through the frame reset to the start
+ /* If we have gone through the frame reset to the start */
cpi->cyclic_refresh_mode_index = i;
if (cpi->cyclic_refresh_mode_index >= mbs_in_frame)
cpi->cyclic_refresh_mode_index = 0;
}
- // Set the segmentation Map
+ /* Set the segmentation Map */
set_segmentation_map(cpi, seg_map);
- // Activate segmentation.
+ /* Activate segmentation. */
enable_segmentation(cpi);
- // Set up the quant segment data
+ /* Set up the quant segment data */
feature_data[MB_LVL_ALT_Q][0] = 0;
feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
feature_data[MB_LVL_ALT_Q][2] = 0;
feature_data[MB_LVL_ALT_Q][3] = 0;
- // Set up the loop segment data
+ /* Set up the loop segment data */
feature_data[MB_LVL_ALT_LF][0] = 0;
feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
feature_data[MB_LVL_ALT_LF][2] = 0;
feature_data[MB_LVL_ALT_LF][3] = 0;
- // Initialise the feature data structure
- // SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1
+ /* Initialise the feature data structure */
set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
- // Delete sementation map
+ /* Delete sementation map */
vpx_free(seg_map);
seg_map = 0;
@@ -560,16 +542,16 @@ static void set_default_lf_deltas(VP8_COMP *cpi)
vpx_memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
vpx_memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
- // Test of ref frame deltas
+ /* Test of ref frame deltas */
cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
- cpi->mb.e_mbd.mode_lf_deltas[0] = 4; // BPRED
- cpi->mb.e_mbd.mode_lf_deltas[1] = -2; // Zero
- cpi->mb.e_mbd.mode_lf_deltas[2] = 2; // New mv
- cpi->mb.e_mbd.mode_lf_deltas[3] = 4; // Split mv
+ cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
+ cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
+ cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
+ cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
}
/* Convenience macros for mapping speed and mode into a continuous
@@ -669,7 +651,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
int last_improved_quant = sf->improved_quant;
int ref_frames;
- // Initialise default mode frequency sampling variables
+ /* Initialise default mode frequency sampling variables */
for (i = 0; i < MAX_MODES; i ++)
{
cpi->mode_check_freq[i] = 0;
@@ -679,7 +661,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
cpi->mbs_tested_so_far = 0;
- // best quality defaults
+ /* best quality defaults */
sf->RD = 1;
sf->search_method = NSTEP;
sf->improved_quant = 1;
@@ -697,7 +679,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
sf->improved_mv_pred = 1;
- // default thresholds to 0
+ /* default thresholds to 0 */
for (i = 0; i < MAX_MODES; i++)
sf->thresh_mult[i] = 0;
@@ -779,7 +761,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
switch (Mode)
{
#if !(CONFIG_REALTIME_ONLY)
- case 0: // best quality mode
+ case 0: /* best quality mode */
sf->first_step = 0;
sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
break;
@@ -800,8 +782,9 @@ void vp8_set_speed_features(VP8_COMP *cpi)
sf->improved_quant = 0;
sf->improved_dct = 0;
- // Only do recode loop on key frames, golden frames and
- // alt ref frames
+ /* Only do recode loop on key frames, golden frames and
+ * alt ref frames
+ */
sf->recode_loop = 2;
}
@@ -809,14 +792,14 @@ void vp8_set_speed_features(VP8_COMP *cpi)
if (Speed > 3)
{
sf->auto_filter = 1;
- sf->recode_loop = 0; // recode loop off
- sf->RD = 0; // Turn rd off
+ sf->recode_loop = 0; /* recode loop off */
+ sf->RD = 0; /* Turn rd off */
}
if (Speed > 4)
{
- sf->auto_filter = 0; // Faster selection of loop filter
+ sf->auto_filter = 0; /* Faster selection of loop filter */
}
break;
@@ -839,7 +822,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
}
if (Speed > 2)
- sf->auto_filter = 0; // Faster selection of loop filter
+ sf->auto_filter = 0; /* Faster selection of loop filter */
if (Speed > 3)
{
@@ -849,7 +832,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
if (Speed > 4)
{
- sf->auto_filter = 0; // Faster selection of loop filter
+ sf->auto_filter = 0; /* Faster selection of loop filter */
sf->search_method = HEX;
sf->iterative_sub_pixel = 0;
}
@@ -876,7 +859,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
total_skip = sum;
sum = 0;
- // i starts from 2 to make sure thresh started from 2048
+ /* i starts from 2 to make sure thresh started from 2048 */
for (; i < 1024; i++)
{
sum += cpi->error_bins[i];
@@ -930,7 +913,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
cm->filter_type = SIMPLE_LOOPFILTER;
}
- // This has a big hit on quality. Last resort
+ /* This has a big hit on quality. Last resort */
if (Speed >= 15)
sf->half_pixel_search = 0;
@@ -938,8 +921,9 @@ void vp8_set_speed_features(VP8_COMP *cpi)
}; /* switch */
- // Slow quant, dct and trellis not worthwhile for first pass
- // so make sure they are always turned off.
+ /* Slow quant, dct and trellis not worthwhile for first pass
+ * so make sure they are always turned off.
+ */
if ( cpi->pass == 1 )
{
sf->improved_quant = 0;
@@ -1107,13 +1091,13 @@ void vp8_alloc_compressor_data(VP8_COMP *cpi)
CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
}
- // Data used for real time vc mode to see if gf needs refreshing
+ /* Data used for real time vc mode to see if gf needs refreshing */
cpi->inter_zz_count = 0;
cpi->gf_bad_count = 0;
cpi->gf_update_recommended = 0;
- // Structures used to minitor GF usage
+ /* Structures used to minitor GF usage */
vpx_free(cpi->gf_active_flags);
CHECK_MEM_ERROR(cpi->gf_active_flags,
vpx_calloc(1, cm->mb_rows * cm->mb_cols));
@@ -1146,7 +1130,7 @@ void vp8_alloc_compressor_data(VP8_COMP *cpi)
}
-// Quant MOD
+/* Quant MOD */
static const int q_trans[] =
{
0, 1, 2, 3, 4, 5, 7, 8,
@@ -1168,7 +1152,7 @@ int vp8_reverse_trans(int x)
return i;
return 63;
-};
+}
void vp8_new_frame_rate(VP8_COMP *cpi, double framerate)
{
if(framerate < .1)
@@ -1182,16 +1166,16 @@ void vp8_new_frame_rate(VP8_COMP *cpi, double framerate)
cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
cpi->oxcf.two_pass_vbrmin_section / 100);
- // Set Maximum gf/arf interval
+ /* Set Maximum gf/arf interval */
cpi->max_gf_interval = ((int)(cpi->output_frame_rate / 2.0) + 2);
if(cpi->max_gf_interval < 12)
cpi->max_gf_interval = 12;
- // Extended interval for genuinely static scenes
+ /* Extended interval for genuinely static scenes */
cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
- // Special conditions when altr ref frame enabled in lagged compress mode
+ /* Special conditions when altr ref frame enabled in lagged compress mode */
if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames)
{
if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1)
@@ -1244,15 +1228,15 @@ static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
cpi->ref_frame_rate = cpi->frame_rate;
- // change includes all joint functionality
+ /* change includes all joint functionality */
vp8_change_config(cpi, oxcf);
- // Initialize active best and worst q and average q values.
+ /* Initialize active best and worst q and average q values. */
cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
cpi->active_best_quality = cpi->oxcf.best_allowed_q;
cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
- // Initialise the starting buffer levels
+ /* Initialise the starting buffer levels */
cpi->buffer_level = cpi->oxcf.starting_buffer_level;
cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
@@ -1264,7 +1248,7 @@ static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
cpi->total_actual_bits = 0;
cpi->total_target_vs_actual = 0;
- // Temporal scalabilty
+ /* Temporal scalabilty */
if (cpi->oxcf.number_of_layers > 1)
{
unsigned int i;
@@ -1274,7 +1258,7 @@ static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
{
LAYER_CONTEXT *lc = &cpi->layer_context[i];
- // Layer configuration
+ /* Layer configuration */
lc->frame_rate =
cpi->output_frame_rate / cpi->oxcf.rate_decimator[i];
lc->target_bandwidth = cpi->oxcf.target_bitrate[i] * 1000;
@@ -1301,7 +1285,7 @@ static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
rescale(oxcf->maximum_buffer_size,
lc->target_bandwidth, 1000);
- // Work out the average size of a frame within this layer
+ /* Work out the average size of a frame within this layer */
if (i > 0)
lc->avg_frame_size_for_layer = (cpi->oxcf.target_bitrate[i] -
cpi->oxcf.target_bitrate[i-1]) * 1000 /
@@ -1375,7 +1359,7 @@ static void update_layer_contexts (VP8_COMP *cpi)
oxcf->maximum_buffer_size_in_ms,
lc->target_bandwidth, 1000);
- // Work out the average size of a frame within this layer
+ /* Work out the average size of a frame within this layer */
if (i > 0)
lc->avg_frame_size_for_layer = (oxcf->target_bitrate[i] -
oxcf->target_bitrate[i-1]) * 1000 /
@@ -1516,8 +1500,6 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
- //cpi->use_golden_frame_only = 0;
- //cpi->use_last_frame_only = 0;
cm->refresh_golden_frame = 0;
cm->refresh_last_frame = 1;
cm->refresh_entropy_probs = 1;
@@ -1539,11 +1521,11 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
}
- // At the moment the first order values may not be > MAXQ
+ /* At the moment the first order values may not be > MAXQ */
if (cpi->oxcf.fixed_q > MAXQ)
cpi->oxcf.fixed_q = MAXQ;
- // local file playback mode == really big buffer
+ /* local file playback mode == really big buffer */
if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK)
{
cpi->oxcf.starting_buffer_level = 60000;
@@ -1554,14 +1536,14 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
cpi->oxcf.maximum_buffer_size_in_ms = 240000;
}
- // Convert target bandwidth from Kbit/s to Bit/s
+ /* Convert target bandwidth from Kbit/s to Bit/s */
cpi->oxcf.target_bandwidth *= 1000;
cpi->oxcf.starting_buffer_level =
rescale(cpi->oxcf.starting_buffer_level,
cpi->oxcf.target_bandwidth, 1000);
- // Set or reset optimal and maximum buffer levels.
+ /* Set or reset optimal and maximum buffer levels. */
if (cpi->oxcf.optimal_buffer_level == 0)
cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
else
@@ -1576,19 +1558,19 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
rescale(cpi->oxcf.maximum_buffer_size,
cpi->oxcf.target_bandwidth, 1000);
- // Set up frame rate and related parameters rate control values.
+ /* Set up frame rate and related parameters rate control values. */
vp8_new_frame_rate(cpi, cpi->frame_rate);
- // Set absolute upper and lower quality limits
+ /* Set absolute upper and lower quality limits */
cpi->worst_quality = cpi->oxcf.worst_allowed_q;
cpi->best_quality = cpi->oxcf.best_allowed_q;
- // active values should only be modified if out of new range
+ /* active values should only be modified if out of new range */
if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q)
{
cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
}
- // less likely
+ /* less likely */
else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q)
{
cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
@@ -1597,7 +1579,7 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
{
cpi->active_best_quality = cpi->oxcf.best_allowed_q;
}
- // less likely
+ /* less likely */
else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q)
{
cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
@@ -1607,7 +1589,7 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
cpi->cq_target_quality = cpi->oxcf.cq_level;
- // Only allow dropped frames in buffered mode
+ /* Only allow dropped frames in buffered mode */
cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
@@ -1622,7 +1604,7 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
* correct.
*/
- // VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs)
+ /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
if (cpi->oxcf.Sharpness > 7)
cpi->oxcf.Sharpness = 7;
@@ -1636,7 +1618,7 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
Scale2Ratio(cm->horiz_scale, &hr, &hs);
Scale2Ratio(cm->vert_scale, &vr, &vs);
- // always go to the next whole number
+ /* always go to the next whole number */
cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
}
@@ -1650,6 +1632,7 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
cm->yv12_fb[cm->lst_fb_idx].y_height ||
cm->yv12_fb[cm->lst_fb_idx].y_width == 0)
{
+ dealloc_raw_frame_buffers(cpi);
alloc_raw_frame_buffers(cpi);
vp8_alloc_compressor_data(cpi);
}
@@ -1662,16 +1645,16 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
cpi->Speed = cpi->oxcf.cpu_used;
- // force to allowlag to 0 if lag_in_frames is 0;
+ /* force to allowlag to 0 if lag_in_frames is 0; */
if (cpi->oxcf.lag_in_frames == 0)
{
cpi->oxcf.allow_lag = 0;
}
- // Limit on lag buffers as these are not currently dynamically allocated
+ /* Limit on lag buffers as these are not currently dynamically allocated */
else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS)
cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
- // YX Temp
+ /* YX Temp */
cpi->alt_ref_source = NULL;
cpi->is_src_frame_alt_ref = 0;
@@ -1688,7 +1671,7 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
#endif
#if 0
- // Experimental RD Code
+ /* Experimental RD Code */
cpi->frame_distortion = 0;
cpi->last_frame_distortion = 0;
#endif
@@ -1723,7 +1706,7 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
VP8_COMMON *cm;
cpi = vpx_memalign(32, sizeof(VP8_COMP));
- // Check that the CPI instance is valid
+ /* Check that the CPI instance is valid */
if (!cpi)
return 0;
@@ -1757,14 +1740,15 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
cpi->prob_gf_coded = 128;
cpi->prob_intra_coded = 63;
- // Prime the recent reference frame usage counters.
- // Hereafter they will be maintained as a sort of moving average
+ /* Prime the recent reference frame usage counters.
+ * Hereafter they will be maintained as a sort of moving average
+ */
cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
- // Set reference frame sign bias for ALTREF frame to 1 (for now)
+ /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
cpi->twopass.gf_decay_rate = 0;
@@ -1774,21 +1758,22 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
cpi->alt_is_last = 0 ;
cpi->gold_is_alt = 0 ;
- // allocate memory for storing last frame's MVs for MV prediction.
+ /* allocate memory for storing last frame's MVs for MV prediction. */
CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cpi->common.mb_rows+2) * (cpi->common.mb_cols+2), sizeof(int_mv)));
CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias, vpx_calloc((cpi->common.mb_rows+2) * (cpi->common.mb_cols+2), sizeof(int)));
CHECK_MEM_ERROR(cpi->lf_ref_frame, vpx_calloc((cpi->common.mb_rows+2) * (cpi->common.mb_cols+2), sizeof(int)));
- // Create the encoder segmentation map and set all entries to 0
+ /* Create the encoder segmentation map and set all entries to 0 */
CHECK_MEM_ERROR(cpi->segmentation_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
+
CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
vpx_memset(cpi->active_map , 1, (cpi->common.mb_rows * cpi->common.mb_cols));
cpi->active_map_enabled = 0;
#if 0
- // Experimental code for lagged and one pass
- // Initialise one_pass GF frames stats
- // Update stats used for GF selection
+ /* Experimental code for lagged and one pass */
+ /* Initialise one_pass GF frames stats */
+ /* Update stats used for GF selection */
if (cpi->pass == 0)
{
cpi->one_pass_frame_index = 0;
@@ -1808,8 +1793,9 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
}
#endif
- // Should we use the cyclic refresh method.
- // Currently this is tied to error resilliant mode
+ /* Should we use the cyclic refresh method.
+ * Currently this is tied to error resilliant mode
+ */
cpi->cyclic_refresh_mode_enabled = cpi->oxcf.error_resilient_mode;
cpi->cyclic_refresh_mode_max_mbs_perframe = (cpi->common.mb_rows * cpi->common.mb_cols) / 40;
cpi->cyclic_refresh_mode_index = 0;
@@ -1822,9 +1808,6 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
else
cpi->cyclic_refresh_map = (signed char *) NULL;
- // Test function for segmentation
- //segmentation_test_function( cpi);
-
#ifdef ENTROPY_STATS
init_context_counters();
#endif
@@ -1832,7 +1815,8 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
/*Initialize the feed-forward activity masking.*/
cpi->activity_avg = 90<<12;
- cpi->frames_since_key = 8; // Give a sensible default for the first frame.
+ /* Give a sensible default for the first frame. */
+ cpi->frames_since_key = 8;
cpi->key_frame_frequency = cpi->oxcf.key_freq;
cpi->this_key_frame_forced = 0;
cpi->next_key_frame_forced = 0;
@@ -1875,10 +1859,7 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
#endif
-#ifndef LLONG_MAX
-#define LLONG_MAX 9223372036854775807LL
-#endif
- cpi->first_time_stamp_ever = LLONG_MAX;
+ cpi->first_time_stamp_ever = 0x7FFFFFFF;
cpi->frames_till_gf_update_due = 0;
cpi->key_frame_count = 1;
@@ -1933,14 +1914,13 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
if (cpi->compressor_speed == 2)
{
- cpi->cpu_freq = 0; //vp8_get_processor_freq();
cpi->avg_encode_time = 0;
cpi->avg_pick_mode_time = 0;
}
vp8_set_speed_features(cpi);
- // Set starting values of RD threshold multipliers (128 = *1)
+ /* Set starting values of RD threshold multipliers (128 = *1) */
for (i = 0; i < MAX_MODES; i++)
{
cpi->rd_thresh_mult[i] = 128;
@@ -2016,11 +1996,14 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
cpi->diamond_search_sad = vp8_diamond_search_sad;
cpi->refining_search_sad = vp8_refining_search_sad;
- // make sure frame 1 is okay
+ /* make sure frame 1 is okay */
cpi->error_bins[0] = cpi->common.MBs;
- //vp8cx_init_quantizer() is first called here. Add check in vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only called later
- //when needed. This will avoid unnecessary calls of vp8cx_init_quantizer() for every frame.
+ /* vp8cx_init_quantizer() is first called here. Add check in
+ * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
+ * called later when needed. This will avoid unnecessary calls of
+ * vp8cx_init_quantizer() for every frame.
+ */
vp8cx_init_quantizer(cpi);
vp8_loop_filter_init(cm);
@@ -2205,7 +2188,6 @@ void vp8_remove_compressor(VP8_COMP **ptr)
fprintf(f, "%5d", frames_at_speed[i]);
fprintf(f, "\n");
- //fprintf(f, "%10d PM %10d %10d %10d EF %10d %10d %10d\n", cpi->Speed, cpi->avg_pick_mode_time, (tot_pm/cnt_pm), cnt_pm, cpi->avg_encode_time, 0, 0);
fclose(f);
}
@@ -2267,7 +2249,7 @@ void vp8_remove_compressor(VP8_COMP **ptr)
for (i = 0; i < 10; i++)
{
- fprintf(fmode, " { //Above Mode : %d\n", i);
+ fprintf(fmode, " { /* Above Mode : %d */\n", i);
for (j = 0; j < 10; j++)
{
@@ -2282,7 +2264,7 @@ void vp8_remove_compressor(VP8_COMP **ptr)
fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
}
- fprintf(fmode, "}, // left_mode %d\n", j);
+ fprintf(fmode, "}, /* left_mode %d */\n", j);
}
@@ -2584,7 +2566,7 @@ static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
{
VP8_COMMON *cm = &cpi->common;
- // are we resizing the image
+ /* are we resizing the image */
if (cm->horiz_scale != 0 || cm->vert_scale != 0)
{
#if CONFIG_SPATIAL_RESAMPLING
@@ -2612,51 +2594,57 @@ static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
}
-static void resize_key_frame(VP8_COMP *cpi)
+static int resize_key_frame(VP8_COMP *cpi)
{
#if CONFIG_SPATIAL_RESAMPLING
VP8_COMMON *cm = &cpi->common;
- // Do we need to apply resampling for one pass cbr.
- // In one pass this is more limited than in two pass cbr
- // The test and any change is only made one per key frame sequence
+ /* Do we need to apply resampling for one pass cbr.
+ * In one pass this is more limited than in two pass cbr
+ * The test and any change is only made one per key frame sequence
+ */
if (cpi->oxcf.allow_spatial_resampling && (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER))
{
int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
int new_width, new_height;
- // If we are below the resample DOWN watermark then scale down a notch.
+ /* If we are below the resample DOWN watermark then scale down a
+ * notch.
+ */
if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark * cpi->oxcf.optimal_buffer_level / 100))
{
cm->horiz_scale = (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
}
- // Should we now start scaling back up
+ /* Should we now start scaling back up */
else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark * cpi->oxcf.optimal_buffer_level / 100))
{
cm->horiz_scale = (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
}
- // Get the new hieght and width
+ /* Get the new hieght and width */
Scale2Ratio(cm->horiz_scale, &hr, &hs);
Scale2Ratio(cm->vert_scale, &vr, &vs);
new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
- // If the image size has changed we need to reallocate the buffers
- // and resample the source image
+ /* If the image size has changed we need to reallocate the buffers
+ * and resample the source image
+ */
if ((cm->Width != new_width) || (cm->Height != new_height))
{
cm->Width = new_width;
cm->Height = new_height;
vp8_alloc_compressor_data(cpi);
scale_and_extend_source(cpi->un_scaled_source, cpi);
+ return 1;
}
}
#endif
+ return 0;
}
@@ -2664,7 +2652,7 @@ static void update_alt_ref_frame_stats(VP8_COMP *cpi)
{
VP8_COMMON *cm = &cpi->common;
- // Select an interval before next GF or altref
+ /* Select an interval before next GF or altref */
if (!cpi->auto_gold)
cpi->frames_till_gf_update_due = cpi->goldfreq;
@@ -2672,26 +2660,27 @@ static void update_alt_ref_frame_stats(VP8_COMP *cpi)
{
cpi->current_gf_interval = cpi->frames_till_gf_update_due;
- // Set the bits per frame that we should try and recover in subsequent inter frames
- // to account for the extra GF spend... note that his does not apply for GF updates
- // that occur coincident with a key frame as the extra cost of key frames is dealt
- // with elsewhere.
-
+ /* Set the bits per frame that we should try and recover in
+ * subsequent inter frames to account for the extra GF spend...
+ * note that his does not apply for GF updates that occur
+ * coincident with a key frame as the extra cost of key frames is
+ * dealt with elsewhere.
+ */
cpi->gf_overspend_bits += cpi->projected_frame_size;
cpi->non_gf_bitrate_adjustment = cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
}
- // Update data structure that monitors level of reference to last GF
+ /* Update data structure that monitors level of reference to last GF */
vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
- // this frame refreshes means next frames don't unless specified by user
+ /* this frame refreshes means next frames don't unless specified by user */
cpi->common.frames_since_golden = 0;
- // Clear the alternate reference update pending flag.
+ /* Clear the alternate reference update pending flag. */
cpi->source_alt_ref_pending = 0;
- // Set the alternate refernce frame active flag
+ /* Set the alternate refernce frame active flag */
cpi->source_alt_ref_active = 1;
@@ -2700,10 +2689,10 @@ static void update_golden_frame_stats(VP8_COMP *cpi)
{
VP8_COMMON *cm = &cpi->common;
- // Update the Golden frame usage counts.
+ /* Update the Golden frame usage counts. */
if (cm->refresh_golden_frame)
{
- // Select an interval before next GF
+ /* Select an interval before next GF */
if (!cpi->auto_gold)
cpi->frames_till_gf_update_due = cpi->goldfreq;
@@ -2711,14 +2700,18 @@ static void update_golden_frame_stats(VP8_COMP *cpi)
{
cpi->current_gf_interval = cpi->frames_till_gf_update_due;
- // Set the bits per frame that we should try and recover in subsequent inter frames
- // to account for the extra GF spend... note that his does not apply for GF updates
- // that occur coincident with a key frame as the extra cost of key frames is dealt
- // with elsewhere.
+ /* Set the bits per frame that we should try and recover in
+ * subsequent inter frames to account for the extra GF spend...
+ * note that his does not apply for GF updates that occur
+ * coincident with a key frame as the extra cost of key frames
+ * is dealt with elsewhere.
+ */
if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active)
{
- // Calcluate GF bits to be recovered
- // Projected size - av frame bits available for inter frames for clip as a whole
+ /* Calcluate GF bits to be recovered
+ * Projected size - av frame bits available for inter
+ * frames for clip as a whole
+ */
cpi->gf_overspend_bits += (cpi->projected_frame_size - cpi->inter_frame_target);
}
@@ -2726,32 +2719,25 @@ static void update_golden_frame_stats(VP8_COMP *cpi)
}
- // Update data structure that monitors level of reference to last GF
+ /* Update data structure that monitors level of reference to last GF */
vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
- // this frame refreshes means next frames don't unless specified by user
+ /* this frame refreshes means next frames don't unless specified by
+ * user
+ */
cm->refresh_golden_frame = 0;
cpi->common.frames_since_golden = 0;
- //if ( cm->frame_type == KEY_FRAME )
- //{
cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
- //}
- //else
- //{
- // // Carry a potrtion of count over to begining of next gf sequence
- // cpi->recent_ref_frame_usage[INTRA_FRAME] >>= 5;
- // cpi->recent_ref_frame_usage[LAST_FRAME] >>= 5;
- // cpi->recent_ref_frame_usage[GOLDEN_FRAME] >>= 5;
- // cpi->recent_ref_frame_usage[ALTREF_FRAME] >>= 5;
- //}
-
- // ******** Fixed Q test code only ************
- // If we are going to use the ALT reference for the next group of frames set a flag to say so.
+
+ /* ******** Fixed Q test code only ************ */
+ /* If we are going to use the ALT reference for the next group of
+ * frames set a flag to say so.
+ */
if (cpi->oxcf.fixed_q >= 0 &&
cpi->oxcf.play_alternate && !cpi->common.refresh_alt_ref_frame)
{
@@ -2762,14 +2748,14 @@ static void update_golden_frame_stats(VP8_COMP *cpi)
if (!cpi->source_alt_ref_pending)
cpi->source_alt_ref_active = 0;
- // Decrement count down till next gf
+ /* Decrement count down till next gf */
if (cpi->frames_till_gf_update_due > 0)
cpi->frames_till_gf_update_due--;
}
else if (!cpi->common.refresh_alt_ref_frame)
{
- // Decrement count down till next gf
+ /* Decrement count down till next gf */
if (cpi->frames_till_gf_update_due > 0)
cpi->frames_till_gf_update_due--;
@@ -2788,8 +2774,9 @@ static void update_golden_frame_stats(VP8_COMP *cpi)
}
}
-// This function updates the reference frame probability estimates that
-// will be used during mode selection
+/* This function updates the reference frame probability estimates that
+ * will be used during mode selection
+ */
static void update_rd_ref_frame_probs(VP8_COMP *cpi)
{
VP8_COMMON *cm = &cpi->common;
@@ -2811,7 +2798,9 @@ static void update_rd_ref_frame_probs(VP8_COMP *cpi)
cpi->prob_gf_coded = 128;
}
- // update reference frame costs since we can do better than what we got last frame.
+ /* update reference frame costs since we can do better than what we got
+ * last frame.
+ */
if (cpi->oxcf.number_of_layers == 1)
{
if (cpi->common.refresh_alt_ref_frame)
@@ -2842,7 +2831,7 @@ static void update_rd_ref_frame_probs(VP8_COMP *cpi)
}
-// 1 = key, 0 = inter
+/* 1 = key, 0 = inter */
static int decide_key_frame(VP8_COMP *cpi)
{
VP8_COMMON *cm = &cpi->common;
@@ -2854,8 +2843,8 @@ static int decide_key_frame(VP8_COMP *cpi)
if (cpi->Speed > 11)
return 0;
- // Clear down mmx registers
- vp8_clear_system_state(); //__asm emms;
+ /* Clear down mmx registers */
+ vp8_clear_system_state();
if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0))
{
@@ -2901,7 +2890,7 @@ static int decide_key_frame(VP8_COMP *cpi)
}
- // If the following are true we might as well code a key frame
+ /* If the following are true we might as well code a key frame */
if (((cpi->this_frame_percent_intra == 100) &&
(cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
((cpi->this_frame_percent_intra > 95) &&
@@ -2909,9 +2898,12 @@ static int decide_key_frame(VP8_COMP *cpi)
{
code_key_frame = 1;
}
- // in addition if the following are true and this is not a golden frame then code a key frame
- // Note that on golden frames there often seems to be a pop in intra useage anyway hence this
- // restriction is designed to prevent spurious key frames. The Intra pop needs to be investigated.
+ /* in addition if the following are true and this is not a golden frame
+ * then code a key frame. Note that on golden frames there often seems
+ * to be a pop in intra usage anyway hence this restriction is
+ * designed to prevent spurious key frames. The Intra pop needs to be
+ * investigated.
+ */
else if (((cpi->this_frame_percent_intra > 60) &&
(cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 2))) ||
((cpi->this_frame_percent_intra > 75) &&
@@ -2943,7 +2935,7 @@ static void Pass1Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest,
void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
{
- // write the frame
+ /* write the frame */
FILE *yframe;
int i;
char filename[255];
@@ -2971,10 +2963,11 @@ void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
fclose(yframe);
}
#endif
-// return of 0 means drop frame
+/* return of 0 means drop frame */
-// Function to test for conditions that indeicate we should loop
-// back and recode a frame.
+/* Function to test for conditions that indicate we should loop
+ * back and recode a frame.
+ */
static int recode_loop_test( VP8_COMP *cpi,
int high_limit, int low_limit,
int q, int maxq, int minq )
@@ -2982,32 +2975,33 @@ static int recode_loop_test( VP8_COMP *cpi,
int force_recode = 0;
VP8_COMMON *cm = &cpi->common;
- // Is frame recode allowed at all
- // Yes if either recode mode 1 is selected or mode two is selcted
- // and the frame is a key frame. golden frame or alt_ref_frame
+ /* Is frame recode allowed at all
+ * Yes if either recode mode 1 is selected or mode two is selected
+ * and the frame is a key frame, golden frame or alt_ref_frame
+ */
if ( (cpi->sf.recode_loop == 1) ||
( (cpi->sf.recode_loop == 2) &&
( (cm->frame_type == KEY_FRAME) ||
cm->refresh_golden_frame ||
cm->refresh_alt_ref_frame ) ) )
{
- // General over and under shoot tests
+ /* General over and under shoot tests */
if ( ((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
((cpi->projected_frame_size < low_limit) && (q > minq)) )
{
force_recode = 1;
}
- // Special Constrained quality tests
+ /* Special Constrained quality tests */
else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
{
- // Undershoot and below auto cq level
+ /* Undershoot and below auto cq level */
if ( (q > cpi->cq_target_quality) &&
(cpi->projected_frame_size <
((cpi->this_frame_target * 7) >> 3)))
{
force_recode = 1;
}
- // Severe undershoot and between auto and user cq level
+ /* Severe undershoot and between auto and user cq level */
else if ( (q > cpi->oxcf.cq_level) &&
(cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
(cpi->active_best_quality > cpi->oxcf.cq_level))
@@ -3025,8 +3019,9 @@ static void update_reference_frames(VP8_COMMON *cm)
{
YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
- // At this point the new frame has been encoded.
- // If any buffer copy / swapping is signaled it should be done here.
+ /* At this point the new frame has been encoded.
+ * If any buffer copy / swapping is signaled it should be done here.
+ */
if (cm->frame_type == KEY_FRAME)
{
@@ -3237,7 +3232,7 @@ static void encode_frame_to_data_rate
int drop_mark25 = drop_mark / 8;
- // Clear down mmx registers to allow floating point in what follows
+ /* Clear down mmx registers to allow floating point in what follows */
vp8_clear_system_state();
#if CONFIG_MULTITHREAD
@@ -3249,41 +3244,43 @@ static void encode_frame_to_data_rate
}
#endif
- // Test code for segmentation of gf/arf (0,0)
- //segmentation_test_function( cpi);
-
if(cpi->force_next_frame_intra)
{
cm->frame_type = KEY_FRAME; /* delayed intra frame */
cpi->force_next_frame_intra = 0;
}
- // For an alt ref frame in 2 pass we skip the call to the second pass function that sets the target bandwidth
+ /* For an alt ref frame in 2 pass we skip the call to the second pass
+ * function that sets the target bandwidth
+ */
#if !(CONFIG_REALTIME_ONLY)
if (cpi->pass == 2)
{
if (cpi->common.refresh_alt_ref_frame)
{
- cpi->per_frame_bandwidth = cpi->twopass.gf_bits; // Per frame bit target for the alt ref frame
- cpi->target_bandwidth = cpi->twopass.gf_bits * cpi->output_frame_rate; // per second target bitrate
+ /* Per frame bit target for the alt ref frame */
+ cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
+ /* per second target bitrate */
+ cpi->target_bandwidth = cpi->twopass.gf_bits * cpi->output_frame_rate;
}
}
else
#endif
cpi->per_frame_bandwidth = (int)(cpi->target_bandwidth / cpi->output_frame_rate);
- // Default turn off buffer to buffer copying
+ /* Default turn off buffer to buffer copying */
cm->copy_buffer_to_gf = 0;
cm->copy_buffer_to_arf = 0;
- // Clear zbin over-quant value and mode boost values.
+ /* Clear zbin over-quant value and mode boost values. */
cpi->zbin_over_quant = 0;
cpi->zbin_mode_boost = 0;
- // Enable or disable mode based tweaking of the zbin
- // For 2 Pass Only used where GF/ARF prediction quality
- // is above a threshold
+ /* Enable or disable mode based tweaking of the zbin
+ * For 2 Pass Only used where GF/ARF prediction quality
+ * is above a threshold
+ */
cpi->zbin_mode_boost_enabled = 1;
if (cpi->pass == 2)
{
@@ -3293,19 +3290,21 @@ static void encode_frame_to_data_rate
}
}
- // Current default encoder behaviour for the altref sign bias
+ /* Current default encoder behaviour for the altref sign bias */
if (cpi->source_alt_ref_active)
cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
else
cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
- // Check to see if a key frame is signalled
- // For two pass with auto key frame enabled cm->frame_type may already be set, but not for one pass.
+ /* Check to see if a key frame is signalled
+ * For two pass with auto key frame enabled cm->frame_type may already
+ * be set, but not for one pass.
+ */
if ((cm->current_video_frame == 0) ||
(cm->frame_flags & FRAMEFLAGS_KEY) ||
(cpi->oxcf.auto_key && (cpi->frames_since_key % cpi->key_frame_frequency == 0)))
{
- // Key frame from VFW/auto-keyframe/first frame
+ /* Key frame from VFW/auto-keyframe/first frame */
cm->frame_type = KEY_FRAME;
}
@@ -3320,48 +3319,29 @@ static void encode_frame_to_data_rate
}
#endif
- // Set default state for segment and mode based loop filter update flags
- cpi->mb.e_mbd.update_mb_segmentation_map = 0;
- cpi->mb.e_mbd.update_mb_segmentation_data = 0;
- cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
-
- // Set various flags etc to special state if it is a key frame
+ /* Set various flags etc to special state if it is a key frame */
if (cm->frame_type == KEY_FRAME)
{
int i;
- // Reset the loop filter deltas and segmentation map
+ /* Set the loop filter deltas and segmentation map update */
setup_features(cpi);
- // If segmentation is enabled force a map update for key frames
- if (cpi->mb.e_mbd.segmentation_enabled)
- {
- cpi->mb.e_mbd.update_mb_segmentation_map = 1;
- cpi->mb.e_mbd.update_mb_segmentation_data = 1;
- }
-
- // The alternate reference frame cannot be active for a key frame
+ /* The alternate reference frame cannot be active for a key frame */
cpi->source_alt_ref_active = 0;
- // Reset the RD threshold multipliers to default of * 1 (128)
+ /* Reset the RD threshold multipliers to default of * 1 (128) */
for (i = 0; i < MAX_MODES; i++)
{
cpi->rd_thresh_mult[i] = 128;
}
}
- // Test code for segmentation
- //if ( (cm->frame_type == KEY_FRAME) || ((cm->current_video_frame % 2) == 0))
- //if ( (cm->current_video_frame % 2) == 0 )
- // enable_segmentation(cpi);
- //else
- // disable_segmentation(cpi);
-
#if 0
- // Experimental code for lagged compress and one pass
- // Initialise one_pass GF frames stats
- // Update stats used for GF selection
- //if ( cpi->pass == 0 )
+ /* Experimental code for lagged compress and one pass
+ * Initialise one_pass GF frames stats
+ * Update stats used for GF selection
+ */
{
cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
@@ -3381,8 +3361,9 @@ static void encode_frame_to_data_rate
if (cpi->drop_frames_allowed)
{
- // The reset to decimation 0 is only done here for one pass.
- // Once it is set two pass leaves decimation on till the next kf.
+ /* The reset to decimation 0 is only done here for one pass.
+ * Once it is set two pass leaves decimation on till the next kf.
+ */
if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0))
cpi->decimation_factor --;
@@ -3401,14 +3382,17 @@ static void encode_frame_to_data_rate
{
cpi->decimation_factor = 1;
}
- //vpx_log("Encoder: Decimation Factor: %d \n",cpi->decimation_factor);
}
- // The following decimates the frame rate according to a regular pattern (i.e. to 1/2 or 2/3 frame rate)
- // This can be used to help prevent buffer under-run in CBR mode. Alternatively it might be desirable in
- // some situations to drop frame rate but throw more bits at each frame.
- //
- // Note that dropping a key frame can be problematic if spatial resampling is also active
+ /* The following decimates the frame rate according to a regular
+ * pattern (i.e. to 1/2 or 2/3 frame rate). This can be used to help
+ * prevent buffer under-run in CBR mode. Alternatively it might be
+ * desirable in some situations to drop frame rate but throw more bits
+ * at each frame.
+ *
+ * Note that dropping a key frame can be problematic if spatial
+ * resampling is also active
+ */
if (cpi->decimation_factor > 0)
{
switch (cpi->decimation_factor)
@@ -3424,8 +3408,10 @@ static void encode_frame_to_data_rate
break;
}
- // Note that we should not throw out a key frame (especially when spatial resampling is enabled).
- if ((cm->frame_type == KEY_FRAME)) // && cpi->oxcf.allow_spatial_resampling )
+ /* Note that we should not throw out a key frame (especially when
+ * spatial resampling is enabled).
+ */
+ if ((cm->frame_type == KEY_FRAME))
{
cpi->decimation_count = cpi->decimation_factor;
}
@@ -3450,7 +3436,9 @@ static void encode_frame_to_data_rate
{
unsigned int i;
- // Propagate bits saved by dropping the frame to higher layers
+ /* Propagate bits saved by dropping the frame to higher
+ * layers
+ */
for (i=cpi->current_layer+1; i<cpi->oxcf.number_of_layers; i++)
{
LAYER_CONTEXT *lc = &cpi->layer_context[i];
@@ -3469,7 +3457,7 @@ static void encode_frame_to_data_rate
else
cpi->decimation_count = 0;
- // Decide how big to make the frame
+ /* Decide how big to make the frame */
if (!vp8_pick_frame_size(cpi))
{
cm->current_video_frame++;
@@ -3477,15 +3465,17 @@ static void encode_frame_to_data_rate
return;
}
- // Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
- // This has a knock on effect on active best quality as well.
- // For CBR if the buffer reaches its maximum level then we can no longer
- // save up bits for later frames so we might as well use them up
- // on the current frame.
+ /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
+ * This has a knock on effect on active best quality as well.
+ * For CBR if the buffer reaches its maximum level then we can no longer
+ * save up bits for later frames so we might as well use them up
+ * on the current frame.
+ */
if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
(cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) && cpi->buffered_mode)
{
- int Adjustment = cpi->active_worst_quality / 4; // Max adjustment is 1/4
+ /* Max adjustment is 1/4 */
+ int Adjustment = cpi->active_worst_quality / 4;
if (Adjustment)
{
@@ -3508,8 +3498,9 @@ static void encode_frame_to_data_rate
}
}
- // Set an active best quality and if necessary active worst quality
- // There is some odd behavior for one pass here that needs attention.
+ /* Set an active best quality and if necessary active worst quality
+ * There is some odd behavior for one pass here that needs attention.
+ */
if ( (cpi->pass == 2) || (cpi->ni_frames > 150))
{
vp8_clear_system_state();
@@ -3525,9 +3516,10 @@ static void encode_frame_to_data_rate
else
cpi->active_best_quality = kf_high_motion_minq[Q];
- // Special case for key frames forced because we have reached
- // the maximum key frame interval. Here force the Q to a range
- // based on the ambient Q to reduce the risk of popping
+ /* Special case for key frames forced because we have reached
+ * the maximum key frame interval. Here force the Q to a range
+ * based on the ambient Q to reduce the risk of popping
+ */
if ( cpi->this_key_frame_forced )
{
if ( cpi->active_best_quality > cpi->avg_frame_qindex * 7/8)
@@ -3536,7 +3528,7 @@ static void encode_frame_to_data_rate
cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
}
}
- // One pass more conservative
+ /* One pass more conservative */
else
cpi->active_best_quality = kf_high_motion_minq[Q];
}
@@ -3544,16 +3536,17 @@ static void encode_frame_to_data_rate
else if (cpi->oxcf.number_of_layers==1 &&
(cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame))
{
- // Use the lower of cpi->active_worst_quality and recent
- // average Q as basis for GF/ARF Q limit unless last frame was
- // a key frame.
+ /* Use the lower of cpi->active_worst_quality and recent
+ * average Q as basis for GF/ARF Q limit unless last frame was
+ * a key frame.
+ */
if ( (cpi->frames_since_key > 1) &&
(cpi->avg_frame_qindex < cpi->active_worst_quality) )
{
Q = cpi->avg_frame_qindex;
}
+ /* For constrained quality don't allow Q less than the cq level */
+ /* For constrained quality dont allow Q less than the cq level */
if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
(Q < cpi->cq_target_quality) )
{
@@ -3569,14 +3562,14 @@ static void encode_frame_to_data_rate
else
cpi->active_best_quality = gf_mid_motion_minq[Q];
- // Constrained quality use slightly lower active best.
+ /* Constrained quality use slightly lower active best. */
if ( cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY )
{
cpi->active_best_quality =
cpi->active_best_quality * 15/16;
}
}
- // One pass more conservative
+ /* One pass more conservative */
else
cpi->active_best_quality = gf_high_motion_minq[Q];
}
@@ -3584,14 +3577,16 @@ static void encode_frame_to_data_rate
{
cpi->active_best_quality = inter_minq[Q];
- // For the constant/constrained quality mode we dont want
- // q to fall below the cq level.
+ /* For the constant/constrained quality mode we don't want
+ * q to fall below the cq level.
+ */
if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
(cpi->active_best_quality < cpi->cq_target_quality) )
{
- // If we are strongly undershooting the target rate in the last
- // frames then use the user passed in cq value not the auto
- // cq value.
+ /* If we are strongly undershooting the target rate in the last
+ * frames then use the user passed in cq value not the auto
+ * cq value.
+ */
if ( cpi->rolling_actual_bits < cpi->min_frame_bandwidth )
cpi->active_best_quality = cpi->oxcf.cq_level;
else
@@ -3599,12 +3594,14 @@ static void encode_frame_to_data_rate
}
}
- // If CBR and the buffer is as full then it is reasonable to allow
- // higher quality on the frames to prevent bits just going to waste.
+ /* If CBR and the buffer is full then it is reasonable to allow
+ * higher quality on the frames to prevent bits just going to waste.
+ */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
{
- // Note that the use of >= here elliminates the risk of a devide
- // by 0 error in the else if clause
+ /* Note that the use of >= here eliminates the risk of a divide
+ * by 0 error in the else if clause
+ */
if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size)
cpi->active_best_quality = cpi->best_quality;
@@ -3617,8 +3614,9 @@ static void encode_frame_to_data_rate
}
}
}
- // Make sure constrained quality mode limits are adhered to for the first
- // few frames of one pass encodes
+ /* Make sure constrained quality mode limits are adhered to for the first
+ * few frames of one pass encodes
+ */
else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
{
if ( (cm->frame_type == KEY_FRAME) ||
@@ -3632,7 +3630,7 @@ static void encode_frame_to_data_rate
}
}
- // Clip the active best and worst quality values to limits
+ /* Clip the active best and worst quality values to limits */
if (cpi->active_worst_quality > cpi->worst_quality)
cpi->active_worst_quality = cpi->worst_quality;
@@ -3642,14 +3640,14 @@ static void encode_frame_to_data_rate
if ( cpi->active_worst_quality < cpi->active_best_quality )
cpi->active_worst_quality = cpi->active_best_quality;
- // Determine initial Q to try
+ /* Determine initial Q to try */
Q = vp8_regulate_q(cpi, cpi->this_frame_target);
#if !(CONFIG_REALTIME_ONLY)
- // Set highest allowed value for Zbin over quant
+ /* Set highest allowed value for Zbin over quant */
if (cm->frame_type == KEY_FRAME)
- zbin_oq_high = 0; //ZBIN_OQ_MAX/16
+ zbin_oq_high = 0;
else if ((cpi->oxcf.number_of_layers == 1) && ((cm->refresh_alt_ref_frame ||
(cm->refresh_golden_frame && !cpi->source_alt_ref_active))))
{
@@ -3659,15 +3657,16 @@ static void encode_frame_to_data_rate
zbin_oq_high = ZBIN_OQ_MAX;
#endif
- // Setup background Q adjustment for error resilient mode.
- // For multi-layer encodes only enable this for the base layer.
+ /* Setup background Q adjustment for error resilient mode.
+ * For multi-layer encodes only enable this for the base layer.
+ */
if (cpi->cyclic_refresh_mode_enabled && (cpi->current_layer==0))
cyclic_background_refresh(cpi, Q, 0);
vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);
#if !(CONFIG_REALTIME_ONLY)
- // Limit Q range for the adaptive loop.
+ /* Limit Q range for the adaptive loop. */
bottom_index = cpi->active_best_quality;
top_index = cpi->active_worst_quality;
q_low = cpi->active_best_quality;
@@ -3735,16 +3734,11 @@ static void encode_frame_to_data_rate
do
{
- vp8_clear_system_state(); //__asm emms;
-
- /*
- if(cpi->is_src_frame_alt_ref)
- Q = 127;
- */
+ vp8_clear_system_state();
vp8_set_quantizer(cpi, Q);
- // setup skip prob for costing in mode/mv decision
+ /* setup skip prob for costing in mode/mv decision */
if (cpi->common.mb_no_coeff_skip)
{
cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
@@ -3788,7 +3782,9 @@ static void encode_frame_to_data_rate
*/
}
- //as this is for cost estimate, let's make sure it does not go extreme eitehr way
+ /* as this is for cost estimate, let's make sure it does not
+ * go extreme either way
+ */
if (cpi->prob_skip_false < 5)
cpi->prob_skip_false = 5;
@@ -3814,7 +3810,17 @@ static void encode_frame_to_data_rate
if (cm->frame_type == KEY_FRAME)
{
- resize_key_frame(cpi);
+ if(resize_key_frame(cpi))
+ {
+ /* If the frame size has changed, need to reset Q, quantizer,
+ * and background refresh.
+ */
+ Q = vp8_regulate_q(cpi, cpi->this_frame_target);
+ if (cpi->cyclic_refresh_mode_enabled && (cpi->current_layer==0))
+ cyclic_background_refresh(cpi, Q, 0);
+ vp8_set_quantizer(cpi, Q);
+ }
+
vp8_setup_key_frame(cpi);
}
@@ -3833,7 +3839,7 @@ static void encode_frame_to_data_rate
if (cm->refresh_entropy_probs == 0)
{
- // save a copy for later refresh
+ /* save a copy for later refresh */
vpx_memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
}
@@ -3841,23 +3847,25 @@ static void encode_frame_to_data_rate
vp8_update_coef_probs(cpi);
- // transform / motion compensation build reconstruction frame
- // +pack coef partitions
+ /* transform / motion compensation build reconstruction frame
+ * +pack coef partitions
+ */
vp8_encode_frame(cpi);
/* cpi->projected_frame_size is not needed for RT mode */
}
#else
- // transform / motion compensation build reconstruction frame
+ /* transform / motion compensation build reconstruction frame */
vp8_encode_frame(cpi);
cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
cpi->projected_frame_size = (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
#endif
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state();
- // Test to see if the stats generated for this frame indicate that we should have coded a key frame
- // (assuming that we didn't)!
+ /* Test to see if the stats generated for this frame indicate that
+ * we should have coded a key frame (assuming that we didn't)!
+ */
if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME)
{
int key_frame_decision = decide_key_frame(cpi);
@@ -3871,31 +3879,26 @@ static void encode_frame_to_data_rate
#if !(CONFIG_REALTIME_ONLY)
else if (key_frame_decision)
{
- // Reset all our sizing numbers and recode
+ /* Reset all our sizing numbers and recode */
cm->frame_type = KEY_FRAME;
vp8_pick_frame_size(cpi);
- // Clear the Alt reference frame active flag when we have a key frame
+ /* Clear the Alt reference frame active flag when we have
+ * a key frame
+ */
cpi->source_alt_ref_active = 0;
- // Reset the loop filter deltas and segmentation map
+ /* Set the loop filter deltas and segmentation map update */
setup_features(cpi);
- // If segmentation is enabled force a map update for key frames
- if (cpi->mb.e_mbd.segmentation_enabled)
- {
- cpi->mb.e_mbd.update_mb_segmentation_map = 1;
- cpi->mb.e_mbd.update_mb_segmentation_data = 1;
- }
-
vp8_restore_coding_context(cpi);
Q = vp8_regulate_q(cpi, cpi->this_frame_target);
vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);
- // Limit Q range for the adaptive loop.
+ /* Limit Q range for the adaptive loop. */
bottom_index = cpi->active_best_quality;
top_index = cpi->active_worst_quality;
q_low = cpi->active_best_quality;
@@ -3914,7 +3917,7 @@ static void encode_frame_to_data_rate
if (frame_over_shoot_limit == 0)
frame_over_shoot_limit = 1;
- // Are we are overshooting and up against the limit of active max Q.
+ /* Are we overshooting and up against the limit of active max Q. */
if (((cpi->pass != 2) || (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
(Q == cpi->active_worst_quality) &&
(cpi->active_worst_quality < cpi->worst_quality) &&
@@ -3922,50 +3925,52 @@ static void encode_frame_to_data_rate
{
int over_size_percent = ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) / frame_over_shoot_limit;
- // If so is there any scope for relaxing it
+ /* If so is there any scope for relaxing it */
while ((cpi->active_worst_quality < cpi->worst_quality) && (over_size_percent > 0))
{
cpi->active_worst_quality++;
-
- over_size_percent = (int)(over_size_percent * 0.96); // Assume 1 qstep = about 4% on frame size.
+ /* Assume 1 qstep = about 4% on frame size. */
+ over_size_percent = (int)(over_size_percent * 0.96);
}
#if !(CONFIG_REALTIME_ONLY)
top_index = cpi->active_worst_quality;
#endif
- // If we have updated the active max Q do not call vp8_update_rate_correction_factors() this loop.
+ /* If we have updated the active max Q do not call
+ * vp8_update_rate_correction_factors() this loop.
+ */
active_worst_qchanged = 1;
}
else
active_worst_qchanged = 0;
#if !(CONFIG_REALTIME_ONLY)
- // Special case handling for forced key frames
+ /* Special case handling for forced key frames */
if ( (cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced )
{
int last_q = Q;
int kf_err = vp8_calc_ss_err(cpi->Source,
&cm->yv12_fb[cm->new_fb_idx]);
- // The key frame is not good enough
+ /* The key frame is not good enough */
if ( kf_err > ((cpi->ambient_err * 7) >> 3) )
{
- // Lower q_high
+ /* Lower q_high */
q_high = (Q > q_low) ? (Q - 1) : q_low;
- // Adjust Q
+ /* Adjust Q */
Q = (q_high + q_low) >> 1;
}
- // The key frame is much better than the previous frame
+ /* The key frame is much better than the previous frame */
else if ( kf_err < (cpi->ambient_err >> 1) )
{
- // Raise q_low
+ /* Raise q_low */
q_low = (Q < q_high) ? (Q + 1) : q_high;
- // Adjust Q
+ /* Adjust Q */
Q = (q_high + q_low + 1) >> 1;
}
- // Clamp Q to upper and lower limits:
+ /* Clamp Q to upper and lower limits: */
if (Q > q_high)
Q = q_high;
else if (Q < q_low)
@@ -3974,7 +3979,9 @@ static void encode_frame_to_data_rate
Loop = Q != last_q;
}
- // Is the projected frame size out of range and are we allowed to attempt to recode.
+ /* Is the projected frame size out of range and are we allowed
+ * to attempt to recode.
+ */
else if ( recode_loop_test( cpi,
frame_over_shoot_limit, frame_under_shoot_limit,
Q, top_index, bottom_index ) )
@@ -3982,28 +3989,33 @@ static void encode_frame_to_data_rate
int last_q = Q;
int Retries = 0;
- // Frame size out of permitted range:
- // Update correction factor & compute new Q to try...
+ /* Frame size out of permitted range. Update correction factor
+ * & compute new Q to try...
+ */
- // Frame is too large
+ /* Frame is too large */
if (cpi->projected_frame_size > cpi->this_frame_target)
{
- //if ( cpi->zbin_over_quant == 0 )
- q_low = (Q < q_high) ? (Q + 1) : q_high; // Raise Qlow as to at least the current value
+ /* Raise Qlow as to at least the current value */
+ q_low = (Q < q_high) ? (Q + 1) : q_high;
- if (cpi->zbin_over_quant > 0) // If we are using over quant do the same for zbin_oq_low
+ /* If we are using over quant do the same for zbin_oq_low */
+ if (cpi->zbin_over_quant > 0)
zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high;
- //if ( undershoot_seen || (Q == MAXQ) )
if (undershoot_seen)
{
- // Update rate_correction_factor unless cpi->active_worst_quality has changed.
+ /* Update rate_correction_factor unless
+ * cpi->active_worst_quality has changed.
+ */
if (!active_worst_qchanged)
vp8_update_rate_correction_factors(cpi, 1);
Q = (q_high + q_low + 1) / 2;
- // Adjust cpi->zbin_over_quant (only allowed when Q is max)
+ /* Adjust cpi->zbin_over_quant (only allowed when Q
+ * is max)
+ */
if (Q < MAXQ)
cpi->zbin_over_quant = 0;
else
@@ -4014,7 +4026,9 @@ static void encode_frame_to_data_rate
}
else
{
- // Update rate_correction_factor unless cpi->active_worst_quality has changed.
+ /* Update rate_correction_factor unless
+ * cpi->active_worst_quality has changed.
+ */
if (!active_worst_qchanged)
vp8_update_rate_correction_factors(cpi, 0);
@@ -4030,23 +4044,29 @@ static void encode_frame_to_data_rate
overshoot_seen = 1;
}
- // Frame is too small
+ /* Frame is too small */
else
{
if (cpi->zbin_over_quant == 0)
- q_high = (Q > q_low) ? (Q - 1) : q_low; // Lower q_high if not using over quant
- else // else lower zbin_oq_high
+ /* Lower q_high if not using over quant */
+ q_high = (Q > q_low) ? (Q - 1) : q_low;
+ else
+ /* else lower zbin_oq_high */
zbin_oq_high = (cpi->zbin_over_quant > zbin_oq_low) ? (cpi->zbin_over_quant - 1) : zbin_oq_low;
if (overshoot_seen)
{
- // Update rate_correction_factor unless cpi->active_worst_quality has changed.
+ /* Update rate_correction_factor unless
+ * cpi->active_worst_quality has changed.
+ */
if (!active_worst_qchanged)
vp8_update_rate_correction_factors(cpi, 1);
Q = (q_high + q_low) / 2;
- // Adjust cpi->zbin_over_quant (only allowed when Q is max)
+ /* Adjust cpi->zbin_over_quant (only allowed when Q
+ * is max)
+ */
if (Q < MAXQ)
cpi->zbin_over_quant = 0;
else
@@ -4054,16 +4074,19 @@ static void encode_frame_to_data_rate
}
else
{
- // Update rate_correction_factor unless cpi->active_worst_quality has changed.
+ /* Update rate_correction_factor unless
+ * cpi->active_worst_quality has changed.
+ */
if (!active_worst_qchanged)
vp8_update_rate_correction_factors(cpi, 0);
Q = vp8_regulate_q(cpi, cpi->this_frame_target);
- // Special case reset for qlow for constrained quality.
- // This should only trigger where there is very substantial
- // undershoot on a frame and the auto cq level is above
- // the user passsed in value.
+ /* Special case reset for qlow for constrained quality.
+ * This should only trigger where there is very substantial
+ * undershoot on a frame and the auto cq level is above
+ * the user passed in value.
+ */
if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
(Q < q_low) )
{
@@ -4081,13 +4104,13 @@ static void encode_frame_to_data_rate
undershoot_seen = 1;
}
- // Clamp Q to upper and lower limits:
+ /* Clamp Q to upper and lower limits: */
if (Q > q_high)
Q = q_high;
else if (Q < q_low)
Q = q_low;
- // Clamp cpi->zbin_over_quant
+ /* Clamp cpi->zbin_over_quant */
cpi->zbin_over_quant = (cpi->zbin_over_quant < zbin_oq_low) ? zbin_oq_low : (cpi->zbin_over_quant > zbin_oq_high) ? zbin_oq_high : cpi->zbin_over_quant;
Loop = Q != last_q;
@@ -4111,30 +4134,20 @@ static void encode_frame_to_data_rate
while (Loop == 1);
#if 0
- // Experimental code for lagged and one pass
- // Update stats used for one pass GF selection
- {
- /*
- int frames_so_far;
- double frame_intra_error;
- double frame_coded_error;
- double frame_pcnt_inter;
- double frame_pcnt_motion;
- double frame_mvr;
- double frame_mvr_abs;
- double frame_mvc;
- double frame_mvc_abs;
- */
-
+ /* Experimental code for lagged and one pass
+ * Update stats used for one pass GF selection
+ */
+ {
cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
}
#endif
- // Special case code to reduce pulsing when key frames are forced at a
- // fixed interval. Note the reconstruction error if it is the frame before
- // the force key frame
+ /* Special case code to reduce pulsing when key frames are forced at a
+ * fixed interval. Note the reconstruction error if it is the frame before
+ * the forced key frame
+ */
if ( cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0) )
{
cpi->ambient_err = vp8_calc_ss_err(cpi->Source,
@@ -4177,9 +4190,10 @@ static void encode_frame_to_data_rate
vp8_cal_dissimilarity(cpi);
#endif
- // Update the GF useage maps.
- // This is done after completing the compression of a frame when all
- // modes etc. are finalized but before loop filter
+ /* Update the GF usage maps.
+ * This is done after completing the compression of a frame when all
+ * modes etc. are finalized but before loop filter
+ */
if (cpi->oxcf.number_of_layers == 1)
vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
@@ -4194,9 +4208,10 @@ static void encode_frame_to_data_rate
}
#endif
- // For inter frames the current default behavior is that when
- // cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
- // This is purely an encoder decision at present.
+ /* For inter frames the current default behavior is that when
+ * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
+ * This is purely an encoder decision at present.
+ */
if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame)
cm->copy_buffer_to_arf = 2;
else
@@ -4207,7 +4222,8 @@ static void encode_frame_to_data_rate
#if CONFIG_MULTITHREAD
if (cpi->b_multi_threaded)
{
- sem_post(&cpi->h_event_start_lpf); /* start loopfilter in separate thread */
+ /* start loopfilter in separate thread */
+ sem_post(&cpi->h_event_start_lpf);
cpi->b_lpf_running = 1;
}
else
@@ -4231,7 +4247,7 @@ static void encode_frame_to_data_rate
sem_wait(&cpi->h_event_end_lpf);
#endif
- // build the bitstream
+ /* build the bitstream */
vp8_pack_bitstream(cpi, dest, dest_end, size);
#if CONFIG_MULTITHREAD
@@ -4247,7 +4263,7 @@ static void encode_frame_to_data_rate
* needed in motion search besides loopfilter */
cm->last_frame_type = cm->frame_type;
- // Update rate control heuristics
+ /* Update rate control heuristics */
cpi->total_byte_count += (*size);
cpi->projected_frame_size = (*size) << 3;
@@ -4268,18 +4284,21 @@ static void encode_frame_to_data_rate
vp8_adjust_key_frame_context(cpi);
}
- // Keep a record of ambient average Q.
+ /* Keep a record of ambient average Q. */
if (cm->frame_type != KEY_FRAME)
cpi->avg_frame_qindex = (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
- // Keep a record from which we can calculate the average Q excluding GF updates and key frames
+ /* Keep a record from which we can calculate the average Q excluding
+ * GF updates and key frames
+ */
if ((cm->frame_type != KEY_FRAME) && ((cpi->oxcf.number_of_layers > 1) ||
(!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame)))
{
cpi->ni_frames++;
- // Calculate the average Q for normal inter frames (not key or GFU
- // frames).
+ /* Calculate the average Q for normal inter frames (not key or GFU
+ * frames).
+ */
if ( cpi->pass == 2 )
{
cpi->ni_tot_qi += Q;
@@ -4287,57 +4306,62 @@ static void encode_frame_to_data_rate
}
else
{
- // Damp value for first few frames
+ /* Damp value for first few frames */
if (cpi->ni_frames > 150 )
{
cpi->ni_tot_qi += Q;
cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
}
- // For one pass, early in the clip ... average the current frame Q
- // value with the worstq entered by the user as a dampening measure
+ /* For one pass, early in the clip ... average the current frame Q
+ * value with the worstq entered by the user as a dampening measure
+ */
else
{
cpi->ni_tot_qi += Q;
cpi->ni_av_qi = ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
}
- // If the average Q is higher than what was used in the last frame
- // (after going through the recode loop to keep the frame size within range)
- // then use the last frame value - 1.
- // The -1 is designed to stop Q and hence the data rate, from progressively
- // falling away during difficult sections, but at the same time reduce the number of
- // itterations around the recode loop.
+ /* If the average Q is higher than what was used in the last
+ * frame (after going through the recode loop to keep the frame
+ * size within range) then use the last frame value - 1. The -1
+ * is designed to stop Q and hence the data rate, from
+ * progressively falling away during difficult sections, but at
+ * the same time reduce the number of itterations around the
+ * recode loop.
+ */
if (Q > cpi->ni_av_qi)
cpi->ni_av_qi = Q - 1;
}
}
- // Update the buffer level variable.
- // Non-viewable frames are a special case and are treated as pure overhead.
+ /* Update the buffer level variable. */
+ /* Non-viewable frames are a special case and are treated as pure overhead. */
if ( !cm->show_frame )
cpi->bits_off_target -= cpi->projected_frame_size;
else
cpi->bits_off_target += cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
- // Clip the buffer level to the maximum specified buffer size
+ /* Clip the buffer level to the maximum specified buffer size */
if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
- // Rolling monitors of whether we are over or underspending used to help regulate min and Max Q in two pass.
+ /* Rolling monitors of whether we are over or underspending used to
+ * help regulate min and Max Q in two pass.
+ */
cpi->rolling_target_bits = ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
cpi->rolling_actual_bits = ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
cpi->long_rolling_target_bits = ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
cpi->long_rolling_actual_bits = ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) / 32;
- // Actual bits spent
+ /* Actual bits spent */
cpi->total_actual_bits += cpi->projected_frame_size;
- // Debug stats
+ /* Debug stats */
cpi->total_target_vs_actual += (cpi->this_frame_target - cpi->projected_frame_size);
cpi->buffer_level = cpi->bits_off_target;
- // Propagate values to higher temporal layers
+ /* Propagate values to higher temporal layers */
if (cpi->oxcf.number_of_layers > 1)
{
unsigned int i;
@@ -4350,7 +4374,7 @@ static void encode_frame_to_data_rate
lc->bits_off_target += bits_off_for_this_layer;
- // Clip buffer level to maximum buffer size for the layer
+ /* Clip buffer level to maximum buffer size for the layer */
if (lc->bits_off_target > lc->maximum_buffer_size)
lc->bits_off_target = lc->maximum_buffer_size;
@@ -4360,7 +4384,9 @@ static void encode_frame_to_data_rate
}
}
- // Update bits left to the kf and gf groups to account for overshoot or undershoot on these frames
+ /* Update bits left to the kf and gf groups to account for overshoot
+ * or undershoot on these frames
+ */
if (cm->frame_type == KEY_FRAME)
{
cpi->twopass.kf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
@@ -4393,7 +4419,7 @@ static void encode_frame_to_data_rate
cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
cpi->last_skip_probs_q[0] = cm->base_qindex;
- //update the baseline
+ /* update the baseline */
cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
}
@@ -4403,7 +4429,7 @@ static void encode_frame_to_data_rate
{
FILE *f = fopen("tmp.stt", "a");
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state();
if (cpi->twopass.total_left_stats.coded_error != 0.0)
fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d %10d %6d %6d"
@@ -4419,7 +4445,6 @@ static void encode_frame_to_data_rate
cpi->active_best_quality, cpi->active_worst_quality,
cpi->ni_av_qi, cpi->cq_target_quality,
cpi->zbin_over_quant,
- //cpi->avg_frame_qindex, cpi->zbin_over_quant,
cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
cm->frame_type, cpi->gfu_boost,
cpi->twopass.est_max_qcorrection_factor,
@@ -4442,7 +4467,6 @@ static void encode_frame_to_data_rate
cpi->active_best_quality, cpi->active_worst_quality,
cpi->ni_av_qi, cpi->cq_target_quality,
cpi->zbin_over_quant,
- //cpi->avg_frame_qindex, cpi->zbin_over_quant,
cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
cm->frame_type, cpi->gfu_boost,
cpi->twopass.est_max_qcorrection_factor,
@@ -4472,7 +4496,7 @@ static void encode_frame_to_data_rate
#endif
- // If this was a kf or Gf note the Q
+ /* If this was a kf or Gf note the Q */
if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame || cm->refresh_alt_ref_frame)
cm->last_kf_gf_q = cm->base_qindex;
@@ -4487,19 +4511,25 @@ static void encode_frame_to_data_rate
cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_ALTREF;
- if (cm->refresh_last_frame & cm->refresh_golden_frame) // both refreshed
+ if (cm->refresh_last_frame & cm->refresh_golden_frame)
+ /* both refreshed */
cpi->gold_is_last = 1;
- else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) // 1 refreshed but not the other
+ else if (cm->refresh_last_frame ^ cm->refresh_golden_frame)
+ /* 1 refreshed but not the other */
cpi->gold_is_last = 0;
- if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) // both refreshed
+ if (cm->refresh_last_frame & cm->refresh_alt_ref_frame)
+ /* both refreshed */
cpi->alt_is_last = 1;
- else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) // 1 refreshed but not the other
+ else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame)
+ /* 1 refreshed but not the other */
cpi->alt_is_last = 0;
- if (cm->refresh_alt_ref_frame & cm->refresh_golden_frame) // both refreshed
+ if (cm->refresh_alt_ref_frame & cm->refresh_golden_frame)
+ /* both refreshed */
cpi->gold_is_alt = 1;
- else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) // 1 refreshed but not the other
+ else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame)
+ /* 1 refreshed but not the other */
cpi->gold_is_alt = 0;
cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
@@ -4517,19 +4547,19 @@ static void encode_frame_to_data_rate
if (!cpi->oxcf.error_resilient_mode)
{
if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame && (cm->frame_type != KEY_FRAME))
- // Update the alternate reference frame stats as appropriate.
+ /* Update the alternate reference frame stats as appropriate. */
update_alt_ref_frame_stats(cpi);
else
- // Update the Golden frame stats as appropriate.
+ /* Update the Golden frame stats as appropriate. */
update_golden_frame_stats(cpi);
}
if (cm->frame_type == KEY_FRAME)
{
- // Tell the caller that the frame was coded as a key frame
+ /* Tell the caller that the frame was coded as a key frame */
*frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
- // As this frame is a key frame the next defaults to an inter frame.
+ /* As this frame is a key frame the next defaults to an inter frame. */
cm->frame_type = INTER_FRAME;
cpi->last_frame_percent_intra = 100;
@@ -4541,20 +4571,24 @@ static void encode_frame_to_data_rate
cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
}
- // Clear the one shot update flags for segmentation map and mode/ref loop filter deltas.
+ /* Clear the one shot update flags for segmentation map and mode/ref
+ * loop filter deltas.
+ */
cpi->mb.e_mbd.update_mb_segmentation_map = 0;
cpi->mb.e_mbd.update_mb_segmentation_data = 0;
cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
- // Dont increment frame counters if this was an altref buffer update not a real frame
+ /* Dont increment frame counters if this was an altref buffer update
+ * not a real frame
+ */
if (cm->show_frame)
{
cm->current_video_frame++;
cpi->frames_since_key++;
}
- // reset to normal state now that we are done.
+ /* reset to normal state now that we are done. */
@@ -4570,8 +4604,8 @@ static void encode_frame_to_data_rate
}
#endif
- // DEBUG
- //vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show);
+ /* DEBUG */
+ /* vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
}
@@ -4584,33 +4618,38 @@ static void check_gf_quality(VP8_COMP *cpi)
int gf_ref_usage_pct = (cpi->count_mb_ref_frame_usage[GOLDEN_FRAME] * 100) / (cm->mb_rows * cm->mb_cols);
int last_ref_zz_useage = (cpi->inter_zz_count * 100) / (cm->mb_rows * cm->mb_cols);
- // Gf refresh is not currently being signalled
+ /* Gf refresh is not currently being signalled */
if (cpi->gf_update_recommended == 0)
{
if (cpi->common.frames_since_golden > 7)
{
- // Low use of gf
+ /* Low use of gf */
if ((gf_active_pct < 10) || ((gf_active_pct + gf_ref_usage_pct) < 15))
{
- // ...but last frame zero zero usage is reasonbable so a new gf might be appropriate
+ /* ...but last frame zero zero usage is reasonbable so a
+ * new gf might be appropriate
+ */
if (last_ref_zz_useage >= 25)
{
cpi->gf_bad_count ++;
- if (cpi->gf_bad_count >= 8) // Check that the condition is stable
+ /* Check that the condition is stable */
+ if (cpi->gf_bad_count >= 8)
{
cpi->gf_update_recommended = 1;
cpi->gf_bad_count = 0;
}
}
else
- cpi->gf_bad_count = 0; // Restart count as the background is not stable enough
+ /* Restart count as the background is not stable enough */
+ cpi->gf_bad_count = 0;
}
else
- cpi->gf_bad_count = 0; // Gf useage has picked up so reset count
+ /* Gf useage has picked up so reset count */
+ cpi->gf_bad_count = 0;
}
}
- // If the signal is set but has not been read should we cancel it.
+ /* If the signal is set but has not been read should we cancel it. */
else if (last_ref_zz_useage < 15)
{
cpi->gf_update_recommended = 0;
@@ -4650,7 +4689,7 @@ static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest,
}
#endif
-//For ARM NEON, d8-d15 are callee-saved registers, and need to be saved by us.
+/* For ARM NEON, d8-d15 are callee-saved registers, and need to be saved. */
#if HAVE_NEON
extern void vp8_push_neon(int64_t *store);
extern void vp8_pop_neon(int64_t *store);
@@ -4757,7 +4796,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
cpi->source = NULL;
#if !(CONFIG_REALTIME_ONLY)
- // Should we code an alternate reference frame
+ /* Should we code an alternate reference frame */
if (cpi->oxcf.error_resilient_mode == 0 &&
cpi->oxcf.play_alternate &&
cpi->source_alt_ref_pending)
@@ -4778,7 +4817,8 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
cm->refresh_golden_frame = 0;
cm->refresh_last_frame = 0;
cm->show_frame = 0;
- cpi->source_alt_ref_pending = 0; // Clear Pending alt Ref flag.
+ /* Clear Pending alt Ref flag. */
+ cpi->source_alt_ref_pending = 0;
cpi->is_src_frame_alt_ref = 0;
}
}
@@ -4850,7 +4890,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
cpi->last_end_time_stamp_seen = cpi->source->ts_start;
}
- // adjust frame rates based on timestamps given
+ /* adjust frame rates based on timestamps given */
if (cm->show_frame)
{
int64_t this_duration;
@@ -4868,7 +4908,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
last_duration = cpi->last_end_time_stamp_seen
- cpi->last_time_stamp_seen;
- // do a step update if the duration changes by 10%
+ /* do a step update if the duration changes by 10% */
if (last_duration)
step = ((this_duration - last_duration) * 10 / last_duration);
}
@@ -4900,7 +4940,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
{
unsigned int i;
- // Update frame rates for each layer
+ /* Update frame rates for each layer */
for (i=0; i<cpi->oxcf.number_of_layers; i++)
{
LAYER_CONTEXT *lc = &cpi->layer_context[i];
@@ -4922,7 +4962,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
update_layer_contexts (cpi);
- // Restore layer specific context & set frame rate
+ /* Restore layer specific context & set frame rate */
layer = cpi->oxcf.layer_id[
cm->current_video_frame % cpi->oxcf.periodicity];
restore_layer_context (cpi, layer);
@@ -4960,11 +5000,11 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
}
#endif
- // start with a 0 size frame
+ /* start with a 0 size frame */
*size = 0;
- // Clear down mmx registers
- vp8_clear_system_state(); //__asm emms;
+ /* Clear down mmx registers */
+ vp8_clear_system_state();
cm->frame_type = INTER_FRAME;
cm->frame_flags = *frame_flags;
@@ -4973,7 +5013,6 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
if (cm->refresh_alt_ref_frame)
{
- //cm->refresh_golden_frame = 1;
cm->refresh_golden_frame = 0;
cm->refresh_last_frame = 0;
}
@@ -5031,7 +5070,6 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
if (duration2)
{
- //if(*frame_flags!=1)
{
if (cpi->avg_pick_mode_time == 0)
@@ -5048,8 +5086,8 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
vpx_memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
}
- // Save the contexts separately for alt ref, gold and last.
- // (TODO jbb -> Optimize this with pointers to avoid extra copies. )
+ /* Save the contexts separately for alt ref, gold and last. */
+ /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
if(cm->refresh_alt_ref_frame)
vpx_memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
@@ -5059,12 +5097,12 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
if(cm->refresh_last_frame)
vpx_memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
- // if its a dropped frame honor the requests on subsequent frames
+ /* if its a dropped frame honor the requests on subsequent frames */
if (*size > 0)
{
cpi->droppable = !frame_is_reference(cpi);
- // return to normal state
+ /* return to normal state */
cm->refresh_entropy_probs = 1;
cm->refresh_alt_ref_frame = 0;
cm->refresh_golden_frame = 0;
@@ -5073,7 +5111,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
}
- // Save layer specific state
+ /* Save layer specific state */
if (cpi->oxcf.number_of_layers > 1)
save_layer_context (cpi);
@@ -5287,7 +5325,7 @@ int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest, vp8_ppfla
ret = -1;
}
-#endif //!CONFIG_POSTPROC
+#endif
vp8_clear_system_state();
return ret;
}
@@ -5296,29 +5334,53 @@ int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest, vp8_ppfla
int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4])
{
signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
+ int internal_delta_q[MAX_MB_SEGMENTS];
+ const unsigned int range = 63;
+ int i;
+
+ // This method is currently incompatible with the cyclic refresh method
+ if ( cpi->cyclic_refresh_mode_enabled )
+ return -1;
+ // Check number of rows and columns match
if (cpi->common.mb_rows != rows || cpi->common.mb_cols != cols)
return -1;
+ // Range check the delta Q values and convert the external Q range values
+ // to internal ones.
+ if ( (abs(delta_q[0]) > range) || (abs(delta_q[1]) > range) ||
+ (abs(delta_q[2]) > range) || (abs(delta_q[3]) > range) )
+ return -1;
+
+ // Range check the delta lf values
+ if ( (abs(delta_lf[0]) > range) || (abs(delta_lf[1]) > range) ||
+ (abs(delta_lf[2]) > range) || (abs(delta_lf[3]) > range) )
+ return -1;
+
if (!map)
{
disable_segmentation(cpi);
return 0;
}
- // Set the segmentation Map
+ // Translate the external delta q values to internal values.
+ for ( i = 0; i < MAX_MB_SEGMENTS; i++ )
+ internal_delta_q[i] =
+ ( delta_q[i] >= 0 ) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
+
+ /* Set the segmentation Map */
set_segmentation_map(cpi, map);
- // Activate segmentation.
+ /* Activate segmentation. */
enable_segmentation(cpi);
- // Set up the quant segment data
- feature_data[MB_LVL_ALT_Q][0] = delta_q[0];
- feature_data[MB_LVL_ALT_Q][1] = delta_q[1];
- feature_data[MB_LVL_ALT_Q][2] = delta_q[2];
- feature_data[MB_LVL_ALT_Q][3] = delta_q[3];
+ /* Set up the quant segment data */
+ feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
+ feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
+ feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
+ feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];
- // Set up the loop segment data s
+ /* Set up the loop segment data s */
feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
@@ -5329,8 +5391,7 @@ int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows, unsigne
cpi->segment_encode_breakout[2] = threshold[2];
cpi->segment_encode_breakout[3] = threshold[3];
- // Initialise the feature data structure
- // SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1
+ /* Initialise the feature data structure */
set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
return 0;
@@ -5352,7 +5413,6 @@ int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows, uns
}
else
{
- //cpi->active_map_enabled = 0;
return -1 ;
}
}
@@ -5382,7 +5442,9 @@ int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest)
unsigned char *src = source->y_buffer;
unsigned char *dst = dest->y_buffer;
- // Loop through the Y plane raw and reconstruction data summing (square differences)
+ /* Loop through the Y plane raw and reconstruction data summing
+ * (square differences)
+ */
for (i = 0; i < source->y_height; i += 16)
{
for (j = 0; j < source->y_width; j += 16)
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index 3b202c941..22bfeaf3a 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -33,7 +33,6 @@
#include "vp8/encoder/denoising.h"
#endif
-//#define SPEEDSTATS 1
#define MIN_GF_INTERVAL 4
#define DEFAULT_GF_INTERVAL 7
@@ -74,7 +73,6 @@ typedef struct
int mvcosts[2][MVvals+1];
#ifdef MODE_STATS
- // Stats
int y_modes[5];
int uv_modes[4];
int b_modes[10];
@@ -233,11 +231,11 @@ enum
typedef struct
{
- // Layer configuration
+ /* Layer configuration */
double frame_rate;
int target_bandwidth;
- // Layer specific coding parameters
+ /* Layer specific coding parameters */
int starting_buffer_level;
int optimal_buffer_level;
int maximum_buffer_size;
@@ -308,7 +306,7 @@ typedef struct VP8_COMP
MACROBLOCK mb;
VP8_COMMON common;
- vp8_writer bc[9]; // one boolcoder for each partition
+ vp8_writer bc[9]; /* one boolcoder for each partition */
VP8_CONFIG oxcf;
@@ -322,16 +320,20 @@ typedef struct VP8_COMP
YV12_BUFFER_CONFIG scaled_source;
YV12_BUFFER_CONFIG *last_frame_unscaled_source;
- int source_alt_ref_pending; // frame in src_buffers has been identified to be encoded as an alt ref
- int source_alt_ref_active; // an alt ref frame has been encoded and is usable
+ /* frame in src_buffers has been identified to be encoded as an alt ref */
+ int source_alt_ref_pending;
+ /* an alt ref frame has been encoded and is usable */
+ int source_alt_ref_active;
+ /* source of frame to encode is an exact copy of an alt ref frame */
+ int is_src_frame_alt_ref;
- int is_src_frame_alt_ref; // source of frame to encode is an exact copy of an alt ref frame
+ /* golden frame same as last frame ( short circuit gold searches) */
+ int gold_is_last;
+ /* Alt reference frame same as last ( short circuit altref search) */
+ int alt_is_last;
+ /* don't do both alt and gold search ( just do gold). */
+ int gold_is_alt;
- int gold_is_last; // golden frame same as last frame ( short circuit gold searches)
- int alt_is_last; // Alt reference frame same as last ( short circuit altref search)
- int gold_is_alt; // don't do both alt and gold search ( just do gold).
-
- //int refresh_alt_ref_frame;
YV12_BUFFER_CONFIG pick_lf_lvl_frame;
TOKENEXTRA *tok;
@@ -343,7 +345,7 @@ typedef struct VP8_COMP
unsigned int this_key_frame_forced;
unsigned int next_key_frame_forced;
- // Ambient reconstruction err target for force key frames
+ /* Ambient reconstruction err target for force key frames */
int ambient_err;
unsigned int mode_check_freq[MAX_MODES];
@@ -360,7 +362,7 @@ typedef struct VP8_COMP
CODING_CONTEXT coding_context;
- // Rate targetting variables
+ /* Rate targetting variables */
int64_t prediction_error;
int64_t last_prediction_error;
int64_t intra_error;
@@ -368,30 +370,43 @@ typedef struct VP8_COMP
int this_frame_target;
int projected_frame_size;
- int last_q[2]; // Separate values for Intra/Inter
+ int last_q[2]; /* Separate values for Intra/Inter */
double rate_correction_factor;
double key_frame_rate_correction_factor;
double gf_rate_correction_factor;
- int frames_till_gf_update_due; // Count down till next GF
- int current_gf_interval; // GF interval chosen when we coded the last GF
+ /* Count down till next GF */
+ int frames_till_gf_update_due;
+
+ /* GF interval chosen when we coded the last GF */
+ int current_gf_interval;
+
+ /* Total bits overspent becasue of GF boost (cumulative) */
+ int gf_overspend_bits;
- int gf_overspend_bits; // Total bits overspent becasue of GF boost (cumulative)
+ /* Used in the few frames following a GF to recover the extra bits
+ * spent in that GF
+ */
+ int non_gf_bitrate_adjustment;
- int non_gf_bitrate_adjustment; // Used in the few frames following a GF to recover the extra bits spent in that GF
+ /* Extra bits spent on key frames that need to be recovered */
+ int kf_overspend_bits;
- int kf_overspend_bits; // Extra bits spent on key frames that need to be recovered on inter frames
- int kf_bitrate_adjustment; // Current number of bit s to try and recover on each inter frame.
+ /* Current number of bit s to try and recover on each inter frame. */
+ int kf_bitrate_adjustment;
int max_gf_interval;
int baseline_gf_interval;
- int active_arnr_frames; // <= cpi->oxcf.arnr_max_frames
+ int active_arnr_frames;
int64_t key_frame_count;
int prior_key_frame_distance[KEY_FRAME_CONTEXT];
- int per_frame_bandwidth; // Current section per frame bandwidth target
- int av_per_frame_bandwidth; // Average frame size target for clip
- int min_frame_bandwidth; // Minimum allocation that should be used for any frame
+ /* Current section per frame bandwidth target */
+ int per_frame_bandwidth;
+ /* Average frame size target for clip */
+ int av_per_frame_bandwidth;
+ /* Minimum allocation that should be used for any frame */
+ int min_frame_bandwidth;
int inter_frame_target;
double output_frame_rate;
int64_t last_time_stamp_seen;
@@ -425,7 +440,7 @@ typedef struct VP8_COMP
int long_rolling_actual_bits;
int64_t total_actual_bits;
- int total_target_vs_actual; // debug stats
+ int total_target_vs_actual; /* debug stats */
int worst_quality;
int active_worst_quality;
@@ -434,18 +449,16 @@ typedef struct VP8_COMP
int cq_target_quality;
- int drop_frames_allowed; // Are we permitted to drop frames?
- int drop_frame; // Drop this frame?
+ int drop_frames_allowed; /* Are we permitted to drop frames? */
+ int drop_frame; /* Drop this frame? */
int ymode_count [VP8_YMODES]; /* intra MB type cts this frame */
- int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */
+ int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */
unsigned int MVcount [2] [MVvals]; /* (row,col) MV cts this frame */
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
- //DECLARE_ALIGNED(16, int, coef_counts_backup [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]); //not used any more
- //save vp8_tree_probs_from_distribution result for each frame to avoid repeat calculation
vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
char update_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
@@ -459,7 +472,7 @@ typedef struct VP8_COMP
struct vpx_codec_pkt_list *output_pkt_list;
#if 0
- // Experimental code for lagged and one pass
+ /* Experimental code for lagged and one pass */
ONEPASS_FRAMESTATS one_pass_frame_stats[MAX_LAG_BUFFERS];
int one_pass_frame_index;
#endif
@@ -467,11 +480,10 @@ typedef struct VP8_COMP
int decimation_factor;
int decimation_count;
- // for real time encoding
- int avg_encode_time; //microsecond
- int avg_pick_mode_time; //microsecond
+ /* for real time encoding */
+ int avg_encode_time; /* microsecond */
+ int avg_pick_mode_time; /* microsecond */
int Speed;
- unsigned int cpu_freq; //Mhz
int compressor_speed;
int interquantizer;
@@ -500,20 +512,25 @@ typedef struct VP8_COMP
SPEED_FEATURES sf;
int error_bins[1024];
- // Data used for real time conferencing mode to help determine if it would be good to update the gf
+ /* Data used for real time conferencing mode to help determine if it
+ * would be good to update the gf
+ */
int inter_zz_count;
int gf_bad_count;
int gf_update_recommended;
int skip_true_count;
unsigned char *segmentation_map;
- signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; // Segment data (can be deltas or absolute values)
- int segment_encode_breakout[MAX_MB_SEGMENTS]; // segment threashold for encode breakout
+ signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
+ int segment_encode_breakout[MAX_MB_SEGMENTS];
unsigned char *active_map;
unsigned int active_map_enabled;
- // Video conferencing cyclic refresh mode flags etc
- // This is a mode designed to clean up the background over time in live encoding scenarious. It uses segmentation
+
+ /* Video conferencing cyclic refresh mode flags. This is a mode
+ * designed to clean up the background over time in live encoding
+ * scenarious. It uses segmentation.
+ */
int cyclic_refresh_mode_enabled;
int cyclic_refresh_mode_max_mbs_perframe;
int cyclic_refresh_mode_index;
@@ -521,7 +538,7 @@ typedef struct VP8_COMP
signed char *cyclic_refresh_map;
#if CONFIG_MULTITHREAD
- // multithread data
+ /* multithread data */
int * mt_current_mb_col;
int mt_sync_range;
int b_multi_threaded;
@@ -535,7 +552,7 @@ typedef struct VP8_COMP
ENCODETHREAD_DATA *en_thread_data;
LPFTHREAD_DATA lpf_thread_data;
- //events
+ /* events */
sem_t *h_event_start_encoding;
sem_t h_event_end_encoding;
sem_t h_event_start_lpf;
@@ -546,7 +563,6 @@ typedef struct VP8_COMP
unsigned int partition_sz[MAX_PARTITIONS];
unsigned char *partition_d[MAX_PARTITIONS];
unsigned char *partition_d_end[MAX_PARTITIONS];
- // end of multithread data
fractional_mv_step_fp *find_fractional_mv_step;
@@ -591,16 +607,16 @@ typedef struct VP8_COMP
int gf_decay_rate;
int static_scene_max_gf_interval;
int kf_bits;
- int gf_group_error_left; // Remaining error from uncoded frames in a gf group. Two pass use only
-
- // Projected total bits available for a key frame group of frames
+ /* Remaining error from uncoded frames in a gf group. */
+ int gf_group_error_left;
+ /* Projected total bits available for a key frame group of frames */
int64_t kf_group_bits;
-
- // Error score of frames still to be coded in kf group
+ /* Error score of frames still to be coded in kf group */
int64_t kf_group_error_left;
-
- int gf_group_bits; // Projected Bits available for a group of frames including 1 GF or ARF
- int gf_bits; // Bits for the golden frame or ARF - 2 pass only
+ /* Projected Bits available for a group including 1 GF or ARF */
+ int gf_group_bits;
+ /* Bits for the golden frame or ARF */
+ int gf_bits;
int alt_extra_bits;
double est_max_qcorrection_factor;
} twopass;
@@ -638,24 +654,26 @@ typedef struct VP8_COMP
#endif
int b_calculate_psnr;
- // Per MB activity measurement
+ /* Per MB activity measurement */
unsigned int activity_avg;
unsigned int * mb_activity_map;
int * mb_norm_activity_map;
- // Record of which MBs still refer to last golden frame either
- // directly or through 0,0
+ /* Record of which MBs still refer to last golden frame either
+ * directly or through 0,0
+ */
unsigned char *gf_active_flags;
int gf_active_count;
int output_partition;
- //Store last frame's MV info for next frame MV prediction
+ /* Store last frame's MV info for next frame MV prediction */
int_mv *lfmv;
int *lf_ref_frame_sign_bias;
int *lf_ref_frame;
- int force_next_frame_intra; /* force next frame to intra when kf_auto says so */
+ /* force next frame to intra when kf_auto says so */
+ int force_next_frame_intra;
int droppable;
@@ -663,7 +681,7 @@ typedef struct VP8_COMP
VP8_DENOISER denoiser;
#endif
- // Coding layer state variables
+ /* Coding layer state variables */
unsigned int current_layer;
LAYER_CONTEXT layer_context[VPX_TS_MAX_LAYERS];
diff --git a/vp8/encoder/pickinter.c b/vp8/encoder/pickinter.c
index 47d646fee..e2e052f2e 100644
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -143,7 +143,7 @@ static int pick_intra4x4block(
int dst_stride = x->e_mbd.dst.y_stride;
unsigned char *base_dst = x->e_mbd.dst.y_buffer;
B_PREDICTION_MODE mode;
- int best_rd = INT_MAX; // 1<<30
+ int best_rd = INT_MAX;
int rate;
int distortion;
@@ -214,8 +214,9 @@ static int pick_intra4x4mby_modes
distortion += d;
mic->bmi[i].as_mode = best_mode;
- // Break out case where we have already exceeded best so far value
- // that was passed in
+ /* Break out case where we have already exceeded best so far value
+ * that was passed in
+ */
if (distortion > *best_dist)
break;
}
@@ -408,7 +409,6 @@ void get_lower_res_motion_info(VP8_COMP *cpi, MACROBLOCKD *xd, int *dissim,
LOWER_RES_MB_INFO* store_mode_info
= ((LOWER_RES_FRAME_INFO*)cpi->oxcf.mr_low_res_mode_info)->mb_info;
unsigned int parent_mb_index;
- //unsigned int parent_mb_index = map_640x480_to_320x240[mb_row][mb_col];
/* Consider different down_sampling_factor. */
{
@@ -440,7 +440,6 @@ void get_lower_res_motion_info(VP8_COMP *cpi, MACROBLOCKD *xd, int *dissim,
/* Consider different down_sampling_factor.
* The result can be rounded to be more precise, but it takes more time.
*/
- //int round = cpi->oxcf.mr_down_sampling_factor.den/2;
(*parent_ref_mv).as_mv.row = store_mode_info[parent_mb_index].mv.as_mv.row
*cpi->oxcf.mr_down_sampling_factor.num
/cpi->oxcf.mr_down_sampling_factor.den;
@@ -457,7 +456,7 @@ static void check_for_encode_breakout(unsigned int sse, MACROBLOCK* x)
{
if (sse < x->encode_breakout)
{
- // Check u and v to make sure skip is ok
+ /* Check u and v to make sure skip is ok */
unsigned int sse2 = 0;
sse2 = VP8_UVSSE(x);
@@ -513,7 +512,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
MB_PREDICTION_MODE this_mode;
int num00;
int mdcounts[4];
- int best_rd = INT_MAX; // 1 << 30;
+ int best_rd = INT_MAX;
int best_intra_rd = INT_MAX;
int mode_index;
int rate;
@@ -530,7 +529,8 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
int saddone=0;
- int sr=0; //search range got from mv_pred(). It uses step_param levels. (0-7)
+ /* search range got from mv_pred(). It uses step_param levels. (0-7) */
+ int sr=0;
unsigned char *plane[4][3];
int ref_frame_map[4];
@@ -574,15 +574,17 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
- cpi->mbs_tested_so_far++; // Count of the number of MBs tested so far this frame
+ /* Count of the number of MBs tested so far this frame */
+ cpi->mbs_tested_so_far++;
*returnintra = INT_MAX;
x->skip = 0;
x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
- // if we encode a new mv this is important
- // find the best new motion vector
+ /* if we encode a new mv this is important
+ * find the best new motion vector
+ */
for (mode_index = 0; mode_index < MAX_MODES; mode_index++)
{
int frame_cost;
@@ -613,7 +615,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
#endif
- // everything but intra
+ /* everything but intra */
if (x->e_mbd.mode_info_context->mbmi.ref_frame)
{
x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
@@ -638,7 +640,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
continue;
if (vp8_mode_order[mode_index] == NEWMV && parent_mode == ZEROMV
- && best_ref_mv.as_int==0) //&& dissim==0
+ && best_ref_mv.as_int==0)
continue;
else if(vp8_mode_order[mode_index] == NEWMV && dissim==0
&& best_ref_mv.as_int==parent_ref_mv.as_int)
@@ -728,7 +730,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
case SPLITMV:
- // Split MV modes currently not supported when RD is nopt enabled.
+ /* Split MV modes currently not supported when RD is not enabled. */
break;
case DC_PRED:
@@ -777,13 +779,15 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8)? 3 : 2) : 1;
- // Further step/diamond searches as necessary
+ /* Further step/diamond searches as necessary */
step_param = cpi->sf.first_step + speed_adjust;
#if CONFIG_MULTI_RES_ENCODING
if (cpi->oxcf.mr_encoder_id)
{
- // Use parent MV as predictor. Adjust search range accordingly.
+ /* Use parent MV as predictor. Adjust search range
+ * accordingly.
+ */
mvp.as_int = parent_ref_mv.as_int;
mvp_full.as_mv.col = parent_ref_mv.as_mv.col>>3;
mvp_full.as_mv.row = parent_ref_mv.as_mv.row>>3;
@@ -808,7 +812,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
&near_sadidx[0]);
sr += speed_adjust;
- //adjust search range according to sr from mv prediction
+ /* adjust search range according to sr from mv prediction */
if(sr > step_param)
step_param = sr;
@@ -877,10 +881,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
x->mvcost, &best_ref_mv);
mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
- // Further step/diamond searches as necessary
- n = 0;
- //further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
-
+ /* Further step/diamond searches as necessary */
n = num00;
num00 = 0;
@@ -927,7 +928,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
- // mv cost;
+ /* mv cost; */
rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
cpi->mb.mvcost, 128);
}
@@ -965,7 +966,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
if (cpi->oxcf.noise_sensitivity)
{
- // Store for later use by denoiser.
+ /* Store for later use by denoiser. */
if (this_mode == ZEROMV && sse < zero_mv_sse )
{
zero_mv_sse = sse;
@@ -973,7 +974,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
x->e_mbd.mode_info_context->mbmi.ref_frame;
}
- // Store the best NEWMV in x for later use in the denoiser.
+ /* Store the best NEWMV in x for later use in the denoiser. */
if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV &&
sse < best_sse)
{
@@ -990,7 +991,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
if (this_rd < best_rd || x->skip)
{
- // Note index of best mode
+ /* Note index of best mode */
best_mode_index = mode_index;
*returnrate = rate2;
@@ -1030,7 +1031,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
break;
}
- // Reduce the activation RD thresholds for the best choice mode
+ /* Reduce the activation RD thresholds for the best choice mode */
if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
{
int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 3);
@@ -1062,7 +1063,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
{
if (x->best_sse_inter_mode == DC_PRED)
{
- // No best MV found.
+ /* No best MV found. */
x->best_sse_inter_mode = best_mbmode.mode;
x->best_sse_mv = best_mbmode.mv;
x->need_to_clamp_best_mvs = best_mbmode.need_to_clamp_mvs;
@@ -1073,7 +1074,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
recon_yoffset, recon_uvoffset);
- // Reevaluate ZEROMV after denoising.
+ /* Reevaluate ZEROMV after denoising. */
if (best_mbmode.ref_frame == INTRA_FRAME &&
x->best_zeromv_reference_frame != INTRA_FRAME)
{
@@ -1083,7 +1084,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
vp8_cost_mv_ref(ZEROMV, mdcounts);
distortion2 = 0;
- // set up the proper prediction buffers for the frame
+ /* set up the proper prediction buffers for the frame */
x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
diff --git a/vp8/encoder/picklpf.c b/vp8/encoder/picklpf.c
index 21af45a0f..4121349a9 100644
--- a/vp8/encoder/picklpf.c
+++ b/vp8/encoder/picklpf.c
@@ -74,7 +74,9 @@ static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
src += srcoffset;
dst += dstoffset;
- // Loop through the Y plane raw and reconstruction data summing (square differences)
+ /* Loop through the Y plane raw and reconstruction data summing
+ * (square differences)
+ */
for (i = 0; i < linestocopy; i += 16)
{
for (j = 0; j < source->y_width; j += 16)
@@ -92,7 +94,7 @@ static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
return Total;
}
-// Enforce a minimum filter level based upon baseline Q
+/* Enforce a minimum filter level based upon baseline Q */
static int get_min_filter_level(VP8_COMP *cpi, int base_qindex)
{
int min_filter_level;
@@ -113,14 +115,15 @@ static int get_min_filter_level(VP8_COMP *cpi, int base_qindex)
return min_filter_level;
}
-// Enforce a maximum filter level based upon baseline Q
+/* Enforce a maximum filter level based upon baseline Q */
static int get_max_filter_level(VP8_COMP *cpi, int base_qindex)
{
- // PGW August 2006: Highest filter values almost always a bad idea
+ /* PGW August 2006: Highest filter values almost always a bad idea */
- // jbb chg: 20100118 - not so any more with this overquant stuff allow high values
- // with lots of intra coming in.
- int max_filter_level = MAX_LOOP_FILTER ;//* 3 / 4;
+ /* jbb chg: 20100118 - not so any more with this overquant stuff allow
+ * high values with lots of intra coming in.
+ */
+ int max_filter_level = MAX_LOOP_FILTER;
(void)base_qindex;
if (cpi->twopass.section_intra_rating > 8)
@@ -155,7 +158,9 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
cm->last_sharpness_level = cm->sharpness_level;
}
- // Start the search at the previous frame filter level unless it is now out of range.
+ /* Start the search at the previous frame filter level unless it is
+ * now out of range.
+ */
if (cm->filter_level < min_filter_level)
cm->filter_level = min_filter_level;
else if (cm->filter_level > max_filter_level)
@@ -164,7 +169,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
filt_val = cm->filter_level;
best_filt_val = filt_val;
- // Get the err using the previous frame's filter value.
+ /* Get the err using the previous frame's filter value. */
/* Copy the unfiltered / processed recon buffer to the new buffer */
vp8_yv12_copy_partial_frame(saved_frame, cm->frame_to_show);
@@ -174,17 +179,17 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
filt_val -= 1 + (filt_val > 10);
- // Search lower filter levels
+ /* Search lower filter levels */
while (filt_val >= min_filter_level)
{
- // Apply the loop filter
+ /* Apply the loop filter */
vp8_yv12_copy_partial_frame(saved_frame, cm->frame_to_show);
vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
- // Get the err for filtered frame
+ /* Get the err for filtered frame */
filt_err = calc_partial_ssl_err(sd, cm->frame_to_show);
- // Update the best case record or exit loop.
+ /* Update the best case record or exit loop. */
if (filt_err < best_err)
{
best_err = filt_err;
@@ -193,32 +198,34 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
else
break;
- // Adjust filter level
+ /* Adjust filter level */
filt_val -= 1 + (filt_val > 10);
}
- // Search up (note that we have already done filt_val = cm->filter_level)
+ /* Search up (note that we have already done filt_val = cm->filter_level) */
filt_val = cm->filter_level + 1 + (filt_val > 10);
if (best_filt_val == cm->filter_level)
{
- // Resist raising filter level for very small gains
+ /* Resist raising filter level for very small gains */
best_err -= (best_err >> 10);
while (filt_val < max_filter_level)
{
- // Apply the loop filter
+ /* Apply the loop filter */
vp8_yv12_copy_partial_frame(saved_frame, cm->frame_to_show);
vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
- // Get the err for filtered frame
+ /* Get the err for filtered frame */
filt_err = calc_partial_ssl_err(sd, cm->frame_to_show);
- // Update the best case record or exit loop.
+ /* Update the best case record or exit loop. */
if (filt_err < best_err)
{
- // Do not raise filter level if improvement is < 1 part in 4096
+ /* Do not raise filter level if improvement is < 1 part
+ * in 4096
+ */
best_err = filt_err - (filt_err >> 10);
best_filt_val = filt_val;
@@ -226,7 +233,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
else
break;
- // Adjust filter level
+ /* Adjust filter level */
filt_val += 1 + (filt_val > 10);
}
}
@@ -243,7 +250,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
cm->frame_to_show = saved_frame;
}
-// Stub function for now Alt LF not used
+/* Stub function for now Alt LF not used */
void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val)
{
MACROBLOCKD *mbd = &cpi->mb.e_mbd;
@@ -266,12 +273,14 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
int filter_step;
int filt_high = 0;
- int filt_mid = cm->filter_level; // Start search at previous frame filter level
+ /* Start search at previous frame filter level */
+ int filt_mid = cm->filter_level;
int filt_low = 0;
int filt_best;
int filt_direction = 0;
- int Bias = 0; // Bias against raising loop filter and in favor of lowering it
+ /* Bias against raising loop filter and in favor of lowering it */
+ int Bias = 0;
int ss_err[MAX_LOOP_FILTER + 1];
@@ -287,7 +296,9 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
else
cm->sharpness_level = cpi->oxcf.Sharpness;
- // Start the search at the previous frame filter level unless it is now out of range.
+ /* Start the search at the previous frame filter level unless it is
+ * now out of range.
+ */
filt_mid = cm->filter_level;
if (filt_mid < min_filter_level)
@@ -295,10 +306,10 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
else if (filt_mid > max_filter_level)
filt_mid = max_filter_level;
- // Define the initial step size
+ /* Define the initial step size */
filter_step = (filt_mid < 16) ? 4 : filt_mid / 4;
- // Get baseline error score
+ /* Get baseline error score */
/* Copy the unfiltered / processed recon buffer to the new buffer */
vp8_yv12_copy_y(saved_frame, cm->frame_to_show);
@@ -314,9 +325,8 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
while (filter_step > 0)
{
- Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; //PGW change 12/12/06 for small images
+ Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step;
- // jbb chg: 20100118 - in sections with lots of new material coming in don't bias as much to a low filter value
if (cpi->twopass.section_intra_rating < 20)
Bias = Bias * cpi->twopass.section_intra_rating / 20;
@@ -327,7 +337,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
{
if(ss_err[filt_low] == 0)
{
- // Get Low filter error score
+ /* Get Low filter error score */
vp8_yv12_copy_y(saved_frame, cm->frame_to_show);
vp8cx_set_alt_lf_level(cpi, filt_low);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);
@@ -338,10 +348,12 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
else
filt_err = ss_err[filt_low];
- // If value is close to the best so far then bias towards a lower loop filter value.
+ /* If value is close to the best so far then bias towards a
+ * lower loop filter value.
+ */
if ((filt_err - Bias) < best_err)
{
- // Was it actually better than the previous best?
+ /* Was it actually better than the previous best? */
if (filt_err < best_err)
best_err = filt_err;
@@ -349,7 +361,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
}
}
- // Now look at filt_high
+ /* Now look at filt_high */
if ((filt_direction >= 0) && (filt_high != filt_mid))
{
if(ss_err[filt_high] == 0)
@@ -364,7 +376,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
else
filt_err = ss_err[filt_high];
- // Was it better than the previous best?
+ /* Was it better than the previous best? */
if (filt_err < (best_err - Bias))
{
best_err = filt_err;
@@ -372,7 +384,9 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
}
}
- // Half the step distance if the best filter value was the same as last time
+ /* Half the step distance if the best filter value was the same
+ * as last time
+ */
if (filt_best == filt_mid)
{
filter_step = filter_step / 2;
diff --git a/vp8/encoder/psnr.c b/vp8/encoder/psnr.c
index 5119bb8aa..5bb49ad26 100644
--- a/vp8/encoder/psnr.c
+++ b/vp8/encoder/psnr.c
@@ -22,7 +22,7 @@ double vp8_mse2psnr(double Samples, double Peak, double Mse)
if ((double)Mse > 0.0)
psnr = 10.0 * log10(Peak * Peak * Samples / Mse);
else
- psnr = MAX_PSNR; // Limit to prevent / 0
+ psnr = MAX_PSNR; /* Limit to prevent / 0 */
if (psnr > MAX_PSNR)
psnr = MAX_PSNR;
diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c
index 766d2b257..88fea11bb 100644
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -44,21 +44,21 @@ void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
z = coeff_ptr[rc];
zbin = zbin_ptr[rc] ;
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
+ sz = (z >> 31); /* sign of z */
+ x = (z ^ sz) - sz; /* x = abs(z) */
if (x >= zbin)
{
x += round_ptr[rc];
y = (((x * quant_ptr[rc]) >> 16) + x)
- >> quant_shift_ptr[rc]; // quantize (x)
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
+ >> quant_shift_ptr[rc]; /* quantize (x) */
+ x = (y ^ sz) - sz; /* get the sign back */
+ qcoeff_ptr[rc] = x; /* write to destination */
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */
if (y)
{
- eob = i; // last nonzero coeffs
+ eob = i; /* last nonzero coeffs */
}
}
}
@@ -84,17 +84,17 @@ void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
rc = vp8_default_zig_zag1d[i];
z = coeff_ptr[rc];
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
+ sz = (z >> 31); /* sign of z */
+ x = (z ^ sz) - sz; /* x = abs(z) */
- y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16; // quantize (x)
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
+ y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16; /* quantize (x) */
+ x = (y ^ sz) - sz; /* get the sign back */
+ qcoeff_ptr[rc] = x; /* write to destination */
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */
if (y)
{
- eob = i; // last nonzero coeffs
+ eob = i; /* last nonzero coeffs */
}
}
*d->eob = (char)(eob + 1);
@@ -132,22 +132,22 @@ void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d)
zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;
zbin_boost_ptr ++;
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
+ sz = (z >> 31); /* sign of z */
+ x = (z ^ sz) - sz; /* x = abs(z) */
if (x >= zbin)
{
x += round_ptr[rc];
y = (((x * quant_ptr[rc]) >> 16) + x)
- >> quant_shift_ptr[rc]; // quantize (x)
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
+ >> quant_shift_ptr[rc]; /* quantize (x) */
+ x = (y ^ sz) - sz; /* get the sign back */
+ qcoeff_ptr[rc] = x; /* write to destination */
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */
if (y)
{
- eob = i; // last nonzero coeffs
- zbin_boost_ptr = b->zrun_zbin_boost; // reset zero runlength
+ eob = i; /* last nonzero coeffs */
+ zbin_boost_ptr = b->zrun_zbin_boost; /* reset zero runlength */
}
}
}
@@ -240,26 +240,23 @@ void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d)
rc = vp8_default_zig_zag1d[i];
z = coeff_ptr[rc];
- //if ( i == 0 )
- // zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value/2;
- //else
zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;
zbin_boost_ptr ++;
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
+ sz = (z >> 31); /* sign of z */
+ x = (z ^ sz) - sz; /* x = abs(z) */
if (x >= zbin)
{
- y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16; // quantize (x)
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
+ y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16; /* quantize (x) */
+ x = (y ^ sz) - sz; /* get the sign back */
+ qcoeff_ptr[rc] = x; /* write to destination */
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */
if (y)
{
- eob = i; // last nonzero coeffs
- zbin_boost_ptr = &b->zrun_zbin_boost[0]; // reset zero runlength
+ eob = i; /* last nonzero coeffs */
+ zbin_boost_ptr = &b->zrun_zbin_boost[0]; /* reset zrl */
}
}
}
@@ -441,7 +438,7 @@ void vp8cx_init_quantizer(VP8_COMP *cpi)
for (Q = 0; Q < QINDEX_RANGE; Q++)
{
- // dc values
+ /* dc values */
quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
@@ -469,7 +466,7 @@ void vp8cx_init_quantizer(VP8_COMP *cpi)
cpi->common.UVdequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
- // all the ac values = ;
+ /* all the ac values = ; */
quant_val = vp8_ac_yquant(Q);
cpi->Y1quant_fast[Q][1] = (1 << 16) / quant_val;
invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 1,
@@ -536,7 +533,7 @@ void vp8cx_init_quantizer(VP8_COMP *cpi)
for (Q = 0; Q < QINDEX_RANGE; Q++)
{
- // dc values
+ /* dc values */
quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
@@ -558,7 +555,7 @@ void vp8cx_init_quantizer(VP8_COMP *cpi)
cpi->common.UVdequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
- // all the ac values = ;
+ /* all the ac values = ; */
for (i = 1; i < 16; i++)
{
int rc = vp8_default_zig_zag1d[i];
@@ -613,18 +610,18 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
MACROBLOCKD *xd = &x->e_mbd;
int zbin_extra;
- // Select the baseline MB Q index.
+ /* Select the baseline MB Q index. */
if (xd->segmentation_enabled)
{
- // Abs Value
+ /* Abs Value */
if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
-
QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
- // Delta Value
+ /* Delta Value */
else
{
QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
- QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; // Clamp to valid range
+ /* Clamp to valid range */
+ QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;
}
}
else
@@ -657,13 +654,13 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
* This will also require modifications to the x86 and neon assembly.
* */
for (i = 0; i < 16; i++)
- x->e_mbd.block[i].dequant = xd->dequant_y1; //cpi->common.Y1dequant[QIndex];
+ x->e_mbd.block[i].dequant = xd->dequant_y1;
for (i = 16; i < 24; i++)
- x->e_mbd.block[i].dequant = xd->dequant_uv; //cpi->common.UVdequant[QIndex];
- x->e_mbd.block[24].dequant = xd->dequant_y2; //cpi->common.Y2dequant[QIndex];
+ x->e_mbd.block[i].dequant = xd->dequant_uv;
+ x->e_mbd.block[24].dequant = xd->dequant_y2;
#endif
- // Y
+ /* Y */
zbin_extra = ZBIN_EXTRA_Y;
for (i = 0; i < 16; i++)
@@ -677,7 +674,7 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
x->block[i].zbin_extra = (short)zbin_extra;
}
- // UV
+ /* UV */
zbin_extra = ZBIN_EXTRA_UV;
for (i = 16; i < 24; i++)
@@ -691,7 +688,7 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
x->block[i].zbin_extra = (short)zbin_extra;
}
- // Y2
+ /* Y2 */
zbin_extra = ZBIN_EXTRA_Y2;
x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
@@ -716,19 +713,19 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
|| cpi->last_zbin_mode_boost != cpi->zbin_mode_boost
|| x->last_act_zbin_adj != x->act_zbin_adj)
{
- // Y
+ /* Y */
zbin_extra = ZBIN_EXTRA_Y;
for (i = 0; i < 16; i++)
x->block[i].zbin_extra = (short)zbin_extra;
- // UV
+ /* UV */
zbin_extra = ZBIN_EXTRA_UV;
for (i = 16; i < 24; i++)
x->block[i].zbin_extra = (short)zbin_extra;
- // Y2
+ /* Y2 */
zbin_extra = ZBIN_EXTRA_Y2;
x->block[24].zbin_extra = (short)zbin_extra;
@@ -744,19 +741,19 @@ void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
int QIndex = x->q_index;
int zbin_extra;
- // Y
+ /* Y */
zbin_extra = ZBIN_EXTRA_Y;
for (i = 0; i < 16; i++)
x->block[i].zbin_extra = (short)zbin_extra;
- // UV
+ /* UV */
zbin_extra = ZBIN_EXTRA_UV;
for (i = 16; i < 24; i++)
x->block[i].zbin_extra = (short)zbin_extra;
- // Y2
+ /* Y2 */
zbin_extra = ZBIN_EXTRA_Y2;
x->block[24].zbin_extra = (short)zbin_extra;
}
@@ -766,10 +763,10 @@ void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
{
- // Clear Zbin mode boost for default case
+ /* Clear Zbin mode boost for default case */
cpi->zbin_mode_boost = 0;
- // MB level quantizer setup
+ /* MB level quantizer setup */
vp8cx_mb_init_quantizer(cpi, &cpi->mb, 0);
}
@@ -801,7 +798,7 @@ void vp8_set_quantizer(struct VP8_COMP *cpi, int Q)
cm->y2dc_delta_q = new_delta_q;
- // Set Segment specific quatizers
+ /* Set Segment specific quantizers */
mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0];
mbd->segment_feature_data[MB_LVL_ALT_Q][1] = cpi->segment_feature_data[MB_LVL_ALT_Q][1];
mbd->segment_feature_data[MB_LVL_ALT_Q][2] = cpi->segment_feature_data[MB_LVL_ALT_Q][2];
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index f6baf4c7e..500c1518b 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -41,15 +41,16 @@ extern int inter_uv_modes[4];
extern int inter_b_modes[10];
#endif
-// Bits Per MB at different Q (Multiplied by 512)
+/* Bits Per MB at different Q (Multiplied by 512) */
#define BPER_MB_NORMBITS 9
-// Work in progress recalibration of baseline rate tables based on
-// the assumption that bits per mb is inversely proportional to the
-// quantizer value.
+/* Work in progress recalibration of baseline rate tables based on
+ * the assumption that bits per mb is inversely proportional to the
+ * quantizer value.
+ */
const int vp8_bits_per_mb[2][QINDEX_RANGE] =
{
- // Intra case 450000/Qintra
+ /* Intra case 450000/Qintra */
{
1125000,900000, 750000, 642857, 562500, 500000, 450000, 450000,
409090, 375000, 346153, 321428, 300000, 281250, 264705, 264705,
@@ -68,7 +69,7 @@ const int vp8_bits_per_mb[2][QINDEX_RANGE] =
36885, 36290, 35714, 35156, 34615, 34090, 33582, 33088,
32608, 32142, 31468, 31034, 30405, 29801, 29220, 28662,
},
- // Inter case 285000/Qinter
+ /* Inter case 285000/Qinter */
{
712500, 570000, 475000, 407142, 356250, 316666, 285000, 259090,
237500, 219230, 203571, 190000, 178125, 167647, 158333, 150000,
@@ -109,7 +110,7 @@ static const int kf_boost_qadjustment[QINDEX_RANGE] =
220, 220, 220, 220, 220, 220, 220, 220,
};
-//#define GFQ_ADJUSTMENT (Q+100)
+/* #define GFQ_ADJUSTMENT (Q+100) */
#define GFQ_ADJUSTMENT vp8_gf_boost_qadjustment[Q]
const int vp8_gf_boost_qadjustment[QINDEX_RANGE] =
{
@@ -173,7 +174,7 @@ static const int kf_gf_boost_qlimits[QINDEX_RANGE] =
600, 600, 600, 600, 600, 600, 600, 600,
};
-// % adjustment to target kf size based on seperation from previous frame
+/* % adjustment to target kf size based on separation from previous frame */
static const int kf_boost_seperation_adjustment[16] =
{
30, 40, 50, 55, 60, 65, 70, 75,
@@ -224,10 +225,11 @@ void vp8_save_coding_context(VP8_COMP *cpi)
{
CODING_CONTEXT *const cc = & cpi->coding_context;
- // Stores a snapshot of key state variables which can subsequently be
- // restored with a call to vp8_restore_coding_context. These functions are
- // intended for use in a re-code loop in vp8_compress_frame where the
- // quantizer value is adjusted between loop iterations.
+ /* Stores a snapshot of key state variables which can subsequently be
+ * restored with a call to vp8_restore_coding_context. These functions are
+ * intended for use in a re-code loop in vp8_compress_frame where the
+ * quantizer value is adjusted between loop iterations.
+ */
cc->frames_since_key = cpi->frames_since_key;
cc->filter_level = cpi->common.filter_level;
@@ -244,7 +246,7 @@ void vp8_save_coding_context(VP8_COMP *cpi)
vp8_copy(cc->uv_mode_count, cpi->uv_mode_count);
- // Stats
+ /* Stats */
#ifdef MODE_STATS
vp8_copy(cc->y_modes, y_modes);
vp8_copy(cc->uv_modes, uv_modes);
@@ -262,8 +264,9 @@ void vp8_restore_coding_context(VP8_COMP *cpi)
{
CODING_CONTEXT *const cc = & cpi->coding_context;
- // Restore key state variables to the snapshot state stored in the
- // previous call to vp8_save_coding_context.
+ /* Restore key state variables to the snapshot state stored in the
+ * previous call to vp8_save_coding_context.
+ */
cpi->frames_since_key = cc->frames_since_key;
cpi->common.filter_level = cc->filter_level;
@@ -280,7 +283,7 @@ void vp8_restore_coding_context(VP8_COMP *cpi)
vp8_copy(cpi->ymode_count, cc->ymode_count);
vp8_copy(cpi->uv_mode_count, cc->uv_mode_count);
- // Stats
+ /* Stats */
#ifdef MODE_STATS
vp8_copy(y_modes, cc->y_modes);
vp8_copy(uv_modes, cc->uv_modes);
@@ -297,7 +300,7 @@ void vp8_restore_coding_context(VP8_COMP *cpi)
void vp8_setup_key_frame(VP8_COMP *cpi)
{
- // Setup for Key frame:
+ /* Setup for Key frame: */
vp8_default_coef_probs(& cpi->common);
@@ -307,20 +310,20 @@ void vp8_setup_key_frame(VP8_COMP *cpi)
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flag);
}
- vpx_memset(cpi->common.fc.pre_mvc, 0, sizeof(cpi->common.fc.pre_mvc)); //initialize pre_mvc to all zero.
+ /* initialize pre_mvc to all zero. */
+ vpx_memset(cpi->common.fc.pre_mvc, 0, sizeof(cpi->common.fc.pre_mvc));
- // Make sure we initialize separate contexts for altref,gold, and normal.
- // TODO shouldn't need 3 different copies of structure to do this!
+ /* Make sure we initialize separate contexts for altref,gold, and normal.
+ * TODO shouldn't need 3 different copies of structure to do this!
+ */
vpx_memcpy(&cpi->lfc_a, &cpi->common.fc, sizeof(cpi->common.fc));
vpx_memcpy(&cpi->lfc_g, &cpi->common.fc, sizeof(cpi->common.fc));
vpx_memcpy(&cpi->lfc_n, &cpi->common.fc, sizeof(cpi->common.fc));
- //cpi->common.filter_level = 0; // Reset every key frame.
cpi->common.filter_level = cpi->common.base_qindex * 3 / 8 ;
- // Provisional interval before next GF
+ /* Provisional interval before next GF */
if (cpi->auto_gold)
- //cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
else
cpi->frames_till_gf_update_due = cpi->goldfreq;
@@ -348,12 +351,12 @@ static int estimate_bits_at_q(int frame_kind, int Q, int MBs,
static void calc_iframe_target_size(VP8_COMP *cpi)
{
- // boost defaults to half second
+ /* boost defaults to half second */
int kf_boost;
unsigned int target;
- // Clear down mmx registers to allow floating point in what follows
- vp8_clear_system_state(); //__asm emms;
+ /* Clear down mmx registers to allow floating point in what follows */
+ vp8_clear_system_state();
if (cpi->oxcf.fixed_q >= 0)
{
@@ -364,10 +367,10 @@ static void calc_iframe_target_size(VP8_COMP *cpi)
}
else if (cpi->pass == 2)
{
- // New Two pass RC
+ /* New Two pass RC */
target = cpi->per_frame_bandwidth;
}
- // First Frame is a special case
+ /* First Frame is a special case */
else if (cpi->common.current_video_frame == 0)
{
/* 1 Pass there is no information on which to base size so use
@@ -381,29 +384,29 @@ static void calc_iframe_target_size(VP8_COMP *cpi)
}
else
{
- // if this keyframe was forced, use a more recent Q estimate
+ /* if this keyframe was forced, use a more recent Q estimate */
int Q = (cpi->common.frame_flags & FRAMEFLAGS_KEY)
? cpi->avg_frame_qindex : cpi->ni_av_qi;
- int initial_boost = 24; // Corresponds to: |2.5 * per_frame_bandwidth|
- // Boost depends somewhat on frame rate: only used for 1 layer case.
+ int initial_boost = 24; /* |2.5 * per_frame_bandwidth| */
+ /* Boost depends somewhat on frame rate: only used for 1 layer case. */
if (cpi->oxcf.number_of_layers == 1) {
kf_boost = MAX(initial_boost, (int)(2 * cpi->output_frame_rate - 16));
}
else {
- // Initial factor: set target size to: |2.5 * per_frame_bandwidth|.
+ /* Initial factor: set target size to: |2.5 * per_frame_bandwidth|. */
kf_boost = initial_boost;
}
- // adjustment up based on q: this factor ranges from ~1.2 to 2.2.
+ /* adjustment up based on q: this factor ranges from ~1.2 to 2.2. */
kf_boost = kf_boost * kf_boost_qadjustment[Q] / 100;
- // frame separation adjustment ( down)
+ /* frame separation adjustment (down) */
if (cpi->frames_since_key < cpi->output_frame_rate / 2)
kf_boost = (int)(kf_boost
* cpi->frames_since_key / (cpi->output_frame_rate / 2));
- // Minimal target size is |2* per_frame_bandwidth|.
+ /* Minimal target size is |2* per_frame_bandwidth|. */
if (kf_boost < 16)
kf_boost = 16;
@@ -422,8 +425,9 @@ static void calc_iframe_target_size(VP8_COMP *cpi)
cpi->this_frame_target = target;
- // TODO: if we separate rate targeting from Q targetting, move this.
- // Reset the active worst quality to the baseline value for key frames.
+ /* TODO: if we separate rate targeting from Q targetting, move this.
+ * Reset the active worst quality to the baseline value for key frames.
+ */
if (cpi->pass != 2)
cpi->active_worst_quality = cpi->worst_quality;
@@ -432,9 +436,6 @@ static void calc_iframe_target_size(VP8_COMP *cpi)
FILE *f;
f = fopen("kf_boost.stt", "a");
- //fprintf(f, " %8d %10d %10d %10d %10d %10d %10d\n",
- // cpi->common.current_video_frame, cpi->target_bandwidth, cpi->frames_to_key, kf_boost_qadjustment[cpi->ni_av_qi], cpi->kf_boost, (cpi->this_frame_target *100 / cpi->per_frame_bandwidth), cpi->this_frame_target );
-
fprintf(f, " %8u %10d %10d %10d\n",
cpi->common.current_video_frame, cpi->gfu_boost, cpi->baseline_gf_interval, cpi->source_alt_ref_pending);
@@ -444,14 +445,15 @@ static void calc_iframe_target_size(VP8_COMP *cpi)
}
-// Do the best we can to define the parameters for the next GF based on what
-// information we have available.
+/* Do the best we can to define the parameters for the next GF based on what
+ * information we have available.
+ */
static void calc_gf_params(VP8_COMP *cpi)
{
int Q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
int Boost = 0;
- int gf_frame_useage = 0; // Golden frame useage since last GF
+ int gf_frame_useage = 0; /* Golden frame useage since last GF */
int tot_mbs = cpi->recent_ref_frame_usage[INTRA_FRAME] +
cpi->recent_ref_frame_usage[LAST_FRAME] +
cpi->recent_ref_frame_usage[GOLDEN_FRAME] +
@@ -459,33 +461,30 @@ static void calc_gf_params(VP8_COMP *cpi)
int pct_gf_active = (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols);
- // Reset the last boost indicator
- //cpi->last_boost = 100;
-
if (tot_mbs)
gf_frame_useage = (cpi->recent_ref_frame_usage[GOLDEN_FRAME] + cpi->recent_ref_frame_usage[ALTREF_FRAME]) * 100 / tot_mbs;
if (pct_gf_active > gf_frame_useage)
gf_frame_useage = pct_gf_active;
- // Not two pass
+ /* Not two pass */
if (cpi->pass != 2)
{
- // Single Pass lagged mode: TBD
+ /* Single Pass lagged mode: TBD */
if (0)
{
}
- // Single Pass compression: Has to use current and historical data
+ /* Single Pass compression: Has to use current and historical data */
else
{
#if 0
- // Experimental code
+ /* Experimental code */
int index = cpi->one_pass_frame_index;
int frames_to_scan = (cpi->max_gf_interval <= MAX_LAG_BUFFERS) ? cpi->max_gf_interval : MAX_LAG_BUFFERS;
+ /* ************** Experimental code - incomplete */
/*
- // *************** Experimental code - incomplete
double decay_val = 1.0;
double IIAccumulator = 0.0;
double last_iiaccumulator = 0.0;
@@ -528,48 +527,51 @@ static void calc_gf_params(VP8_COMP *cpi)
#else
/*************************************************************/
- // OLD code
+ /* OLD code */
- // Adjust boost based upon ambient Q
+ /* Adjust boost based upon ambient Q */
Boost = GFQ_ADJUSTMENT;
- // Adjust based upon most recently measure intra useage
+ /* Adjust based upon most recently measure intra useage */
Boost = Boost * gf_intra_usage_adjustment[(cpi->this_frame_percent_intra < 15) ? cpi->this_frame_percent_intra : 14] / 100;
- // Adjust gf boost based upon GF usage since last GF
+ /* Adjust gf boost based upon GF usage since last GF */
Boost = Boost * gf_adjust_table[gf_frame_useage] / 100;
#endif
}
- // golden frame boost without recode loop often goes awry. be safe by keeping numbers down.
+ /* golden frame boost without recode loop often goes awry. be
+ * safe by keeping numbers down.
+ */
if (!cpi->sf.recode_loop)
{
if (cpi->compressor_speed == 2)
Boost = Boost / 2;
}
- // Apply an upper limit based on Q for 1 pass encodes
+ /* Apply an upper limit based on Q for 1 pass encodes */
if (Boost > kf_gf_boost_qlimits[Q] && (cpi->pass == 0))
Boost = kf_gf_boost_qlimits[Q];
- // Apply lower limits to boost.
+ /* Apply lower limits to boost. */
else if (Boost < 110)
Boost = 110;
- // Note the boost used
+ /* Note the boost used */
cpi->last_boost = Boost;
}
- // Estimate next interval
- // This is updated once the real frame size/boost is known.
+ /* Estimate next interval
+ * This is updated once the real frame size/boost is known.
+ */
if (cpi->oxcf.fixed_q == -1)
{
- if (cpi->pass == 2) // 2 Pass
+ if (cpi->pass == 2) /* 2 Pass */
{
cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
}
- else // 1 Pass
+ else /* 1 Pass */
{
cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
@@ -595,10 +597,10 @@ static void calc_gf_params(VP8_COMP *cpi)
else
cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
- // ARF on or off
+ /* ARF on or off */
if (cpi->pass != 2)
{
- // For now Alt ref is not allowed except in 2 pass modes.
+ /* For now Alt ref is not allowed except in 2 pass modes. */
cpi->source_alt_ref_pending = 0;
/*if ( cpi->oxcf.fixed_q == -1)
@@ -635,89 +637,34 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
min_frame_target = cpi->per_frame_bandwidth / 4;
- // Special alt reference frame case
+ /* Special alt reference frame case */
if((cpi->common.refresh_alt_ref_frame) && (cpi->oxcf.number_of_layers == 1))
{
if (cpi->pass == 2)
{
- cpi->per_frame_bandwidth = cpi->twopass.gf_bits; // Per frame bit target for the alt ref frame
+ /* Per frame bit target for the alt ref frame */
+ cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
cpi->this_frame_target = cpi->per_frame_bandwidth;
}
/* One Pass ??? TBD */
- /*else
- {
- int frames_in_section;
- int allocation_chunks;
- int Q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
- int alt_boost;
- int max_arf_rate;
-
- alt_boost = (cpi->gfu_boost * 3 * GFQ_ADJUSTMENT) / (2 * 100);
- alt_boost += (cpi->frames_till_gf_update_due * 50);
-
- // If alt ref is not currently active then we have a pottential double hit with GF and ARF so reduce the boost a bit.
- // A similar thing is done on GFs that preceed a arf update.
- if ( !cpi->source_alt_ref_active )
- alt_boost = alt_boost * 3 / 4;
-
- frames_in_section = cpi->frames_till_gf_update_due+1; // Standard frames + GF
- allocation_chunks = (frames_in_section * 100) + alt_boost;
-
- // Normalize Altboost and allocations chunck down to prevent overflow
- while ( alt_boost > 1000 )
- {
- alt_boost /= 2;
- allocation_chunks /= 2;
- }
-
- else
- {
- int bits_in_section;
-
- if ( cpi->kf_overspend_bits > 0 )
- {
- Adjustment = (cpi->kf_bitrate_adjustment <= cpi->kf_overspend_bits) ? cpi->kf_bitrate_adjustment : cpi->kf_overspend_bits;
-
- if ( Adjustment > (cpi->per_frame_bandwidth - min_frame_target) )
- Adjustment = (cpi->per_frame_bandwidth - min_frame_target);
-
- cpi->kf_overspend_bits -= Adjustment;
-
- // Calculate an inter frame bandwidth target for the next few frames designed to recover
- // any extra bits spent on the key frame.
- cpi->inter_frame_target = cpi->per_frame_bandwidth - Adjustment;
- if ( cpi->inter_frame_target < min_frame_target )
- cpi->inter_frame_target = min_frame_target;
- }
- else
- cpi->inter_frame_target = cpi->per_frame_bandwidth;
-
- bits_in_section = cpi->inter_frame_target * frames_in_section;
-
- // Avoid loss of precision but avoid overflow
- if ( (bits_in_section>>7) > allocation_chunks )
- cpi->this_frame_target = alt_boost * (bits_in_section / allocation_chunks);
- else
- cpi->this_frame_target = (alt_boost * bits_in_section) / allocation_chunks;
- }
- }
- */
}
- // Normal frames (gf,and inter)
+ /* Normal frames (gf,and inter) */
else
{
- // 2 pass
+ /* 2 pass */
if (cpi->pass == 2)
{
cpi->this_frame_target = cpi->per_frame_bandwidth;
}
- // 1 pass
+ /* 1 pass */
else
{
- // Make rate adjustment to recover bits spent in key frame
- // Test to see if the key frame inter data rate correction should still be in force
+ /* Make rate adjustment to recover bits spent in key frame
+ * Test to see if the key frame inter data rate correction
+ * should still be in force
+ */
if (cpi->kf_overspend_bits > 0)
{
Adjustment = (cpi->kf_bitrate_adjustment <= cpi->kf_overspend_bits) ? cpi->kf_bitrate_adjustment : cpi->kf_overspend_bits;
@@ -727,8 +674,10 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
cpi->kf_overspend_bits -= Adjustment;
- // Calculate an inter frame bandwidth target for the next few frames designed to recover
- // any extra bits spent on the key frame.
+ /* Calculate an inter frame bandwidth target for the next
+ * few frames designed to recover any extra bits spent on
+ * the key frame.
+ */
cpi->this_frame_target = cpi->per_frame_bandwidth - Adjustment;
if (cpi->this_frame_target < min_frame_target)
@@ -737,7 +686,9 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
else
cpi->this_frame_target = cpi->per_frame_bandwidth;
- // If appropriate make an adjustment to recover bits spent on a recent GF
+ /* If appropriate make an adjustment to recover bits spent on a
+ * recent GF
+ */
if ((cpi->gf_overspend_bits > 0) && (cpi->this_frame_target > min_frame_target))
{
int Adjustment = (cpi->non_gf_bitrate_adjustment <= cpi->gf_overspend_bits) ? cpi->non_gf_bitrate_adjustment : cpi->gf_overspend_bits;
@@ -749,11 +700,11 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
cpi->this_frame_target -= Adjustment;
}
- // Apply small + and - boosts for non gf frames
+ /* Apply small + and - boosts for non gf frames */
if ((cpi->last_boost > 150) && (cpi->frames_till_gf_update_due > 0) &&
(cpi->current_gf_interval >= (MIN_GF_INTERVAL << 1)))
{
- // % Adjustment limited to the range 1% to 10%
+ /* % Adjustment limited to the range 1% to 10% */
Adjustment = (cpi->last_boost - 100) >> 5;
if (Adjustment < 1)
@@ -761,7 +712,7 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
else if (Adjustment > 10)
Adjustment = 10;
- // Convert to bits
+ /* Convert to bits */
Adjustment = (cpi->this_frame_target * Adjustment) / 100;
if (Adjustment > (cpi->this_frame_target - min_frame_target))
@@ -775,22 +726,25 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
}
}
- // Sanity check that the total sum of adjustments is not above the maximum allowed
- // That is that having allowed for KF and GF penalties we have not pushed the
- // current interframe target to low. If the adjustment we apply here is not capable of recovering
- // all the extra bits we have spent in the KF or GF then the remainder will have to be recovered over
- // a longer time span via other buffer / rate control mechanisms.
+ /* Sanity check that the total sum of adjustments is not above the
+ * maximum allowed That is that having allowed for KF and GF penalties
+ * we have not pushed the current interframe target to low. If the
+ * adjustment we apply here is not capable of recovering all the extra
+ * bits we have spent in the KF or GF then the remainder will have to
+ * be recovered over a longer time span via other buffer / rate control
+ * mechanisms.
+ */
if (cpi->this_frame_target < min_frame_target)
cpi->this_frame_target = min_frame_target;
if (!cpi->common.refresh_alt_ref_frame)
- // Note the baseline target data rate for this inter frame.
+ /* Note the baseline target data rate for this inter frame. */
cpi->inter_frame_target = cpi->this_frame_target;
- // One Pass specific code
+ /* One Pass specific code */
if (cpi->pass == 0)
{
- // Adapt target frame size with respect to any buffering constraints:
+ /* Adapt target frame size with respect to any buffering constraints: */
if (cpi->buffered_mode)
{
int one_percent_bits = 1 + cpi->oxcf.optimal_buffer_level / 100;
@@ -800,11 +754,13 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
{
int percent_low = 0;
- // Decide whether or not we need to adjust the frame data rate target.
- //
- // If we are are below the optimal buffer fullness level and adherence
- // to buffering constraints is important to the end usage then adjust
- // the per frame target.
+ /* Decide whether or not we need to adjust the frame data
+ * rate target.
+ *
+ * If we are are below the optimal buffer fullness level
+ * and adherence to buffering constraints is important to
+ * the end usage then adjust the per frame target.
+ */
if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
(cpi->buffer_level < cpi->oxcf.optimal_buffer_level))
{
@@ -812,10 +768,10 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
(cpi->oxcf.optimal_buffer_level - cpi->buffer_level) /
one_percent_bits;
}
- // Are we overshooting the long term clip data rate...
+ /* Are we overshooting the long term clip data rate... */
else if (cpi->bits_off_target < 0)
{
- // Adjust per frame data target downwards to compensate.
+ /* Adjust per frame data target downwards to compensate. */
percent_low = (int)(100 * -cpi->bits_off_target /
(cpi->total_byte_count * 8));
}
@@ -825,40 +781,46 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
else if (percent_low < 0)
percent_low = 0;
- // lower the target bandwidth for this frame.
+ /* lower the target bandwidth for this frame. */
cpi->this_frame_target -=
(cpi->this_frame_target * percent_low) / 200;
- // Are we using allowing control of active_worst_allowed_q
- // according to buffer level.
+ /* Are we using allowing control of active_worst_allowed_q
+ * according to buffer level.
+ */
if (cpi->auto_worst_q && cpi->ni_frames > 150)
{
int critical_buffer_level;
- // For streaming applications the most important factor is
- // cpi->buffer_level as this takes into account the
- // specified short term buffering constraints. However,
- // hitting the long term clip data rate target is also
- // important.
+ /* For streaming applications the most important factor is
+ * cpi->buffer_level as this takes into account the
+ * specified short term buffering constraints. However,
+ * hitting the long term clip data rate target is also
+ * important.
+ */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
{
- // Take the smaller of cpi->buffer_level and
- // cpi->bits_off_target
+ /* Take the smaller of cpi->buffer_level and
+ * cpi->bits_off_target
+ */
critical_buffer_level =
(cpi->buffer_level < cpi->bits_off_target)
? cpi->buffer_level : cpi->bits_off_target;
}
- // For local file playback short term buffering constraints
- // are less of an issue
+ /* For local file playback short term buffering constraints
+ * are less of an issue
+ */
else
{
- // Consider only how we are doing for the clip as a
- // whole
+ /* Consider only how we are doing for the clip as a
+ * whole
+ */
critical_buffer_level = cpi->bits_off_target;
}
- // Set the active worst quality based upon the selected
- // buffer fullness number.
+ /* Set the active worst quality based upon the selected
+ * buffer fullness number.
+ */
if (critical_buffer_level < cpi->oxcf.optimal_buffer_level)
{
if ( critical_buffer_level >
@@ -870,12 +832,13 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
(critical_buffer_level -
(cpi->oxcf.optimal_buffer_level >> 2));
- // Step active worst quality down from
- // cpi->ni_av_qi when (critical_buffer_level ==
- // cpi->optimal_buffer_level) to
- // cpi->worst_quality when
- // (critical_buffer_level ==
- // cpi->optimal_buffer_level >> 2)
+ /* Step active worst quality down from
+ * cpi->ni_av_qi when (critical_buffer_level ==
+ * cpi->optimal_buffer_level) to
+ * cpi->worst_quality when
+ * (critical_buffer_level ==
+ * cpi->optimal_buffer_level >> 2)
+ */
cpi->active_worst_quality =
cpi->worst_quality -
((qadjustment_range * above_base) /
@@ -921,11 +884,14 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
cpi->this_frame_target += (cpi->this_frame_target *
percent_high) / 200;
- // Are we allowing control of active_worst_allowed_q according
- // to buffer level.
+ /* Are we allowing control of active_worst_allowed_q according
+ * to buffer level.
+ */
if (cpi->auto_worst_q && cpi->ni_frames > 150)
{
- // When using the relaxed buffer model stick to the user specified value
+ /* When using the relaxed buffer model stick to the
+ * user specified value
+ */
cpi->active_worst_quality = cpi->ni_av_qi;
}
else
@@ -934,26 +900,27 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
}
}
- // Set active_best_quality to prevent quality rising too high
+ /* Set active_best_quality to prevent quality rising too high */
cpi->active_best_quality = cpi->best_quality;
- // Worst quality obviously must not be better than best quality
+ /* Worst quality obviously must not be better than best quality */
if (cpi->active_worst_quality <= cpi->active_best_quality)
cpi->active_worst_quality = cpi->active_best_quality + 1;
if(cpi->active_worst_quality > 127)
cpi->active_worst_quality = 127;
}
- // Unbuffered mode (eg. video conferencing)
+ /* Unbuffered mode (eg. video conferencing) */
else
{
- // Set the active worst quality
+ /* Set the active worst quality */
cpi->active_worst_quality = cpi->worst_quality;
}
- // Special trap for constrained quality mode
- // "active_worst_quality" may never drop below cq level
- // for any frame type.
+ /* Special trap for constrained quality mode
+ * "active_worst_quality" may never drop below cq level
+ * for any frame type.
+ */
if ( cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY &&
cpi->active_worst_quality < cpi->cq_target_quality)
{
@@ -961,16 +928,19 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
}
}
- // Test to see if we have to drop a frame
- // The auto-drop frame code is only used in buffered mode.
- // In unbufferd mode (eg vide conferencing) the descision to
- // code or drop a frame is made outside the codec in response to real
- // world comms or buffer considerations.
+ /* Test to see if we have to drop a frame
+ * The auto-drop frame code is only used in buffered mode.
+ * In unbufferd mode (eg vide conferencing) the descision to
+ * code or drop a frame is made outside the codec in response to real
+ * world comms or buffer considerations.
+ */
if (cpi->drop_frames_allowed &&
(cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
- ((cpi->common.frame_type != KEY_FRAME))) //|| !cpi->oxcf.allow_spatial_resampling) )
+ ((cpi->common.frame_type != KEY_FRAME)))
{
- // Check for a buffer underun-crisis in which case we have to drop a frame
+ /* Check for a buffer underun-crisis in which case we have to drop
+ * a frame
+ */
if ((cpi->buffer_level < 0))
{
#if 0
@@ -981,11 +951,9 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
(cpi->buffer_level * 100) / cpi->oxcf.optimal_buffer_level);
fclose(f);
#endif
- //vpx_log("Decoder: Drop frame due to bandwidth: %d \n",cpi->buffer_level, cpi->av_per_frame_bandwidth);
-
cpi->drop_frame = 1;
- // Update the buffer level variable.
+ /* Update the buffer level variable. */
cpi->bits_off_target += cpi->av_per_frame_bandwidth;
if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
@@ -993,14 +961,13 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
}
}
- // Adjust target frame size for Golden Frames:
+ /* Adjust target frame size for Golden Frames: */
if (cpi->oxcf.error_resilient_mode == 0 &&
(cpi->frames_till_gf_update_due == 0) && !cpi->drop_frame)
{
- //int Boost = 0;
int Q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
- int gf_frame_useage = 0; // Golden frame useage since last GF
+ int gf_frame_useage = 0; /* Golden frame useage since last GF */
int tot_mbs = cpi->recent_ref_frame_usage[INTRA_FRAME] +
cpi->recent_ref_frame_usage[LAST_FRAME] +
cpi->recent_ref_frame_usage[GOLDEN_FRAME] +
@@ -1008,30 +975,29 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
int pct_gf_active = (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols);
- // Reset the last boost indicator
- //cpi->last_boost = 100;
-
if (tot_mbs)
gf_frame_useage = (cpi->recent_ref_frame_usage[GOLDEN_FRAME] + cpi->recent_ref_frame_usage[ALTREF_FRAME]) * 100 / tot_mbs;
if (pct_gf_active > gf_frame_useage)
gf_frame_useage = pct_gf_active;
- // Is a fixed manual GF frequency being used
+ /* Is a fixed manual GF frequency being used */
if (cpi->auto_gold)
{
- // For one pass throw a GF if recent frame intra useage is low or the GF useage is high
+ /* For one pass throw a GF if recent frame intra useage is
+ * low or the GF useage is high
+ */
if ((cpi->pass == 0) && (cpi->this_frame_percent_intra < 15 || gf_frame_useage >= 5))
cpi->common.refresh_golden_frame = 1;
- // Two pass GF descision
+ /* Two pass GF descision */
else if (cpi->pass == 2)
cpi->common.refresh_golden_frame = 1;
}
#if 0
- // Debug stats
+ /* Debug stats */
if (0)
{
FILE *f;
@@ -1048,7 +1014,7 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
{
#if 0
- if (0) // p_gw
+ if (0)
{
FILE *f;
@@ -1064,16 +1030,20 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
calc_gf_params(cpi);
}
- // If we are using alternate ref instead of gf then do not apply the boost
- // It will instead be applied to the altref update
- // Jims modified boost
+ /* If we are using alternate ref instead of gf then do not apply the
+ * boost It will instead be applied to the altref update Jims
+ * modified boost
+ */
if (!cpi->source_alt_ref_active)
{
if (cpi->oxcf.fixed_q < 0)
{
if (cpi->pass == 2)
{
- cpi->this_frame_target = cpi->per_frame_bandwidth; // The spend on the GF is defined in the two pass code for two pass encodes
+ /* The spend on the GF is defined in the two pass
+ * code for two pass encodes
+ */
+ cpi->this_frame_target = cpi->per_frame_bandwidth;
}
else
{
@@ -1082,14 +1052,16 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
int allocation_chunks = (frames_in_section * 100) + (Boost - 100);
int bits_in_section = cpi->inter_frame_target * frames_in_section;
- // Normalize Altboost and allocations chunck down to prevent overflow
+ /* Normalize Altboost and allocations chunck down to
+ * prevent overflow
+ */
while (Boost > 1000)
{
Boost /= 2;
allocation_chunks /= 2;
}
- // Avoid loss of precision but avoid overflow
+ /* Avoid loss of precision but avoid overflow */
if ((bits_in_section >> 7) > allocation_chunks)
cpi->this_frame_target = Boost * (bits_in_section / allocation_chunks);
else
@@ -1102,10 +1074,11 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
* cpi->last_boost) / 100;
}
- // If there is an active ARF at this location use the minimum
- // bits on this frame even if it is a contructed arf.
- // The active maximum quantizer insures that an appropriate
- // number of bits will be spent if needed for contstructed ARFs.
+ /* If there is an active ARF at this location use the minimum
+ * bits on this frame even if it is a contructed arf.
+ * The active maximum quantizer insures that an appropriate
+ * number of bits will be spent if needed for contstructed ARFs.
+ */
else
{
cpi->this_frame_target = 0;
@@ -1129,8 +1102,8 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var)
int projected_size_based_on_q = 0;
- // Clear down mmx registers to allow floating point in what follows
- vp8_clear_system_state(); //__asm emms;
+ /* Clear down mmx registers to allow floating point in what follows */
+ vp8_clear_system_state();
if (cpi->common.frame_type == KEY_FRAME)
{
@@ -1144,17 +1117,18 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var)
rate_correction_factor = cpi->rate_correction_factor;
}
- // Work out how big we would have expected the frame to be at this Q given the current correction factor.
- // Stay in double to avoid int overflow when values are large
- //projected_size_based_on_q = ((int)(.5 + rate_correction_factor * vp8_bits_per_mb[cpi->common.frame_type][Q]) * cpi->common.MBs) >> BPER_MB_NORMBITS;
+ /* Work out how big we would have expected the frame to be at this Q
+ * given the current correction factor. Stay in double to avoid int
+ * overflow when values are large
+ */
projected_size_based_on_q = (int)(((.5 + rate_correction_factor * vp8_bits_per_mb[cpi->common.frame_type][Q]) * cpi->common.MBs) / (1 << BPER_MB_NORMBITS));
- // Make some allowance for cpi->zbin_over_quant
+ /* Make some allowance for cpi->zbin_over_quant */
if (cpi->zbin_over_quant > 0)
{
int Z = cpi->zbin_over_quant;
double Factor = 0.99;
- double factor_adjustment = 0.01 / 256.0; //(double)ZBIN_OQ_MAX;
+ double factor_adjustment = 0.01 / 256.0;
while (Z > 0)
{
@@ -1168,13 +1142,13 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var)
}
}
- // Work out a size correction factor.
- //if ( cpi->this_frame_target > 0 )
- // correction_factor = (100 * cpi->projected_frame_size) / cpi->this_frame_target;
+ /* Work out a size correction factor. */
if (projected_size_based_on_q > 0)
correction_factor = (100 * cpi->projected_frame_size) / projected_size_based_on_q;
- // More heavily damped adjustment used if we have been oscillating either side of target
+ /* More heavily damped adjustment used if we have been oscillating
+ * either side of target
+ */
switch (damp_var)
{
case 0:
@@ -1189,25 +1163,23 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var)
break;
}
- //if ( (correction_factor > 102) && (Q < cpi->active_worst_quality) )
if (correction_factor > 102)
{
- // We are not already at the worst allowable quality
+ /* We are not already at the worst allowable quality */
correction_factor = (int)(100.5 + ((correction_factor - 100) * adjustment_limit));
rate_correction_factor = ((rate_correction_factor * correction_factor) / 100);
- // Keep rate_correction_factor within limits
+ /* Keep rate_correction_factor within limits */
if (rate_correction_factor > MAX_BPB_FACTOR)
rate_correction_factor = MAX_BPB_FACTOR;
}
- //else if ( (correction_factor < 99) && (Q > cpi->active_best_quality) )
else if (correction_factor < 99)
{
- // We are not already at the best allowable quality
+ /* We are not already at the best allowable quality */
correction_factor = (int)(100.5 - ((100 - correction_factor) * adjustment_limit));
rate_correction_factor = ((rate_correction_factor * correction_factor) / 100);
- // Keep rate_correction_factor within limits
+ /* Keep rate_correction_factor within limits */
if (rate_correction_factor < MIN_BPB_FACTOR)
rate_correction_factor = MIN_BPB_FACTOR;
}
@@ -1228,7 +1200,7 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
{
int Q = cpi->active_worst_quality;
- // Reset Zbin OQ value
+ /* Reset Zbin OQ value */
cpi->zbin_over_quant = 0;
if (cpi->oxcf.fixed_q >= 0)
@@ -1257,7 +1229,7 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
int bits_per_mb_at_this_q;
double correction_factor;
- // Select the appropriate correction factor based upon type of frame.
+ /* Select the appropriate correction factor based upon type of frame. */
if (cpi->common.frame_type == KEY_FRAME)
correction_factor = cpi->key_frame_rate_correction_factor;
else
@@ -1268,9 +1240,12 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
correction_factor = cpi->rate_correction_factor;
}
- // Calculate required scaling factor based on target frame size and size of frame produced using previous Q
+ /* Calculate required scaling factor based on target frame size and
+ * size of frame produced using previous Q
+ */
if (target_bits_per_frame >= (INT_MAX >> BPER_MB_NORMBITS))
- target_bits_per_mb = (target_bits_per_frame / cpi->common.MBs) << BPER_MB_NORMBITS; // Case where we would overflow int
+ /* Case where we would overflow int */
+ target_bits_per_mb = (target_bits_per_frame / cpi->common.MBs) << BPER_MB_NORMBITS;
else
target_bits_per_mb = (target_bits_per_frame << BPER_MB_NORMBITS) / cpi->common.MBs;
@@ -1295,17 +1270,19 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
while (++i <= cpi->active_worst_quality);
- // If we are at MAXQ then enable Q over-run which seeks to claw back additional bits through things like
- // the RD multiplier and zero bin size.
+ /* If we are at MAXQ then enable Q over-run which seeks to claw
+ * back additional bits through things like the RD multiplier
+ * and zero bin size.
+ */
if (Q >= MAXQ)
{
int zbin_oqmax;
double Factor = 0.99;
- double factor_adjustment = 0.01 / 256.0; //(double)ZBIN_OQ_MAX;
+ double factor_adjustment = 0.01 / 256.0;
if (cpi->common.frame_type == KEY_FRAME)
- zbin_oqmax = 0; //ZBIN_OQ_MAX/16
+ zbin_oqmax = 0;
else if (cpi->common.refresh_alt_ref_frame || (cpi->common.refresh_golden_frame && !cpi->source_alt_ref_active))
zbin_oqmax = 16;
else
@@ -1325,10 +1302,13 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
cpi->zbin_over_quant = (int)Oq;
}*/
- // Each incrment in the zbin is assumed to have a fixed effect on bitrate. This is not of course true.
- // The effect will be highly clip dependent and may well have sudden steps.
- // The idea here is to acheive higher effective quantizers than the normal maximum by expanding the zero
- // bin and hence decreasing the number of low magnitude non zero coefficients.
+ /* Each incrment in the zbin is assumed to have a fixed effect
+ * on bitrate. This is not of course true. The effect will be
+ * highly clip dependent and may well have sudden steps. The
+ * idea here is to acheive higher effective quantizers than the
+ * normal maximum by expanding the zero bin and hence
+ * decreasing the number of low magnitude non zero coefficients.
+ */
while (cpi->zbin_over_quant < zbin_oqmax)
{
cpi->zbin_over_quant ++;
@@ -1336,14 +1316,15 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
if (cpi->zbin_over_quant > zbin_oqmax)
cpi->zbin_over_quant = zbin_oqmax;
- // Adjust bits_per_mb_at_this_q estimate
+ /* Adjust bits_per_mb_at_this_q estimate */
bits_per_mb_at_this_q = (int)(Factor * bits_per_mb_at_this_q);
Factor += factor_adjustment;
if (Factor >= 0.999)
Factor = 0.999;
- if (bits_per_mb_at_this_q <= target_bits_per_mb) // Break out if we get down to the target rate
+ /* Break out if we get down to the target rate */
+ if (bits_per_mb_at_this_q <= target_bits_per_mb)
break;
}
@@ -1358,7 +1339,7 @@ static int estimate_keyframe_frequency(VP8_COMP *cpi)
{
int i;
- // Average key frame frequency
+ /* Average key frame frequency */
int av_key_frame_frequency = 0;
/* First key frame at start of sequence is a special case. We have no
@@ -1409,11 +1390,11 @@ static int estimate_keyframe_frequency(VP8_COMP *cpi)
void vp8_adjust_key_frame_context(VP8_COMP *cpi)
{
- // Clear down mmx registers to allow floating point in what follows
+ /* Clear down mmx registers to allow floating point in what follows */
vp8_clear_system_state();
- // Do we have any key frame overspend to recover?
- // Two-pass overspend handled elsewhere.
+ /* Do we have any key frame overspend to recover? */
+ /* Two-pass overspend handled elsewhere. */
if ((cpi->pass != 2)
&& (cpi->projected_frame_size > cpi->per_frame_bandwidth))
{
@@ -1447,10 +1428,12 @@ void vp8_adjust_key_frame_context(VP8_COMP *cpi)
void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit, int *frame_over_shoot_limit)
{
- // Set-up bounds on acceptable frame size:
+ /* Set-up bounds on acceptable frame size: */
if (cpi->oxcf.fixed_q >= 0)
{
- // Fixed Q scenario: frame size never outranges target (there is no target!)
+ /* Fixed Q scenario: frame size never outranges target
+ * (there is no target!)
+ */
*frame_under_shoot_limit = 0;
*frame_over_shoot_limit = INT_MAX;
}
@@ -1472,18 +1455,22 @@ void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit,
}
else
{
- // For CBR take buffer fullness into account
+ /* For CBR take buffer fullness into account */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
{
if (cpi->buffer_level >= ((cpi->oxcf.optimal_buffer_level + cpi->oxcf.maximum_buffer_size) >> 1))
{
- // Buffer is too full so relax overshoot and tighten undershoot
+ /* Buffer is too full so relax overshoot and tighten
+ * undershoot
+ */
*frame_over_shoot_limit = cpi->this_frame_target * 12 / 8;
*frame_under_shoot_limit = cpi->this_frame_target * 6 / 8;
}
else if (cpi->buffer_level <= (cpi->oxcf.optimal_buffer_level >> 1))
{
- // Buffer is too low so relax undershoot and tighten overshoot
+ /* Buffer is too low so relax undershoot and tighten
+ * overshoot
+ */
*frame_over_shoot_limit = cpi->this_frame_target * 10 / 8;
*frame_under_shoot_limit = cpi->this_frame_target * 4 / 8;
}
@@ -1493,11 +1480,13 @@ void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit,
*frame_under_shoot_limit = cpi->this_frame_target * 5 / 8;
}
}
- // VBR and CQ mode
- // Note that tighter restrictions here can help quality but hurt encode speed
+ /* VBR and CQ mode */
+ /* Note that tighter restrictions here can help quality
+ * but hurt encode speed
+ */
else
{
- // Stron overshoot limit for constrained quality
+ /* Strong overshoot limit for constrained quality */
if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
{
*frame_over_shoot_limit = cpi->this_frame_target * 11 / 8;
@@ -1512,9 +1501,10 @@ void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit,
}
}
- // For very small rate targets where the fractional adjustment
- // (eg * 7/8) may be tiny make sure there is at least a minimum
- // range.
+ /* For very small rate targets where the fractional adjustment
+ * (eg * 7/8) may be tiny make sure there is at least a minimum
+ * range.
+ */
*frame_over_shoot_limit += 200;
*frame_under_shoot_limit -= 200;
if ( *frame_under_shoot_limit < 0 )
@@ -1524,7 +1514,7 @@ void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit,
}
-// return of 0 means drop frame
+/* return of 0 means drop frame */
int vp8_pick_frame_size(VP8_COMP *cpi)
{
VP8_COMMON *cm = &cpi->common;
@@ -1535,7 +1525,7 @@ int vp8_pick_frame_size(VP8_COMP *cpi)
{
calc_pframe_target_size(cpi);
- // Check if we're dropping the frame:
+ /* Check if we're dropping the frame: */
if (cpi->drop_frame)
{
cpi->drop_frame = 0;
diff --git a/vp8/encoder/ratectrl.h b/vp8/encoder/ratectrl.h
index d4f779677..c43f08d6d 100644
--- a/vp8/encoder/ratectrl.h
+++ b/vp8/encoder/ratectrl.h
@@ -22,7 +22,7 @@ extern int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame);
extern void vp8_adjust_key_frame_context(VP8_COMP *cpi);
extern void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit, int *frame_over_shoot_limit);
-// return of 0 means drop frame
+/* return of 0 means drop frame */
extern int vp8_pick_frame_size(VP8_COMP *cpi);
#endif
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index b457f03cb..3fd9d7232 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -160,7 +160,9 @@ static void fill_token_costs(
for (j = 0; j < COEF_BANDS; j++)
for (k = 0; k < PREV_COEF_CONTEXTS; k++)
- // check for pt=0 and band > 1 if block type 0 and 0 if blocktype 1
+ /* check for pt=0 and band > 1 if block type 0
+ * and 0 if blocktype 1
+ */
if (k == 0 && j > (i == 0))
vp8_cost_tokens2(c[i][j][k], p [i][j][k], vp8_coef_tree, 2);
else
@@ -228,22 +230,22 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
double capped_q = (Qvalue < 160) ? (double)Qvalue : 160.0;
double rdconst = 2.80;
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state();
- // Further tests required to see if optimum is different
- // for key frames, golden frames and arf frames.
- // if (cpi->common.refresh_golden_frame ||
- // cpi->common.refresh_alt_ref_frame)
+ /* Further tests required to see if optimum is different
+ * for key frames, golden frames and arf frames.
+ */
cpi->RDMULT = (int)(rdconst * (capped_q * capped_q));
- // Extend rate multiplier along side quantizer zbin increases
+ /* Extend rate multiplier along side quantizer zbin increases */
if (cpi->zbin_over_quant > 0)
{
double oq_factor;
double modq;
- // Experimental code using the same basic equation as used for Q above
- // The units of cpi->zbin_over_quant are 1/128 of Q bin size
+ /* Experimental code using the same basic equation as used for Q above
+ * The units of cpi->zbin_over_quant are 1/128 of Q bin size
+ */
oq_factor = 1.0 + ((double)0.0015625 * cpi->zbin_over_quant);
modq = (int)((double)capped_q * oq_factor);
cpi->RDMULT = (int)(rdconst * (modq * modq));
@@ -307,7 +309,7 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
}
{
- // build token cost array for the type of frame we have now
+ /* build token cost array for the type of frame we have now */
FRAME_CONTEXT *l = &cpi->lfc_n;
if(cpi->common.refresh_alt_ref_frame)
@@ -326,12 +328,8 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
*/
- // TODO make these mode costs depend on last,alt or gold too. (jbb)
+ /* TODO make these mode costs depend on last,alt or gold too. (jbb) */
vp8_init_mode_costs(cpi);
-
- // TODO figure onnnnuut why making mv cost frame type dependent didn't help (jbb)
- //vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) l->mvc, flags);
-
}
}
@@ -356,14 +354,6 @@ void vp8_auto_select_speed(VP8_COMP *cpi)
#endif
- /*
- // this is done during parameter valid check
- if( cpi->oxcf.cpu_used > 16)
- cpi->oxcf.cpu_used = 16;
- if( cpi->oxcf.cpu_used < -16)
- cpi->oxcf.cpu_used = -16;
- */
-
if (cpi->avg_pick_mode_time < milliseconds_for_compress && (cpi->avg_encode_time - cpi->avg_pick_mode_time) < milliseconds_for_compress)
{
if (cpi->avg_pick_mode_time == 0)
@@ -390,10 +380,10 @@ void vp8_auto_select_speed(VP8_COMP *cpi)
cpi->avg_pick_mode_time = 0;
cpi->avg_encode_time = 0;
- // In real-time mode, cpi->speed is in [4, 16].
- if (cpi->Speed < 4) //if ( cpi->Speed < 0 )
+ /* In real-time mode, cpi->speed is in [4, 16]. */
+ if (cpi->Speed < 4)
{
- cpi->Speed = 4; //cpi->Speed = 0;
+ cpi->Speed = 4;
}
}
}
@@ -549,7 +539,7 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type, ENTROPY_CONTEXT *a,
if (c < 16)
cost += mb->token_costs [type] [vp8_coef_bands[c]] [pt] [DCT_EOB_TOKEN];
- pt = (c != !type); // is eob first coefficient;
+ pt = (c != !type); /* is eob first coefficient; */
*a = *l = pt;
return cost;
@@ -595,7 +585,7 @@ static void macro_block_yrd( MACROBLOCK *mb,
vp8_subtract_mby( mb->src_diff, *(mb->block[0].base_src),
mb->block[0].src_stride, mb->e_mbd.predictor, 16);
- // Fdct and building the 2nd order block
+ /* Fdct and building the 2nd order block */
for (beptr = mb->block; beptr < mb->block + 16; beptr += 2)
{
mb->short_fdct8x4(beptr->src_diff, beptr->coeff, 32);
@@ -603,25 +593,25 @@ static void macro_block_yrd( MACROBLOCK *mb,
*Y2DCPtr++ = beptr->coeff[16];
}
- // 2nd order fdct
+ /* 2nd order fdct */
mb->short_walsh4x4(mb_y2->src_diff, mb_y2->coeff, 8);
- // Quantization
+ /* Quantization */
for (b = 0; b < 16; b++)
{
mb->quantize_b(&mb->block[b], &mb->e_mbd.block[b]);
}
- // DC predication and Quantization of 2nd Order block
+ /* DC prediction and Quantization of 2nd Order block */
mb->quantize_b(mb_y2, x_y2);
- // Distortion
+ /* Distortion */
d = vp8_mbblock_error(mb, 1) << 2;
d += vp8_block_error(mb_y2->coeff, x_y2->dqcoeff);
*Distortion = (d >> 4);
- // rate
+ /* rate */
*Rate = vp8_rdcost_mby(mb);
}
@@ -787,7 +777,7 @@ static int rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
int this_rd;
MACROBLOCKD *xd = &x->e_mbd;
- //Y Search for 16x16 intra prediction mode
+ /* Y Search for 16x16 intra prediction mode */
for (mode = DC_PRED; mode <= TM_PRED; mode++)
{
xd->mode_info_context->mbmi.mode = mode;
@@ -984,8 +974,9 @@ static int labels2mode(
m = ABOVE4X4;
else
{
- // the only time we should do costing for new motion vector or mode
- // is when we are on a new label (jbb May 08, 2007)
+ /* the only time we should do costing for new motion vector
+ * or mode is when we are on a new label (jbb May 08, 2007)
+ */
switch (m = this_mode)
{
case NEW4X4 :
@@ -1004,7 +995,7 @@ static int labels2mode(
break;
}
- if (m == ABOVE4X4) // replace above with left if same
+ if (m == ABOVE4X4) /* replace above with left if same */
{
int_mv left_mv;
@@ -1065,9 +1056,6 @@ static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x, int const *labels
vp8_build_inter_predictors_b(bd, 16, base_pre, pre_stride, x->e_mbd.subpixel_predict);
vp8_subtract_b(be, bd, 16);
x->short_fdct4x4(be->src_diff, be->coeff, 32);
-
- // set to 0 no way to account for 2nd order DC so discount
- //be->coeff[0] = 0;
x->quantize_b(be, bd);
distortion += vp8_block_error(be->coeff, bd->dqcoeff);
@@ -1098,8 +1086,8 @@ typedef struct
int mvthresh;
int *mdcounts;
- int_mv sv_mvp[4]; // save 4 mvp from 8x8
- int sv_istep[2]; // save 2 initial step_param for 16x8/8x16
+ int_mv sv_mvp[4]; /* save 4 mvp from 8x8 */
+ int sv_istep[2]; /* save 2 initial step_param for 16x8/8x16 */
} BEST_SEG_INFO;
@@ -1146,13 +1134,13 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
labels = vp8_mbsplits[segmentation];
label_count = vp8_mbsplit_count[segmentation];
- // 64 makes this threshold really big effectively
- // making it so that we very rarely check mvs on
- // segments. setting this to 1 would make mv thresh
- // roughly equal to what it is for macroblocks
+ /* 64 makes this threshold really big effectively making it so that we
+ * very rarely check mvs on segments. setting this to 1 would make mv
+ * thresh roughly equal to what it is for macroblocks
+ */
label_mv_thresh = 1 * bsi->mvthresh / label_count ;
- // Segmentation method overheads
+ /* Segmentation method overheads */
rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs, vp8_mbsplit_encodings + segmentation);
rate += vp8_cost_mv_ref(SPLITMV, bsi->mdcounts);
this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
@@ -1165,7 +1153,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
B_PREDICTION_MODE mode_selected = ZERO4X4;
int bestlabelyrate = 0;
- // search for the best motion vector on this segment
+ /* search for the best motion vector on this segment */
for (this_mode = LEFT4X4; this_mode <= NEW4X4 ; this_mode ++)
{
int this_rd;
@@ -1194,7 +1182,9 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
BLOCK *c;
BLOCKD *e;
- // Is the best so far sufficiently good that we cant justify doing and new motion search.
+ /* Is the best so far sufficiently good that we can't justify
+ * doing a new motion search.
+ */
if (best_label_rd < label_mv_thresh)
break;
@@ -1209,7 +1199,9 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
step_param = bsi->sv_istep[i];
}
- // use previous block's result as next block's MV predictor.
+ /* use previous block's result as next block's MV
+ * predictor.
+ */
if (segmentation == BLOCK_4X4 && i>0)
{
bsi->mvp.as_int = x->e_mbd.block[i-1].bmi.mv.as_int;
@@ -1228,7 +1220,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
mvp_full.as_mv.row = bsi->mvp.as_mv.row >>3;
mvp_full.as_mv.col = bsi->mvp.as_mv.col >>3;
- // find first label
+ /* find first label */
n = vp8_mbsplit_offset[segmentation][i];
c = &x->block[n];
@@ -1268,7 +1260,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
sseshift = segmentation_to_sseshift[segmentation];
- // Should we do a full search (best quality only)
+ /* Should we do a full search (best quality only) */
if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000)
{
/* Check if mvp_full is within the range. */
@@ -1285,7 +1277,9 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
}
else
{
- // The full search result is actually worse so re-instate the previous best vector
+ /* The full search result is actually worse so
+ * re-instate the previous best vector
+ */
e->bmi.mv.as_int = mode_mv[NEW4X4].as_int;
}
}
@@ -1305,7 +1299,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
bsi->ref_mv, x->mvcost);
- // Trap vectors that reach beyond the UMV borders
+ /* Trap vectors that reach beyond the UMV borders */
if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
{
@@ -1357,7 +1351,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
bsi->segment_rd = this_segment_rd;
bsi->segment_num = segmentation;
- // store everything needed to come back to this!!
+ /* store everything needed to come back to this!! */
for (i = 0; i < 16; i++)
{
bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
@@ -1519,7 +1513,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
return bsi.segment_rd;
}
-//The improved MV prediction
+/* The improved MV prediction */
void vp8_mv_pred
(
VP8_COMP *cpi,
@@ -1553,7 +1547,9 @@ void vp8_mv_pred
near_mvs[0].as_int = near_mvs[1].as_int = near_mvs[2].as_int = near_mvs[3].as_int = near_mvs[4].as_int = near_mvs[5].as_int = near_mvs[6].as_int = near_mvs[7].as_int = 0;
near_ref[0] = near_ref[1] = near_ref[2] = near_ref[3] = near_ref[4] = near_ref[5] = near_ref[6] = near_ref[7] = 0;
- // read in 3 nearby block's MVs from current frame as prediction candidates.
+ /* read in 3 nearby block's MVs from current frame as prediction
+ * candidates.
+ */
if (above->mbmi.ref_frame != INTRA_FRAME)
{
near_mvs[vcnt].as_int = above->mbmi.mv.as_int;
@@ -1576,12 +1572,12 @@ void vp8_mv_pred
}
vcnt++;
- // read in 5 nearby block's MVs from last frame.
+ /* read in 5 nearby block's MVs from last frame. */
if(cpi->common.last_frame_type != KEY_FRAME)
{
mb_offset = (-xd->mb_to_top_edge/128 + 1) * (xd->mode_info_stride +1) + (-xd->mb_to_left_edge/128 +1) ;
- // current in last frame
+ /* current in last frame */
if (cpi->lf_ref_frame[mb_offset] != INTRA_FRAME)
{
near_mvs[vcnt].as_int = cpi->lfmv[mb_offset].as_int;
@@ -1590,7 +1586,7 @@ void vp8_mv_pred
}
vcnt++;
- // above in last frame
+ /* above in last frame */
if (cpi->lf_ref_frame[mb_offset - xd->mode_info_stride-1] != INTRA_FRAME)
{
near_mvs[vcnt].as_int = cpi->lfmv[mb_offset - xd->mode_info_stride-1].as_int;
@@ -1599,7 +1595,7 @@ void vp8_mv_pred
}
vcnt++;
- // left in last frame
+ /* left in last frame */
if (cpi->lf_ref_frame[mb_offset-1] != INTRA_FRAME)
{
near_mvs[vcnt].as_int = cpi->lfmv[mb_offset -1].as_int;
@@ -1608,7 +1604,7 @@ void vp8_mv_pred
}
vcnt++;
- // right in last frame
+ /* right in last frame */
if (cpi->lf_ref_frame[mb_offset +1] != INTRA_FRAME)
{
near_mvs[vcnt].as_int = cpi->lfmv[mb_offset +1].as_int;
@@ -1617,7 +1613,7 @@ void vp8_mv_pred
}
vcnt++;
- // below in last frame
+ /* below in last frame */
if (cpi->lf_ref_frame[mb_offset + xd->mode_info_stride +1] != INTRA_FRAME)
{
near_mvs[vcnt].as_int = cpi->lfmv[mb_offset + xd->mode_info_stride +1].as_int;
@@ -1658,7 +1654,9 @@ void vp8_mv_pred
mv.as_mv.col = mvy[vcnt/2];
find = 1;
- //sr is set to 0 to allow calling function to decide the search range.
+ /* sr is set to 0 to allow calling function to decide the search
+ * range.
+ */
*sr = 0;
}
}
@@ -1670,21 +1668,24 @@ void vp8_mv_pred
void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffset, int near_sadidx[])
{
-
- int near_sad[8] = {0}; // 0-cf above, 1-cf left, 2-cf aboveleft, 3-lf current, 4-lf above, 5-lf left, 6-lf right, 7-lf below
+ /* near_sad indexes:
+ * 0-cf above, 1-cf left, 2-cf aboveleft,
+ * 3-lf current, 4-lf above, 5-lf left, 6-lf right, 7-lf below
+ */
+ int near_sad[8] = {0};
BLOCK *b = &x->block[0];
unsigned char *src_y_ptr = *(b->base_src);
- //calculate sad for current frame 3 nearby MBs.
+ /* calculate sad for current frame 3 nearby MBs. */
if( xd->mb_to_top_edge==0 && xd->mb_to_left_edge ==0)
{
near_sad[0] = near_sad[1] = near_sad[2] = INT_MAX;
}else if(xd->mb_to_top_edge==0)
- { //only has left MB for sad calculation.
+ { /* only has left MB for sad calculation. */
near_sad[0] = near_sad[2] = INT_MAX;
near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - 16,xd->dst.y_stride, 0x7fffffff);
}else if(xd->mb_to_left_edge ==0)
- { //only has left MB for sad calculation.
+ { /* only has left MB for sad calculation. */
near_sad[1] = near_sad[2] = INT_MAX;
near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride *16,xd->dst.y_stride, 0x7fffffff);
}else
@@ -1696,7 +1697,7 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffse
if(cpi->common.last_frame_type != KEY_FRAME)
{
- //calculate sad for last frame 5 nearby MBs.
+ /* calculate sad for last frame 5 nearby MBs. */
unsigned char *pre_y_buffer = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_buffer + recon_yoffset;
int pre_y_stride = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_stride;
@@ -1787,7 +1788,7 @@ static int evaluate_inter_mode_rd(int mdcounts[4],
if ((sse - var < q2dc * q2dc >>4) ||
(sse /2 > var && sse-var < 64))
{
- // Check u and v to make sure skip is ok
+ /* Check u and v to make sure skip is ok */
unsigned int sse2 = VP8_UVSSE(x);
if (sse2 * 2 < threshold)
{
@@ -1808,17 +1809,15 @@ static int evaluate_inter_mode_rd(int mdcounts[4],
}
- //intermodecost[mode_index] = vp8_cost_mv_ref(this_mode, mdcounts); // Experimental debug code
-
- // Add in the Mv/mode cost
+ /* Add in the Mv/mode cost */
rd->rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
- // Y cost and distortion
+ /* Y cost and distortion */
macro_block_yrd(x, &rd->rate_y, &distortion);
rd->rate2 += rd->rate_y;
rd->distortion2 += distortion;
- // UV cost and distortion
+ /* UV cost and distortion */
rd_inter16x16_uv(cpi, x, &rd->rate_uv, &rd->distortion_uv,
cpi->common.full_pixel);
rd->rate2 += rd->rate_uv;
@@ -1835,9 +1834,11 @@ static int calculate_final_rd_costs(int this_rd,
VP8_COMP *cpi, MACROBLOCK *x)
{
MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
- // Where skip is allowable add in the default per mb cost for the no skip case.
- // where we then decide to skip we have to delete this and replace it with the
- // cost of signallying a skip
+
+ /* Where skip is allowable add in the default per mb cost for the no
+ * skip case. where we then decide to skip we have to delete this and
+ * replace it with the cost of signalling a skip
+ */
if (cpi->common.mb_no_coeff_skip)
{
*other_cost += vp8_cost_bit(cpi->prob_skip_false, 0);
@@ -1852,7 +1853,10 @@ static int calculate_final_rd_costs(int this_rd,
if (!disable_skip)
{
- // Test for the condition where skip block will be activated because there are no non zero coefficients and make any necessary adjustment for rate
+ /* Test for the condition where skip block will be activated
+ * because there are no non zero coefficients and make any
+ * necessary adjustment for rate
+ */
if (cpi->common.mb_no_coeff_skip)
{
int i;
@@ -1877,10 +1881,10 @@ static int calculate_final_rd_costs(int this_rd,
if (tteob == 0)
{
rd->rate2 -= (rd->rate_y + rd->rate_uv);
- //for best_yrd calculation
+ /* for best_yrd calculation */
rd->rate_uv = 0;
- // Back out no skip flag costing and add in skip flag costing
+ /* Back out no skip flag costing and add in skip flag costing */
if (cpi->prob_skip_false)
{
int prob_skip_cost;
@@ -1892,7 +1896,7 @@ static int calculate_final_rd_costs(int this_rd,
}
}
}
- // Calculate the final RD estimate for this mode
+ /* Calculate the final RD estimate for this mode */
this_rd = RDCOST(x->rdmult, x->rddiv, rd->rate2, rd->distortion2);
if (this_rd < INT_MAX && x->e_mbd.mode_info_context->mbmi.ref_frame
== INTRA_FRAME)
@@ -1956,7 +1960,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int_mv mvp;
int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
int saddone=0;
- int sr=0; //search range got from mv_pred(). It uses step_param levels. (0-7)
+ /* search range got from mv_pred(). It uses step_param levels. (0-7) */
+ int sr=0;
unsigned char *plane[4][3];
int ref_frame_map[4];
@@ -2002,7 +2007,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
*returnintra = INT_MAX;
- cpi->mbs_tested_so_far++; // Count of the number of MBs tested so far this frame
+ /* Count of the number of MBs tested so far this frame */
+ cpi->mbs_tested_so_far++;
x->skip = 0;
@@ -2013,14 +2019,16 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int other_cost = 0;
int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
- // Test best rd so far against threshold for trying this mode.
+ /* Test best rd so far against threshold for trying this mode. */
if (best_mode.rd <= cpi->rd_threshes[mode_index])
continue;
if (this_ref_frame < 0)
continue;
- // These variables hold are rolling total cost and distortion for this mode
+ /* These variables hold our rolling total cost and distortion for
+ * this mode
+ */
rd.rate2 = 0;
rd.distortion2 = 0;
@@ -2029,9 +2037,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
x->e_mbd.mode_info_context->mbmi.mode = this_mode;
x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
- // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
- // unless ARNR filtering is enabled in which case we want
- // an unfiltered alternative
+ /* Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
+ * unless ARNR filtering is enabled in which case we want
+ * an unfiltered alternative
+ */
if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
{
if (this_mode != ZEROMV || x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
@@ -2053,13 +2062,17 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
}
- // Check to see if the testing frequency for this mode is at its max
- // If so then prevent it from being tested and increase the threshold for its testing
+ /* Check to see if the testing frequency for this mode is at its
+ * max If so then prevent it from being tested and increase the
+ * threshold for its testing
+ */
if (cpi->mode_test_hit_counts[mode_index] && (cpi->mode_check_freq[mode_index] > 1))
{
if (cpi->mbs_tested_so_far <= cpi->mode_check_freq[mode_index] * cpi->mode_test_hit_counts[mode_index])
{
- // Increase the threshold for coding this mode to make it less likely to be chosen
+ /* Increase the threshold for coding this mode to make it
+ * less likely to be chosen
+ */
cpi->rd_thresh_mult[mode_index] += 4;
if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
@@ -2071,10 +2084,15 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
}
- // We have now reached the point where we are going to test the current mode so increment the counter for the number of times it has been tested
+ /* We have now reached the point where we are going to test the
+ * current mode so increment the counter for the number of times
+ * it has been tested
+ */
cpi->mode_test_hit_counts[mode_index] ++;
- // Experimental code. Special case for gf and arf zeromv modes. Increase zbin size to supress noise
+ /* Experimental code. Special case for gf and arf zeromv modes.
+ * Increase zbin size to suppress noise
+ */
if (cpi->zbin_mode_boost_enabled)
{
if ( this_ref_frame == INTRA_FRAME )
@@ -2121,7 +2139,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
{
int tmp_rd;
- // Note the rate value returned here includes the cost of coding the BPRED mode : x->mbmode_cost[x->e_mbd.frame_type][BPRED];
+ /* Note the rate value returned here includes the cost of
+ * coding the BPRED mode: x->mbmode_cost[x->e_mbd.frame_type][BPRED]
+ */
int distortion;
tmp_rd = rd_pick_intra4x4mby_modes(cpi, x, &rate, &rd.rate_y, &distortion, best_mode.yrd);
rd.rate2 += rate;
@@ -2158,10 +2178,12 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
rd.rate2 += rate;
rd.distortion2 += distortion;
- // If even the 'Y' rd value of split is higher than best so far then dont bother looking at UV
+ /* If even the 'Y' rd value of split is higher than best so far
+ * then dont bother looking at UV
+ */
if (tmp_rd < best_mode.yrd)
{
- // Now work out UV cost and add it in
+ /* Now work out UV cost and add it in */
rd_inter4x4_uv(cpi, x, &rd.rate_uv, &rd.distortion_uv, cpi->common.full_pixel);
rd.rate2 += rd.rate_uv;
rd.distortion2 += rd.distortion_uv;
@@ -2233,7 +2255,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
mvp_full.as_mv.col = mvp.as_mv.col>>3;
mvp_full.as_mv.row = mvp.as_mv.row>>3;
- // Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
+ /* Get intersection of UMV window and valid MV window to
+ * reduce # of checks in diamond search.
+ */
if (x->mv_col_min < col_min )
x->mv_col_min = col_min;
if (x->mv_col_max > col_max )
@@ -2243,11 +2267,11 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
if (x->mv_row_max > row_max )
x->mv_row_max = row_max;
- //adjust search range according to sr from mv prediction
+ /* adjust search range according to sr from mv prediction */
if(sr > step_param)
step_param = sr;
- // Initial step/diamond search
+ /* Initial step/diamond search */
{
bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full, &d->bmi.mv,
step_param, sadpb, &num00,
@@ -2255,7 +2279,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
x->mvcost, &best_ref_mv);
mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
- // Further step/diamond searches as necessary
+ /* Further step/diamond searches as necessary */
n = 0;
further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
@@ -2301,11 +2325,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
{
int search_range;
- //It seems not a good way to set search_range. Need further investigation.
- //search_range = MAXF(abs((mvp.row>>3) - d->bmi.mv.as_mv.row), abs((mvp.col>>3) - d->bmi.mv.as_mv.col));
search_range = 8;
- //thissme = cpi->full_search_sad(x, b, d, &d->bmi.mv.as_mv, sadpb, search_range, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
thissme = cpi->refining_search_sad(x, b, d, &d->bmi.mv, sadpb,
search_range, &cpi->fn_ptr[BLOCK_16X16],
x->mvcost, &best_ref_mv);
@@ -2338,24 +2359,31 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
- // Add the new motion vector cost to our rolling cost variable
+ /* Add the new motion vector cost to our rolling cost variable */
rd.rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, x->mvcost, 96);
}
case NEARESTMV:
case NEARMV:
- // Clip "next_nearest" so that it does not extend to far out of image
+ /* Clip "next_nearest" so that it does not extend too far out
+ * of image
+ */
vp8_clamp_mv2(&mode_mv[this_mode], xd);
- // Do not bother proceeding if the vector (from newmv,nearest or near) is 0,0 as this should then be coded using the zeromv mode.
+ /* Do not bother proceeding if the vector (from newmv, nearest
+ * or near) is 0,0 as this should then be coded using the zeromv
+ * mode.
+ */
if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) && (mode_mv[this_mode].as_int == 0))
continue;
case ZEROMV:
- // Trap vectors that reach beyond the UMV borders
- // Note that ALL New MV, Nearest MV Near MV and Zero MV code drops through to this point
- // because of the lack of break statements in the previous two cases.
+ /* Trap vectors that reach beyond the UMV borders
+ * Note that ALL New MV, Nearest MV Near MV and Zero MV code
+ * drops through to this point because of the lack of break
+ * statements in the previous two cases.
+ */
if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
continue;
@@ -2373,7 +2401,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
disable_skip, uv_intra_tteob,
intra_rd_penalty, cpi, x);
- // Keep record of best intra distortion
+ /* Keep record of best intra distortion */
if ((x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) &&
(this_rd < best_mode.intra_rd) )
{
@@ -2390,7 +2418,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
if (sse < best_rd_sse)
best_rd_sse = sse;
- // Store for later use by denoiser.
+ /* Store for later use by denoiser. */
if (this_mode == ZEROMV && sse < zero_mv_sse )
{
zero_mv_sse = sse;
@@ -2398,7 +2426,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
x->e_mbd.mode_info_context->mbmi.ref_frame;
}
- // Store the best NEWMV in x for later use in the denoiser.
+ /* Store the best NEWMV in x for later use in the denoiser. */
if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV &&
sse < best_sse)
{
@@ -2415,10 +2443,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
#endif
- // Did this mode help.. i.i is it the new best mode
+ /* Did this mode help.. i.e. is it the new best mode */
if (this_rd < best_mode.rd || x->skip)
{
- // Note index of best mode so far
+ /* Note index of best mode so far */
best_mode_index = mode_index;
*returnrate = rd.rate2;
*returndistortion = rd.distortion2;
@@ -2431,12 +2459,16 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
update_best_mode(&best_mode, this_rd, &rd, other_cost, x);
- // Testing this mode gave rise to an improvement in best error score. Lower threshold a bit for next time
+ /* Testing this mode gave rise to an improvement in best error
+ * score. Lower threshold a bit for next time
+ */
cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
}
- // If the mode did not help improve the best error case then raise the threshold for testing that mode next time around.
+ /* If the mode did not help improve the best error case then raise
+ * the threshold for testing that mode next time around.
+ */
else
{
cpi->rd_thresh_mult[mode_index] += 4;
@@ -2452,33 +2484,16 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
- // Reduce the activation RD thresholds for the best choice mode
+ /* Reduce the activation RD thresholds for the best choice mode */
if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
{
int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 2);
cpi->rd_thresh_mult[best_mode_index] = (cpi->rd_thresh_mult[best_mode_index] >= (MIN_THRESHMULT + best_adjustment)) ? cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT;
cpi->rd_threshes[best_mode_index] = (cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index];
-
- // If we chose a split mode then reset the new MV thresholds as well
- /*if ( vp8_mode_order[best_mode_index] == SPLITMV )
- {
- best_adjustment = 4; //(cpi->rd_thresh_mult[THR_NEWMV] >> 4);
- cpi->rd_thresh_mult[THR_NEWMV] = (cpi->rd_thresh_mult[THR_NEWMV] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWMV]-best_adjustment: MIN_THRESHMULT;
- cpi->rd_threshes[THR_NEWMV] = (cpi->rd_baseline_thresh[THR_NEWMV] >> 7) * cpi->rd_thresh_mult[THR_NEWMV];
-
- best_adjustment = 4; //(cpi->rd_thresh_mult[THR_NEWG] >> 4);
- cpi->rd_thresh_mult[THR_NEWG] = (cpi->rd_thresh_mult[THR_NEWG] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWG]-best_adjustment: MIN_THRESHMULT;
- cpi->rd_threshes[THR_NEWG] = (cpi->rd_baseline_thresh[THR_NEWG] >> 7) * cpi->rd_thresh_mult[THR_NEWG];
-
- best_adjustment = 4; //(cpi->rd_thresh_mult[THR_NEWA] >> 4);
- cpi->rd_thresh_mult[THR_NEWA] = (cpi->rd_thresh_mult[THR_NEWA] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWA]-best_adjustment: MIN_THRESHMULT;
- cpi->rd_threshes[THR_NEWA] = (cpi->rd_baseline_thresh[THR_NEWA] >> 7) * cpi->rd_thresh_mult[THR_NEWA];
- }*/
-
}
- // Note how often each mode chosen as best
+ /* Note how often each mode chosen as best */
cpi->mode_chosen_counts[best_mode_index] ++;
#if CONFIG_TEMPORAL_DENOISING
@@ -2486,7 +2501,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
{
if (x->best_sse_inter_mode == DC_PRED)
{
- // No best MV found.
+ /* No best MV found. */
x->best_sse_inter_mode = best_mode.mbmode.mode;
x->best_sse_mv = best_mode.mbmode.mv;
x->need_to_clamp_best_mvs = best_mode.mbmode.need_to_clamp_mvs;
@@ -2497,7 +2512,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
recon_yoffset, recon_uvoffset);
- // Reevaluate ZEROMV after denoising.
+ /* Reevaluate ZEROMV after denoising. */
if (best_mode.mbmode.ref_frame == INTRA_FRAME &&
x->best_zeromv_reference_frame != INTRA_FRAME)
{
@@ -2509,7 +2524,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
vp8_cost_mv_ref(ZEROMV, mdcounts);
rd.distortion2 = 0;
- // set up the proper prediction buffers for the frame
+ /* set up the proper prediction buffers for the frame */
x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
@@ -2525,7 +2540,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
intra_rd_penalty, cpi, x);
if (this_rd < best_mode.rd || x->skip)
{
- // Note index of best mode so far
+ /* Note index of best mode so far */
best_mode_index = mode_index;
*returnrate = rd.rate2;
*returndistortion = rd.distortion2;
@@ -2550,7 +2565,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
- // macroblock modes
+ /* macroblock modes */
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mode.mbmode, sizeof(MB_MODE_INFO));
if (best_mode.mbmode.mode == B_PRED)
diff --git a/vp8/encoder/segmentation.c b/vp8/encoder/segmentation.c
index fc0967db3..37972e219 100644
--- a/vp8/encoder/segmentation.c
+++ b/vp8/encoder/segmentation.c
@@ -22,22 +22,24 @@ void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x)
if ((cm->frame_type == KEY_FRAME) || (cm->refresh_golden_frame))
{
- // Reset Gf useage monitors
+ /* Reset Gf useage monitors */
vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
}
else
{
- // for each macroblock row in image
+ /* for each macroblock row in image */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
- // for each macroblock col in image
+ /* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
- // If using golden then set GF active flag if not already set.
- // If using last frame 0,0 mode then leave flag as it is
- // else if using non 0,0 motion or intra modes then clear flag if it is currently set
+ /* If using golden then set GF active flag if not already set.
+ * If using last frame 0,0 mode then leave flag as it is
+ * else if using non 0,0 motion or intra modes then clear
+ * flag if it is currently set
+ */
if ((this_mb_mode_info->mbmi.ref_frame == GOLDEN_FRAME) || (this_mb_mode_info->mbmi.ref_frame == ALTREF_FRAME))
{
if (*(x->gf_active_ptr) == 0)
@@ -52,12 +54,12 @@ void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x)
cpi->gf_active_count--;
}
- x->gf_active_ptr++; // Step onto next entry
- this_mb_mode_info++; // skip to next mb
+ x->gf_active_ptr++; /* Step onto next entry */
+ this_mb_mode_info++; /* skip to next mb */
}
- // this is to account for the border
+ /* this is to account for the border */
this_mb_mode_info++;
}
}
diff --git a/vp8/encoder/temporal_filter.c b/vp8/encoder/temporal_filter.c
index b391d5ac0..b83ae89ab 100644
--- a/vp8/encoder/temporal_filter.c
+++ b/vp8/encoder/temporal_filter.c
@@ -30,8 +30,8 @@
#include <math.h>
#include <limits.h>
-#define ALT_REF_MC_ENABLED 1 // dis/enable MC in AltRef filtering
-#define ALT_REF_SUBPEL_ENABLED 1 // dis/enable subpel in MC AltRef filtering
+#define ALT_REF_MC_ENABLED 1 /* dis/enable MC in AltRef filtering */
+#define ALT_REF_SUBPEL_ENABLED 1 /* dis/enable subpel in MC AltRef filtering */
#if VP8_TEMPORAL_ALT_REF
@@ -50,7 +50,7 @@ static void vp8_temporal_filter_predictors_mb_c
int offset;
unsigned char *yptr, *uptr, *vptr;
- // Y
+ /* Y */
yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3);
if ((mv_row | mv_col) & 7)
@@ -63,7 +63,7 @@ static void vp8_temporal_filter_predictors_mb_c
vp8_copy_mem16x16(yptr, stride, &pred[0], 16);
}
- // U & V
+ /* U & V */
mv_row >>= 1;
mv_col >>= 1;
stride = (stride + 1) >> 1;
@@ -109,9 +109,10 @@ void vp8_temporal_filter_apply_c
int pixel_value = *frame2++;
modifier = src_byte - pixel_value;
- // This is an integer approximation of:
- // float coeff = (3.0 * modifer * modifier) / pow(2, strength);
- // modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff);
+ /* This is an integer approximation of:
+ * float coeff = (3.0 * modifer * modifier) / pow(2, strength);
+ * modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff);
+ */
modifier *= modifier;
modifier *= 3;
modifier += 1 << (strength - 1);
@@ -154,7 +155,7 @@ static int vp8_temporal_filter_find_matching_mb_c
int_mv best_ref_mv1;
int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
- // Save input state
+ /* Save input state */
unsigned char **base_src = b->base_src;
int src = b->src;
int src_stride = b->src_stride;
@@ -166,7 +167,7 @@ static int vp8_temporal_filter_find_matching_mb_c
best_ref_mv1_full.as_mv.col = best_ref_mv1.as_mv.col >>3;
best_ref_mv1_full.as_mv.row = best_ref_mv1.as_mv.row >>3;
- // Setup frame pointers
+ /* Setup frame pointers */
b->base_src = &arf_frame->y_buffer;
b->src_stride = arf_frame->y_stride;
b->src = mb_offset;
@@ -175,7 +176,7 @@ static int vp8_temporal_filter_find_matching_mb_c
x->e_mbd.pre.y_stride = frame_ptr->y_stride;
d->offset = mb_offset;
- // Further step/diamond searches as necessary
+ /* Further step/diamond searches as necessary */
if (cpi->Speed < 8)
{
step_param = cpi->sf.first_step + (cpi->Speed > 5);
@@ -185,21 +186,19 @@ static int vp8_temporal_filter_find_matching_mb_c
step_param = cpi->sf.first_step + 2;
}
- /*cpi->sf.search_method == HEX*/
- // TODO Check that the 16x16 vf & sdf are selected here
- // Ignore mv costing by sending NULL cost arrays
+ /* TODO Check that the 16x16 vf & sdf are selected here */
+ /* Ignore mv costing by sending NULL cost arrays */
bestsme = vp8_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.mv,
step_param, sadpb,
&cpi->fn_ptr[BLOCK_16X16],
NULL, NULL, &best_ref_mv1);
#if ALT_REF_SUBPEL_ENABLED
- // Try sub-pixel MC?
- //if (bestsme > error_thresh && bestsme < INT_MAX)
+ /* Try sub-pixel MC? */
{
int distortion;
unsigned int sse;
- // Ignore mv costing by sending NULL cost array
+ /* Ignore mv costing by sending NULL cost array */
bestsme = cpi->find_fractional_mv_step(x, b, d,
&d->bmi.mv,
&best_ref_mv1,
@@ -209,7 +208,7 @@ static int vp8_temporal_filter_find_matching_mb_c
}
#endif
- // Save input state
+ /* Save input state */
b->base_src = base_src;
b->src = src;
b->src_stride = src_stride;
@@ -244,7 +243,7 @@ static void vp8_temporal_filter_iterate_c
unsigned char *dst1, *dst2;
DECLARE_ALIGNED_ARRAY(16, unsigned char, predictor, 16*16 + 8*8 + 8*8);
- // Save input state
+ /* Save input state */
unsigned char *y_buffer = mbd->pre.y_buffer;
unsigned char *u_buffer = mbd->pre.u_buffer;
unsigned char *v_buffer = mbd->pre.v_buffer;
@@ -252,16 +251,17 @@ static void vp8_temporal_filter_iterate_c
for (mb_row = 0; mb_row < mb_rows; mb_row++)
{
#if ALT_REF_MC_ENABLED
- // Source frames are extended to 16 pixels. This is different than
- // L/A/G reference frames that have a border of 32 (VP8BORDERINPIXELS)
- // A 6 tap filter is used for motion search. This requires 2 pixels
- // before and 3 pixels after. So the largest Y mv on a border would
- // then be 16 - 3. The UV blocks are half the size of the Y and
- // therefore only extended by 8. The largest mv that a UV block
- // can support is 8 - 3. A UV mv is half of a Y mv.
- // (16 - 3) >> 1 == 6 which is greater than 8 - 3.
- // To keep the mv in play for both Y and UV planes the max that it
- // can be on a border is therefore 16 - 5.
+ /* Source frames are extended to 16 pixels. This is different than
+ * L/A/G reference frames that have a border of 32 (VP8BORDERINPIXELS)
+ * A 6 tap filter is used for motion search. This requires 2 pixels
+ * before and 3 pixels after. So the largest Y mv on a border would
+ * then be 16 - 3. The UV blocks are half the size of the Y and
+ * therefore only extended by 8. The largest mv that a UV block
+ * can support is 8 - 3. A UV mv is half of a Y mv.
+ * (16 - 3) >> 1 == 6 which is greater than 8 - 3.
+ * To keep the mv in play for both Y and UV planes the max that it
+ * can be on a border is therefore 16 - 5.
+ */
cpi->mb.mv_row_min = -((mb_row * 16) + (16 - 5));
cpi->mb.mv_row_max = ((cpi->common.mb_rows - 1 - mb_row) * 16)
+ (16 - 5);
@@ -299,7 +299,7 @@ static void vp8_temporal_filter_iterate_c
#if ALT_REF_MC_ENABLED
#define THRESH_LOW 10000
#define THRESH_HIGH 20000
- // Find best match in this frame by MC
+ /* Find best match in this frame by MC */
err = vp8_temporal_filter_find_matching_mb_c
(cpi,
cpi->frames[alt_ref_index],
@@ -307,16 +307,17 @@ static void vp8_temporal_filter_iterate_c
mb_y_offset,
THRESH_LOW);
#endif
- // Assign higher weight to matching MB if it's error
- // score is lower. If not applying MC default behavior
- // is to weight all MBs equal.
+ /* Assign higher weight to matching MB if it's error
+ * score is lower. If not applying MC default behavior
+ * is to weight all MBs equal.
+ */
filter_weight = err<THRESH_LOW
? 2 : err<THRESH_HIGH ? 1 : 0;
}
if (filter_weight != 0)
{
- // Construct the predictors
+ /* Construct the predictors */
vp8_temporal_filter_predictors_mb_c
(mbd,
cpi->frames[frame]->y_buffer + mb_y_offset,
@@ -327,7 +328,7 @@ static void vp8_temporal_filter_iterate_c
mbd->block[0].bmi.mv.as_mv.col,
predictor);
- // Apply the filter (YUV)
+ /* Apply the filter (YUV) */
vp8_temporal_filter_apply
(f->y_buffer + mb_y_offset,
f->y_stride,
@@ -360,7 +361,7 @@ static void vp8_temporal_filter_iterate_c
}
}
- // Normalize filter output to produce AltRef frame
+ /* Normalize filter output to produce AltRef frame */
dst1 = cpi->alt_ref_buffer.y_buffer;
stride = cpi->alt_ref_buffer.y_stride;
byte = mb_y_offset;
@@ -374,7 +375,7 @@ static void vp8_temporal_filter_iterate_c
dst1[byte] = (unsigned char)pval;
- // move to next pixel
+ /* move to next pixel */
byte++;
}
@@ -391,19 +392,19 @@ static void vp8_temporal_filter_iterate_c
{
int m=k+64;
- // U
+ /* U */
unsigned int pval = accumulator[k] + (count[k] >> 1);
pval *= cpi->fixed_divide[count[k]];
pval >>= 19;
dst1[byte] = (unsigned char)pval;
- // V
+ /* V */
pval = accumulator[m] + (count[m] >> 1);
pval *= cpi->fixed_divide[count[m]];
pval >>= 19;
dst2[byte] = (unsigned char)pval;
- // move to next pixel
+ /* move to next pixel */
byte++;
}
@@ -418,7 +419,7 @@ static void vp8_temporal_filter_iterate_c
mb_uv_offset += 8*(f->uv_stride-mb_cols);
}
- // Restore input state
+ /* Restore input state */
mbd->pre.y_buffer = y_buffer;
mbd->pre.u_buffer = u_buffer;
mbd->pre.v_buffer = v_buffer;
@@ -452,8 +453,7 @@ void vp8_temporal_filter_prepare_c
switch (blur_type)
{
case 1:
- /////////////////////////////////////////
- // Backward Blur
+ /* Backward Blur */
frames_to_blur_backward = num_frames_backward;
@@ -464,8 +464,7 @@ void vp8_temporal_filter_prepare_c
break;
case 2:
- /////////////////////////////////////////
- // Forward Blur
+ /* Forward Blur */
frames_to_blur_forward = num_frames_forward;
@@ -477,8 +476,7 @@ void vp8_temporal_filter_prepare_c
case 3:
default:
- /////////////////////////////////////////
- // Center Blur
+ /* Center Blur */
frames_to_blur_forward = num_frames_forward;
frames_to_blur_backward = num_frames_backward;
@@ -488,7 +486,7 @@ void vp8_temporal_filter_prepare_c
if (frames_to_blur_backward > frames_to_blur_forward)
frames_to_blur_backward = frames_to_blur_forward;
- // When max_frames is even we have 1 more frame backward than forward
+ /* When max_frames is even we have 1 more frame backward than forward */
if (frames_to_blur_forward > (max_frames - 1) / 2)
frames_to_blur_forward = ((max_frames - 1) / 2);
@@ -501,21 +499,7 @@ void vp8_temporal_filter_prepare_c
start_frame = distance + frames_to_blur_forward;
-#ifdef DEBUGFWG
- // DEBUG FWG
- printf("max:%d FBCK:%d FFWD:%d ftb:%d ftbbck:%d ftbfwd:%d sei:%d lasei:%d start:%d"
- , max_frames
- , num_frames_backward
- , num_frames_forward
- , frames_to_blur
- , frames_to_blur_backward
- , frames_to_blur_forward
- , cpi->source_encode_index
- , cpi->last_alt_ref_sei
- , start_frame);
-#endif
-
- // Setup frame pointers, NULL indicates frame not included in filter
+ /* Setup frame pointers, NULL indicates frame not included in filter */
vpx_memset(cpi->frames, 0, max_frames*sizeof(YV12_BUFFER_CONFIG *));
for (frame = 0; frame < frames_to_blur; frame++)
{
diff --git a/vp8/encoder/x86/denoising_sse2.c b/vp8/encoder/x86/denoising_sse2.c
index 41991c253..fbce8d13c 100644
--- a/vp8/encoder/x86/denoising_sse2.c
+++ b/vp8/encoder/x86/denoising_sse2.c
@@ -55,7 +55,7 @@ int vp8_denoiser_filter_sse2(YV12_BUFFER_CONFIG *mc_running_avg,
const __m128i k_zero = _mm_set1_epi16(0);
const __m128i k_128 = _mm_set1_epi32(128);
- // Calculate absolute differences
+ /* Calculate absolute differences */
DECLARE_ALIGNED_ARRAY(16,unsigned char,abs_diff,16);
DECLARE_ALIGNED_ARRAY(16,uint32_t,filter_coefficient,16);
__m128i v_sig = _mm_loadu_si128((__m128i *)(&sig[0]));
@@ -66,14 +66,14 @@ int vp8_denoiser_filter_sse2(YV12_BUFFER_CONFIG *mc_running_avg,
__m128i v_abs_diff = _mm_adds_epu8(a_minus_b, b_minus_a);
_mm_store_si128((__m128i *)(&abs_diff[0]), v_abs_diff);
- // Use LUT to get filter coefficients (two 16b value; f and 256-f)
+ /* Use LUT to get filter coefficients (two 16b value; f and 256-f) */
for (c = 0; c < 16; ++c)
{
filter_coefficient[c] = LUT[abs_diff[c]].as_int;
}
- // Filtering...
- // load filter coefficients (two 16b value; f and 256-f)
+ /* Filtering... */
+ /* load filter coefficients (two 16b value; f and 256-f) */
filter_coefficient_00 = _mm_load_si128(
(__m128i *)(&filter_coefficient[ 0]));
filter_coefficient_04 = _mm_load_si128(
@@ -83,18 +83,18 @@ int vp8_denoiser_filter_sse2(YV12_BUFFER_CONFIG *mc_running_avg,
filter_coefficient_12 = _mm_load_si128(
(__m128i *)(&filter_coefficient[12]));
- // expand sig from 8b to 16b
+ /* expand sig from 8b to 16b */
v_sig0 = _mm_unpacklo_epi8(v_sig, k_zero);
v_sig1 = _mm_unpackhi_epi8(v_sig, k_zero);
- // expand mc_running_avg_y from 8b to 16b
+ /* expand mc_running_avg_y from 8b to 16b */
v_mc_running_avg_y0 = _mm_unpacklo_epi8(v_mc_running_avg_y, k_zero);
v_mc_running_avg_y1 = _mm_unpackhi_epi8(v_mc_running_avg_y, k_zero);
- // interleave sig and mc_running_avg_y for upcoming multiply-add
+ /* interleave sig and mc_running_avg_y for upcoming multiply-add */
state0 = _mm_unpacklo_epi16(v_mc_running_avg_y0, v_sig0);
state1 = _mm_unpackhi_epi16(v_mc_running_avg_y0, v_sig0);
state2 = _mm_unpacklo_epi16(v_mc_running_avg_y1, v_sig1);
state3 = _mm_unpackhi_epi16(v_mc_running_avg_y1, v_sig1);
- // blend values
+ /* blend values */
res0 = _mm_madd_epi16(filter_coefficient_00, state0);
res1 = _mm_madd_epi16(filter_coefficient_04, state1);
res2 = _mm_madd_epi16(filter_coefficient_08, state2);
@@ -107,15 +107,16 @@ int vp8_denoiser_filter_sse2(YV12_BUFFER_CONFIG *mc_running_avg,
res1 = _mm_srai_epi32(res1, 8);
res2 = _mm_srai_epi32(res2, 8);
res3 = _mm_srai_epi32(res3, 8);
- // combine the 32b results into a single 8b vector
+ /* combine the 32b results into a single 8b vector */
res0 = _mm_packs_epi32(res0, res1);
res2 = _mm_packs_epi32(res2, res3);
v_running_avg_y = _mm_packus_epi16(res0, res2);
- // Depending on the magnitude of the difference between the signal and
- // filtered version, either replace the signal by the filtered one or
- // update the filter state with the signal when the change in a pixel
- // isn't classified as noise.
+ /* Depending on the magnitude of the difference between the signal and
+ * filtered version, either replace the signal by the filtered one or
+ * update the filter state with the signal when the change in a pixel
+ * isn't classified as noise.
+ */
diff0 = _mm_sub_epi16(v_sig0, res0);
diff1 = _mm_sub_epi16(v_sig1, res2);
acc_diff = _mm_add_epi16(acc_diff, _mm_add_epi16(diff0, diff1));
@@ -130,14 +131,14 @@ int vp8_denoiser_filter_sse2(YV12_BUFFER_CONFIG *mc_running_avg,
_mm_storeu_si128((__m128i *)(&running_avg_y[0]), p2);
_mm_storeu_si128((__m128i *)(&filtered[0]), p2);
- // Update pointers for next iteration.
+ /* Update pointers for next iteration. */
sig += sig_stride;
filtered += 16;
mc_running_avg_y += mc_avg_y_stride;
running_avg_y += avg_y_stride;
}
{
- // Compute the sum of all pixel differences of this MB.
+ /* Compute the sum of all pixel differences of this MB. */
union sum_union s;
int sum_diff;
s.v = acc_diff;