-rw-r--r--  test/set_roi.cc | 2
-rw-r--r--  vp8/common/blockd.h | 2
-rw-r--r--  vp8/common/vp8_loopfilter.c | 2
-rw-r--r--  vp8/decoder/decodeframe.c | 6
-rw-r--r--  vp8/decoder/threading.c | 2
-rw-r--r--  vp8/encoder/bitstream.c | 2
-rw-r--r--  vp8/encoder/denoising.c | 2
-rw-r--r--  vp8/encoder/ethreading.c | 2
-rw-r--r--  vp8/encoder/firstpass.c | 24
-rw-r--r--  vp8/encoder/onyx_if.c | 22
-rw-r--r--  vp8/encoder/onyx_int.h | 2
-rw-r--r--  vp8/encoder/pickinter.c | 2
-rw-r--r--  vp8/encoder/ratectrl.c | 38
-rw-r--r--  vp8/encoder/rdopt.c | 4
-rw-r--r--  vp8/encoder/segmentation.c | 4
-rw-r--r--  vp8/encoder/segmentation.h | 4
-rw-r--r--  vp8/encoder/vp8_quantize.c | 2
-rw-r--r--  vp8/vp8_cx_iface.c | 4
-rw-r--r--  vp9/common/arm/neon/vp9_highbd_iht16x16_add_neon.c | 2
-rw-r--r--  vp9/common/vp9_blockd.h | 2
-rw-r--r--  vp9/decoder/vp9_decodemv.c | 12
-rw-r--r--  vp9/encoder/vp9_aq_complexity.c | 2
-rw-r--r--  vp9/encoder/vp9_block.h | 8
-rw-r--r--  vp9/encoder/vp9_denoiser.c | 4
-rw-r--r--  vp9/encoder/vp9_encodeframe.c | 36
-rw-r--r--  vp9/encoder/vp9_encoder.c | 53
-rw-r--r--  vp9/encoder/vp9_encoder.h | 6
-rw-r--r--  vp9/encoder/vp9_firstpass.c | 64
-rw-r--r--  vp9/encoder/vp9_mbgraph.c | 2
-rw-r--r--  vp9/encoder/vp9_mcomp.c | 6
-rw-r--r--  vp9/encoder/vp9_noise_estimate.c | 2
-rw-r--r--  vp9/encoder/vp9_pickmode.c | 22
-rw-r--r--  vp9/encoder/vp9_ratectrl.c | 8
-rw-r--r--  vp9/encoder/vp9_rdopt.c | 72
-rw-r--r--  vp9/encoder/vp9_speed_features.c | 2
-rw-r--r--  vp9/encoder/vp9_speed_features.h | 2
-rw-r--r--  vp9/encoder/vp9_svc_layercontext.c | 2
-rw-r--r--  vp9/ratectrl_rtc.h | 3
-rw-r--r--  vp9/simple_encode.cc | 9
-rw-r--r--  vp9/simple_encode.h | 2
-rw-r--r--  vp9/vp9_cx_iface.c | 6
-rw-r--r--  vpx/vp8cx.h | 4
-rw-r--r--  vpx_dsp/ppc/variance_vsx.c | 2
-rw-r--r--  vpx_dsp/variance.c | 2
44 files changed, 230 insertions(+), 231 deletions(-)
diff --git a/test/set_roi.cc b/test/set_roi.cc
index 167cf908f..693410e39 100644
--- a/test/set_roi.cc
+++ b/test/set_roi.cc
@@ -40,7 +40,7 @@ TEST(VP8RoiMapTest, ParameterCheck) {
// Initialize elements of cpi with valid defaults.
VP8_COMP cpi;
- cpi.mb.e_mbd.mb_segement_abs_delta = SEGMENT_DELTADATA;
+ cpi.mb.e_mbd.mb_segment_abs_delta = SEGMENT_DELTADATA;
cpi.cyclic_refresh_mode_enabled = 0;
cpi.mb.e_mbd.segmentation_enabled = 0;
cpi.mb.e_mbd.update_mb_segmentation_map = 0;
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index 405443449..8300aad94 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -251,7 +251,7 @@ typedef struct macroblockd {
unsigned char update_mb_segmentation_data;
/* 0 (do not update) 1 (update) the macroblock segmentation feature data. */
- unsigned char mb_segement_abs_delta;
+ unsigned char mb_segment_abs_delta;
/* Per frame flags that define which MB level features (such as quantizer or
* loop filter level) */
diff --git a/vp8/common/vp8_loopfilter.c b/vp8/common/vp8_loopfilter.c
index 9c9e5f351..4576c1853 100644
--- a/vp8/common/vp8_loopfilter.c
+++ b/vp8/common/vp8_loopfilter.c
@@ -111,7 +111,7 @@ void vp8_loop_filter_frame_init(VP8_COMMON *cm, MACROBLOCKD *mbd,
/* Note the baseline filter values for each segment */
if (mbd->segmentation_enabled) {
- if (mbd->mb_segement_abs_delta == SEGMENT_ABSDATA) {
+ if (mbd->mb_segment_abs_delta == SEGMENT_ABSDATA) {
lvl_seg = mbd->segment_feature_data[MB_LVL_ALT_LF][seg];
} else { /* Delta Value */
lvl_seg += mbd->segment_feature_data[MB_LVL_ALT_LF][seg];
diff --git a/vp8/decoder/decodeframe.c b/vp8/decoder/decodeframe.c
index 1c1566766..d014cf966 100644
--- a/vp8/decoder/decodeframe.c
+++ b/vp8/decoder/decodeframe.c
@@ -63,7 +63,7 @@ void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd) {
/* Decide whether to use the default or alternate baseline Q value. */
if (xd->segmentation_enabled) {
/* Abs Value */
- if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA) {
+ if (xd->mb_segment_abs_delta == SEGMENT_ABSDATA) {
QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id];
/* Delta Value */
@@ -829,7 +829,7 @@ static void init_frame(VP8D_COMP *pbi) {
/* reset the segment feature data to 0 with delta coding (Default state). */
memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
- xd->mb_segement_abs_delta = SEGMENT_DELTADATA;
+ xd->mb_segment_abs_delta = SEGMENT_DELTADATA;
/* reset the mode ref deltasa for loop filter */
memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
@@ -995,7 +995,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
xd->update_mb_segmentation_data = (unsigned char)vp8_read_bit(bc);
if (xd->update_mb_segmentation_data) {
- xd->mb_segement_abs_delta = (unsigned char)vp8_read_bit(bc);
+ xd->mb_segment_abs_delta = (unsigned char)vp8_read_bit(bc);
memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
diff --git a/vp8/decoder/threading.c b/vp8/decoder/threading.c
index 9ea6a4f34..6ccb080cf 100644
--- a/vp8/decoder/threading.c
+++ b/vp8/decoder/threading.c
@@ -56,7 +56,7 @@ static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd,
mbd->dst = xd->dst;
mbd->segmentation_enabled = xd->segmentation_enabled;
- mbd->mb_segement_abs_delta = xd->mb_segement_abs_delta;
+ mbd->mb_segment_abs_delta = xd->mb_segment_abs_delta;
memcpy(mbd->segment_feature_data, xd->segment_feature_data,
sizeof(xd->segment_feature_data));
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 190b013af..03691fc9d 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -1080,7 +1080,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
if (xd->update_mb_segmentation_data) {
signed char Data;
- vp8_write_bit(bc, xd->mb_segement_abs_delta);
+ vp8_write_bit(bc, xd->mb_segment_abs_delta);
/* For each segmentation feature (Quant and loop filter level) */
for (i = 0; i < MB_LVL_MAX; ++i) {
diff --git a/vp8/encoder/denoising.c b/vp8/encoder/denoising.c
index e54d1e9f4..a666bca4d 100644
--- a/vp8/encoder/denoising.c
+++ b/vp8/encoder/denoising.c
@@ -135,7 +135,7 @@ int vp8_denoiser_filter_c(unsigned char *mc_running_avg_y, int mc_avg_y_stride,
// When adopting aggressive denoiser, the adj_val for each pixel
// could be at most 8 (this is current max adjustment of the map).
// In SSE code, we calculate the sum of adj_val for
- // the columns, so the sum could be upto 128(16 rows). However,
+ // the columns, so the sum could be up to 128(16 rows). However,
// the range of the value is -128 ~ 127 in SSE code, that's why
// we do this change in C code.
// We don't do this for UV denoiser, since there are only 8 rows,
diff --git a/vp8/encoder/ethreading.c b/vp8/encoder/ethreading.c
index 2583cb0ac..b7f1932c5 100644
--- a/vp8/encoder/ethreading.c
+++ b/vp8/encoder/ethreading.c
@@ -402,7 +402,7 @@ static void setup_mbby_copy(MACROBLOCK *mbdst, MACROBLOCK *mbsrc) {
zd->subpixel_predict8x8 = xd->subpixel_predict8x8;
zd->subpixel_predict16x16 = xd->subpixel_predict16x16;
zd->segmentation_enabled = xd->segmentation_enabled;
- zd->mb_segement_abs_delta = xd->mb_segement_abs_delta;
+ zd->mb_segment_abs_delta = xd->mb_segment_abs_delta;
memcpy(zd->segment_feature_data, xd->segment_feature_data,
sizeof(xd->segment_feature_data));
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 4149fb4bf..ff088aa96 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -412,7 +412,7 @@ static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
int_mv ref_mv_full;
int tmp_err;
- int step_param = 3; /* Dont search over full range for first pass */
+ int step_param = 3; /* Don't search over full range for first pass */
int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
int n;
vp8_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
@@ -1537,7 +1537,7 @@ static int calc_arf_boost(VP8_COMP *cpi, int offset, int f_frames, int b_frames,
/* Calculate the baseline boost number for this frame */
r = calc_frame_boost(cpi, &this_frame, this_frame_mv_in_out);
- /* We want to discount the the flash frame itself and the recovery
+ /* We want to discount the flash frame itself and the recovery
* frame that follows as both will have poor scores.
*/
flash_detected =
@@ -1581,7 +1581,7 @@ static int calc_arf_boost(VP8_COMP *cpi, int offset, int f_frames, int b_frames,
/* Calculate the baseline boost number for this frame */
r = calc_frame_boost(cpi, &this_frame, this_frame_mv_in_out);
- /* We want to discount the the flash frame itself and the recovery
+ /* We want to discount the flash frame itself and the recovery
* frame that follows as both will have poor scores.
*/
flash_detected =
@@ -1717,9 +1717,9 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
/* Break at cpi->max_gf_interval unless almost totally static */
(i >= cpi->max_gf_interval && (decay_accumulator < 0.995)) ||
(
- /* Dont break out with a very short interval */
+ /* Don't break out with a very short interval */
(i > MIN_GF_INTERVAL) &&
- /* Dont break out very close to a key frame */
+ /* Don't break out very close to a key frame */
((cpi->twopass.frames_to_key - i) >= MIN_GF_INTERVAL) &&
((boost_score > 20.0) || (next_frame.pcnt_inter < 0.75)) &&
(!flash_detected) &&
@@ -1765,7 +1765,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
if (boost_score > max_boost) boost_score = max_boost;
}
- /* Dont allow conventional gf too near the next kf */
+ /* Don't allow conventional gf too near the next kf */
if ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL) {
while (i < cpi->twopass.frames_to_key) {
i++;
@@ -1786,9 +1786,9 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
alt_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, &b_boost);
#endif
- /* Should we use the alternate refernce frame */
+ /* Should we use the alternate reference frame */
if (allow_alt_ref && (i >= MIN_GF_INTERVAL) &&
- /* dont use ARF very near next kf */
+ /* don't use ARF very near next kf */
(i <= (cpi->twopass.frames_to_key - MIN_GF_INTERVAL)) &&
#if NEW_BOOST
((next_frame.pcnt_inter > 0.75) || (next_frame.pcnt_second_ref > 0.5)) &&
@@ -2082,7 +2082,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
}
}
- /* Dont allow a negative value for gf_bits */
+ /* Don't allow a negative value for gf_bits */
if (gf_bits < 0) gf_bits = 0;
/* Add in minimum for a frame */
@@ -2123,7 +2123,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
/* This condition could fail if there are two kfs very close together
- * despite (MIN_GF_INTERVAL) and would cause a devide by 0 in the
+ * despite (MIN_GF_INTERVAL) and would cause a divide by 0 in the
* calculation of cpi->twopass.alt_extra_bits.
*/
if (cpi->baseline_gf_interval >= 3) {
@@ -2393,7 +2393,7 @@ void vp8_second_pass(VP8_COMP *cpi) {
}
/* The last few frames of a clip almost always have to few or too many
- * bits and for the sake of over exact rate control we dont want to make
+ * bits and for the sake of over exact rate control we don't want to make
* radical adjustments to the allowed quantizer range just to use up a
* few surplus bits or get beneath the target rate.
*/
@@ -3011,7 +3011,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
bits_per_frame =
(double)(cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key);
- /* Dont turn to resampling in easy sections just because they
+ /* Don't turn to resampling in easy sections just because they
* have been assigned a small number of bits
*/
if (bits_per_frame < av_bits_per_frame) {
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 894132941..c65afc643 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -488,7 +488,7 @@ static void set_segmentation_map(VP8_COMP *cpi,
*/
static void set_segment_data(VP8_COMP *cpi, signed char *feature_data,
unsigned char abs_delta) {
- cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
+ cpi->mb.e_mbd.mb_segment_abs_delta = abs_delta;
memcpy(cpi->segment_feature_data, feature_data,
sizeof(cpi->segment_feature_data));
}
@@ -2251,7 +2251,7 @@ void vp8_remove_compressor(VP8_COMP **comp) {
#if 0
{
printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
- printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
+ printf("\n_frames receive_data encod_mb_row compress_frame Total\n");
printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
}
#endif
@@ -2751,7 +2751,7 @@ static int decide_key_frame(VP8_COMP *cpi) {
}
/* in addition if the following are true and this is not a golden frame
* then code a key frame Note that on golden frames there often seems
- * to be a pop in intra useage anyway hence this restriction is
+ * to be a pop in intra usage anyway hence this restriction is
* designed to prevent spurious key frames. The Intra pop needs to be
* investigated.
*/
@@ -3637,7 +3637,7 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
Q = cpi->avg_frame_qindex;
}
- /* For constrained quality dont allow Q less than the cq level */
+ /* For constrained quality don't allow Q less than the cq level */
if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
(Q < cpi->cq_target_quality)) {
Q = cpi->cq_target_quality;
@@ -3664,7 +3664,7 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
} else {
cpi->active_best_quality = inter_minq[Q];
- /* For the constant/constrained quality mode we dont want
+ /* For the constant/constrained quality mode we don't want
* q to fall below the cq level.
*/
if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
@@ -3685,7 +3685,7 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
* higher quality on the frames to prevent bits just going to waste.
*/
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
- /* Note that the use of >= here elliminates the risk of a devide
+ /* Note that the use of >= here elliminates the risk of a divide
* by 0 error in the else if clause
*/
if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) {
@@ -4322,12 +4322,12 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
vp8_cal_dissimilarity(cpi);
#endif
- /* Update the GF useage maps.
+ /* Update the GF usage maps.
* This is done after completing the compression of a frame when all
* modes etc. are finalized but before loop filter
*/
if (cpi->oxcf.number_of_layers == 1) {
- vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
+ vp8_update_gf_usage_maps(cpi, cm, &cpi->mb);
}
if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1;
@@ -4484,7 +4484,7 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
* size within range) then use the last frame value - 1. The -1
* is designed to stop Q and hence the data rate, from
* progressively falling away during difficult sections, but at
- * the same time reduce the number of itterations around the
+ * the same time reduce the number of iterations around the
* recode loop.
*/
if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1;
@@ -4731,7 +4731,7 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
cpi->mb.e_mbd.update_mb_segmentation_data = 0;
cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
- /* Dont increment frame counters if this was an altref buffer update
+ /* Don't increment frame counters if this was an altref buffer update
* not a real frame
*/
if (cm->show_frame) {
@@ -5109,7 +5109,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
if (cm->refresh_last_frame) memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
- /* if its a dropped frame honor the requests on subsequent frames */
+ /* if it's a dropped frame honor the requests on subsequent frames */
if (*size > 0) {
cpi->droppable = !frame_is_reference(cpi);
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index bde5c2f69..4304f054c 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -360,7 +360,7 @@ typedef struct VP8_COMP {
/* GF interval chosen when we coded the last GF */
int current_gf_interval;
- /* Total bits overspent becasue of GF boost (cumulative) */
+ /* Total bits overspent because of GF boost (cumulative) */
int gf_overspend_bits;
/* Used in the few frames following a GF to recover the extra bits
diff --git a/vp8/encoder/pickinter.c b/vp8/encoder/pickinter.c
index 04f68c324..1af8a2f9b 100644
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -1103,7 +1103,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
#if CONFIG_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity) {
/* Store for later use by denoiser. */
- // Dont' denoise with GOLDEN OR ALTREF is they are old reference
+ // Don't denoise with GOLDEN OR ALTREF is they are old reference
// frames (greater than MAX_GF_ARF_DENOISE_RANGE frames in past).
int skip_old_reference = ((this_ref_frame != LAST_FRAME) &&
(cpi->common.current_video_frame -
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index 9cd3963e2..49ab4aa23 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -388,7 +388,7 @@ static void calc_gf_params(VP8_COMP *cpi) {
(cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
int Boost = 0;
- int gf_frame_useage = 0; /* Golden frame useage since last GF */
+ int gf_frame_usage = 0; /* Golden frame usage since last GF */
int tot_mbs = cpi->recent_ref_frame_usage[INTRA_FRAME] +
cpi->recent_ref_frame_usage[LAST_FRAME] +
cpi->recent_ref_frame_usage[GOLDEN_FRAME] +
@@ -398,12 +398,12 @@ static void calc_gf_params(VP8_COMP *cpi) {
(cpi->common.mb_rows * cpi->common.mb_cols);
if (tot_mbs) {
- gf_frame_useage = (cpi->recent_ref_frame_usage[GOLDEN_FRAME] +
- cpi->recent_ref_frame_usage[ALTREF_FRAME]) *
- 100 / tot_mbs;
+ gf_frame_usage = (cpi->recent_ref_frame_usage[GOLDEN_FRAME] +
+ cpi->recent_ref_frame_usage[ALTREF_FRAME]) *
+ 100 / tot_mbs;
}
- if (pct_gf_active > gf_frame_useage) gf_frame_useage = pct_gf_active;
+ if (pct_gf_active > gf_frame_usage) gf_frame_usage = pct_gf_active;
/* Not two pass */
if (cpi->pass != 2) {
@@ -467,7 +467,7 @@ static void calc_gf_params(VP8_COMP *cpi) {
/* Adjust boost based upon ambient Q */
Boost = GFQ_ADJUSTMENT;
- /* Adjust based upon most recently measure intra useage */
+ /* Adjust based upon most recently measure intra usage */
Boost = Boost *
gf_intra_usage_adjustment[(cpi->this_frame_percent_intra < 15)
? cpi->this_frame_percent_intra
@@ -475,7 +475,7 @@ static void calc_gf_params(VP8_COMP *cpi) {
100;
/* Adjust gf boost based upon GF usage since last GF */
- Boost = Boost * gf_adjust_table[gf_frame_useage] / 100;
+ Boost = Boost * gf_adjust_table[gf_frame_usage] / 100;
#endif
}
@@ -516,8 +516,8 @@ static void calc_gf_params(VP8_COMP *cpi) {
if (cpi->last_boost >= 1500) cpi->frames_till_gf_update_due++;
- if (gf_interval_table[gf_frame_useage] > cpi->frames_till_gf_update_due) {
- cpi->frames_till_gf_update_due = gf_interval_table[gf_frame_useage];
+ if (gf_interval_table[gf_frame_usage] > cpi->frames_till_gf_update_due) {
+ cpi->frames_till_gf_update_due = gf_interval_table[gf_frame_usage];
}
if (cpi->frames_till_gf_update_due > cpi->max_gf_interval) {
@@ -895,7 +895,7 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
int Q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME]
: cpi->oxcf.fixed_q;
- int gf_frame_useage = 0; /* Golden frame useage since last GF */
+ int gf_frame_usage = 0; /* Golden frame usage since last GF */
int tot_mbs = cpi->recent_ref_frame_usage[INTRA_FRAME] +
cpi->recent_ref_frame_usage[LAST_FRAME] +
cpi->recent_ref_frame_usage[GOLDEN_FRAME] +
@@ -905,20 +905,20 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
(cpi->common.mb_rows * cpi->common.mb_cols);
if (tot_mbs) {
- gf_frame_useage = (cpi->recent_ref_frame_usage[GOLDEN_FRAME] +
- cpi->recent_ref_frame_usage[ALTREF_FRAME]) *
- 100 / tot_mbs;
+ gf_frame_usage = (cpi->recent_ref_frame_usage[GOLDEN_FRAME] +
+ cpi->recent_ref_frame_usage[ALTREF_FRAME]) *
+ 100 / tot_mbs;
}
- if (pct_gf_active > gf_frame_useage) gf_frame_useage = pct_gf_active;
+ if (pct_gf_active > gf_frame_usage) gf_frame_usage = pct_gf_active;
/* Is a fixed manual GF frequency being used */
if (cpi->auto_gold) {
- /* For one pass throw a GF if recent frame intra useage is
- * low or the GF useage is high
+ /* For one pass throw a GF if recent frame intra usage is
+ * low or the GF usage is high
*/
if ((cpi->pass == 0) &&
- (cpi->this_frame_percent_intra < 15 || gf_frame_useage >= 5)) {
+ (cpi->this_frame_percent_intra < 15 || gf_frame_usage >= 5)) {
cpi->common.refresh_golden_frame = 1;
/* Two pass GF descision */
@@ -933,10 +933,10 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
if (0) {
FILE *f;
- f = fopen("gf_useaget.stt", "a");
+ f = fopen("gf_usaget.stt", "a");
fprintf(f, " %8ld %10ld %10ld %10ld %10ld\n",
cpi->common.current_video_frame, cpi->gfu_boost,
- GFQ_ADJUSTMENT, cpi->gfu_boost, gf_frame_useage);
+ GFQ_ADJUSTMENT, cpi->gfu_boost, gf_frame_usage);
fclose(f);
}
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index bbddacf8f..5d539ef30 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -1021,7 +1021,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, BEST_SEG_INFO *bsi,
BLOCK *c;
BLOCKD *e;
- /* Is the best so far sufficiently good that we cant justify
+ /* Is the best so far sufficiently good that we can't justify
* doing a new motion search.
*/
if (best_label_rd < label_mv_thresh) break;
@@ -1979,7 +1979,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
rd.distortion2 += distortion;
/* If even the 'Y' rd value of split is higher than best so far
- * then dont bother looking at UV
+ * then don't bother looking at UV
*/
if (tmp_rd < best_mode.yrd) {
/* Now work out UV cost and add it in */
diff --git a/vp8/encoder/segmentation.c b/vp8/encoder/segmentation.c
index dcb68119e..212725811 100644
--- a/vp8/encoder/segmentation.c
+++ b/vp8/encoder/segmentation.c
@@ -11,7 +11,7 @@
#include "segmentation.h"
#include "vpx_mem/vpx_mem.h"
-void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x) {
+void vp8_update_gf_usage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x) {
int mb_row, mb_col;
MODE_INFO *this_mb_mode_info = cm->mi;
@@ -19,7 +19,7 @@ void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x) {
x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
if ((cm->frame_type == KEY_FRAME) || (cm->refresh_golden_frame)) {
- /* Reset Gf useage monitors */
+ /* Reset Gf usage monitors */
memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
} else {
diff --git a/vp8/encoder/segmentation.h b/vp8/encoder/segmentation.h
index 4ddbdbbd2..0fecfc221 100644
--- a/vp8/encoder/segmentation.h
+++ b/vp8/encoder/segmentation.h
@@ -19,8 +19,8 @@
extern "C" {
#endif
-extern void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm,
- MACROBLOCK *x);
+extern void vp8_update_gf_usage_maps(VP8_COMP *cpi, VP8_COMMON *cm,
+ MACROBLOCK *x);
#ifdef __cplusplus
} // extern "C"
diff --git a/vp8/encoder/vp8_quantize.c b/vp8/encoder/vp8_quantize.c
index 5b8955510..8e5e31824 100644
--- a/vp8/encoder/vp8_quantize.c
+++ b/vp8/encoder/vp8_quantize.c
@@ -294,7 +294,7 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip) {
/* Select the baseline MB Q index. */
if (xd->segmentation_enabled) {
/* Abs Value */
- if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA) {
+ if (xd->mb_segment_abs_delta == SEGMENT_ABSDATA) {
QIndex = xd->segment_feature_data[MB_LVL_ALT_Q]
[xd->mode_info_context->mbmi.segment_id];
/* Delta Value */
diff --git a/vp8/vp8_cx_iface.c b/vp8/vp8_cx_iface.c
index 0821eef02..8950de0d8 100644
--- a/vp8/vp8_cx_iface.c
+++ b/vp8/vp8_cx_iface.c
@@ -1292,8 +1292,8 @@ static vpx_codec_enc_cfg_map_t vp8e_usage_cfg_map[] = {
0, /* rc_resize_allowed */
1, /* rc_scaled_width */
1, /* rc_scaled_height */
- 60, /* rc_resize_down_thresold */
- 30, /* rc_resize_up_thresold */
+ 60, /* rc_resize_down_thresh */
+ 30, /* rc_resize_up_thresh */
VPX_VBR, /* rc_end_usage */
{ NULL, 0 }, /* rc_twopass_stats_in */
diff --git a/vp9/common/arm/neon/vp9_highbd_iht16x16_add_neon.c b/vp9/common/arm/neon/vp9_highbd_iht16x16_add_neon.c
index aeb7e49c1..b43d7fa4f 100644
--- a/vp9/common/arm/neon/vp9_highbd_iht16x16_add_neon.c
+++ b/vp9/common/arm/neon/vp9_highbd_iht16x16_add_neon.c
@@ -18,7 +18,7 @@
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/inv_txfm.h"
-// Use macros to make sure argument lane is passed in as an constant integer.
+// Use macros to make sure argument lane is passed in as a constant integer.
#define vmull_lane_s32_dual(in, c, lane, out) \
do { \
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index d7de46cf4..aa13d8a0d 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -54,7 +54,7 @@ typedef struct {
// decoder implementation modules critically rely on the defined entry values
// specified herein. They should be refactored concurrently.
-#define NONE (-1)
+#define NO_REF_FRAME (-1)
#define INTRA_FRAME 0
#define LAST_FRAME 1
#define GOLDEN_FRAME 2
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index db3e74663..0989cde58 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -204,7 +204,7 @@ static void read_intra_frame_mode_info(VP9_COMMON *const cm,
mi->skip = read_skip(cm, xd, mi->segment_id, r);
mi->tx_size = read_tx_size(cm, xd, 1, r);
mi->ref_frame[0] = INTRA_FRAME;
- mi->ref_frame[1] = NONE;
+ mi->ref_frame[1] = NO_REF_FRAME;
switch (bsize) {
case BLOCK_4X4:
@@ -299,7 +299,7 @@ static REFERENCE_MODE read_block_reference_mode(VP9_COMMON *cm,
}
}
-// Read the referncence frame
+// Read the reference frame
static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
vpx_reader *r, int segment_id,
MV_REFERENCE_FRAME ref_frame[2]) {
@@ -309,7 +309,7 @@ static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
ref_frame[0] = (MV_REFERENCE_FRAME)get_segdata(&cm->seg, segment_id,
SEG_LVL_REF_FRAME);
- ref_frame[1] = NONE;
+ ref_frame[1] = NO_REF_FRAME;
} else {
const REFERENCE_MODE mode = read_block_reference_mode(cm, xd, r);
// FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding
@@ -333,7 +333,7 @@ static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
ref_frame[0] = LAST_FRAME;
}
- ref_frame[1] = NONE;
+ ref_frame[1] = NO_REF_FRAME;
} else {
assert(0 && "Invalid prediction mode.");
}
@@ -383,7 +383,7 @@ static void read_intra_block_mode_info(VP9_COMMON *const cm,
mi->interp_filter = SWITCHABLE_FILTERS;
mi->ref_frame[0] = INTRA_FRAME;
- mi->ref_frame[1] = NONE;
+ mi->ref_frame[1] = NO_REF_FRAME;
}
static INLINE int is_mv_valid(const MV *mv) {
@@ -708,7 +708,7 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi,
mi->mode = ZEROMV;
if (bsize < BLOCK_8X8) {
vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
- "Invalid usage of segement feature on small blocks");
+ "Invalid usage of segment feature on small blocks");
return;
}
} else {
diff --git a/vp9/encoder/vp9_aq_complexity.c b/vp9/encoder/vp9_aq_complexity.c
index bd3812036..ef3423f8e 100644
--- a/vp9/encoder/vp9_aq_complexity.c
+++ b/vp9/encoder/vp9_aq_complexity.c
@@ -87,7 +87,7 @@ void vp9_setup_in_frame_q_adj(VP9_COMP *cpi) {
&cpi->rc, cm->frame_type, cm->base_qindex,
aq_c_q_adj_factor[aq_strength][segment], cm->bit_depth);
- // For AQ complexity mode, we dont allow Q0 in a segment if the base
+ // For AQ complexity mode, we don't allow Q0 in a segment if the base
// Q is not 0. Q0 (lossless) implies 4x4 only and in AQ mode 2 a segment
// Q delta is sometimes applied without going back around the rd loop.
// This could lead to an illegal combination of partition size and q.
diff --git a/vp9/encoder/vp9_block.h b/vp9/encoder/vp9_block.h
index 4d336f2a4..7fa00cd19 100644
--- a/vp9/encoder/vp9_block.h
+++ b/vp9/encoder/vp9_block.h
@@ -34,7 +34,7 @@ struct macroblock_plane {
uint16_t *eobs;
struct buf_2d src;
- // Quantizer setings
+ // Quantizer settings
int16_t *round_fp;
int16_t *quant_fp;
int16_t *quant;
@@ -85,10 +85,10 @@ struct macroblock {
// The equivalent error at the current rdmult of one whole bit (not one
// bitcost unit).
int errorperbit;
- // The equivalend SAD error of one (whole) bit at the current quantizer
+ // The equivalent SAD error of one (whole) bit at the current quantizer
// for large blocks.
int sadperbit16;
- // The equivalend SAD error of one (whole) bit at the current quantizer
+ // The equivalent SAD error of one (whole) bit at the current quantizer
// for sub-8x8 blocks.
int sadperbit4;
int rddiv;
@@ -128,7 +128,7 @@ struct macroblock {
// from extending outside the UMV borders
MvLimits mv_limits;
- // Notes transform blocks where no coefficents are coded.
+ // Notes transform blocks where no coefficients are coded.
// Set during mode selection. Read during block encoding.
uint8_t zcoeff_blk[TX_SIZES][256];
diff --git a/vp9/encoder/vp9_denoiser.c b/vp9/encoder/vp9_denoiser.c
index baea8ebb3..e5dffa90a 100644
--- a/vp9/encoder/vp9_denoiser.c
+++ b/vp9/encoder/vp9_denoiser.c
@@ -319,7 +319,7 @@ static VP9_DENOISER_DECISION perform_motion_compensation(
filter_mbd->plane[2].dst.stride =
denoiser->mc_running_avg_y[denoise_layer_idx].uv_stride;
- set_ref_ptrs(cm, filter_mbd, saved_frame, NONE);
+ set_ref_ptrs(cm, filter_mbd, saved_frame, NO_REF_FRAME);
vp9_build_inter_predictors_sby(filter_mbd, mi_row, mi_col, bs);
// Restore everything to its original state
@@ -387,7 +387,7 @@ void vp9_denoiser_denoise(VP9_COMP *cpi, MACROBLOCK *mb, int mi_row, int mi_col,
consec_zeromv = VPXMIN(cpi->consec_zero_mv[bl_index], consec_zeromv);
// No need to keep checking 8x8 blocks if any of the sub-blocks
// has small consec_zeromv (since threshold for no_skin based on
- // zero/small motion in skin detection is high, i.e, > 4).
+ // zero/small motion in skin detection is high, i.e., > 4).
if (consec_zeromv < 4) {
i = ymis;
break;
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index a979ae1c9..7280e0da8 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -1437,7 +1437,7 @@ static int choose_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
&cm->frame_refs[LAST_FRAME - 1].sf);
mi->ref_frame[0] = LAST_FRAME;
}
- mi->ref_frame[1] = NONE;
+ mi->ref_frame[1] = NO_REF_FRAME;
mi->sb_type = BLOCK_64X64;
mi->mv[0].as_int = 0;
mi->interp_filter = BILINEAR;
@@ -1706,7 +1706,7 @@ static int choose_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
const int y16_idx = ((j >> 1) << 1);
// For inter frames: if variance4x4downsample[] == 1 for this 16x16
// block, then the variance is based on 4x4 down-sampling, so use vt2
- // in set_vt_partioning(), otherwise use vt.
+ // in set_vt_partitioning(), otherwise use vt.
v16x16 *vtemp = (!is_key_frame && variance4x4downsample[i2 + j] == 1)
? &vt2[i2 + j]
: &vt.split[i].split[j];
@@ -1924,7 +1924,7 @@ static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode,
mi->skip = 1;
mi->uv_mode = DC_PRED;
mi->ref_frame[0] = LAST_FRAME;
- mi->ref_frame[1] = NONE;
+ mi->ref_frame[1] = NO_REF_FRAME;
mi->mv[0].as_int = 0;
mi->interp_filter = filter_ref;
@@ -3449,7 +3449,7 @@ static void simple_motion_search(const VP9_COMP *const cpi, MACROBLOCK *const x,
vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
&cm->frame_refs[ref - 1].sf);
mi->ref_frame[0] = ref;
- mi->ref_frame[1] = NONE;
+ mi->ref_frame[1] = NO_REF_FRAME;
mi->sb_type = bsize;
vp9_set_mv_search_range(&x->mv_limits, &ref_mv);
vp9_full_pixel_search(cpi, x, bsize, &ref_mv_full, step_param, search_method,
@@ -3470,11 +3470,11 @@ static void simple_motion_search(const VP9_COMP *const cpi, MACROBLOCK *const x,
// Features used: QP; spatial block size contexts; variance of prediction
// residue after simple_motion_search.
#define FEATURES 12
-static void ml_predict_var_rd_paritioning(const VP9_COMP *const cpi,
- MACROBLOCK *const x,
- PC_TREE *const pc_tree,
- BLOCK_SIZE bsize, int mi_row,
- int mi_col, int *none, int *split) {
+static void ml_predict_var_rd_partitioning(const VP9_COMP *const cpi,
+ MACROBLOCK *const x,
+ PC_TREE *const pc_tree,
+ BLOCK_SIZE bsize, int mi_row,
+ int mi_col, int *none, int *split) {
const VP9_COMMON *const cm = &cpi->common;
const NN_CONFIG *nn_config = NULL;
const MACROBLOCKD *const xd = &x->e_mbd;
@@ -3789,7 +3789,7 @@ static void assign_motion_vector_info(const int block_width_4x4,
const int col_4x4 = col_start_4x4 + j;
const int unit_index = row_4x4 * num_unit_cols + col_4x4;
if (row_4x4 >= num_unit_rows || col_4x4 >= num_unit_cols) continue;
- if (source_ref_frame[1] == NONE) {
+ if (source_ref_frame[1] == NO_REF_FRAME) {
assert(source_mv[1]->row == 0 && source_mv[1]->col == 0);
}
motion_vector_info[unit_index].ref_frame[0] = source_ref_frame[0];
@@ -4092,8 +4092,8 @@ static int rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
mi_row + num_8x8_blocks_high_lookup[bsize] <= cm->mi_rows &&
mi_col + num_8x8_blocks_wide_lookup[bsize] <= cm->mi_cols;
if (do_rd_ml_partition_var_pruning) {
- ml_predict_var_rd_paritioning(cpi, x, pc_tree, bsize, mi_row, mi_col,
- &partition_none_allowed, &do_split);
+ ml_predict_var_rd_partitioning(cpi, x, pc_tree, bsize, mi_row, mi_col,
+ &partition_none_allowed, &do_split);
} else {
vp9_zero(pc_tree->mv);
}
@@ -4820,9 +4820,9 @@ static void pred_pixel_ready_reset(PC_TREE *pc_tree, BLOCK_SIZE bsize) {
#define FEATURES 6
#define LABELS 2
-static int ml_predict_var_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize, int mi_row,
- int mi_col) {
+static int ml_predict_var_partitioning(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE bsize, int mi_row,
+ int mi_col) {
VP9_COMMON *const cm = &cpi->common;
const NN_CONFIG *nn_config = NULL;
@@ -4954,7 +4954,7 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
if (partition_none_allowed || do_split) do_rect = 0;
if (partition_none_allowed && do_split) {
const int ml_predicted_partition =
- ml_predict_var_paritioning(cpi, x, bsize, mi_row, mi_col);
+ ml_predict_var_partitioning(cpi, x, bsize, mi_row, mi_col);
if (ml_predicted_partition == PARTITION_NONE) do_split = 0;
if (ml_predicted_partition == PARTITION_SPLIT) partition_none_allowed = 0;
}
@@ -5443,7 +5443,7 @@ static void get_estimated_pred(VP9_COMP *cpi, const TileInfo *const tile,
&cm->frame_refs[LAST_FRAME - 1].sf);
mi->ref_frame[0] = LAST_FRAME;
}
- mi->ref_frame[1] = NONE;
+ mi->ref_frame[1] = NO_REF_FRAME;
mi->sb_type = BLOCK_64X64;
mi->mv[0].as_int = 0;
mi->interp_filter = BILINEAR;
@@ -5633,7 +5633,7 @@ static void encode_nonrd_sb_row(VP9_COMP *cpi, ThreadData *td,
if ((cpi->oxcf.rc_mode == VPX_VBR && cpi->rc.high_source_sad &&
cpi->oxcf.speed < 6 && !frame_is_intra_only(cm) &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) {
- // Use lower max_partition_size for low resoultions.
+ // Use lower max_partition_size for low resolutions.
if (cm->width <= 352 && cm->height <= 288)
x->max_partition_size = BLOCK_32X32;
else
diff --git a/vp9/encoder/vp9_encoder.c b/vp9/encoder/vp9_encoder.c
index 9d5c0030a..4d7f475a0 100644
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -139,7 +139,7 @@ static int compute_context_model_thresh(const VP9_COMP *const cpi) {
// frame context probability model is less than a certain threshold.
// The first component is the most critical part to guarantee adaptivity.
// Other parameters are estimated based on normal setting of hd resolution
- // parameters. e.g frame_size = 1920x1080, bitrate = 8000, qindex_factor < 50
+ // parameters. e.g. frame_size = 1920x1080, bitrate = 8000, qindex_factor < 50
const int thresh =
((FRAME_SIZE_FACTOR * frame_size - FRAME_RATE_FACTOR * bitrate) *
qindex_factor) >>
@@ -2836,7 +2836,7 @@ void vp9_remove_compressor(VP9_COMP *cpi) {
#if 0
{
printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
- printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
+ printf("\n_frames receive_data encod_mb_row compress_frame Total\n");
printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame,
cpi->time_receive_data / 1000, cpi->time_encode_sb_row / 1000,
cpi->time_compress_data / 1000,
@@ -2949,7 +2949,7 @@ void vp9_update_reference(VP9_COMP *cpi, int ref_frame_flags) {
static YV12_BUFFER_CONFIG *get_vp9_ref_frame_buffer(
VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag) {
- MV_REFERENCE_FRAME ref_frame = NONE;
+ MV_REFERENCE_FRAME ref_frame = NO_REF_FRAME;
if (ref_frame_flag == VP9_LAST_FLAG)
ref_frame = LAST_FRAME;
else if (ref_frame_flag == VP9_GOLD_FLAG)
@@ -2957,7 +2957,8 @@ static YV12_BUFFER_CONFIG *get_vp9_ref_frame_buffer(
else if (ref_frame_flag == VP9_ALT_FLAG)
ref_frame = ALTREF_FRAME;
- return ref_frame == NONE ? NULL : get_ref_frame_buffer(cpi, ref_frame);
+ return ref_frame == NO_REF_FRAME ? NULL
+ : get_ref_frame_buffer(cpi, ref_frame);
}
int vp9_copy_reference_enc(VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag,
@@ -5020,8 +5021,8 @@ static int setup_interp_filter_search_mask(VP9_COMP *cpi) {
#ifdef ENABLE_KF_DENOISE
// Baseline kernel weights for denoise
-static uint8_t dn_kernal_3[9] = { 1, 2, 1, 2, 4, 2, 1, 2, 1 };
-static uint8_t dn_kernal_5[25] = { 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 4,
+static uint8_t dn_kernel_3[9] = { 1, 2, 1, 2, 4, 2, 1, 2, 1 };
+static uint8_t dn_kernel_5[25] = { 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 4,
2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1 };
static INLINE void add_denoise_point(int centre_val, int data_val, int thresh,
@@ -5038,17 +5039,17 @@ static void spatial_denoise_point(uint8_t *src_ptr, const int stride,
int sum_weight = 0;
int sum_val = 0;
int thresh = strength;
- int kernal_size = 5;
+ int kernel_size = 5;
int half_k_size = 2;
int i, j;
int max_diff = 0;
uint8_t *tmp_ptr;
- uint8_t *kernal_ptr;
+ uint8_t *kernel_ptr;
// Find the maximum deviation from the source point in the locale.
tmp_ptr = src_ptr - (stride * (half_k_size + 1)) - (half_k_size + 1);
- for (i = 0; i < kernal_size + 2; ++i) {
- for (j = 0; j < kernal_size + 2; ++j) {
+ for (i = 0; i < kernel_size + 2; ++i) {
+ for (j = 0; j < kernel_size + 2; ++j) {
max_diff = VPXMAX(max_diff, abs((int)*src_ptr - (int)tmp_ptr[j]));
}
tmp_ptr += stride;
@@ -5056,19 +5057,19 @@ static void spatial_denoise_point(uint8_t *src_ptr, const int stride,
// Select the kernel size.
if (max_diff > (strength + (strength >> 1))) {
- kernal_size = 3;
+ kernel_size = 3;
half_k_size = 1;
thresh = thresh >> 1;
}
- kernal_ptr = (kernal_size == 3) ? dn_kernal_3 : dn_kernal_5;
+ kernel_ptr = (kernel_size == 3) ? dn_kernel_3 : dn_kernel_5;
// Apply the kernel
tmp_ptr = src_ptr - (stride * half_k_size) - half_k_size;
- for (i = 0; i < kernal_size; ++i) {
- for (j = 0; j < kernal_size; ++j) {
- add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh, *kernal_ptr,
+ for (i = 0; i < kernel_size; ++i) {
+ for (j = 0; j < kernel_size; ++j) {
+ add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh, *kernel_ptr,
&sum_val, &sum_weight);
- ++kernal_ptr;
+ ++kernel_ptr;
}
tmp_ptr += stride;
}
@@ -5083,17 +5084,17 @@ static void highbd_spatial_denoise_point(uint16_t *src_ptr, const int stride,
int sum_weight = 0;
int sum_val = 0;
int thresh = strength;
- int kernal_size = 5;
+ int kernel_size = 5;
int half_k_size = 2;
int i, j;
int max_diff = 0;
uint16_t *tmp_ptr;
- uint8_t *kernal_ptr;
+ uint8_t *kernel_ptr;
// Find the maximum deviation from the source point in the locale.
tmp_ptr = src_ptr - (stride * (half_k_size + 1)) - (half_k_size + 1);
- for (i = 0; i < kernal_size + 2; ++i) {
- for (j = 0; j < kernal_size + 2; ++j) {
+ for (i = 0; i < kernel_size + 2; ++i) {
+ for (j = 0; j < kernel_size + 2; ++j) {
max_diff = VPXMAX(max_diff, abs((int)src_ptr - (int)tmp_ptr[j]));
}
tmp_ptr += stride;
@@ -5101,19 +5102,19 @@ static void highbd_spatial_denoise_point(uint16_t *src_ptr, const int stride,
// Select the kernel size.
if (max_diff > (strength + (strength >> 1))) {
- kernal_size = 3;
+ kernel_size = 3;
half_k_size = 1;
thresh = thresh >> 1;
}
- kernal_ptr = (kernal_size == 3) ? dn_kernal_3 : dn_kernal_5;
+ kernel_ptr = (kernel_size == 3) ? dn_kernel_3 : dn_kernel_5;
// Apply the kernel
tmp_ptr = src_ptr - (stride * half_k_size) - half_k_size;
- for (i = 0; i < kernal_size; ++i) {
- for (j = 0; j < kernal_size; ++j) {
- add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh, *kernal_ptr,
+ for (i = 0; i < kernel_size; ++i) {
+ for (j = 0; j < kernel_size; ++j) {
+ add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh, *kernel_ptr,
&sum_val, &sum_weight);
- ++kernal_ptr;
+ ++kernel_ptr;
}
tmp_ptr += stride;
}
diff --git a/vp9/encoder/vp9_encoder.h b/vp9/encoder/vp9_encoder.h
index 2e0c4db9e..333ff0b36 100644
--- a/vp9/encoder/vp9_encoder.h
+++ b/vp9/encoder/vp9_encoder.h
@@ -846,7 +846,7 @@ typedef struct VP9_COMP {
uint8_t *skin_map;
- // segment threashold for encode breakout
+ // segment threshold for encode breakout
int segment_encode_breakout[MAX_SEGMENTS];
CYCLIC_REFRESH *cyclic_refresh;
@@ -1076,8 +1076,8 @@ static INLINE void free_partition_info(struct VP9_COMP *cpi) {
}
static INLINE void reset_mv_info(MOTION_VECTOR_INFO *mv_info) {
- mv_info->ref_frame[0] = NONE;
- mv_info->ref_frame[1] = NONE;
+ mv_info->ref_frame[0] = NO_REF_FRAME;
+ mv_info->ref_frame[1] = NO_REF_FRAME;
mv_info->mv[0].as_int = INVALID_MV;
mv_info->mv[1].as_int = INVALID_MV;
}
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index 8fdd97681..22669ab84 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -606,11 +606,11 @@ static int get_smooth_intra_threshold(VP9_COMMON *cm) {
#define FP_MAX_DN_THRESH 24
#define KERNEL_SIZE 3
-// Baseline Kernal weights for first pass noise metric
-static uint8_t fp_dn_kernal_3[KERNEL_SIZE * KERNEL_SIZE] = { 1, 2, 1, 2, 4,
+// Baseline Kernel weights for first pass noise metric
+static uint8_t fp_dn_kernel_3[KERNEL_SIZE * KERNEL_SIZE] = { 1, 2, 1, 2, 4,
2, 1, 2, 1 };
-// Estimate noise at a single point based on the impace of a spatial kernal
+// Estimate noise at a single point based on the impact of a spatial kernel
// on the point value
static int fp_estimate_point_noise(uint8_t *src_ptr, const int stride) {
int sum_weight = 0;
@@ -620,23 +620,23 @@ static int fp_estimate_point_noise(uint8_t *src_ptr, const int stride) {
int diff;
int dn_diff;
uint8_t *tmp_ptr;
- uint8_t *kernal_ptr;
+ uint8_t *kernel_ptr;
uint8_t dn_val;
uint8_t centre_val = *src_ptr;
- kernal_ptr = fp_dn_kernal_3;
+ kernel_ptr = fp_dn_kernel_3;
- // Apply the kernal
+ // Apply the kernel
tmp_ptr = src_ptr - stride - 1;
for (i = 0; i < KERNEL_SIZE; ++i) {
for (j = 0; j < KERNEL_SIZE; ++j) {
diff = abs((int)centre_val - (int)tmp_ptr[j]);
max_diff = VPXMAX(max_diff, diff);
if (diff <= FP_DN_THRESH) {
- sum_weight += *kernal_ptr;
- sum_val += (int)tmp_ptr[j] * (int)*kernal_ptr;
+ sum_weight += *kernel_ptr;
+ sum_val += (int)tmp_ptr[j] * (int)*kernel_ptr;
}
- ++kernal_ptr;
+ ++kernel_ptr;
}
tmp_ptr += stride;
}
@@ -662,13 +662,13 @@ static int fp_highbd_estimate_point_noise(uint8_t *src_ptr, const int stride) {
int dn_diff;
uint8_t *tmp_ptr;
uint16_t *tmp_ptr16;
- uint8_t *kernal_ptr;
+ uint8_t *kernel_ptr;
uint16_t dn_val;
uint16_t centre_val = *CONVERT_TO_SHORTPTR(src_ptr);
- kernal_ptr = fp_dn_kernal_3;
+ kernel_ptr = fp_dn_kernel_3;
- // Apply the kernal
+ // Apply the kernel
tmp_ptr = src_ptr - stride - 1;
for (i = 0; i < KERNEL_SIZE; ++i) {
tmp_ptr16 = CONVERT_TO_SHORTPTR(tmp_ptr);
@@ -676,10 +676,10 @@ static int fp_highbd_estimate_point_noise(uint8_t *src_ptr, const int stride) {
diff = abs((int)centre_val - (int)tmp_ptr16[j]);
max_diff = VPXMAX(max_diff, diff);
if (diff <= FP_DN_THRESH) {
- sum_weight += *kernal_ptr;
- sum_val += (int)tmp_ptr16[j] * (int)*kernal_ptr;
+ sum_weight += *kernel_ptr;
+ sum_val += (int)tmp_ptr16[j] * (int)*kernel_ptr;
}
- ++kernal_ptr;
+ ++kernel_ptr;
}
tmp_ptr += stride;
}
@@ -1264,7 +1264,7 @@ void vp9_first_pass_encode_tile_mb_row(VP9_COMP *cpi, ThreadData *td,
xd->mi[0]->mv[0].as_mv = mv;
xd->mi[0]->tx_size = TX_4X4;
xd->mi[0]->ref_frame[0] = LAST_FRAME;
- xd->mi[0]->ref_frame[1] = NONE;
+ xd->mi[0]->ref_frame[1] = NO_REF_FRAME;
vp9_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, bsize);
vp9_encode_sby_pass1(x, bsize);
fp_acc_data->sum_mvr += mv.row;
@@ -1448,7 +1448,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
first_pass_stat_calc(cpi, &fps, &(first_tile_col->fp_data));
}
- // Dont allow a value of 0 for duration.
+ // Don't allow a value of 0 for duration.
// (Section duration is also defaulted to minimum of 1.0).
fps.duration = VPXMAX(1.0, (double)(source->ts_end - source->ts_start));
@@ -1458,7 +1458,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
accumulate_stats(&twopass->total_stats, &fps);
}
- // Copy the previous Last Frame back into gf and and arf buffers if
+ // Copy the previous Last Frame back into gf and arf buffers if
// the prediction is good enough... but also don't allow it to lag too far.
if ((twopass->sr_update_lag > 3) ||
((cm->current_video_frame > 0) &&
@@ -1675,7 +1675,7 @@ void vp9_init_second_pass(VP9_COMP *cpi) {
// Scan the first pass file and calculate a modified score for each
// frame that is used to distribute bits. The modified score is assumed
- // to provide a linear basis for bit allocation. I.e a frame A with a score
+ // to provide a linear basis for bit allocation. I.e., a frame A with a score
// that is double that of frame B will be allocated 2x as many bits.
{
double modified_score_total = 0.0;
@@ -1700,8 +1700,8 @@ void vp9_init_second_pass(VP9_COMP *cpi) {
}
// Second scan using clamps based on the previous cycle average.
- // This may modify the total and average somewhat but we dont bother with
- // further itterations.
+ // This may modify the total and average somewhat but we don't bother with
+ // further iterations.
modified_score_total = 0.0;
s = twopass->stats_in;
while (s < twopass->stats_in_end) {
@@ -1858,7 +1858,7 @@ static int detect_flash_from_frame_stats(const FIRSTPASS_STATS *frame_stats) {
// brief break in prediction (such as a flash) but subsequent frames
// are reasonably well predicted by an earlier (pre flash) frame.
// The recovery after a flash is indicated by a high pcnt_second_ref
- // useage or a second ref coded error notabley lower than the last
+ // usage or a second ref coded error notabley lower than the last
// frame coded error.
if (frame_stats == NULL) {
return 0;
@@ -2038,7 +2038,7 @@ static int compute_arf_boost(const FRAME_INFO *frame_info,
this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
&abs_mv_in_out_accumulator, &mv_ratio_accumulator);
- // We want to discount the the flash frame itself and the recovery
+ // We want to discount the flash frame itself and the recovery
// frame that follows as both will have poor scores.
flash_detected = detect_flash_from_frame_stats(this_frame) ||
detect_flash_from_frame_stats(next_frame);
@@ -2169,7 +2169,7 @@ static double calculate_group_score(VP9_COMP *cpi, double av_score,
double score_total = 0.0;
int i = 0;
- // We dont ever want to return a 0 score here.
+ // We don't ever want to return a 0 score here.
if (frame_count == 0) return 1.0;
while ((i < frame_count) && (s < twopass->stats_in_end)) {
@@ -2597,7 +2597,7 @@ static int get_gop_coding_frame_num(
if (
// Don't break out with a very short interval.
(gop_coding_frames >= active_gf_interval->min) &&
- // If possible dont break very close to a kf
+ // If possible don't break very close to a kf
((rc->frames_to_key - gop_coding_frames) >= rc->min_gf_interval) &&
(gop_coding_frames & 0x01) && (!flash_detected) &&
((mv_ratio_accumulator > mv_ratio_accumulator_thresh) ||
@@ -3031,7 +3031,7 @@ static int intra_step_transition(const FIRSTPASS_STATS *this_frame,
next_frame->intra_error / DOUBLE_DIVIDE_CHECK(next_frame->coded_error);
// Return true the intra/inter ratio for the current frame is
- // low but better in the next and previous frame and the relative useage of
+ // low but better in the next and previous frame and the relative usage of
// intra in the current frame is markedly higher than the last and next frame.
if ((this_ii_ratio < 2.0) && (last_ii_ratio > 2.25) &&
(next_ii_ratio > 2.25) && (this_pcnt_intra > (3 * last_pcnt_intra)) &&
@@ -3052,8 +3052,8 @@ static int intra_step_transition(const FIRSTPASS_STATS *this_frame,
// Minimum % intra coding observed in first pass (1.0 = 100%)
#define MIN_INTRA_LEVEL 0.25
// Threshold for use of the lagging second reference frame. Scene cuts do not
-// usually have a high second ref useage.
-#define SECOND_REF_USEAGE_THRESH 0.2
+// usually have a high second ref usage.
+#define SECOND_REF_USAGE_THRESH 0.2
// Hard threshold where the first pass chooses intra for almost all blocks.
// In such a case even if the frame is not a scene cut coding a key frame
// may be a good option.
@@ -3083,7 +3083,7 @@ static int test_candidate_kf(const FIRST_PASS_INFO *first_pass_info,
detect_flash_from_frame_stats(next_frame);
if (!detect_flash_from_frame_stats(this_frame) &&
!detect_flash_from_frame_stats(next_frame) &&
- (this_frame->pcnt_second_ref < SECOND_REF_USEAGE_THRESH) &&
+ (this_frame->pcnt_second_ref < SECOND_REF_USAGE_THRESH) &&
((this_frame->pcnt_inter < VERY_LOW_INTER_THRESH) ||
(slide_transition(this_frame, last_frame, next_frame)) ||
(intra_step_transition(this_frame, last_frame, next_frame)) ||
@@ -3361,7 +3361,7 @@ static void find_next_key_frame(VP9_COMP *cpi, int kf_show_idx) {
// The second (lagging) ref error is not valid immediately after
// a key frame because either the lag has not built up (in the case of
- // the first key frame or it points to a refernce before the new key
+ // the first key frame or it points to a reference before the new key
// frame.
if (i < 2) sr_accumulator = 0.0;
frame_boost =
@@ -3391,7 +3391,7 @@ static void find_next_key_frame(VP9_COMP *cpi, int kf_show_idx) {
twopass->key_frame_section_intra_rating = calculate_section_intra_ratio(
start_position, twopass->stats_in_end, rc->frames_to_key);
- // Special case for static / slide show content but dont apply
+ // Special case for static / slide show content but don't apply
// if the kf group is very short.
if ((zero_motion_accumulator > 0.99) && (rc->frames_to_key > 8)) {
rc->kf_boost = (int)(twopass->kf_max_total_boost);
@@ -3523,7 +3523,7 @@ void vp9_rc_get_second_pass_params(VP9_COMP *cpi) {
vp9_init_vizier_params(twopass, screen_area);
}
- // If this is an arf frame then we dont want to read the stats file or
+ // If this is an arf frame then we don't want to read the stats file or
// advance the input pointer as we already have what we need.
if (gf_group->update_type[gf_group->index] == ARF_UPDATE) {
int target_rate;
diff --git a/vp9/encoder/vp9_mbgraph.c b/vp9/encoder/vp9_mbgraph.c
index fafc673ac..8b6521d91 100644
--- a/vp9/encoder/vp9_mbgraph.c
+++ b/vp9/encoder/vp9_mbgraph.c
@@ -237,7 +237,7 @@ static void update_mbgraph_frame_stats(VP9_COMP *cpi,
xd->mi[0] = &mi_local;
mi_local.sb_type = BLOCK_16X16;
mi_local.ref_frame[0] = LAST_FRAME;
- mi_local.ref_frame[1] = NONE;
+ mi_local.ref_frame[1] = NO_REF_FRAME;
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
MV gld_left_mv = gld_top_mv;
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index 0ea0f85e4..cbe1c4029 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -953,7 +953,7 @@ static INLINE int is_mv_in(const MvLimits *mv_limits, const MV *mv) {
}
#define MAX_PATTERN_SCALES 11
-#define MAX_PATTERN_CANDIDATES 8 // max number of canddiates per scale
+#define MAX_PATTERN_CANDIDATES 8 // max number of candidates per scale
#define PATTERN_CANDIDATES_REF 3 // number of refinement candidates
// Calculate and return a sad+mvcost list around an integer best pel.
@@ -1034,7 +1034,7 @@ static int vp9_pattern_search(
in_what->stride) +
mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
- // Search all possible scales upto the search param around the center point
+ // Search all possible scales up to the search param around the center point
// pick the scale of the point that is best as the starting scale of
// further steps around it.
if (do_init_search) {
@@ -1208,7 +1208,7 @@ static int vp9_pattern_search_sad(
in_what->stride) +
mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
- // Search all possible scales upto the search param around the center point
+ // Search all possible scales up to the search param around the center point
// pick the scale of the point that is best as the starting scale of
// further steps around it.
if (do_init_search) {
diff --git a/vp9/encoder/vp9_noise_estimate.c b/vp9/encoder/vp9_noise_estimate.c
index 9696529c5..4ee6e51ba 100644
--- a/vp9/encoder/vp9_noise_estimate.c
+++ b/vp9/encoder/vp9_noise_estimate.c
@@ -202,7 +202,7 @@ void vp9_update_noise_estimate(VP9_COMP *const cpi) {
VPXMIN(cpi->consec_zero_mv[bl_index1],
VPXMIN(cpi->consec_zero_mv[bl_index2],
cpi->consec_zero_mv[bl_index3])));
- // Only consider blocks that are likely steady background. i.e, have
+ // Only consider blocks that are likely steady background. i.e., have
// been encoded as zero/low motion x (= thresh_consec_zeromv) frames
// in a row. consec_zero_mv[] defined for 8x8 blocks, so consider all
// 4 sub-blocks for 16x16 block. And exclude this frame if
diff --git a/vp9/encoder/vp9_pickmode.c b/vp9/encoder/vp9_pickmode.c
index fa88cd79d..4a92802dc 100644
--- a/vp9/encoder/vp9_pickmode.c
+++ b/vp9/encoder/vp9_pickmode.c
@@ -1398,8 +1398,8 @@ static void recheck_zeromv_after_denoising(
RD_COST this_rdc;
mi->mode = ZEROMV;
mi->ref_frame[0] = LAST_FRAME;
- mi->ref_frame[1] = NONE;
- set_ref_ptrs(cm, xd, mi->ref_frame[0], NONE);
+ mi->ref_frame[1] = NO_REF_FRAME;
+ set_ref_ptrs(cm, xd, mi->ref_frame[0], NO_REF_FRAME);
mi->mv[0].as_int = 0;
mi->interp_filter = EIGHTTAP;
if (cpi->sf.default_interp_filter == BILINEAR) mi->interp_filter = BILINEAR;
@@ -1417,7 +1417,7 @@ static void recheck_zeromv_after_denoising(
this_rdc = *best_rdc;
mi->mode = ctx_den->best_mode;
mi->ref_frame[0] = ctx_den->best_ref_frame;
- set_ref_ptrs(cm, xd, mi->ref_frame[0], NONE);
+ set_ref_ptrs(cm, xd, mi->ref_frame[0], NO_REF_FRAME);
mi->interp_filter = ctx_den->best_pred_filter;
if (ctx_den->best_ref_frame == INTRA_FRAME) {
mi->mv[0].as_int = INVALID_MV;
@@ -1681,7 +1681,7 @@ static INLINE void init_best_pickmode(BEST_PICKMODE *bp) {
bp->best_intra_tx_size = TX_SIZES;
bp->best_pred_filter = EIGHTTAP;
bp->best_mode_skip_txfm = SKIP_TXFM_NONE;
- bp->best_second_ref_frame = NONE;
+ bp->best_second_ref_frame = NO_REF_FRAME;
bp->best_pred = NULL;
}
@@ -1875,8 +1875,8 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data,
vp9_rd_cost_reset(&best_rdc);
vp9_rd_cost_reset(rd_cost);
mi->sb_type = bsize;
- mi->ref_frame[0] = NONE;
- mi->ref_frame[1] = NONE;
+ mi->ref_frame[0] = NO_REF_FRAME;
+ mi->ref_frame[1] = NO_REF_FRAME;
mi->tx_size =
VPXMIN(max_txsize_lookup[bsize], tx_mode_to_biggest_tx_size[cm->tx_mode]);
@@ -2054,7 +2054,7 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data,
int comp_pred = 0;
int force_mv_inter_layer = 0;
PREDICTION_MODE this_mode;
- second_ref_frame = NONE;
+ second_ref_frame = NO_REF_FRAME;
if (idx < num_inter_modes) {
this_mode = ref_mode_set[idx].pred_mode;
@@ -2631,7 +2631,7 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data,
best_pickmode.best_mode = this_mode;
best_pickmode.best_intra_tx_size = mi->tx_size;
best_pickmode.best_ref_frame = INTRA_FRAME;
- best_pickmode.best_second_ref_frame = NONE;
+ best_pickmode.best_second_ref_frame = NO_REF_FRAME;
mi->uv_mode = this_mode;
mi->mv[0].as_int = INVALID_MV;
mi->mv[1].as_int = INVALID_MV;
@@ -2753,8 +2753,8 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, int mi_row,
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
const struct segmentation *const seg = &cm->seg;
- MV_REFERENCE_FRAME ref_frame, second_ref_frame = NONE;
- MV_REFERENCE_FRAME best_ref_frame = NONE;
+ MV_REFERENCE_FRAME ref_frame, second_ref_frame = NO_REF_FRAME;
+ MV_REFERENCE_FRAME best_ref_frame = NO_REF_FRAME;
unsigned char segment_id = mi->segment_id;
struct buf_2d yv12_mb[4][MAX_MB_PLANE];
int64_t best_rd = INT64_MAX;
@@ -2793,7 +2793,7 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, int mi_row,
mi->tx_size = TX_4X4;
mi->uv_mode = DC_PRED;
mi->ref_frame[0] = LAST_FRAME;
- mi->ref_frame[1] = NONE;
+ mi->ref_frame[1] = NO_REF_FRAME;
mi->interp_filter =
cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index 13b43aa63..c32745b4f 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -1196,7 +1196,7 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi,
} else {
q = rc->avg_frame_qindex[KEY_FRAME];
}
- // For constrained quality dont allow Q less than the cq level
+ // For constrained quality don't allow Q less than the cq level
if (oxcf->rc_mode == VPX_CQ) {
if (q < cq_level) q = cq_level;
@@ -1359,7 +1359,7 @@ static void pick_kf_q_bound_two_pass(const VP9_COMP *cpi, int *bottom_index,
active_best_quality /= 4;
}
- // Dont allow the active min to be lossless (q0) unlesss the max q
+ // Don't allow the active min to be lossless (q0) unless the max q
// already indicates lossless.
active_best_quality =
VPXMIN(active_worst_quality, VPXMAX(1, active_best_quality));
@@ -1457,7 +1457,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, int *bottom_index,
} else {
q = active_worst_quality;
}
- // For constrained quality dont allow Q less than the cq level
+ // For constrained quality don't allow Q less than the cq level
if (oxcf->rc_mode == VPX_CQ) {
if (q < cq_level) q = cq_level;
}
@@ -2693,7 +2693,7 @@ static void vbr_rate_correction(VP9_COMP *cpi, int *this_frame_target) {
}
// Fast redistribution of bits arising from massive local undershoot.
- // Dont do it for kf,arf,gf or overlay frames.
+ // Don't do it for kf,arf,gf or overlay frames.
if (!frame_is_kf_gf_arf(cpi) && !rc->is_src_frame_alt_ref &&
rc->vbr_bits_off_target_fast) {
int one_frame_bits = VPXMAX(rc->avg_frame_bandwidth, *this_frame_target);
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 464705a67..b7fb26de2 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -86,28 +86,28 @@ struct rdcost_block_args {
#if !CONFIG_REALTIME_ONLY
static const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
- { NEARESTMV, { LAST_FRAME, NONE } },
- { NEARESTMV, { ALTREF_FRAME, NONE } },
- { NEARESTMV, { GOLDEN_FRAME, NONE } },
+ { NEARESTMV, { LAST_FRAME, NO_REF_FRAME } },
+ { NEARESTMV, { ALTREF_FRAME, NO_REF_FRAME } },
+ { NEARESTMV, { GOLDEN_FRAME, NO_REF_FRAME } },
- { DC_PRED, { INTRA_FRAME, NONE } },
+ { DC_PRED, { INTRA_FRAME, NO_REF_FRAME } },
- { NEWMV, { LAST_FRAME, NONE } },
- { NEWMV, { ALTREF_FRAME, NONE } },
- { NEWMV, { GOLDEN_FRAME, NONE } },
+ { NEWMV, { LAST_FRAME, NO_REF_FRAME } },
+ { NEWMV, { ALTREF_FRAME, NO_REF_FRAME } },
+ { NEWMV, { GOLDEN_FRAME, NO_REF_FRAME } },
- { NEARMV, { LAST_FRAME, NONE } },
- { NEARMV, { ALTREF_FRAME, NONE } },
- { NEARMV, { GOLDEN_FRAME, NONE } },
+ { NEARMV, { LAST_FRAME, NO_REF_FRAME } },
+ { NEARMV, { ALTREF_FRAME, NO_REF_FRAME } },
+ { NEARMV, { GOLDEN_FRAME, NO_REF_FRAME } },
- { ZEROMV, { LAST_FRAME, NONE } },
- { ZEROMV, { GOLDEN_FRAME, NONE } },
- { ZEROMV, { ALTREF_FRAME, NONE } },
+ { ZEROMV, { LAST_FRAME, NO_REF_FRAME } },
+ { ZEROMV, { GOLDEN_FRAME, NO_REF_FRAME } },
+ { ZEROMV, { ALTREF_FRAME, NO_REF_FRAME } },
{ NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
{ NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },
- { TM_PRED, { INTRA_FRAME, NONE } },
+ { TM_PRED, { INTRA_FRAME, NO_REF_FRAME } },
{ NEARMV, { LAST_FRAME, ALTREF_FRAME } },
{ NEWMV, { LAST_FRAME, ALTREF_FRAME } },
@@ -117,20 +117,20 @@ static const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
{ ZEROMV, { LAST_FRAME, ALTREF_FRAME } },
{ ZEROMV, { GOLDEN_FRAME, ALTREF_FRAME } },
- { H_PRED, { INTRA_FRAME, NONE } },
- { V_PRED, { INTRA_FRAME, NONE } },
- { D135_PRED, { INTRA_FRAME, NONE } },
- { D207_PRED, { INTRA_FRAME, NONE } },
- { D153_PRED, { INTRA_FRAME, NONE } },
- { D63_PRED, { INTRA_FRAME, NONE } },
- { D117_PRED, { INTRA_FRAME, NONE } },
- { D45_PRED, { INTRA_FRAME, NONE } },
+ { H_PRED, { INTRA_FRAME, NO_REF_FRAME } },
+ { V_PRED, { INTRA_FRAME, NO_REF_FRAME } },
+ { D135_PRED, { INTRA_FRAME, NO_REF_FRAME } },
+ { D207_PRED, { INTRA_FRAME, NO_REF_FRAME } },
+ { D153_PRED, { INTRA_FRAME, NO_REF_FRAME } },
+ { D63_PRED, { INTRA_FRAME, NO_REF_FRAME } },
+ { D117_PRED, { INTRA_FRAME, NO_REF_FRAME } },
+ { D45_PRED, { INTRA_FRAME, NO_REF_FRAME } },
};
static const REF_DEFINITION vp9_ref_order[MAX_REFS] = {
- { { LAST_FRAME, NONE } }, { { GOLDEN_FRAME, NONE } },
- { { ALTREF_FRAME, NONE } }, { { LAST_FRAME, ALTREF_FRAME } },
- { { GOLDEN_FRAME, ALTREF_FRAME } }, { { INTRA_FRAME, NONE } },
+ { { LAST_FRAME, NO_REF_FRAME } }, { { GOLDEN_FRAME, NO_REF_FRAME } },
+ { { ALTREF_FRAME, NO_REF_FRAME } }, { { LAST_FRAME, ALTREF_FRAME } },
+ { { GOLDEN_FRAME, ALTREF_FRAME } }, { { INTRA_FRAME, NO_REF_FRAME } },
};
#endif // !CONFIG_REALTIME_ONLY
@@ -1811,7 +1811,7 @@ static int check_best_zero_mv(const VP9_COMP *cpi,
const MV_REFERENCE_FRAME ref_frames[2]) {
if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
- (ref_frames[1] == NONE ||
+ (ref_frames[1] == NO_REF_FRAME ||
frame_mv[this_mode][ref_frames[1]].as_int == 0)) {
int rfc = mode_context[ref_frames[0]];
int c1 = cost_mv_ref(cpi, NEARMV, rfc);
@@ -1824,7 +1824,7 @@ static int check_best_zero_mv(const VP9_COMP *cpi,
if (c2 > c3) return 0;
} else {
assert(this_mode == ZEROMV);
- if (ref_frames[1] == NONE) {
+ if (ref_frames[1] == NO_REF_FRAME) {
if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0) ||
(c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0))
return 0;
@@ -2183,7 +2183,7 @@ static int64_t rd_pick_best_sub8x8_mode(
int cost_list[5];
const MvLimits tmp_mv_limits = x->mv_limits;
- /* Is the best so far sufficiently good that we cant justify doing
+ /* Is the best so far sufficiently good that we can't justify doing
* and new motion search. */
if (best_rd < label_mv_thresh) break;
@@ -3241,7 +3241,7 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
x->skip_encode = 0;
ctx->skip = 0;
xd->mi[0]->ref_frame[0] = INTRA_FRAME;
- xd->mi[0]->ref_frame[1] = NONE;
+ xd->mi[0]->ref_frame[1] = NO_REF_FRAME;
// Initialize interp_filter here so we do not have to check for inter block
// modes in get_pred_context_switchable_interp()
xd->mi[0]->interp_filter = SWITCHABLE_FILTERS;
@@ -3686,7 +3686,7 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, TileDataEnc *tile_data,
ref_frame_skip_mask[0] |= GOLDEN_FRAME_MODE_MASK;
break;
case ALTREF_FRAME: ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK; break;
- case NONE:
+ case NO_REF_FRAME:
case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
}
}
@@ -3719,7 +3719,7 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, TileDataEnc *tile_data,
MODE_INFO *ref_mi;
int const_motion = 1;
int skip_ref_frame = !cb_partition_search_ctrl;
- MV_REFERENCE_FRAME rf = NONE;
+ MV_REFERENCE_FRAME rf = NO_REF_FRAME;
int_mv ref_mv;
ref_mv.as_int = INVALID_MV;
@@ -3736,7 +3736,7 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, TileDataEnc *tile_data,
if ((mi_col - 1) >= tile_info->mi_col_start) {
if (ref_mv.as_int == INVALID_MV) ref_mv = xd->mi[-1]->mv[0];
- if (rf == NONE) rf = xd->mi[-1]->ref_frame[0];
+ if (rf == NO_REF_FRAME) rf = xd->mi[-1]->ref_frame[0];
for (i = 0; i < mi_height; ++i) {
ref_mi = xd->mi[i * xd->mi_stride - 1];
const_motion &= (ref_mv.as_int == ref_mi->mv[0].as_int) &&
@@ -4230,7 +4230,7 @@ void vp9_rd_pick_inter_mode_sb_seg_skip(VP9_COMP *cpi, TileDataEnc *tile_data,
mi->mode = ZEROMV;
mi->uv_mode = DC_PRED;
mi->ref_frame[0] = LAST_FRAME;
- mi->ref_frame[1] = NONE;
+ mi->ref_frame[1] = NO_REF_FRAME;
mi->mv[0].as_int = 0;
x->skip = 1;
@@ -4412,7 +4412,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, TileDataEnc *tile_data,
case ALTREF_FRAME:
ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << LAST_FRAME);
break;
- case NONE:
+ case NO_REF_FRAME:
case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
}
}
@@ -4671,7 +4671,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, TileDataEnc *tile_data,
if (tmp_best_rdu > 0) {
// If even the 'Y' rd value of split is higher than best so far
- // then dont bother looking at UV
+ // then don't bother looking at UV
vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
@@ -4899,7 +4899,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, TileDataEnc *tile_data,
mi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int;
}
// If the second reference does not exist, set the corresponding mv to zero.
- if (mi->ref_frame[1] == NONE) {
+ if (mi->ref_frame[1] == NO_REF_FRAME) {
mi->mv[1].as_int = 0;
for (i = 0; i < 4; ++i) {
mi->bmi[i].as_mv[1].as_int = 0;
diff --git a/vp9/encoder/vp9_speed_features.c b/vp9/encoder/vp9_speed_features.c
index 48c21c581..4a7172118 100644
--- a/vp9/encoder/vp9_speed_features.c
+++ b/vp9/encoder/vp9_speed_features.c
@@ -42,7 +42,7 @@ static int frame_is_boosted(const VP9_COMP *cpi) {
// Sets a partition size down to which the auto partition code will always
// search (can go lower), based on the image dimensions. The logic here
// is that the extent to which ringing artefacts are offensive, depends
-// partly on the screen area that over which they propogate. Propogation is
+// partly on the screen area over which they propagate. Propagation is
// limited by transform block size but the screen area take up by a given block
// size will be larger for a small image format stretched to full screen.
static BLOCK_SIZE set_partition_min_limit(VP9_COMMON *const cm) {
diff --git a/vp9/encoder/vp9_speed_features.h b/vp9/encoder/vp9_speed_features.h
index 70c61fe00..941de639a 100644
--- a/vp9/encoder/vp9_speed_features.h
+++ b/vp9/encoder/vp9_speed_features.h
@@ -643,7 +643,7 @@ typedef struct SPEED_FEATURES {
// Use machine learning based partition search.
int nonrd_use_ml_partition;
- // Multiplier for base thresold for variance partitioning.
+ // Multiplier for base threshold for variance partitioning.
int variance_part_thresh_mult;
// Force subpel motion filter to always use SMOOTH_FILTER.
diff --git a/vp9/encoder/vp9_svc_layercontext.c b/vp9/encoder/vp9_svc_layercontext.c
index e4721271d..24fd81813 100644
--- a/vp9/encoder/vp9_svc_layercontext.c
+++ b/vp9/encoder/vp9_svc_layercontext.c
@@ -1145,7 +1145,7 @@ void vp9_svc_constrain_inter_layer_pred(VP9_COMP *const cpi) {
void vp9_svc_assert_constraints_pattern(VP9_COMP *const cpi) {
SVC *const svc = &cpi->svc;
// For fixed/non-flexible mode, the following constraint are expected,
- // when inter-layer prediciton is on (default).
+ // when inter-layer prediction is on (default).
if (svc->temporal_layering_mode != VP9E_TEMPORAL_LAYERING_MODE_BYPASS &&
svc->disable_inter_layer_pred == INTER_LAYER_PRED_ON &&
svc->framedrop_mode != LAYER_DROP) {
diff --git a/vp9/ratectrl_rtc.h b/vp9/ratectrl_rtc.h
index 7f3c90045..d3876de87 100644
--- a/vp9/ratectrl_rtc.h
+++ b/vp9/ratectrl_rtc.h
@@ -14,12 +14,9 @@
#include <cstdint>
#include <memory>
-#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_enums.h"
-#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/vp9_iface_common.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
-#include "vp9/encoder/vp9_firstpass.h"
#include "vp9/vp9_cx_iface.h"
#include "vpx/internal/vpx_ratectrl_rtc.h"
#include "vpx_mem/vpx_mem.h"
diff --git a/vp9/simple_encode.cc b/vp9/simple_encode.cc
index f42912d35..2e2a3746e 100644
--- a/vp9/simple_encode.cc
+++ b/vp9/simple_encode.cc
@@ -183,10 +183,11 @@ static void update_motion_vector_info(
const MV_REFERENCE_FRAME *in_ref_frame =
input_motion_vector_info[i].ref_frame;
output_motion_vector_info[i].mv_count =
- (in_ref_frame[0] == INTRA_FRAME) ? 0
- : ((in_ref_frame[1] == NONE) ? 1 : 2);
- if (in_ref_frame[0] == NONE) {
- fprintf(stderr, "in_ref_frame[0] shouldn't be NONE\n");
+ (in_ref_frame[0] == INTRA_FRAME)
+ ? 0
+ : ((in_ref_frame[1] == NO_REF_FRAME) ? 1 : 2);
+ if (in_ref_frame[0] == NO_REF_FRAME) {
+ fprintf(stderr, "in_ref_frame[0] shouldn't be NO_REF_FRAME\n");
abort();
}
output_motion_vector_info[i].ref_frame[0] =
diff --git a/vp9/simple_encode.h b/vp9/simple_encode.h
index 7920e95ee..d610a5e15 100644
--- a/vp9/simple_encode.h
+++ b/vp9/simple_encode.h
@@ -309,7 +309,7 @@ struct EncodeFrameResult {
// The tpl stats stored in the vector is according to the encoding order.
// For example, suppose there are N show frames for the current GOP.
// Then tpl_stats_info[0] stores the information of the first frame to be
- // encoded for this GOP, i.e, the AltRef frame.
+ // encoded for this GOP, i.e., the AltRef frame.
std::vector<TplStatsInfo> tpl_stats_info;
ImageBuffer coded_frame;
diff --git a/vp9/vp9_cx_iface.c b/vp9/vp9_cx_iface.c
index 409069b4e..cc2ae20d2 100644
--- a/vp9/vp9_cx_iface.c
+++ b/vp9/vp9_cx_iface.c
@@ -2084,8 +2084,8 @@ static vpx_codec_enc_cfg_map_t encoder_usage_cfg_map[] = {
0, // rc_resize_allowed
0, // rc_scaled_width
0, // rc_scaled_height
- 60, // rc_resize_down_thresold
- 30, // rc_resize_up_thresold
+ 60, // rc_resize_down_thresh
+ 30, // rc_resize_up_thresh
VPX_VBR, // rc_end_usage
{ NULL, 0 }, // rc_twopass_stats_in
@@ -2118,7 +2118,7 @@ static vpx_codec_enc_cfg_map_t encoder_usage_cfg_map[] = {
{ 0 }, // ts_rate_decimator
0, // ts_periodicity
{ 0 }, // ts_layer_id
- { 0 }, // layer_taget_bitrate
+ { 0 }, // layer_target_bitrate
0, // temporal_layering_mode
0, // use_vizier_rc_params
{ 1, 1 }, // active_wq_factor
diff --git a/vpx/vp8cx.h b/vpx/vp8cx.h
index c4e04084c..3c0278c84 100644
--- a/vpx/vp8cx.h
+++ b/vpx/vp8cx.h
@@ -302,7 +302,7 @@ enum vp8e_enc_control_id {
* the feature is off, i.e., no golden frame boost in CBR mode and
* average bitrate target is used.
*
- * For example, to allow 100% more bits, i.e, 2X, in a golden frame
+ * For example, to allow 100% more bits, i.e., 2X, in a golden frame
* than average frame, set this to 100.
*
* Supported in codecs: VP9
@@ -598,7 +598,7 @@ enum vp8e_enc_control_id {
* the feature is off, i.e., no golden frame boost in CBR mode and
* average bitrate target is used.
*
- * For example, to allow 100% more bits, i.e, 2X, in a golden frame
+ * For example, to allow 100% more bits, i.e., 2X, in a golden frame
* than average frame, set this to 100.
*
* Supported in codecs: VP8
diff --git a/vpx_dsp/ppc/variance_vsx.c b/vpx_dsp/ppc/variance_vsx.c
index be9614a35..6c6bc9a30 100644
--- a/vpx_dsp/ppc/variance_vsx.c
+++ b/vpx_dsp/ppc/variance_vsx.c
@@ -225,7 +225,7 @@ static INLINE void variance(const uint8_t *src_ptr, int src_stride,
}
/* Identical to the variance call except it does not calculate the
- * sse - sum^2 / w*h and returns sse in addtion to modifying the passed in
+ * sse - sum^2 / w*h and returns sse in addition to modifying the passed in
* variable.
*/
#define MSE(W, H) \
diff --git a/vpx_dsp/variance.c b/vpx_dsp/variance.c
index a6793efb6..1c476542f 100644
--- a/vpx_dsp/variance.c
+++ b/vpx_dsp/variance.c
@@ -180,7 +180,7 @@ static void var_filter_block2d_bil_second_pass(
}
/* Identical to the variance call except it does not calculate the
- * sse - sum^2 / w*h and returns sse in addtion to modifying the passed in
+ * sse - sum^2 / w*h and returns sse in addition to modifying the passed in
* variable.
*/
#define MSE(W, H) \