path: root/vp9/encoder
author     James Zern <jzern@google.com>   2015-04-23 20:47:40 -0700
committer  James Zern <jzern@google.com>   2015-04-28 20:00:59 -0700
commit     f58011ada5e3d28d9acf078657f8117f0960d240 (patch)
tree       70beff98e74f20148d562ec6e94020ca34175ab6 /vp9/encoder
parent     f274c2199beab8cd656efd0660222b1aa1864ce8 (diff)
vpx_mem: remove vpx_memset
vestigial; replace instances with memset(), which they were already being defined to.

Change-Id: Ie030cfaaa3e890dd92cf1a995fcb1927ba175201
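For context, a minimal sketch of the wrapper being retired (assuming the usual vpx_mem definition of the period; the exact header and form may differ):

    /* vpx_mem (approximate): vpx_memset was simply an alias for the C library
     * memset, so every call site in this diff can call memset() directly. */
    #include <string.h>
    #define vpx_memset memset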
Diffstat (limited to 'vp9/encoder')
-rw-r--r--  vp9/encoder/arm/neon/vp9_quantize_neon.c               4
-rw-r--r--  vp9/encoder/vp9_aq_complexity.c                         3
-rw-r--r--  vp9/encoder/vp9_aq_cyclicrefresh.c                      4
-rw-r--r--  vp9/encoder/vp9_bitstream.c                             4
-rw-r--r--  vp9/encoder/vp9_dct.c                                   4
-rw-r--r--  vp9/encoder/vp9_encodeframe.c                          24
-rw-r--r--  vp9/encoder/vp9_encodemb.c                              4
-rw-r--r--  vp9/encoder/vp9_encoder.c                              28
-rw-r--r--  vp9/encoder/vp9_extend.c                                4
-rw-r--r--  vp9/encoder/vp9_mbgraph.c                               5
-rw-r--r--  vp9/encoder/vp9_picklpf.c                               2
-rw-r--r--  vp9/encoder/vp9_pickmode.c                              4
-rw-r--r--  vp9/encoder/vp9_quantize.c                             48
-rw-r--r--  vp9/encoder/vp9_rdopt.c                                20
-rw-r--r--  vp9/encoder/vp9_segmentation.c                          6
-rw-r--r--  vp9/encoder/vp9_skin_detection.c                        4
-rw-r--r--  vp9/encoder/vp9_svc_layercontext.c                      4
-rw-r--r--  vp9/encoder/vp9_temporal_filter.c                       4
-rw-r--r--  vp9/encoder/x86/vp9_highbd_quantize_intrin_sse2.c       8
19 files changed, 91 insertions(+), 93 deletions(-)
diff --git a/vp9/encoder/arm/neon/vp9_quantize_neon.c b/vp9/encoder/arm/neon/vp9_quantize_neon.c
index 9cf1e5e2c..47363c75b 100644
--- a/vp9/encoder/arm/neon/vp9_quantize_neon.c
+++ b/vp9/encoder/arm/neon/vp9_quantize_neon.c
@@ -111,8 +111,8 @@ void vp9_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
*eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0);
}
} else {
- vpx_memset(qcoeff_ptr, 0, count * sizeof(int16_t));
- vpx_memset(dqcoeff_ptr, 0, count * sizeof(int16_t));
+ memset(qcoeff_ptr, 0, count * sizeof(int16_t));
+ memset(dqcoeff_ptr, 0, count * sizeof(int16_t));
*eob_ptr = 0;
}
}
diff --git a/vp9/encoder/vp9_aq_complexity.c b/vp9/encoder/vp9_aq_complexity.c
index 22e5217b6..9622ba1d6 100644
--- a/vp9/encoder/vp9_aq_complexity.c
+++ b/vp9/encoder/vp9_aq_complexity.c
@@ -55,8 +55,7 @@ void vp9_setup_in_frame_q_adj(VP9_COMP *cpi) {
const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth);
// Clear down the segment map.
- vpx_memset(cpi->segmentation_map, DEFAULT_AQ2_SEG,
- cm->mi_rows * cm->mi_cols);
+ memset(cpi->segmentation_map, DEFAULT_AQ2_SEG, cm->mi_rows * cm->mi_cols);
vp9_clearall_segfeatures(seg);
diff --git a/vp9/encoder/vp9_aq_cyclicrefresh.c b/vp9/encoder/vp9_aq_cyclicrefresh.c
index 4bd083c27..21bea8cc4 100644
--- a/vp9/encoder/vp9_aq_cyclicrefresh.c
+++ b/vp9/encoder/vp9_aq_cyclicrefresh.c
@@ -324,7 +324,7 @@ void vp9_cyclic_refresh_update_map(VP9_COMP *const cpi) {
unsigned char *const seg_map = cpi->segmentation_map;
int i, block_count, bl_index, sb_rows, sb_cols, sbs_in_frame;
int xmis, ymis, x, y;
- vpx_memset(seg_map, CR_SEGMENT_ID_BASE, cm->mi_rows * cm->mi_cols);
+ memset(seg_map, CR_SEGMENT_ID_BASE, cm->mi_rows * cm->mi_cols);
sb_cols = (cm->mi_cols + MI_BLOCK_SIZE - 1) / MI_BLOCK_SIZE;
sb_rows = (cm->mi_rows + MI_BLOCK_SIZE - 1) / MI_BLOCK_SIZE;
sbs_in_frame = sb_cols * sb_rows;
@@ -411,7 +411,7 @@ void vp9_cyclic_refresh_setup(VP9_COMP *const cpi) {
(cpi->svc.spatial_layer_id > 0)) {
// Set segmentation map to 0 and disable.
unsigned char *const seg_map = cpi->segmentation_map;
- vpx_memset(seg_map, 0, cm->mi_rows * cm->mi_cols);
+ memset(seg_map, 0, cm->mi_rows * cm->mi_cols);
vp9_disable_segmentation(&cm->seg);
if (cm->frame_type == KEY_FRAME)
cr->sb_index = 0;
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index 2594ac24d..d20e06766 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -928,8 +928,8 @@ static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
const int tile_cols = 1 << cm->log2_tile_cols;
const int tile_rows = 1 << cm->log2_tile_rows;
- vpx_memset(cm->above_seg_context, 0, sizeof(*cm->above_seg_context) *
- mi_cols_aligned_to_sb(cm->mi_cols));
+ memset(cm->above_seg_context, 0,
+ sizeof(*cm->above_seg_context) * mi_cols_aligned_to_sb(cm->mi_cols));
for (tile_row = 0; tile_row < tile_rows; tile_row++) {
for (tile_col = 0; tile_col < tile_cols; tile_col++) {
diff --git a/vp9/encoder/vp9_dct.c b/vp9/encoder/vp9_dct.c
index 41f72f89b..9e6ca3d59 100644
--- a/vp9/encoder/vp9_dct.c
+++ b/vp9/encoder/vp9_dct.c
@@ -417,8 +417,8 @@ void vp9_fdct8x8_quant_c(const int16_t *input, int stride,
(void)quant_shift_ptr;
(void)iscan;
- vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
if (!skip_block) {
// Quantization pass: All coefficients with index >= zero_flag are
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 0ceacef8b..fa7a694c5 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -1595,7 +1595,7 @@ static void set_source_var_based_partition(VP9_COMP *cpi,
int use32x32 = 0;
unsigned int thr = cpi->source_var_thresh;
- vpx_memset(d32, 0, 4 * sizeof(diff));
+ memset(d32, 0, 4 * sizeof(diff));
for (i = 0; i < 4; i++) {
diff *d16[4];
@@ -2832,8 +2832,8 @@ static void encode_rd_sb_row(VP9_COMP *cpi,
int mi_col;
// Initialize the left context for the new SB row
- vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
- vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
+ memset(&xd->left_context, 0, sizeof(xd->left_context));
+ memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
// Code each SB in the row
for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
@@ -2917,11 +2917,11 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) {
// Note: this memset assumes above_context[0], [1] and [2]
// are allocated as part of the same buffer.
- vpx_memset(xd->above_context[0], 0,
- sizeof(*xd->above_context[0]) *
- 2 * aligned_mi_cols * MAX_MB_PLANE);
- vpx_memset(xd->above_seg_context, 0,
- sizeof(*xd->above_seg_context) * aligned_mi_cols);
+ memset(xd->above_context[0], 0,
+ sizeof(*xd->above_context[0]) *
+ 2 * aligned_mi_cols * MAX_MB_PLANE);
+ memset(xd->above_seg_context, 0,
+ sizeof(*xd->above_seg_context) * aligned_mi_cols);
}
static int check_dual_ref_flags(VP9_COMP *cpi) {
@@ -3594,8 +3594,8 @@ static void encode_nonrd_sb_row(VP9_COMP *cpi,
int mi_col;
// Initialize the left context for the new SB row
- vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
- vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
+ memset(&xd->left_context, 0, sizeof(xd->left_context));
+ memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
// Code each SB in the row
for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
@@ -3689,7 +3689,7 @@ static int set_var_thresh_from_histogram(VP9_COMP *cpi) {
int sum = 0;
int i, j;
- vpx_memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));
+ memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));
for (i = 0; i < cm->mb_rows; i++) {
for (j = 0; j < cm->mb_cols; j++) {
@@ -4187,7 +4187,7 @@ static void encode_superblock(VP9_COMP *cpi, ThreadData *td,
cpi->sf.allow_skip_recode;
if (!x->skip_recode && !cpi->sf.use_nonrd_pick_mode)
- vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+ memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
x->skip_optimize = ctx->is_coded;
ctx->is_coded = 1;
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index e6afd4722..9a4e61ec8 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -319,8 +319,8 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block,
UPDATE_RD_COST();
best = rd_cost1 < rd_cost0;
final_eob = -1;
- vpx_memset(qcoeff, 0, sizeof(*qcoeff) * (16 << (tx_size * 2)));
- vpx_memset(dqcoeff, 0, sizeof(*dqcoeff) * (16 << (tx_size * 2)));
+ memset(qcoeff, 0, sizeof(*qcoeff) * (16 << (tx_size * 2)));
+ memset(dqcoeff, 0, sizeof(*dqcoeff) * (16 << (tx_size * 2)));
for (i = next; i < eob; i = next) {
const int x = tokens[i][best].qc;
const int rc = scan[i];
diff --git a/vp9/encoder/vp9_encoder.c b/vp9/encoder/vp9_encoder.c
index 63d09f22a..a6e4c9c27 100644
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -193,7 +193,7 @@ int vp9_get_active_map(VP9_COMP* cpi,
unsigned char* const seg_map_8x8 = cpi->segmentation_map;
const int mi_rows = cpi->common.mi_rows;
const int mi_cols = cpi->common.mi_cols;
- vpx_memset(new_map_16x16, !cpi->active_map.enabled, rows * cols);
+ memset(new_map_16x16, !cpi->active_map.enabled, rows * cols);
if (cpi->active_map.enabled) {
int r, c;
for (r = 0; r < mi_rows; ++r) {
@@ -251,19 +251,19 @@ static void setup_frame(VP9_COMP *cpi) {
static void vp9_enc_setup_mi(VP9_COMMON *cm) {
int i;
cm->mi = cm->mip + cm->mi_stride + 1;
- vpx_memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
+ memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
cm->prev_mi = cm->prev_mip + cm->mi_stride + 1;
// Clear top border row
- vpx_memset(cm->prev_mip, 0, sizeof(*cm->prev_mip) * cm->mi_stride);
+ memset(cm->prev_mip, 0, sizeof(*cm->prev_mip) * cm->mi_stride);
// Clear left border column
for (i = 1; i < cm->mi_rows + 1; ++i)
- vpx_memset(&cm->prev_mip[i * cm->mi_stride], 0, sizeof(*cm->prev_mip));
+ memset(&cm->prev_mip[i * cm->mi_stride], 0, sizeof(*cm->prev_mip));
cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
- vpx_memset(cm->mi_grid_base, 0,
- cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
+ memset(cm->mi_grid_base, 0,
+ cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
}
static int vp9_enc_alloc_mi(VP9_COMMON *cm, int mi_size) {
@@ -399,11 +399,11 @@ static void dealloc_compressor_data(VP9_COMP *cpi) {
for (i = 0; i < MAX_LAG_BUFFERS; ++i) {
vp9_free_frame_buffer(&cpi->svc.scaled_frames[i]);
}
- vpx_memset(&cpi->svc.scaled_frames[0], 0,
- MAX_LAG_BUFFERS * sizeof(cpi->svc.scaled_frames[0]));
+ memset(&cpi->svc.scaled_frames[0], 0,
+ MAX_LAG_BUFFERS * sizeof(cpi->svc.scaled_frames[0]));
vp9_free_frame_buffer(&cpi->svc.empty_frame.img);
- vpx_memset(&cpi->svc.empty_frame, 0, sizeof(cpi->svc.empty_frame));
+ memset(&cpi->svc.empty_frame, 0, sizeof(cpi->svc.empty_frame));
}
static void save_coding_context(VP9_COMP *cpi) {
@@ -474,7 +474,7 @@ static void configure_static_seg_features(VP9_COMP *cpi) {
// Disable and clear down for KF
if (cm->frame_type == KEY_FRAME) {
// Clear down the global segmentation map
- vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+ memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
seg->update_map = 0;
seg->update_data = 0;
cpi->static_mb_pct = 0;
@@ -487,7 +487,7 @@ static void configure_static_seg_features(VP9_COMP *cpi) {
} else if (cpi->refresh_alt_ref_frame) {
// If this is an alt ref frame
// Clear down the global segmentation map
- vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+ memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
seg->update_map = 0;
seg->update_data = 0;
cpi->static_mb_pct = 0;
@@ -548,7 +548,7 @@ static void configure_static_seg_features(VP9_COMP *cpi) {
vp9_disable_segmentation(seg);
- vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+ memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
seg->update_map = 0;
seg->update_data = 0;
@@ -3515,8 +3515,8 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
vp9_clear_system_state();
#if CONFIG_INTERNAL_STATS
- vpx_memset(cpi->mode_chosen_counts, 0,
- MAX_MODES * sizeof(*cpi->mode_chosen_counts));
+ memset(cpi->mode_chosen_counts, 0,
+ MAX_MODES * sizeof(*cpi->mode_chosen_counts));
#endif
if (cpi->sf.recode_loop == DISALLOW_RECODE) {
diff --git a/vp9/encoder/vp9_extend.c b/vp9/encoder/vp9_extend.c
index 5bb77b2e3..96f3598b1 100644
--- a/vp9/encoder/vp9_extend.c
+++ b/vp9/encoder/vp9_extend.c
@@ -27,9 +27,9 @@ static void copy_and_extend_plane(const uint8_t *src, int src_pitch,
uint8_t *dst_ptr2 = dst + w;
for (i = 0; i < h; i++) {
- vpx_memset(dst_ptr1, src_ptr1[0], extend_left);
+ memset(dst_ptr1, src_ptr1[0], extend_left);
memcpy(dst_ptr1 + extend_left, src_ptr1, w);
- vpx_memset(dst_ptr2, src_ptr2[0], extend_right);
+ memset(dst_ptr2, src_ptr2[0], extend_right);
src_ptr1 += src_pitch;
src_ptr2 += src_pitch;
dst_ptr1 += dst_pitch;
diff --git a/vp9/encoder/vp9_mbgraph.c b/vp9/encoder/vp9_mbgraph.c
index 031f77f6d..06c3885c1 100644
--- a/vp9/encoder/vp9_mbgraph.c
+++ b/vp9/encoder/vp9_mbgraph.c
@@ -389,9 +389,8 @@ void vp9_update_mbgraph_stats(VP9_COMP *cpi) {
cpi->mbgraph_n_frames = n_frames;
for (i = 0; i < n_frames; i++) {
MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
- vpx_memset(frame_stats->mb_stats, 0,
- cm->mb_rows * cm->mb_cols *
- sizeof(*cpi->mbgraph_stats[i].mb_stats));
+ memset(frame_stats->mb_stats, 0,
+ cm->mb_rows * cm->mb_cols * sizeof(*cpi->mbgraph_stats[i].mb_stats));
}
// do motion search to find contribution of each reference to data
diff --git a/vp9/encoder/vp9_picklpf.c b/vp9/encoder/vp9_picklpf.c
index a95f0f46d..5eb5d542b 100644
--- a/vp9/encoder/vp9_picklpf.c
+++ b/vp9/encoder/vp9_picklpf.c
@@ -81,7 +81,7 @@ static int search_filter_level(const YV12_BUFFER_CONFIG *sd, VP9_COMP *cpi,
int64_t ss_err[MAX_LOOP_FILTER + 1];
// Set each entry to -1
- vpx_memset(ss_err, 0xFF, sizeof(ss_err));
+ memset(ss_err, 0xFF, sizeof(ss_err));
// Make a copy of the unfiltered / processed recon buffer
vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
diff --git a/vp9/encoder/vp9_pickmode.c b/vp9/encoder/vp9_pickmode.c
index 6ef504884..85d7fded7 100644
--- a/vp9/encoder/vp9_pickmode.c
+++ b/vp9/encoder/vp9_pickmode.c
@@ -51,7 +51,7 @@ static int mv_refs_rt(const VP9_COMMON *cm, const MACROBLOCKD *xd,
int const_motion = 0;
// Blank the reference vector list
- vpx_memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
+ memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
// The nearest 2 blocks are treated differently
// if the size < 8x8 we get the mv from the bmi substructure,
@@ -957,7 +957,7 @@ void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
mbmi->ref_frame[0] = INTRA_FRAME;
mbmi->mv[0].as_int = INVALID_MV;
mbmi->uv_mode = DC_PRED;
- vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+ memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
// Change the limit of this loop to add other intra prediction
// mode tests.
diff --git a/vp9/encoder/vp9_quantize.c b/vp9/encoder/vp9_quantize.c
index 9c08c5c62..3c07e2c24 100644
--- a/vp9/encoder/vp9_quantize.c
+++ b/vp9/encoder/vp9_quantize.c
@@ -30,8 +30,8 @@ void vp9_quantize_dc(const tran_low_t *coeff_ptr,
const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
int tmp, eob = -1;
- vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
if (!skip_block) {
tmp = clamp(abs_coeff + round_ptr[rc != 0], INT16_MIN, INT16_MAX);
@@ -52,8 +52,8 @@ void vp9_highbd_quantize_dc(const tran_low_t *coeff_ptr,
const int16_t dequant_ptr, uint16_t *eob_ptr) {
int eob = -1;
- vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
if (!skip_block) {
const int rc = 0;
@@ -84,8 +84,8 @@ void vp9_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
int tmp, eob = -1;
- vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
if (!skip_block) {
@@ -112,8 +112,8 @@ void vp9_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr,
const int n_coeffs = 1024;
int eob = -1;
- vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
if (!skip_block) {
const int rc = 0;
@@ -148,8 +148,8 @@ void vp9_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
(void)quant_shift_ptr;
(void)iscan;
- vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
if (!skip_block) {
// Quantization pass: All coefficients with index >= zero_flag are
@@ -195,8 +195,8 @@ void vp9_highbd_quantize_fp_c(const tran_low_t *coeff_ptr,
(void)quant_shift_ptr;
(void)iscan;
- vpx_memset(qcoeff_ptr, 0, count * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, count * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, count * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, count * sizeof(*dqcoeff_ptr));
if (!skip_block) {
// Quantization pass: All coefficients with index >= zero_flag are
@@ -238,8 +238,8 @@ void vp9_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
(void)quant_shift_ptr;
(void)iscan;
- vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
if (!skip_block) {
for (i = 0; i < n_coeffs; i++) {
@@ -281,8 +281,8 @@ void vp9_highbd_quantize_fp_32x32_c(const tran_low_t *coeff_ptr,
(void)quant_shift_ptr;
(void)iscan;
- vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
if (!skip_block) {
for (i = 0; i < n_coeffs; i++) {
@@ -321,8 +321,8 @@ void vp9_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1};
(void)iscan;
- vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
if (!skip_block) {
// Pre-scan pass
@@ -373,8 +373,8 @@ void vp9_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1};
(void)iscan;
- vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
if (!skip_block) {
// Pre-scan pass
@@ -431,8 +431,8 @@ void vp9_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int i, eob = -1;
(void)iscan;
- vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
if (!skip_block) {
// Pre-scan pass
@@ -490,8 +490,8 @@ void vp9_highbd_quantize_b_32x32_c(const tran_low_t *coeff_ptr,
int i, eob = -1;
(void)iscan;
- vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
if (!skip_block) {
// Pre-scan pass
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index fc0c6eee5..676e3ca30 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -728,7 +728,7 @@ static void super_block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
assert(bs == xd->mi[0]->mbmi.sb_type);
if (cpi->sf.tx_size_search_method == USE_LARGESTALL || xd->lossless) {
- vpx_memset(txfm_cache, 0, TX_MODES * sizeof(int64_t));
+ memset(txfm_cache, 0, TX_MODES * sizeof(int64_t));
choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
bs);
} else {
@@ -1084,7 +1084,7 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
for (i = 0; i < TX_MODES; i++)
tx_cache[i] = INT64_MAX;
- vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+ memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
/* Y Search for intra prediction mode */
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
int64_t local_tx_cache[TX_MODES];
@@ -1201,7 +1201,7 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
int this_rate_tokenonly, this_rate, s;
int64_t this_distortion, this_sse;
- vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+ memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode)))
continue;
@@ -1239,7 +1239,7 @@ static int64_t rd_sbuv_dcpred(const VP9_COMP *cpi, MACROBLOCK *x,
int64_t unused;
x->e_mbd.mi[0]->mbmi.uv_mode = DC_PRED;
- vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+ memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
super_block_uvrd(cpi, x, rate_tokenonly, distortion,
skippable, &unused, bsize, INT64_MAX);
*rate = *rate_tokenonly + cpi->intra_uv_mode_cost[cm->frame_type][DC_PRED];
@@ -2120,8 +2120,8 @@ static void estimate_ref_frame_costs(const VP9_COMMON *cm,
int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id,
SEG_LVL_REF_FRAME);
if (seg_ref_active) {
- vpx_memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
- vpx_memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
+ memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
+ memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
*comp_mode_p = 128;
} else {
vp9_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd);
@@ -2700,7 +2700,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (is_comp_pred)
if (single_skippable[this_mode][refs[0]] &&
single_skippable[this_mode][refs[1]])
- vpx_memset(skip_txfm, 1, sizeof(skip_txfm));
+ memset(skip_txfm, 1, sizeof(skip_txfm));
if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
// if current pred_error modeled rd is substantially more than the best
@@ -3265,7 +3265,7 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi,
if (ref_frame == INTRA_FRAME) {
TX_SIZE uv_tx;
struct macroblockd_plane *const pd = &xd->plane[1];
- vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+ memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable,
NULL, bsize, tx_cache, best_rd);
if (rate_y == INT_MAX)
@@ -3750,7 +3750,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi,
int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
- vpx_memset(x->zcoeff_blk[TX_4X4], 0, 4);
+ memset(x->zcoeff_blk[TX_4X4], 0, 4);
vp9_zero(best_mbmode);
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
@@ -4074,7 +4074,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi,
// then dont bother looking at UV
vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col,
BLOCK_8X8);
- vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+ memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
&uv_sse, BLOCK_8X8, tmp_best_rdu))
continue;
diff --git a/vp9/encoder/vp9_segmentation.c b/vp9/encoder/vp9_segmentation.c
index 9c0d78441..9b15072e9 100644
--- a/vp9/encoder/vp9_segmentation.c
+++ b/vp9/encoder/vp9_segmentation.c
@@ -213,8 +213,8 @@ void vp9_choose_segmap_coding_method(VP9_COMMON *cm, MACROBLOCKD *xd) {
// Set default state for the segment tree probabilities and the
// temporal coding probabilities
- vpx_memset(seg->tree_probs, 255, sizeof(seg->tree_probs));
- vpx_memset(seg->pred_probs, 255, sizeof(seg->pred_probs));
+ memset(seg->tree_probs, 255, sizeof(seg->tree_probs));
+ memset(seg->pred_probs, 255, sizeof(seg->pred_probs));
// First of all generate stats regarding how well the last segment map
// predicts this one
@@ -276,6 +276,6 @@ void vp9_reset_segment_features(struct segmentation *seg) {
seg->enabled = 0;
seg->update_map = 0;
seg->update_data = 0;
- vpx_memset(seg->tree_probs, 255, sizeof(seg->tree_probs));
+ memset(seg->tree_probs, 255, sizeof(seg->tree_probs));
vp9_clearall_segfeatures(seg);
}
diff --git a/vp9/encoder/vp9_skin_detection.c b/vp9/encoder/vp9_skin_detection.c
index 08ebcf8f1..1cb066283 100644
--- a/vp9/encoder/vp9_skin_detection.c
+++ b/vp9/encoder/vp9_skin_detection.c
@@ -60,14 +60,14 @@ void vp9_compute_skin_map(VP9_COMP *const cpi, FILE *yuv_skinmap_file) {
const int src_ystride = cpi->Source->y_stride;
const int src_uvstride = cpi->Source->uv_stride;
YV12_BUFFER_CONFIG skinmap;
- vpx_memset(&skinmap, 0, sizeof(YV12_BUFFER_CONFIG));
+ memset(&skinmap, 0, sizeof(YV12_BUFFER_CONFIG));
if (vp9_alloc_frame_buffer(&skinmap, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment)) {
vp9_free_frame_buffer(&skinmap);
return;
}
- vpx_memset(skinmap.buffer_alloc, 128, skinmap.frame_size);
+ memset(skinmap.buffer_alloc, 128, skinmap.frame_size);
y = skinmap.y_buffer;
// Loop through 8x8 blocks and set skin map based on center pixel of block.
// Set y to white for skin block, otherwise set to source with gray scale.
diff --git a/vp9/encoder/vp9_svc_layercontext.c b/vp9/encoder/vp9_svc_layercontext.c
index bf9cad00a..e122397b3 100644
--- a/vp9/encoder/vp9_svc_layercontext.c
+++ b/vp9/encoder/vp9_svc_layercontext.c
@@ -46,8 +46,8 @@ void vp9_init_layer_context(VP9_COMP *const cpi) {
"Failed to allocate empty frame for multiple frame "
"contexts");
- vpx_memset(cpi->svc.empty_frame.img.buffer_alloc, 0x80,
- cpi->svc.empty_frame.img.buffer_alloc_sz);
+ memset(cpi->svc.empty_frame.img.buffer_alloc, 0x80,
+ cpi->svc.empty_frame.img.buffer_alloc_sz);
cpi->svc.empty_frame_width = cpi->common.width;
cpi->svc.empty_frame_height = cpi->common.height;
}
diff --git a/vp9/encoder/vp9_temporal_filter.c b/vp9/encoder/vp9_temporal_filter.c
index e62e4abe8..ed17c7c35 100644
--- a/vp9/encoder/vp9_temporal_filter.c
+++ b/vp9/encoder/vp9_temporal_filter.c
@@ -329,8 +329,8 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
int i, j, k;
int stride;
- vpx_memset(accumulator, 0, 16 * 16 * 3 * sizeof(accumulator[0]));
- vpx_memset(count, 0, 16 * 16 * 3 * sizeof(count[0]));
+ memset(accumulator, 0, 16 * 16 * 3 * sizeof(accumulator[0]));
+ memset(count, 0, 16 * 16 * 3 * sizeof(count[0]));
cpi->td.mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VP9_INTERP_EXTEND));
cpi->td.mb.mv_col_max = ((mb_cols - 1 - mb_col) * 16)
diff --git a/vp9/encoder/x86/vp9_highbd_quantize_intrin_sse2.c b/vp9/encoder/x86/vp9_highbd_quantize_intrin_sse2.c
index 0bce9c321..ffa43b65a 100644
--- a/vp9/encoder/x86/vp9_highbd_quantize_intrin_sse2.c
+++ b/vp9/encoder/x86/vp9_highbd_quantize_intrin_sse2.c
@@ -44,8 +44,8 @@ void vp9_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr,
(void)scan;
- vpx_memset(qcoeff_ptr, 0, count * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, count * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, count * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, count * sizeof(*dqcoeff_ptr));
if (!skip_block) {
// Pre-scan pass
@@ -132,8 +132,8 @@ void vp9_highbd_quantize_b_32x32_sse2(const tran_low_t *coeff_ptr,
nzbins[0] = _mm_sub_epi32(nzbins[0], zbins[0]);
nzbins[1] = _mm_sub_epi32(nzbins[1], zbins[1]);
- vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
- vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+ memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+ memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
if (!skip_block) {
// Pre-scan pass