diff options
Diffstat (limited to 'vp9/common')
-rw-r--r--  vp9/common/vp9_common.h     |  4
-rw-r--r--  vp9/common/vp9_convolve.c   |  4
-rw-r--r--  vp9/common/vp9_entropy.c    |  6
-rw-r--r--  vp9/common/vp9_loopfilter.c |  2
-rw-r--r--  vp9/common/vp9_reconintra.c | 38
5 files changed, 27 insertions, 27 deletions
diff --git a/vp9/common/vp9_common.h b/vp9/common/vp9_common.h
index 6801dd3a2..58b345bc1 100644
--- a/vp9/common/vp9_common.h
+++ b/vp9/common/vp9_common.h
@@ -36,13 +36,13 @@ extern "C" {
 // Only need this for fixed-size arrays, for structs just assign.
 #define vp9_copy(dest, src) { \
   assert(sizeof(dest) == sizeof(src)); \
-  vpx_memcpy(dest, src, sizeof(src)); \
+  memcpy(dest, src, sizeof(src)); \
 }

 // Use this for variably-sized arrays.
 #define vp9_copy_array(dest, src, n) { \
   assert(sizeof(*dest) == sizeof(*src)); \
-  vpx_memcpy(dest, src, n * sizeof(*src)); \
+  memcpy(dest, src, n * sizeof(*src)); \
 }

 #define vp9_zero(dest) vpx_memset(&(dest), 0, sizeof(dest))
diff --git a/vp9/common/vp9_convolve.c b/vp9/common/vp9_convolve.c
index 7b65651ba..5fb56ecb6 100644
--- a/vp9/common/vp9_convolve.c
+++ b/vp9/common/vp9_convolve.c
@@ -256,7 +256,7 @@ void vp9_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride,
   (void)filter_y;
   (void)filter_y_stride;
   for (r = h; r > 0; --r) {
-    vpx_memcpy(dst, src, w);
+    memcpy(dst, src, w);
     src += src_stride;
     dst += dst_stride;
   }
@@ -526,7 +526,7 @@ void vp9_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride,
   (void)bd;

   for (r = h; r > 0; --r) {
-    vpx_memcpy(dst, src, w * sizeof(uint16_t));
+    memcpy(dst, src, w * sizeof(uint16_t));
     src += src_stride;
     dst += dst_stride;
   }
diff --git a/vp9/common/vp9_entropy.c b/vp9/common/vp9_entropy.c
index 7cdfaec57..a2584e8da 100644
--- a/vp9/common/vp9_entropy.c
+++ b/vp9/common/vp9_entropy.c
@@ -749,13 +749,13 @@ static const vp9_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = {
 };

 static void extend_to_full_distribution(vp9_prob *probs, vp9_prob p) {
-  vpx_memcpy(probs, vp9_pareto8_full[p = 0 ? 0 : p - 1],
-             MODEL_NODES * sizeof(vp9_prob));
+  memcpy(probs, vp9_pareto8_full[p = 0 ? 0 : p - 1],
+         MODEL_NODES * sizeof(vp9_prob));
 }

 void vp9_model_to_full_probs(const vp9_prob *model, vp9_prob *full) {
   if (full != model)
-    vpx_memcpy(full, model, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
+    memcpy(full, model, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
   extend_to_full_distribution(&full[UNCONSTRAINED_NODES], model[PIVOT_NODE]);
 }
diff --git a/vp9/common/vp9_loopfilter.c b/vp9/common/vp9_loopfilter.c
index a226ff1d3..c4024abeb 100644
--- a/vp9/common/vp9_loopfilter.c
+++ b/vp9/common/vp9_loopfilter.c
@@ -1603,7 +1603,7 @@ void vp9_loop_filter_data_reset(
   lf_data->start = 0;
   lf_data->stop = 0;
   lf_data->y_only = 0;
-  vpx_memcpy(lf_data->planes, planes, sizeof(lf_data->planes));
+  memcpy(lf_data->planes, planes, sizeof(lf_data->planes));
 }

 int vp9_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
diff --git a/vp9/common/vp9_reconintra.c b/vp9/common/vp9_reconintra.c
index f832a3b1c..9be055bd1 100644
--- a/vp9/common/vp9_reconintra.c
+++ b/vp9/common/vp9_reconintra.c
@@ -245,7 +245,7 @@ static INLINE void highbd_v_predictor(uint16_t *dst, ptrdiff_t stride,
   (void) left;
   (void) bd;
   for (r = 0; r < bs; r++) {
-    vpx_memcpy(dst, above, bs * sizeof(uint16_t));
+    memcpy(dst, above, bs * sizeof(uint16_t));
     dst += stride;
   }
 }
@@ -488,7 +488,7 @@ static INLINE void v_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
   (void) left;

   for (r = 0; r < bs; r++) {
-    vpx_memcpy(dst, above, bs);
+    memcpy(dst, above, bs);
     dst += stride;
   }
 }
@@ -718,24 +718,24 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd,
       /* slower path if the block needs border extension */
       if (x0 + 2 * bs <= frame_width) {
         if (right_available && bs == 4) {
-          vpx_memcpy(above_row, above_ref, 2 * bs * sizeof(uint16_t));
+          memcpy(above_row, above_ref, 2 * bs * sizeof(uint16_t));
         } else {
-          vpx_memcpy(above_row, above_ref, bs * sizeof(uint16_t));
+          memcpy(above_row, above_ref, bs * sizeof(uint16_t));
           vpx_memset16(above_row + bs, above_row[bs - 1], bs);
         }
       } else if (x0 + bs <= frame_width) {
         const int r = frame_width - x0;
         if (right_available && bs == 4) {
-          vpx_memcpy(above_row, above_ref, r * sizeof(uint16_t));
+          memcpy(above_row, above_ref, r * sizeof(uint16_t));
           vpx_memset16(above_row + r, above_row[r - 1],
                        x0 + 2 * bs - frame_width);
         } else {
-          vpx_memcpy(above_row, above_ref, bs * sizeof(uint16_t));
+          memcpy(above_row, above_ref, bs * sizeof(uint16_t));
           vpx_memset16(above_row + bs, above_row[bs - 1], bs);
         }
       } else if (x0 <= frame_width) {
         const int r = frame_width - x0;
-        vpx_memcpy(above_row, above_ref, r * sizeof(uint16_t));
+        memcpy(above_row, above_ref, r * sizeof(uint16_t));
         vpx_memset16(above_row + r, above_row[r - 1],
                      x0 + 2 * bs - frame_width);
       }
@@ -746,9 +746,9 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd,
       if (bs == 4 && right_available && left_available) {
         const_above_row = above_ref;
       } else {
-        vpx_memcpy(above_row, above_ref, bs * sizeof(uint16_t));
+        memcpy(above_row, above_ref, bs * sizeof(uint16_t));
         if (bs == 4 && right_available)
-          vpx_memcpy(above_row + bs, above_ref + bs, bs * sizeof(uint16_t));
+          memcpy(above_row + bs, above_ref + bs, bs * sizeof(uint16_t));
         else
           vpx_memset16(above_row + bs, above_row[bs - 1], bs);
         // TODO(Peter): this value should probably change for high bitdepth
@@ -841,10 +841,10 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
     if (xd->mb_to_right_edge < 0) {
       /* slower path if the block needs border extension */
       if (x0 + bs <= frame_width) {
-        vpx_memcpy(above_row, above_ref, bs);
+        memcpy(above_row, above_ref, bs);
       } else if (x0 <= frame_width) {
         const int r = frame_width - x0;
-        vpx_memcpy(above_row, above_ref, r);
+        memcpy(above_row, above_ref, r);
         vpx_memset(above_row + r, above_row[r - 1],
                    x0 + bs - frame_width);
       }
@@ -853,7 +853,7 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
       if (bs == 4 && right_available && left_available) {
        const_above_row = above_ref;
       } else {
-        vpx_memcpy(above_row, above_ref, bs);
+        memcpy(above_row, above_ref, bs);
       }
     }
     above_row[-1] = left_available ? above_ref[-1] : 129;
@@ -871,24 +871,24 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
       /* slower path if the block needs border extension */
       if (x0 + 2 * bs <= frame_width) {
         if (right_available && bs == 4) {
-          vpx_memcpy(above_row, above_ref, 2 * bs);
+          memcpy(above_row, above_ref, 2 * bs);
         } else {
-          vpx_memcpy(above_row, above_ref, bs);
+          memcpy(above_row, above_ref, bs);
           vpx_memset(above_row + bs, above_row[bs - 1], bs);
         }
       } else if (x0 + bs <= frame_width) {
         const int r = frame_width - x0;
         if (right_available && bs == 4) {
-          vpx_memcpy(above_row, above_ref, r);
+          memcpy(above_row, above_ref, r);
           vpx_memset(above_row + r, above_row[r - 1],
                      x0 + 2 * bs - frame_width);
         } else {
-          vpx_memcpy(above_row, above_ref, bs);
+          memcpy(above_row, above_ref, bs);
           vpx_memset(above_row + bs, above_row[bs - 1], bs);
         }
       } else if (x0 <= frame_width) {
         const int r = frame_width - x0;
-        vpx_memcpy(above_row, above_ref, r);
+        memcpy(above_row, above_ref, r);
         vpx_memset(above_row + r, above_row[r - 1],
                    x0 + 2 * bs - frame_width);
       }
@@ -897,9 +897,9 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
       if (bs == 4 && right_available && left_available) {
         const_above_row = above_ref;
       } else {
-        vpx_memcpy(above_row, above_ref, bs);
+        memcpy(above_row, above_ref, bs);
         if (bs == 4 && right_available)
-          vpx_memcpy(above_row + bs, above_ref + bs, bs);
+          memcpy(above_row + bs, above_ref + bs, bs);
         else
           vpx_memset(above_row + bs, above_row[bs - 1], bs);
       }