Diffstat (limited to 'vpx_dsp/arm/mem_neon.h')
-rw-r--r--  vpx_dsp/arm/mem_neon.h  14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/vpx_dsp/arm/mem_neon.h b/vpx_dsp/arm/mem_neon.h
index c89f92d1a..50aaa94fe 100644
--- a/vpx_dsp/arm/mem_neon.h
+++ b/vpx_dsp/arm/mem_neon.h
@@ -113,7 +113,8 @@ static INLINE void uint32_to_mem(uint8_t *buf, uint32_t a) {
}

// Load 2 sets of 4 bytes when alignment is not guaranteed.
-static INLINE uint8x8_t load_unaligned_u8(const uint8_t *buf, int stride) {
+static INLINE uint8x8_t load_unaligned_u8(const uint8_t *buf,
+ ptrdiff_t stride) {
uint32_t a;
uint32x2_t a_u32 = vdup_n_u32(0);
if (stride == 4) return vld1_u8(buf);
@@ -126,7 +127,7 @@ static INLINE uint8x8_t load_unaligned_u8(const uint8_t *buf, int stride) {
}

// Store 2 sets of 4 bytes when alignment is not guaranteed.
-static INLINE void store_unaligned_u8(uint8_t *buf, int stride,
+static INLINE void store_unaligned_u8(uint8_t *buf, ptrdiff_t stride,
const uint8x8_t a) {
const uint32x2_t a_u32 = vreinterpret_u32_u8(a);
if (stride == 4) {
@@ -139,7 +140,8 @@ static INLINE void store_unaligned_u8(uint8_t *buf, int stride,
}

// Load 4 sets of 4 bytes when alignment is not guaranteed.
-static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf, int stride) {
+static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf,
+ ptrdiff_t stride) {
uint32_t a;
uint32x4_t a_u32 = vdupq_n_u32(0);
if (stride == 4) return vld1q_u8(buf);
@@ -159,7 +161,7 @@ static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf, int stride) {
}

// Store 4 sets of 4 bytes when alignment is not guaranteed.
-static INLINE void store_unaligned_u8q(uint8_t *buf, int stride,
+static INLINE void store_unaligned_u8q(uint8_t *buf, ptrdiff_t stride,
const uint8x16_t a) {
const uint32x4_t a_u32 = vreinterpretq_u32_u8(a);
if (stride == 4) {
@@ -176,7 +178,7 @@ static INLINE void store_unaligned_u8q(uint8_t *buf, int stride,
}

// Load 2 sets of 4 bytes when alignment is guaranteed.
-static INLINE uint8x8_t load_u8(const uint8_t *buf, int stride) {
+static INLINE uint8x8_t load_u8(const uint8_t *buf, ptrdiff_t stride) {
uint32x2_t a = vdup_n_u32(0);
assert(!((intptr_t)buf % sizeof(uint32_t)));
@@ -189,7 +191,7 @@ static INLINE uint8x8_t load_u8(const uint8_t *buf, int stride) {
}

// Store 2 sets of 4 bytes when alignment is guaranteed.
-static INLINE void store_u8(uint8_t *buf, int stride, const uint8x8_t a) {
+static INLINE void store_u8(uint8_t *buf, ptrdiff_t stride, const uint8x8_t a) {
uint32x2_t a_u32 = vreinterpret_u32_u8(a);
assert(!((intptr_t)buf % sizeof(uint32_t)));
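
For context, these helpers are normally called with strides that originate from pointer arithmetic, and in C the difference of two pointers has type ptrdiff_t; widening the parameter from int lets such callers pass that value through without an implicit narrowing conversion (on LP64 targets ptrdiff_t is 64 bits while int is 32). A minimal caller sketch follows, assuming a configured libvpx tree on the include path and an ARM target with NEON; the average_4x4 function and its buffers are illustrative, not part of libvpx:

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

#include "vpx_dsp/arm/mem_neon.h"

/* Hypothetical caller: average two 4x4 byte blocks whose rows are `stride`
 * bytes apart. Neither buffer needs 4-byte alignment, so the unaligned
 * load/store helpers above are used. */
static void average_4x4(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) {
  /* Each helper covers 2 rows of 4 bytes, so handle the block in halves. */
  const uint8x8_t s0 = load_unaligned_u8(src, stride);
  const uint8x8_t s1 = load_unaligned_u8(src + 2 * stride, stride);
  const uint8x8_t d0 = load_unaligned_u8(dst, stride);
  const uint8x8_t d1 = load_unaligned_u8(dst + 2 * stride, stride);

  /* vrhadd_u8 is the NEON rounding halving add: (a + b + 1) >> 1. */
  store_unaligned_u8(dst, stride, vrhadd_u8(s0, d0));
  store_unaligned_u8(dst + 2 * stride, stride, vrhadd_u8(s1, d1));
}

With the old int parameter, a stride computed as row1 - row0 would be narrowed at the call boundary; with ptrdiff_t it is passed through unchanged.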