}
// Load 2 sets of 4 bytes when alignment is not guaranteed.
-static INLINE uint8x8_t load_unaligned_u8(const uint8_t *buf, int stride) {
+static INLINE uint8x8_t load_unaligned_u8(const uint8_t *buf,
+                                          ptrdiff_t stride) {
uint32_t a;
uint32x2_t a_u32 = vdup_n_u32(0);
if (stride == 4) return vld1_u8(buf);
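
The hunk's trailing context stops at the contiguous fast path. For reference, here is a self-contained sketch of how a gather like this typically completes, with `memcpy` doing the unaligned-safe 4-byte row reads; `load_unaligned_u8_sketch` is a hypothetical stand-in, not the patched function.

```c
#include <arm_neon.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical sketch: one 4-byte row per 32-bit lane, read via
 * memcpy so misaligned addresses stay well-defined. */
static inline uint8x8_t load_unaligned_u8_sketch(const uint8_t *buf,
                                                 ptrdiff_t stride) {
  uint32_t a;
  uint32x2_t a_u32 = vdup_n_u32(0);
  if (stride == 4) return vld1_u8(buf);  /* rows are contiguous */
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vset_lane_u32(a, a_u32, 0);
  memcpy(&a, buf, 4);
  a_u32 = vset_lane_u32(a, a_u32, 1);
  return vreinterpret_u8_u32(a_u32);
}
```

A 4-byte `memcpy` compiles down to a single 32-bit load here, so this pattern avoids the undefined behaviour of dereferencing a misaligned `uint32_t *` at no runtime cost.
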
}
// Store 2 sets of 4 bytes when alignment is not guaranteed.
-static INLINE void store_unaligned_u8(uint8_t *buf, int stride,
+static INLINE void store_unaligned_u8(uint8_t *buf, ptrdiff_t stride,
const uint8x8_t a) {
const uint32x2_t a_u32 = vreinterpret_u32_u8(a);
if (stride == 4) {
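
Again the context cuts off inside the `stride == 4` branch. The mirror-image store, sketched under the same assumptions (hypothetical name, not the patched function):

```c
#include <arm_neon.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical sketch: fast path stores all 8 bytes at once; the
 * general path extracts each 32-bit lane and memcpys it out. */
static inline void store_unaligned_u8_sketch(uint8_t *buf, ptrdiff_t stride,
                                             const uint8x8_t a) {
  const uint32x2_t a_u32 = vreinterpret_u32_u8(a);
  if (stride == 4) {
    vst1_u8(buf, a);
    return;
  }
  uint32_t lane = vget_lane_u32(a_u32, 0);
  memcpy(buf, &lane, 4);
  buf += stride;
  lane = vget_lane_u32(a_u32, 1);
  memcpy(buf, &lane, 4);
}
```
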
}
// Load 4 sets of 4 bytes when alignment is not guaranteed.
-static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf, int stride) {
+static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf,
+                                            ptrdiff_t stride) {
uint32_t a;
uint32x4_t a_u32 = vdupq_n_u32(0);
if (stride == 4) return vld1q_u8(buf);
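
Same pattern widened to a q register, four rows instead of two. The lane index of `vsetq_lane_u32` must be a compile-time constant, hence the unrolled form. A hypothetical sketch:

```c
#include <arm_neon.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical sketch: four unaligned-safe 4-byte reads packed
 * into the four lanes of a 128-bit register. */
static inline uint8x16_t load_unaligned_u8q_sketch(const uint8_t *buf,
                                                   ptrdiff_t stride) {
  uint32_t a;
  uint32x4_t a_u32 = vdupq_n_u32(0);
  if (stride == 4) return vld1q_u8(buf);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 0);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 1);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 2);
  memcpy(&a, buf, 4);
  a_u32 = vsetq_lane_u32(a, a_u32, 3);
  return vreinterpretq_u8_u32(a_u32);
}
```
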
}
// Store 4 sets of 4 bytes when alignment is not guaranteed.
-static INLINE void store_unaligned_u8q(uint8_t *buf, int stride,
+static INLINE void store_unaligned_u8q(uint8_t *buf, ptrdiff_t stride,
const uint8x16_t a) {
const uint32x4_t a_u32 = vreinterpretq_u32_u8(a);
if (stride == 4) {
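
The widened store follows the same shape. On the signature change itself: `ptrdiff_t` is the natural type for a value added to a pointer, and on 64-bit targets it can save the sign-extension an `int` stride needs before each `buf += stride`; that is a plausible motivation, though the hunks shown don't state one. A hypothetical sketch:

```c
#include <arm_neon.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical sketch: one 32-bit lane extracted and memcpy'd per row. */
static inline void store_unaligned_u8q_sketch(uint8_t *buf, ptrdiff_t stride,
                                              const uint8x16_t a) {
  const uint32x4_t a_u32 = vreinterpretq_u32_u8(a);
  if (stride == 4) {
    vst1q_u8(buf, a);
    return;
  }
  uint32_t lane = vgetq_lane_u32(a_u32, 0);
  memcpy(buf, &lane, 4);
  buf += stride;
  lane = vgetq_lane_u32(a_u32, 1);
  memcpy(buf, &lane, 4);
  buf += stride;
  lane = vgetq_lane_u32(a_u32, 2);
  memcpy(buf, &lane, 4);
  buf += stride;
  lane = vgetq_lane_u32(a_u32, 3);
  memcpy(buf, &lane, 4);
}
```
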
}
// Load 2 sets of 4 bytes when alignment is guaranteed.
-static INLINE uint8x8_t load_u8(const uint8_t *buf, int stride) {
+static INLINE uint8x8_t load_u8(const uint8_t *buf, ptrdiff_t stride) {
uint32x2_t a = vdup_n_u32(0);
assert(!((intptr_t)buf % sizeof(uint32_t)));
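
For the aligned variants the hunk again stops early. With the asserted 4-byte alignment, the lane loads can go through `uint32_t *` directly instead of `memcpy`; a hypothetical sketch of the completion:

```c
#include <arm_neon.h>
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical sketch: alignment is asserted, so each row can be
 * loaded as one 32-bit lane straight from memory. */
static inline uint8x8_t load_u8_sketch(const uint8_t *buf, ptrdiff_t stride) {
  uint32x2_t a = vdup_n_u32(0);
  assert(!((intptr_t)buf % sizeof(uint32_t)));
  a = vld1_lane_u32((const uint32_t *)buf, a, 0);
  buf += stride;
  assert(!((intptr_t)buf % sizeof(uint32_t)));
  a = vld1_lane_u32((const uint32_t *)buf, a, 1);
  return vreinterpret_u8_u32(a);
}
```
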
}
// Store 2 sets of 4 bytes when alignment is guaranteed.
-static INLINE void store_u8(uint8_t *buf, int stride, const uint8x8_t a) {
+static INLINE void store_u8(uint8_t *buf, ptrdiff_t stride, const uint8x8_t a) {
uint32x2_t a_u32 = vreinterpret_u32_u8(a);
assert(!((intptr_t)buf % sizeof(uint32_t)));
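
And the matching store under the same assumptions, one 32-bit lane per row with the alignment assertion repeated before each access (hypothetical name):

```c
#include <arm_neon.h>
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical sketch: vst1_lane_u32 writes one 4-byte row at a time. */
static inline void store_u8_sketch(uint8_t *buf, ptrdiff_t stride,
                                   const uint8x8_t a) {
  uint32x2_t a_u32 = vreinterpret_u32_u8(a);
  assert(!((intptr_t)buf % sizeof(uint32_t)));
  vst1_lane_u32((uint32_t *)buf, a_u32, 0);
  buf += stride;
  assert(!((intptr_t)buf % sizeof(uint32_t)));
  vst1_lane_u32((uint32_t *)buf, a_u32, 1);
}
```
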