author     Siddhesh Poyarekar <siddhesh@sourceware.org>  2018-03-06 19:22:39 +0530
committer  Siddhesh Poyarekar <siddhesh@sourceware.org>  2018-03-06 19:22:40 +0530
commit     30a81dae5b752f8aa5f96e7f7c341ec57cba3585
tree       f991950610e09e999bcc17047c3dbec2c278bfc4 /sysdeps/aarch64
parent     adc95fb06a22264349de21507de1a7e652a4052d
aarch64: Optimized memcmp for medium to large sizes
This improved memcmp provides a fast path for compares up to 16 bytes
and then compares 16 bytes at a time, thus optimizing loads from both
sources. The glibc memcmp microbenchmark retains performance (within an
error of ~1ns) for smaller compare sizes and shows up to a 31% reduction
in execution time for compares up to 4K on the APM Mustang. On Qualcomm
Falkor the improvement rises to almost 48%, i.e. almost a 2x improvement
for sizes of 2K and above.
* sysdeps/aarch64/memcmp.S: Widen comparison to 16 bytes at a
time.
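
The overall strategy is easier to see in C. The following is a minimal
sketch of the same control flow; wide_memcmp and block_diff are
illustrative names, not glibc interfaces, and the src1-alignment handling
and the branchless result computation of the real assembly are omitted.

#include <stddef.h>
#include <string.h>

/* Bytewise compare returning -1/0/1; the assembly computes the same
   result branchlessly in L(return).  */
static int
block_diff (const unsigned char *p1, const unsigned char *p2, size_t n)
{
  for (size_t i = 0; i < n; i++)
    if (p1[i] != p2[i])
      return p1[i] < p2[i] ? -1 : 1;
  return 0;
}

int
wide_memcmp (const void *src1, const void *src2, size_t n)
{
  const unsigned char *p1 = src1;
  const unsigned char *p2 = src2;

  /* Short inputs take a separate path (L(less8) and the <= 16 case).  */
  if (n < 16)
    return block_diff (p1, p2, n);

  /* Main loop, mirroring L(loop16): 16 bytes per iteration, i.e. two
     8-byte loads from each source (one ldp each in the assembly).  */
  while (n > 16)
    {
      if (memcmp (p1, p2, 16) != 0)   /* stands in for the ldp/ccmp chain */
        return block_diff (p1, p2, 16);
      p1 += 16;
      p2 += 16;
      n -= 16;
    }

  /* L(last_bytes): compare the final 16 bytes with one, possibly
     overlapping, unaligned access.  Any re-compared bytes are already
     known to be equal, so the overlap cannot change the result.  */
  return block_diff (p1 + n - 16, p2 + n - 16, 16);
}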
Diffstat (limited to 'sysdeps/aarch64')
-rw-r--r--  sysdeps/aarch64/memcmp.S  76
1 file changed, 55 insertions, 21 deletions
diff --git a/sysdeps/aarch64/memcmp.S b/sysdeps/aarch64/memcmp.S
index ecd12061b2..8325d047e7 100644
--- a/sysdeps/aarch64/memcmp.S
+++ b/sysdeps/aarch64/memcmp.S
@@ -34,9 +34,12 @@ /* Internal variables.  */
 #define data1		x3
 #define data1w		w3
-#define data2		x4
-#define data2w		w4
-#define tmp1		x5
+#define data1h		x4
+#define data2		x5
+#define data2w		w5
+#define data2h		x6
+#define tmp1		x7
+#define tmp2		x8
 
 ENTRY_ALIGN (memcmp, 6)
 
 	DELOUSE (0)
@@ -46,39 +49,70 @@ ENTRY_ALIGN (memcmp, 6)
 	subs	limit, limit, 8
 	b.lo	L(less8)
 
-	/* Limit >= 8, so check first 8 bytes using unaligned loads.  */
 	ldr	data1, [src1], 8
 	ldr	data2, [src2], 8
-	and	tmp1, src1, 7
-	add	limit, limit, tmp1
+	cmp	data1, data2
+	b.ne	L(return)
+
+	subs	limit, limit, 8
+	b.gt	L(more16)
+
+	ldr	data1, [src1, limit]
+	ldr	data2, [src2, limit]
+	b	L(return)
+
+L(more16):
+	ldr	data1, [src1], 8
+	ldr	data2, [src2], 8
 	cmp	data1, data2
 	bne	L(return)
 
+	/* Jump directly to comparing the last 16 bytes for 32 byte (or less)
+	   strings.  */
+	subs	limit, limit, 16
+	b.ls	L(last_bytes)
+
+	/* We overlap loads between 0-32 bytes at either side of SRC1 when we
+	   try to align, so limit it only to strings larger than 128 bytes.  */
+	cmp	limit, 96
+	b.ls	L(loop16)
+
 	/* Align src1 and adjust src2 with bytes not yet done.  */
+	and	tmp1, src1, 15
+	add	limit, limit, tmp1
 	sub	src1, src1, tmp1
 	sub	src2, src2, tmp1
 
-	subs	limit, limit, 8
-	b.ls	L(last_bytes)
-
-	/* Loop performing 8 bytes per iteration using aligned src1.
-	   Limit is pre-decremented by 8 and must be larger than zero.
-	   Exit if <= 8 bytes left to do or if the data is not equal.  */
+	/* Loop performing 16 bytes per iteration using aligned src1.
+	   Limit is pre-decremented by 16 and must be larger than zero.
+	   Exit if <= 16 bytes left to do or if the data is not equal.  */
 	.p2align 4
-L(loop8):
-	ldr	data1, [src1], 8
-	ldr	data2, [src2], 8
-	subs	limit, limit, 8
-	ccmp	data1, data2, 0, hi  /* NZCV = 0b0000.  */
-	b.eq	L(loop8)
+L(loop16):
+	ldp	data1, data1h, [src1], 16
+	ldp	data2, data2h, [src2], 16
+	subs	limit, limit, 16
+	ccmp	data1, data2, 0, hi
+	ccmp	data1h, data2h, 0, eq
+	b.eq	L(loop16)
 
 	cmp	data1, data2
 	bne	L(return)
+	mov	data1, data1h
+	mov	data2, data2h
+	cmp	data1, data2
+	bne	L(return)
 
-	/* Compare last 1-8 bytes using unaligned access.  */
+	/* Compare last 1-16 bytes using unaligned access.  */
 L(last_bytes):
-	ldr	data1, [src1, limit]
-	ldr	data2, [src2, limit]
+	add	src1, src1, limit
+	add	src2, src2, limit
+	ldp	data1, data1h, [src1]
+	ldp	data2, data2h, [src2]
+	cmp	data1, data2
+	bne	L(return)
+	mov	data1, data1h
+	mov	data2, data2h
+	cmp	data1, data2
 
 	/* Compare data bytes and set return value to 0, -1 or 1.  */
 L(return):
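
The most compact idiom in the new loop is the ccmp chain, which folds
three tests (more bytes left, low halves equal, high halves equal) into a
single conditional branch. Below is a rough C model of the loop, assuming
limit is positive on entry; loop16_model is a hypothetical helper whose
variables are named after the registers above.

#include <stdint.h>
#include <string.h>

/* Hypothetical model of L(loop16).  Returns nonzero iff the two sources
   stayed equal over the whole range.  */
static int
loop16_model (const unsigned char *src1, const unsigned char *src2,
              int64_t limit)
{
  uint64_t data1, data1h, data2, data2h;
  do
    {
      memcpy (&data1,  src1,     8);   /* ldp data1, data1h, [src1], 16 */
      memcpy (&data1h, src1 + 8, 8);
      memcpy (&data2,  src2,     8);   /* ldp data2, data2h, [src2], 16 */
      memcpy (&data2h, src2 + 8, 8);
      src1 += 16;
      src2 += 16;
      limit -= 16;                     /* subs limit, limit, 16 */
      /* ccmp data1, data2, 0, hi; ccmp data1h, data2h, 0, eq; b.eq:
         stay in the loop only while bytes remain AND both halves match.
         A failed earlier test forces NZCV = 0, so the final eq fails.  */
    }
  while (limit > 0 && data1 == data2 && data1h == data2h);
  return data1 == data2 && data1h == data2h;
}

The payoff of the chain is one branch per 16 bytes instead of three, which
is where most of the reported speedup on large compares comes from.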