about summary refs log tree commit diff
diff options
context:
space:
mode:
authorSunil K Pandey <skpgkp2@gmail.com>2023-07-26 08:34:05 -0700
committerSunil K Pandey <skpgkp2@gmail.com>2024-02-08 10:07:26 -0800
commitd04b63770f7f8db79e22a80135fc89dfabaab174 (patch)
tree0212631c6f9fad7d3819d667e14678264f04b58e
parentbb7c57219d18f345acac3587c2b14ed3b9c1e261 (diff)
downloadglibc-release/2.29/master.tar
glibc-release/2.29/master.tar.gz
glibc-release/2.29/master.tar.bz2
glibc-release/2.29/master.zip
x86_64: Optimize ffsll function code size.release/2.29/master
The ffsll function randomly regresses by ~20%, depending on how the code gets aligned in memory. The ffsll function code size is 17 bytes. Since the default function alignment is 16 bytes, it can be loaded at 16-, 32-, 48- or 64-byte aligned memory. When the ffsll function is loaded at 16-, 32- or 64-byte aligned memory, the entire code fits in a single 64-byte cache line. When the ffsll function is loaded at 48-byte aligned memory, it splits across two cache lines, hence the random regression. Reducing the ffsll function size from 17 bytes to 12 bytes ensures that it will always fit in a single 64-byte cache line. This patch fixes the ffsll function's random performance regression. Reviewed-by: Carlos O'Donell <carlos@redhat.com> (cherry picked from commit 9d94997b5f9445afd4f2bccc5fa60ff7c4361ec1)
-rw-r--r--sysdeps/x86_64/ffsll.c10
1 file changed, 5 insertions, 5 deletions
diff --git a/sysdeps/x86_64/ffsll.c b/sysdeps/x86_64/ffsll.c
index 1caf6ac155..ef686da5ca 100644
--- a/sysdeps/x86_64/ffsll.c
+++ b/sysdeps/x86_64/ffsll.c
@@ -27,13 +27,13 @@ int
ffsll (long long int x)
{
long long int cnt;
- long long int tmp;
- asm ("bsfq %2,%0\n" /* Count low bits in X and store in %1. */
- "cmoveq %1,%0\n" /* If number was zero, use -1 as result. */
- : "=&r" (cnt), "=r" (tmp) : "rm" (x), "1" (-1));
+ asm ("mov $-1,%k0\n" /* Initialize cnt to -1. */
+ "bsf %1,%0\n" /* Count low bits in x and store in cnt. */
+ "inc %k0\n" /* Increment cnt by 1. */
+ : "=&r" (cnt) : "r" (x));
- return cnt + 1;
+ return cnt;
}
#ifndef __ILP32__