author     Ling Ma <ling.ml@alibaba-inc.com>  2014-07-14 00:02:52 -0400
committer  H.J. Lu <hjl.tools@gmail.com>      2014-07-30 08:02:35 -0700
commit     05f3633da4f9df870d04dd77336e793746e57ed4 (patch)
tree       bd980df4702ffd9ebfa9cb4dac8823a659f0c430 /sysdeps/x86_64/multiarch/memcpy.S
parent     a53fbd8e6cd2f69bdfa3431d616a5f332aea6664 (diff)
Improve 64-bit memcpy performance for the Haswell CPU with AVX instructions
In this patch we take advantage of Haswell's memory bandwidth, reduce branch mispredictions by avoiding branch instructions, and force the destination to be aligned using AVX instructions. The CPU2006 403.gcc benchmark indicates this patch improves performance by 2% to 10%.
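
As a rough illustration of the alignment trick the message describes, here is a minimal C sketch using AVX intrinsics. It is not the actual __memcpy_avx_unaligned assembly; the function name avx_copy_sketch and the n >= 32 precondition are assumptions made for the example, and real code dispatches smaller sizes to a separate path.

#include <immintrin.h>
#include <stdint.h>
#include <stddef.h>

/* Sketch only: assumes n >= 32 and an AVX-capable CPU (compile with -mavx).  */
static void *
avx_copy_sketch (void *dst, const void *src, size_t n)
{
  char *d = dst;
  const char *s = src;
  char *end = d + n;

  /* Load the first and last 32 bytes up front; storing them with
     unaligned stores removes the branchy head/tail byte loops.  */
  __m256i head = _mm256_loadu_si256 ((const __m256i *) s);
  __m256i tail = _mm256_loadu_si256 ((const __m256i *) (s + n - 32));

  /* Force the destination of the main loop onto a 32-byte boundary;
     the unaligned head store below covers the skipped bytes.  */
  size_t skew = 32 - ((uintptr_t) d & 31);
  char *da = d + skew;
  const char *sa = s + skew;

  _mm256_storeu_si256 ((__m256i *) d, head);

  /* Main loop: 32-byte loads and aligned 32-byte stores.  */
  while (da + 32 <= end)
    {
      __m256i v = _mm256_loadu_si256 ((const __m256i *) sa);
      _mm256_store_si256 ((__m256i *) da, v);
      da += 32;
      sa += 32;
    }

  /* Any remaining (< 32) trailing bytes are covered by one overlapping
     unaligned store of the last 32 bytes -- again, no branches.  */
  _mm256_storeu_si256 ((__m256i *) (end - 32), tail);
  return dst;
}
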
Diffstat (limited to 'sysdeps/x86_64/multiarch/memcpy.S')
-rw-r--r--  sysdeps/x86_64/multiarch/memcpy.S | 4 ++++
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/sysdeps/x86_64/multiarch/memcpy.S b/sysdeps/x86_64/multiarch/memcpy.S
index 40ae926386..e666695407 100644
--- a/sysdeps/x86_64/multiarch/memcpy.S
+++ b/sysdeps/x86_64/multiarch/memcpy.S
@@ -32,6 +32,10 @@ ENTRY(__new_memcpy)
 	cmpl	$0, KIND_OFFSET+__cpu_features(%rip)
 	jne	1f
 	call	__init_cpu_features
+1:	leaq	__memcpy_avx_unaligned(%rip), %rax
+	testl	$bit_AVX_Usable, __cpu_features+FEATURE_OFFSET+index_AVX_Usable(%rip)
+	jz	1f
+	ret
 1:	leaq	__memcpy_sse2(%rip), %rax
 	testl	$bit_Slow_BSF, __cpu_features+FEATURE_OFFSET+index_Slow_BSF(%rip)
 	jnz	2f
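
For readers less fluent in the selector assembly, the added hunk can be read roughly as the C sketch below. The names memcpy_fn, select_memcpy and the two extern declarations are hypothetical stand-ins for the glibc symbols, and only the path visible in this hunk is shown; the Slow_BSF handling continues past the end of the diff.

#include <stddef.h>

typedef void *(*memcpy_fn) (void *, const void *, size_t);

/* Hypothetical stand-ins for __memcpy_avx_unaligned and __memcpy_sse2.  */
extern void *memcpy_avx_unaligned (void *, const void *, size_t);
extern void *memcpy_sse2 (void *, const void *, size_t);

/* __init_cpu_features has already run by this point in the assembly.  */
static memcpy_fn
select_memcpy (int avx_usable)   /* the bit_AVX_Usable test in the hunk */
{
  if (avx_usable)
    return memcpy_avx_unaligned;   /* new fast path added by this patch */
  return memcpy_sse2;              /* existing fallback at label 1: above */
}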