author     H.J. Lu <hjl.tools@gmail.com>    2016-03-28 04:39:48 -0700
committer  H.J. Lu <hjl.tools@gmail.com>    2016-04-02 11:16:44 -0700
commit     5239cb481eea27650173b9b9af22439afdcbf358
tree       43d00931862895262fe00695ba41f9dc4b098533
parent     a65b3d13e1754d568782e64a762c2c7fab45a55d
[x86] Add a feature bit: Fast_Unaligned_Copy
On AMD processors, memcpy optimized with unaligned SSE load is
slower than memcpy optimized with aligned SSSE3, while other string
functions are faster with unaligned SSE load.  A feature bit,
Fast_Unaligned_Copy, is added to select memcpy optimized with
unaligned SSE load.
[BZ #19583]
* sysdeps/x86/cpu-features.c (init_cpu_features): Set
Fast_Unaligned_Copy with Fast_Unaligned_Load for Intel
processors. Set Fast_Copy_Backward for AMD Excavator
processors.
* sysdeps/x86/cpu-features.h (bit_arch_Fast_Unaligned_Copy):
New.
(index_arch_Fast_Unaligned_Copy): Likewise.
* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Check
Fast_Unaligned_Copy instead of Fast_Unaligned_Load.
(cherry picked from commit e41b395523040fcb58c7d378475720c2836d280c)
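
For illustration, here is a minimal C sketch of the dispatch this bit drives.  It is not glibc's actual IFUNC resolver: the flag variable and the placeholder variant bodies are hypothetical stand-ins for HAS_ARCH_FEATURE (Fast_Unaligned_Copy) and the real assembly implementations.

#include <stddef.h>
#include <string.h>

/* Hypothetical flag standing in for bit_arch_Fast_Unaligned_Copy;
   glibc tests it with HAS_ARCH_FEATURE (Fast_Unaligned_Copy).  */
static int fast_unaligned_copy;

/* Placeholder variants; the real __memcpy_sse2_unaligned and
   __memcpy_ssse3 are written in assembly.  */
static void *
memcpy_unaligned_sse (void *dst, const void *src, size_t n)
{
  return memcpy (dst, src, n);
}

static void *
memcpy_aligned_ssse3 (void *dst, const void *src, size_t n)
{
  return memcpy (dst, src, n);
}

typedef void *(*memcpy_fn) (void *, const void *, size_t);

/* IFUNC-style resolver: take the unaligned-SSE copy only when the
   CPU sets Fast_Unaligned_Copy; otherwise fall back to the aligned
   SSSE3 variant, which the message above says is faster on AMD.  */
static memcpy_fn
resolve_memcpy (void)
{
  return fast_unaligned_copy ? memcpy_unaligned_sse : memcpy_aligned_ssse3;
}

Splitting the copy decision into its own bit lets memcpy take the aligned-SSSE3 path on AMD processors that still set Fast_Unaligned_Load for the other string functions.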
 sysdeps/x86/cpu-features.c        | 8 ++++++++
 sysdeps/x86/cpu-features.h        | 3 +++
 sysdeps/x86_64/multiarch/memcpy.S | 2 +-
 3 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
index 4711fd53d0..258fcd3115 100644
--- a/sysdeps/x86/cpu-features.c
+++ b/sysdeps/x86/cpu-features.c
@@ -145,8 +145,12 @@ init_cpu_features (struct cpu_features *cpu_features)
 #if index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
 # error index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
 #endif
+#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
+# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
+#endif
 	      cpu_features->feature[index_arch_Fast_Unaligned_Load]
 		|= (bit_arch_Fast_Unaligned_Load
+		    | bit_arch_Fast_Unaligned_Copy
 		    | bit_arch_Prefer_PMINUB_for_stringop
 		    | bit_arch_Slow_SSE4_2);
 	      break;
@@ -175,10 +179,14 @@ init_cpu_features (struct cpu_features *cpu_features)
 #if index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
 # error index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
 #endif
+#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
+# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
+#endif
 	  cpu_features->feature[index_arch_Fast_Rep_String]
 	    |= (bit_arch_Fast_Rep_String
 		| bit_arch_Fast_Copy_Backward
 		| bit_arch_Fast_Unaligned_Load
+		| bit_arch_Fast_Unaligned_Copy
 		| bit_arch_Prefer_PMINUB_for_stringop);
 	  break;
 	}
diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
index 6518eee5ef..48a99041a9 100644
--- a/sysdeps/x86/cpu-features.h
+++ b/sysdeps/x86/cpu-features.h
@@ -33,6 +33,7 @@
 #define bit_arch_AVX512DQ_Usable	(1 << 13)
 #define bit_arch_Prefer_MAP_32BIT_EXEC	(1 << 14)
 #define bit_arch_Prefer_No_VZEROUPPER	(1 << 15)
+#define bit_arch_Fast_Unaligned_Copy	(1 << 16)
 
 /* CPUID Feature flags.  */
 
@@ -97,6 +98,7 @@
 # define index_arch_AVX512DQ_Usable	FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1*FEATURE_SIZE
+# define index_arch_Fast_Unaligned_Copy FEATURE_INDEX_1*FEATURE_SIZE
 
 # if defined (_LIBC) && !IS_IN (nonlib)
 
@@ -259,6 +261,7 @@ extern const struct cpu_features *__get_cpu_features (void)
 # define index_arch_AVX512DQ_Usable	FEATURE_INDEX_1
 # define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1
 # define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1
+# define index_arch_Fast_Unaligned_Copy	FEATURE_INDEX_1
 
 #endif	/* !__ASSEMBLER__ */
diff --git a/sysdeps/x86_64/multiarch/memcpy.S b/sysdeps/x86_64/multiarch/memcpy.S
index 1b67326ea8..d495a28fc4 100644
--- a/sysdeps/x86_64/multiarch/memcpy.S
+++ b/sysdeps/x86_64/multiarch/memcpy.S
@@ -42,7 +42,7 @@ ENTRY(__new_memcpy)
 	HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
 	jnz	2f
 	lea	__memcpy_sse2_unaligned(%rip), %RAX_LP
-	HAS_ARCH_FEATURE (Fast_Unaligned_Load)
+	HAS_ARCH_FEATURE (Fast_Unaligned_Copy)
 	jnz	2f
 	lea	__memcpy_sse2(%rip), %RAX_LP
 	HAS_CPU_FEATURE (SSSE3)
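
A note on the compile-time guards in the cpu-features.c hunks above: all bit_arch_* flags OR'd into one element of cpu_features->feature[] in a single statement must live at the same array index, and each #if index_arch_X != index_arch_Y / # error pair verifies that at compile time.  A self-contained sketch of the invariant, with the Fast_Unaligned_Load bit position assumed for illustration:

#include <stdio.h>

/* Simplified mirror of glibc's feature-word layout.  */
enum { FEATURE_INDEX_1 = 0, FEATURE_INDEX_MAX };

#define bit_arch_Fast_Unaligned_Load (1 << 4)   /* assumed position */
#define bit_arch_Fast_Unaligned_Copy (1 << 16)  /* as in the patch */

int
main (void)
{
  unsigned int feature[FEATURE_INDEX_MAX] = { 0 };

  /* Both flags live in the word at FEATURE_INDEX_1, so a single OR
     sets them together; the #error guards in the diff make sure the
     indices never drift apart as new bits are added.  */
  feature[FEATURE_INDEX_1] |= bit_arch_Fast_Unaligned_Load
                              | bit_arch_Fast_Unaligned_Copy;

  printf ("FEATURE_INDEX_1 word: %#x\n", feature[FEATURE_INDEX_1]);
  return 0;
}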