author     H.J. Lu <hjl.tools@gmail.com>    2016-06-30 07:57:07 -0700
committer  H.J. Lu <hjl.tools@gmail.com>    2016-06-30 07:58:11 -0700
commit     13efa86ece61bf84daca50cab30db1b0902fe2db (patch)
tree       e6ed4e21bb720ba21d069d61064a057be6f999db
parent     73fb56a4d51fd4437e4cde6dd3c8077a610f88a8 (diff)
download   glibc-13efa86ece61bf84daca50cab30db1b0902fe2db.tar.gz
Check Prefer_ERMS in memmove/memcpy/mempcpy/memset
Although the Enhanced REP MOVSB/STOSB (ERMS) implementations of memmove, memcpy, mempcpy and memset aren't selected on current processors, this patch adds a Prefer_ERMS check to memmove, memcpy, mempcpy and memset so that those implementations can be used in the future.

	* sysdeps/x86/cpu-features.h (bit_arch_Prefer_ERMS): New.
	(index_arch_Prefer_ERMS): Likewise.
	* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Return
	__memcpy_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
	(__memmove_erms): Enabled for libc.a.
	* sysdeps/x86_64/multiarch/memmove.S (__libc_memmove): Return
	__memmove_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/mempcpy.S (__mempcpy): Return
	__mempcpy_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/memset.S (memset): Return
	__memset_erms for Prefer_ERMS.
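For readers unfamiliar with the ERMS idiom, the sketch below shows in plain C with inline assembly what an ERMS-style memcpy boils down to: a single REP MOVSB with the destination in RDI, the source in RSI and the byte count in RCX. This is an illustrative approximation only, not glibc's __memcpy_erms.

    #include <stddef.h>

    /* Illustrative sketch only: REP MOVSB copies RCX bytes from (RSI)
       to (RDI), incrementing both as it goes.  */
    static void *
    erms_memcpy_sketch (void *dst, const void *src, size_t n)
    {
      void *ret = dst;
      __asm__ volatile ("rep movsb"
                        : "+D" (dst), "+S" (src), "+c" (n)
                        :
                        : "memory");
      return ret;
    }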
-rw-r--r--  ChangeLog                                              | 15
-rw-r--r--  sysdeps/x86/cpu-features.h                             |  3
-rw-r--r--  sysdeps/x86_64/multiarch/memcpy.S                      |  3
-rw-r--r--  sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S  |  6
-rw-r--r--  sysdeps/x86_64/multiarch/memmove.S                     |  3
-rw-r--r--  sysdeps/x86_64/multiarch/mempcpy.S                     |  3
-rw-r--r--  sysdeps/x86_64/multiarch/memset.S                      |  3
7 files changed, 35 insertions(+), 1 deletion(-)
diff --git a/ChangeLog b/ChangeLog
index 9131b1f889..082422866d 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,18 @@
+2016-06-30  H.J. Lu  <hongjiu.lu@intel.com>
+
+ * sysdeps/x86/cpu-features.h (bit_arch_Prefer_ERMS): New.
+ (index_arch_Prefer_ERMS): Likewise.
+ * sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Return
+ __memcpy_erms for Prefer_ERMS.
+ * sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+ (__memmove_erms): Enabled for libc.a.
+ * sysdeps/x86_64/multiarch/memmove.S (__libc_memmove): Return
+ __memmove_erms for Prefer_ERMS.
+ * sysdeps/x86_64/multiarch/mempcpy.S (__mempcpy): Return
+ __mempcpy_erms for Prefer_ERMS.
+ * sysdeps/x86_64/multiarch/memset.S (memset): Return
+ __memset_erms for Prefer_ERMS.
+
2016-06-30  Andreas Schwab  <schwab@suse.de>
[BZ #20262]
diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
index 2bd93713a1..97ffe765f4 100644
--- a/sysdeps/x86/cpu-features.h
+++ b/sysdeps/x86/cpu-features.h
@@ -36,6 +36,7 @@
#define bit_arch_Prefer_MAP_32BIT_EXEC (1 << 16)
#define bit_arch_Prefer_No_VZEROUPPER (1 << 17)
#define bit_arch_Fast_Unaligned_Copy (1 << 18)
+#define bit_arch_Prefer_ERMS (1 << 19)
/* CPUID Feature flags. */
@@ -105,6 +106,7 @@
# define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1*FEATURE_SIZE
# define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1*FEATURE_SIZE
# define index_arch_Fast_Unaligned_Copy FEATURE_INDEX_1*FEATURE_SIZE
+# define index_arch_Prefer_ERMS FEATURE_INDEX_1*FEATURE_SIZE
# if defined (_LIBC) && !IS_IN (nonlib)
@@ -274,6 +276,7 @@ extern const struct cpu_features *__get_cpu_features (void)
# define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1
# define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1
# define index_arch_Fast_Unaligned_Copy FEATURE_INDEX_1
+# define index_arch_Prefer_ERMS FEATURE_INDEX_1
#endif /* !__ASSEMBLER__ */
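The two macros added above follow the existing bit_arch_*/index_arch_* pattern: the index selects a word in a per-CPU feature array and the bit masks one capability inside that word. The fragment below is a hedged, self-contained illustration of how such a pair is typically consumed; the struct and function names are hypothetical, not glibc's.

    /* Hypothetical names throughout; only the bit/index arithmetic mirrors
       the header above.  */
    #define FEATURE_INDEX_1         0
    #define bit_arch_Prefer_ERMS    (1 << 19)
    #define index_arch_Prefer_ERMS  FEATURE_INDEX_1

    struct fake_cpu_features
    {
      unsigned int feature[1];   /* one word per FEATURE_INDEX_* */
    };

    static inline int
    fake_has_prefer_erms (const struct fake_cpu_features *f)
    {
      /* Select the word by index, then test the capability bit.  */
      return (f->feature[index_arch_Prefer_ERMS] & bit_arch_Prefer_ERMS) != 0;
    }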
diff --git a/sysdeps/x86_64/multiarch/memcpy.S b/sysdeps/x86_64/multiarch/memcpy.S
index f6771a4696..df7fbacd8a 100644
--- a/sysdeps/x86_64/multiarch/memcpy.S
+++ b/sysdeps/x86_64/multiarch/memcpy.S
@@ -29,6 +29,9 @@
ENTRY(__new_memcpy)
.type __new_memcpy, @gnu_indirect_function
LOAD_RTLD_GLOBAL_RO_RDX
+ lea __memcpy_erms(%rip), %RAX_LP
+ HAS_ARCH_FEATURE (Prefer_ERMS)
+ jnz 2f
# ifdef HAVE_AVX512_ASM_SUPPORT
HAS_ARCH_FEATURE (AVX512F_Usable)
jz 1f
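The added assembly loads the ERMS entry point first and jumps straight to the return path when Prefer_ERMS is set, so the AVX512/AVX/SSE2 choices below it are never consulted. A rough C analogue of that selector logic (every name other than __memcpy_erms is a hypothetical stand-in) might look like this; the same shape applies to the memmove and mempcpy selectors further down.

    #include <stddef.h>

    typedef void *(*memcpy_fn) (void *, const void *, size_t);

    extern void *__memcpy_erms (void *, const void *, size_t);
    extern void *memcpy_fallback (void *, const void *, size_t);  /* hypothetical */
    extern int prefer_erms_p (void);   /* hypothetical stand-in for HAS_ARCH_FEATURE */

    static memcpy_fn
    memcpy_resolver_sketch (void)
    {
      /* Prefer_ERMS is checked first, so it wins over the vector variants.  */
      if (prefer_erms_p ())
        return __memcpy_erms;
      return memcpy_fallback;   /* AVX512/AVX/SSE2 selection elided */
    }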
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index a2cce39a16..4893ea46b4 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -150,13 +150,15 @@ L(nop):
#if defined USE_MULTIARCH && IS_IN (libc)
END (MEMMOVE_SYMBOL (__memmove, unaligned))
-# if VEC_SIZE == 16 && defined SHARED
+# if VEC_SIZE == 16
+# if defined SHARED
/* Only used to measure performance of REP MOVSB. */
ENTRY (__mempcpy_erms)
movq %rdi, %rax
addq %rdx, %rax
jmp L(start_movsb)
END (__mempcpy_erms)
+# endif
ENTRY (__memmove_erms)
movq %rdi, %rax
@@ -181,7 +183,9 @@ L(movsb_backward):
cld
ret
END (__memmove_erms)
+# if defined SHARED
strong_alias (__memmove_erms, __memcpy_erms)
+# endif
# endif
# ifdef SHARED
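__memmove_erms, now also built for libc.a, is essentially a REP MOVSB with an overlap check: copy forward unless the destination overlaps the source from above, in which case copy backward under the direction flag (the std ... cld pair visible above). Below is a hedged C sketch of that logic, not the actual glibc routine.

    #include <stddef.h>

    static void *
    erms_memmove_sketch (void *dst, const void *src, size_t n)
    {
      unsigned char *d = dst;
      const unsigned char *s = src;
      if (n == 0)
        return dst;
      if (d <= s || d >= s + n)
        /* No destructive overlap: forward REP MOVSB.  */
        __asm__ volatile ("rep movsb"
                          : "+D" (d), "+S" (s), "+c" (n) :: "memory");
      else
        {
          /* Destination overlaps source from above: copy backward
             with the direction flag set, then clear it.  */
          d += n - 1;
          s += n - 1;
          __asm__ volatile ("std\n\trep movsb\n\tcld"
                            : "+D" (d), "+S" (s), "+c" (n) :: "memory");
        }
      return dst;
    }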
diff --git a/sysdeps/x86_64/multiarch/memmove.S b/sysdeps/x86_64/multiarch/memmove.S
index 25c3586ee9..8e1c6ac8e8 100644
--- a/sysdeps/x86_64/multiarch/memmove.S
+++ b/sysdeps/x86_64/multiarch/memmove.S
@@ -27,6 +27,9 @@
ENTRY(__libc_memmove)
.type __libc_memmove, @gnu_indirect_function
LOAD_RTLD_GLOBAL_RO_RDX
+ lea __memmove_erms(%rip), %RAX_LP
+ HAS_ARCH_FEATURE (Prefer_ERMS)
+ jnz 2f
# ifdef HAVE_AVX512_ASM_SUPPORT
HAS_ARCH_FEATURE (AVX512F_Usable)
jz 1f
diff --git a/sysdeps/x86_64/multiarch/mempcpy.S b/sysdeps/x86_64/multiarch/mempcpy.S
index f9c6df301c..4011a1a4f0 100644
--- a/sysdeps/x86_64/multiarch/mempcpy.S
+++ b/sysdeps/x86_64/multiarch/mempcpy.S
@@ -29,6 +29,9 @@
ENTRY(__mempcpy)
.type __mempcpy, @gnu_indirect_function
LOAD_RTLD_GLOBAL_RO_RDX
+ lea __mempcpy_erms(%rip), %RAX_LP
+ HAS_ARCH_FEATURE (Prefer_ERMS)
+ jnz 2f
# ifdef HAVE_AVX512_ASM_SUPPORT
HAS_ARCH_FEATURE (AVX512F_Usable)
jz 1f
diff --git a/sysdeps/x86_64/multiarch/memset.S b/sysdeps/x86_64/multiarch/memset.S
index 4e52d8f8c4..2b964a0398 100644
--- a/sysdeps/x86_64/multiarch/memset.S
+++ b/sysdeps/x86_64/multiarch/memset.S
@@ -26,6 +26,9 @@
ENTRY(memset)
.type memset, @gnu_indirect_function
LOAD_RTLD_GLOBAL_RO_RDX
+ lea __memset_erms(%rip), %RAX_LP
+ HAS_ARCH_FEATURE (Prefer_ERMS)
+ jnz 2f
lea __memset_sse2_unaligned_erms(%rip), %RAX_LP
HAS_CPU_FEATURE (ERMS)
jnz 1f
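For memset the ERMS variant reduces to REP STOSB, which stores the fill byte from AL into RCX bytes starting at RDI. A minimal, hedged C approximation (not glibc's __memset_erms):

    #include <stddef.h>

    static void *
    erms_memset_sketch (void *dst, int c, size_t n)
    {
      void *d = dst;
      /* "a" (c) places the fill value in AL; REP STOSB writes it n times.  */
      __asm__ volatile ("rep stosb"
                        : "+D" (d), "+c" (n)
                        : "a" (c)
                        : "memory");
      return dst;
    }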