* Fri Jul 22 2022 Arjun Shankar <arjun@redhat.com> - 2.34-40
- Sync with upstream branch release/2.34/master,
  commit b2f32e746492615a6eb3e66fac1e766e32e8deb1:
- malloc: Simplify implementation of __malloc_assert
- Update syscall-names.list for Linux 5.18
- x86: Add missing IS_IN (libc) check to strncmp-sse4_2.S
- x86: Move mem{p}{mov|cpy}_{chk_}erms to its own file
- x86: Move and slightly improve memset_erms
- x86: Add definition for __wmemset_chk AVX2 RTM in ifunc impl list
- x86: Put wcs{n}len-sse4.1 in the sse4.1 text section
- x86: Align entry for memrchr to 64-bytes.
- x86: Add BMI1/BMI2 checks for ISA_V3 check
- x86: Cleanup bounds checking in large memcpy case
- x86: Add bounds `x86_non_temporal_threshold`
- x86: Add sse42 implementation to strcmp's ifunc
- x86: Fix misordered logic for setting `rep_movsb_stop_threshold`
- x86: Align varshift table to 32-bytes
- x86: ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST expect no transactions
- x86: Shrink code size of memchr-evex.S
- x86: Shrink code size of memchr-avx2.S
- x86: Optimize memrchr-avx2.S
- x86: Optimize memrchr-evex.S
- x86: Optimize memrchr-sse2.S
- x86: Add COND_VZEROUPPER that can replace vzeroupper if no `ret`
- x86: Create header for VEC classes in x86 strings library
- x86_64: Add strstr function with 512-bit EVEX
- x86-64: Ignore r_addend for R_X86_64_GLOB_DAT/R_X86_64_JUMP_SLOT
- x86_64: Implement evex512 version of strlen, strnlen, wcslen and wcsnlen
- x86_64: Remove bzero optimization
- x86_64: Remove end of line trailing spaces
- nptl: Fix ___pthread_unregister_cancel_restore asynchronous restore
- linux: Fix mq_timereceive check for 32 bit fallback code (BZ 29304)
Resolves: #2109505
commit 7079931c51547854323fe2ed6fdccf2a1b8b04d7
Author: Noah Goldstein <goldstein.w.n@gmail.com>
Date:   Wed Jun 29 16:07:05 2022 -0700

    x86: Move and slightly improve memset_erms

    Implementation wise:
       1. Remove the VZEROUPPER as memset_{impl}_unaligned_erms does not
          use the L(stosb) label that was previously defined.

       2. Don't give the hotpath (fallthrough) to zero size.

    Code positioning wise:

    Move memset_{chk}_erms to its own file.  Leaving it in between the
    memset_{impl}_unaligned both adds unnecessary complexity to the
    file and wastes space in a relatively hot cache section.

    (cherry picked from commit 4a3f29e7e475dd4e7cce2a24c187e6fb7b5b0a05)
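The two implementation points above are easiest to see in C. Below is a
minimal, hypothetical sketch (illustration only, not glibc code) of what
the new __memset_erms entry computes: `rep stosb` stores the byte in AL
into RCX consecutive bytes starting at RDI, and zero-length calls now
take an explicit early-return branch instead of occupying the hot
fallthrough path.

    #include <stddef.h>

    /* Hypothetical sketch of __memset_erms in C -- illustration only,
       not glibc code.  rep stosb stores AL into RCX bytes at RDI.  */
    static void *
    memset_erms_sketch (void *dst, int c, size_t n)
    {
      void *ret = dst;              /* memset returns the destination.  */
      if (n == 0)                   /* Zero length exits early; it no   */
        return ret;                 /* longer sits on the hot path.     */
      __asm__ volatile ("rep stosb"
                        : "+D" (dst), "+c" (n)     /* RDI, RCX */
                        : "a" ((unsigned char) c)  /* fill byte in AL */
                        : "memory");
      return ret;
    }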
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index 0e39e63ef6be6a86..da9f16286a763556 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -29,6 +29,7 @@ sysdep_routines += \
   memset-avx2-unaligned-erms-rtm \
   memset-avx512-no-vzeroupper \
   memset-avx512-unaligned-erms \
+  memset-erms \
   memset-evex-unaligned-erms \
   memset-sse2-unaligned-erms \
   rawmemchr-avx2 \
diff --git a/sysdeps/x86_64/multiarch/memset-erms.S b/sysdeps/x86_64/multiarch/memset-erms.S
new file mode 100644
index 0000000000000000..e83cccc731f0a7ea
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memset-erms.S
@@ -0,0 +1,44 @@
+/* memset implement with rep stosb
+   Copyright (C) 2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+
+#include <sysdep.h>
+
+#if defined USE_MULTIARCH && IS_IN (libc)
+	.text
+ENTRY (__memset_chk_erms)
+	cmp	%RDX_LP, %RCX_LP
+	jb	HIDDEN_JUMPTARGET (__chk_fail)
+END (__memset_chk_erms)
+
+/* Only used to measure performance of REP STOSB.  */
+ENTRY (__memset_erms)
+	/* Skip zero length.  */
+	test	%RDX_LP, %RDX_LP
+	jz	L(stosb_return_zero)
+	mov	%RDX_LP, %RCX_LP
+	movzbl	%sil, %eax
+	mov	%RDI_LP, %RDX_LP
+	rep	stosb
+	mov	%RDX_LP, %RAX_LP
+	ret
+L(stosb_return_zero):
+	movq	%rdi, %rax
+	ret
+END (__memset_erms)
+#endif
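For reference, __memset_chk_erms above implements the usual
_FORTIFY_SOURCE check: the compiler passes the known size of the
destination object as a fourth argument (RCX), and the routine jumps to
__chk_fail before any byte is written if the requested length (RDX)
exceeds it, otherwise falling through into __memset_erms.  A
hypothetical C rendering follows (the helper name is illustrative, not
a glibc interface):

    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical C rendering of the __memset_chk_erms check; the
       real __chk_fail terminates with a fortification error, so
       abort() merely stands in for it here.  */
    static void *
    memset_chk_sketch (void *dst, int c, size_t len, size_t dstlen)
    {
      if (dstlen < len)             /* cmp %RDX_LP, %RCX_LP; jb ...     */
        abort ();                   /* HIDDEN_JUMPTARGET (__chk_fail)   */
      return memset (dst, c, len);  /* falls through into __memset_erms */
    }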
diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index abc12d9cda1b3843..905d0fa4643d5768 100644
--- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
@@ -156,37 +156,6 @@ L(entry_from_wmemset):
 #if defined USE_MULTIARCH && IS_IN (libc)
 END (MEMSET_SYMBOL (__memset, unaligned))
 
-# if VEC_SIZE == 16
-ENTRY (__memset_chk_erms)
-	cmp	%RDX_LP, %RCX_LP
-	jb	HIDDEN_JUMPTARGET (__chk_fail)
-END (__memset_chk_erms)
-
-/* Only used to measure performance of REP STOSB.  */
-ENTRY (__memset_erms)
-	/* Skip zero length.  */
-	test	%RDX_LP, %RDX_LP
-	jnz	L(stosb)
-	movq	%rdi, %rax
-	ret
-# else
-/* Provide a hidden symbol to debugger.  */
-	.hidden	MEMSET_SYMBOL (__memset, erms)
-ENTRY (MEMSET_SYMBOL (__memset, erms))
-# endif
-L(stosb):
-	mov	%RDX_LP, %RCX_LP
-	movzbl	%sil, %eax
-	mov	%RDI_LP, %RDX_LP
-	rep	stosb
-	mov	%RDX_LP, %RAX_LP
-	VZEROUPPER_RETURN
-# if VEC_SIZE == 16
-END (__memset_erms)
-# else
-END (MEMSET_SYMBOL (__memset, erms))
-# endif
-
 # if defined SHARED && IS_IN (libc)
 ENTRY_CHK (MEMSET_CHK_SYMBOL (__memset_chk, unaligned_erms))
 	cmp	%RDX_LP, %RCX_LP