601650f878

* Tue May 31 2022 Arjun Shankar <arjun@redhat.com> - 2.34-35
- Sync with upstream branch release/2.34/master,
  commit ff450cdbdee0b8cb6b9d653d6d2fa892de29be31:
- Fix deadlock when pthread_atfork handler calls pthread_atfork or dlclose
- x86: Fallback {str|wcs}cmp RTM in the ncmp overflow case [BZ #29127]
- string.h: fix __fortified_attr_access macro call [BZ #29162]
- linux: Add a getauxval test [BZ #23293]
- rtld: Use generic argv adjustment in ld.so [BZ #23293]
- S390: Enable static PIE

* Thu May 19 2022 Florian Weimer <fweimer@redhat.com> - 2.34-34
- Sync with upstream branch release/2.34/master,
  commit ede8d94d154157d269b18f3601440ac576c1f96a:
- csu: Implement and use _dl_early_allocate during static startup
- Linux: Introduce __brk_call for invoking the brk system call
- Linux: Implement a useful version of _startup_fatal
- ia64: Always define IA64_USE_NEW_STUB as a flag macro
- Linux: Define MMAP_CALL_INTERNAL
- i386: Honor I386_USE_SYSENTER for 6-argument Linux system calls
- i386: Remove OPTIMIZE_FOR_GCC_5 from Linux libc-do-syscall.S
- elf: Remove __libc_init_secure
- Linux: Consolidate auxiliary vector parsing (redo)
- Linux: Include <dl-auxv.h> in dl-sysdep.c only for SHARED
- Revert "Linux: Consolidate auxiliary vector parsing"
- Linux: Consolidate auxiliary vector parsing
- Linux: Assume that NEED_DL_SYSINFO_DSO is always defined
- Linux: Remove DL_FIND_ARG_COMPONENTS
- Linux: Remove HAVE_AUX_SECURE, HAVE_AUX_XID, HAVE_AUX_PAGESIZE
- elf: Merge dl-sysdep.c into the Linux version
- elf: Remove unused NEED_DL_BASE_ADDR and _dl_base_addr
- x86: Optimize {str|wcs}rchr-evex
- x86: Optimize {str|wcs}rchr-avx2
- x86: Optimize {str|wcs}rchr-sse2
- x86: Cleanup page cross code in memcmp-avx2-movbe.S
- x86: Remove memcmp-sse4.S
- x86: Small improvements for wcslen
- x86: Remove AVX str{n}casecmp
- x86: Add EVEX optimized str{n}casecmp
- x86: Add AVX2 optimized str{n}casecmp
- x86: Optimize str{n}casecmp TOLOWER logic in strcmp-sse42.S
- x86: Optimize str{n}casecmp TOLOWER logic in strcmp.S
- x86: Remove strspn-sse2.S and use the generic implementation
- x86: Remove strpbrk-sse2.S and use the generic implementation
- x86: Remove strcspn-sse2.S and use the generic implementation
- x86: Optimize strspn in strspn-c.c
- x86: Optimize strcspn and strpbrk in strcspn-c.c
- x86: Code cleanup in strchr-evex and comment justifying branch
- x86: Code cleanup in strchr-avx2 and comment justifying branch
- x86_64: Remove bcopy optimizations
- x86-64: Remove bzero weak alias in SS2 memset
- x86_64/multiarch: Sort sysdep_routines and put one entry per line
- x86: Improve L to support L(XXX_SYMBOL (YYY, ZZZ))
- fortify: Ensure that __glibc_fortify condition is a constant [BZ #29141]

* Thu May 12 2022 Florian Weimer <fweimer@redhat.com> - 2.34-33
- Sync with upstream branch release/2.34/master,
  commit 91c2e6c3db44297bf4cb3a2e3c40236c5b6a0b23:
- dlfcn: Implement the RTLD_DI_PHDR request type for dlinfo
- manual: Document the dlinfo function
- x86: Fix fallback for wcsncmp_avx2 in strcmp-avx2.S [BZ #28896]
- x86: Fix bug in strncmp-evex and strncmp-avx2 [BZ #28895]
- x86: Set .text section in memset-vec-unaligned-erms
- x86-64: Optimize bzero
- x86: Remove SSSE3 instruction for broadcast in memset.S (SSE2 Only)
- x86: Improve vec generation in memset-vec-unaligned-erms.S
- x86-64: Fix strcmp-evex.S
- x86-64: Fix strcmp-avx2.S
- x86: Optimize strcmp-evex.S
- x86: Optimize strcmp-avx2.S
- manual: Clarify that abbreviations of long options are allowed
- Add HWCAP2_AFP, HWCAP2_RPRES from Linux 5.17 to AArch64 bits/hwcap.h
- aarch64: Add HWCAP2_ECV from Linux 5.16
- Add SOL_MPTCP, SOL_MCTP from Linux 5.16 to bits/socket.h
- Update kernel version to 5.17 in tst-mman-consts.py
- Update kernel version to 5.16 in tst-mman-consts.py
- Update syscall lists for Linux 5.17
- Add ARPHRD_CAN, ARPHRD_MCTP to net/if_arp.h
- Update kernel version to 5.15 in tst-mman-consts.py
- Add PF_MCTP, AF_MCTP from Linux 5.15 to bits/socket.h
Resolves: #2091541
commit 80883f43545f4f9afcb26beef9358dfdcd021bd6
Author: Noah Goldstein <goldstein.w.n@gmail.com>
Date: Wed Mar 23 16:57:46 2022 -0500

    x86: Remove AVX str{n}casecmp

    The rationale is:

    1. SSE42 has nearly identical logic so any benefit is minimal (3.4%
       regression on Tigerlake using SSE42 versus AVX across the
       benchtest suite).
    2. AVX2 version covers the majority of targets that previously
       preferred it.
    3. The targets where AVX would still be best (SnB and IVB) are
       becoming outdated.

    All in all, the code size saving is worth it.

    All string/memory tests pass.
    Reviewed-by: H.J. Lu <hjl.tools@gmail.com>

    (cherry picked from commit 305769b2a15c2e96f9e1b5195d3c4e0d6f0f4b68)
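For orientation, here is a simplified C sketch of the strcasecmp ifunc selection order this patch leaves behind, as reflected in the ifunc-strcasecmp.h hunk further below. The feature predicates and type names are paraphrased assumptions, not glibc's actual cpu_features interface; the only point is the fall-through: targets that used to take the AVX branch now land on SSE4.2 (or SSSE3/SSE2).

#include <stdbool.h>

/* Sketch only: the EVEX/RTM preconditions and the cpu_features plumbing
   are abbreviated.  */
enum impl { IMPL_EVEX, IMPL_AVX2_RTM, IMPL_AVX2, IMPL_SSE42, IMPL_SSSE3, IMPL_SSE2 };

struct features { bool evex_ok, avx2, rtm, sse4_2, slow_sse4_2, ssse3; };

static enum impl
select_strcasecmp (const struct features *f)
{
  if (f->avx2)
    {
      if (f->evex_ok)
        return IMPL_EVEX;
      if (f->rtm)
        return IMPL_AVX2_RTM;
      return IMPL_AVX2;
    }
  /* The AVX tier ("if AVX is usable, return the avx variant") used to
     sit here; this patch removes it.  */
  if (f->sse4_2 && !f->slow_sse4_2)
    return IMPL_SSE42;
  return f->ssse3 ? IMPL_SSSE3 : IMPL_SSE2;
}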
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index 359712c1491a2431..bca82e38d86cc440 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -50,7 +50,6 @@ sysdep_routines += \
stpncpy-evex \
stpncpy-sse2-unaligned \
stpncpy-ssse3 \
- strcasecmp_l-avx \
strcasecmp_l-avx2 \
strcasecmp_l-avx2-rtm \
strcasecmp_l-evex \
@@ -91,7 +90,6 @@ sysdep_routines += \
strlen-avx2-rtm \
strlen-evex \
strlen-sse2 \
- strncase_l-avx \
strncase_l-avx2 \
strncase_l-avx2-rtm \
strncase_l-evex \
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index f6994e5406933d53..4c7834dd0b951fa4 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -429,9 +429,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
(CPU_FEATURE_USABLE (AVX2)
&& CPU_FEATURE_USABLE (RTM)),
__strcasecmp_avx2_rtm)
- IFUNC_IMPL_ADD (array, i, strcasecmp,
- CPU_FEATURE_USABLE (AVX),
- __strcasecmp_avx)
IFUNC_IMPL_ADD (array, i, strcasecmp,
CPU_FEATURE_USABLE (SSE4_2),
__strcasecmp_sse42)
@@ -453,9 +450,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
(CPU_FEATURE_USABLE (AVX2)
&& CPU_FEATURE_USABLE (RTM)),
__strcasecmp_l_avx2_rtm)
- IFUNC_IMPL_ADD (array, i, strcasecmp_l,
- CPU_FEATURE_USABLE (AVX),
- __strcasecmp_l_avx)
IFUNC_IMPL_ADD (array, i, strcasecmp_l,
CPU_FEATURE_USABLE (SSE4_2),
__strcasecmp_l_sse42)
@@ -591,9 +585,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
(CPU_FEATURE_USABLE (AVX2)
&& CPU_FEATURE_USABLE (RTM)),
__strncasecmp_avx2_rtm)
- IFUNC_IMPL_ADD (array, i, strncasecmp,
- CPU_FEATURE_USABLE (AVX),
- __strncasecmp_avx)
IFUNC_IMPL_ADD (array, i, strncasecmp,
CPU_FEATURE_USABLE (SSE4_2),
__strncasecmp_sse42)
@@ -616,9 +607,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
(CPU_FEATURE_USABLE (AVX2)
&& CPU_FEATURE_USABLE (RTM)),
__strncasecmp_l_avx2_rtm)
- IFUNC_IMPL_ADD (array, i, strncasecmp_l,
- CPU_FEATURE_USABLE (AVX),
- __strncasecmp_l_avx)
IFUNC_IMPL_ADD (array, i, strncasecmp_l,
CPU_FEATURE_USABLE (SSE4_2),
__strncasecmp_l_sse42)
diff --git a/sysdeps/x86_64/multiarch/ifunc-strcasecmp.h b/sysdeps/x86_64/multiarch/ifunc-strcasecmp.h
index 488e99e4997f379b..40819caf5ab10337 100644
--- a/sysdeps/x86_64/multiarch/ifunc-strcasecmp.h
+++ b/sysdeps/x86_64/multiarch/ifunc-strcasecmp.h
@@ -22,7 +22,6 @@
extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2) attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3) attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (sse42) attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx) attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2) attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_rtm) attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (evex) attribute_hidden;
@@ -46,9 +45,6 @@ IFUNC_SELECTOR (void)
return OPTIMIZE (avx2);
}

- if (CPU_FEATURE_USABLE_P (cpu_features, AVX))
- return OPTIMIZE (avx);
-
if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_2)
&& !CPU_FEATURES_ARCH_P (cpu_features, Slow_SSE4_2))
return OPTIMIZE (sse42);
diff --git a/sysdeps/x86_64/multiarch/strcasecmp_l-avx.S b/sysdeps/x86_64/multiarch/strcasecmp_l-avx.S
deleted file mode 100644
index 647aa05714d7a36c..0000000000000000
--- a/sysdeps/x86_64/multiarch/strcasecmp_l-avx.S
+++ /dev/null
@@ -1,22 +0,0 @@
-/* strcasecmp_l optimized with AVX.
- Copyright (C) 2017-2021 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-#define STRCMP_SSE42 __strcasecmp_l_avx
-#define USE_AVX 1
-#define USE_AS_STRCASECMP_L
-#include "strcmp-sse42.S"
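The deleted file above is only a thin parameterization shim: strcmp-sse42.S carries the shared body, and each wrapper picks a variant through a few defines before including it. By analogy, the surviving SSE4.2 wrapper (not part of this diff, so the exact file name and content here are an assumption) presumably looks like:

/* Sketch of a sibling wrapper, e.g. strcasecmp_l-sse4_2.S (assumed, not shown in this patch).  */
#define STRCMP_SSE42 __strcasecmp_l_sse42
#define USE_AS_STRCASECMP_L
#include "strcmp-sse42.S"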
diff --git a/sysdeps/x86_64/multiarch/strcmp-sse42.S b/sysdeps/x86_64/multiarch/strcmp-sse42.S
index a6825de8195ad8c6..466c6a92a612ebcb 100644
--- a/sysdeps/x86_64/multiarch/strcmp-sse42.S
+++ b/sysdeps/x86_64/multiarch/strcmp-sse42.S
@@ -42,13 +42,8 @@
# define UPDATE_STRNCMP_COUNTER
#endif

-#ifdef USE_AVX
-# define SECTION avx
-# define GLABEL(l) l##_avx
-#else
-# define SECTION sse4.2
-# define GLABEL(l) l##_sse42
-#endif
+#define SECTION sse4.2
+#define GLABEL(l) l##_sse42

#define LABEL(l) .L##l

@@ -106,21 +101,7 @@ END (GLABEL(__strncasecmp))
#endif


-#ifdef USE_AVX
-# define movdqa vmovdqa
-# define movdqu vmovdqu
-# define pmovmskb vpmovmskb
-# define pcmpistri vpcmpistri
-# define psubb vpsubb
-# define pcmpeqb vpcmpeqb
-# define psrldq vpsrldq
-# define pslldq vpslldq
-# define palignr vpalignr
-# define pxor vpxor
-# define D(arg) arg, arg
-#else
-# define D(arg) arg
-#endif
+#define arg arg

STRCMP_SSE42:
cfi_startproc
@@ -192,18 +173,7 @@ LABEL(case_add):
movdqu (%rdi), %xmm1
movdqu (%rsi), %xmm2
#if defined USE_AS_STRCASECMP_L || defined USE_AS_STRNCASECMP_L
-# ifdef USE_AVX
-# define TOLOWER(reg1, reg2) \
- vpaddb LCASE_MIN_reg, reg1, %xmm7; \
- vpaddb LCASE_MIN_reg, reg2, %xmm8; \
- vpcmpgtb LCASE_MAX_reg, %xmm7, %xmm7; \
- vpcmpgtb LCASE_MAX_reg, %xmm8, %xmm8; \
- vpandn CASE_ADD_reg, %xmm7, %xmm7; \
- vpandn CASE_ADD_reg, %xmm8, %xmm8; \
- vpaddb %xmm7, reg1, reg1; \
- vpaddb %xmm8, reg2, reg2
-# else
-# define TOLOWER(reg1, reg2) \
+# define TOLOWER(reg1, reg2) \
movdqa LCASE_MIN_reg, %xmm7; \
movdqa LCASE_MIN_reg, %xmm8; \
paddb reg1, %xmm7; \
@@ -214,15 +184,15 @@ LABEL(case_add):
pandn CASE_ADD_reg, %xmm8; \
paddb %xmm7, reg1; \
paddb %xmm8, reg2
-# endif
+
TOLOWER (%xmm1, %xmm2)
#else
# define TOLOWER(reg1, reg2)
#endif
- pxor %xmm0, D(%xmm0) /* clear %xmm0 for null char checks */
- pcmpeqb %xmm1, D(%xmm0) /* Any null chars? */
- pcmpeqb %xmm2, D(%xmm1) /* compare first 16 bytes for equality */
- psubb %xmm0, D(%xmm1) /* packed sub of comparison results*/
+ pxor %xmm0, %xmm0 /* clear %xmm0 for null char checks */
+ pcmpeqb %xmm1, %xmm0 /* Any null chars? */
+ pcmpeqb %xmm2, %xmm1 /* compare first 16 bytes for equality */
+ psubb %xmm0, %xmm1 /* packed sub of comparison results*/
pmovmskb %xmm1, %edx
sub $0xffff, %edx /* if first 16 bytes are same, edx == 0xffff */
jnz LABEL(less16bytes)/* If not, find different value or null char */
@@ -246,7 +216,7 @@ LABEL(crosscache):
xor %r8d, %r8d
and $0xf, %ecx /* offset of rsi */
and $0xf, %eax /* offset of rdi */
- pxor %xmm0, D(%xmm0) /* clear %xmm0 for null char check */
+ pxor %xmm0, %xmm0 /* clear %xmm0 for null char check */
cmp %eax, %ecx
je LABEL(ashr_0) /* rsi and rdi relative offset same */
ja LABEL(bigger)
@@ -260,7 +230,7 @@ LABEL(bigger):
sub %rcx, %r9
lea LABEL(unaligned_table)(%rip), %r10
movslq (%r10, %r9,4), %r9
- pcmpeqb %xmm1, D(%xmm0) /* Any null chars? */
+ pcmpeqb %xmm1, %xmm0 /* Any null chars? */
lea (%r10, %r9), %r10
_CET_NOTRACK jmp *%r10 /* jump to corresponding case */

@@ -273,15 +243,15 @@ LABEL(bigger):
LABEL(ashr_0):

movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, D(%xmm0) /* Any null chars? */
+ pcmpeqb %xmm1, %xmm0 /* Any null chars? */
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpeqb (%rdi), D(%xmm1) /* compare 16 bytes for equality */
+ pcmpeqb (%rdi), %xmm1 /* compare 16 bytes for equality */
#else
movdqa (%rdi), %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm2, D(%xmm1) /* compare 16 bytes for equality */
+ pcmpeqb %xmm2, %xmm1 /* compare 16 bytes for equality */
#endif
- psubb %xmm0, D(%xmm1) /* packed sub of comparison results*/
+ psubb %xmm0, %xmm1 /* packed sub of comparison results*/
pmovmskb %xmm1, %r9d
shr %cl, %edx /* adjust 0xffff for offset */
shr %cl, %r9d /* adjust for 16-byte offset */
@@ -361,10 +331,10 @@ LABEL(ashr_0_exit_use):
*/
.p2align 4
LABEL(ashr_1):
- pslldq $15, D(%xmm2) /* shift first string to align with second */
+ pslldq $15, %xmm2 /* shift first string to align with second */
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2) /* compare 16 bytes for equality */
- psubb %xmm0, D(%xmm2) /* packed sub of comparison results*/
+ pcmpeqb %xmm1, %xmm2 /* compare 16 bytes for equality */
+ psubb %xmm0, %xmm2 /* packed sub of comparison results*/
pmovmskb %xmm2, %r9d
shr %cl, %edx /* adjust 0xffff for offset */
shr %cl, %r9d /* adjust for 16-byte offset */
@@ -392,7 +362,7 @@ LABEL(loop_ashr_1_use):

LABEL(nibble_ashr_1_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $1, -16(%rdi, %rdx), D(%xmm0)
+ palignr $1, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -411,7 +381,7 @@ LABEL(nibble_ashr_1_restart_use):
jg LABEL(nibble_ashr_1_use)

movdqa (%rdi, %rdx), %xmm0
- palignr $1, -16(%rdi, %rdx), D(%xmm0)
+ palignr $1, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -431,7 +401,7 @@ LABEL(nibble_ashr_1_restart_use):
LABEL(nibble_ashr_1_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $1, D(%xmm0)
+ psrldq $1, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -449,10 +419,10 @@ LABEL(nibble_ashr_1_use):
*/
.p2align 4
LABEL(ashr_2):
- pslldq $14, D(%xmm2)
+ pslldq $14, %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -480,7 +450,7 @@ LABEL(loop_ashr_2_use):

LABEL(nibble_ashr_2_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $2, -16(%rdi, %rdx), D(%xmm0)
+ palignr $2, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -499,7 +469,7 @@ LABEL(nibble_ashr_2_restart_use):
jg LABEL(nibble_ashr_2_use)

movdqa (%rdi, %rdx), %xmm0
- palignr $2, -16(%rdi, %rdx), D(%xmm0)
+ palignr $2, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -519,7 +489,7 @@ LABEL(nibble_ashr_2_restart_use):
LABEL(nibble_ashr_2_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $2, D(%xmm0)
+ psrldq $2, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -537,10 +507,10 @@ LABEL(nibble_ashr_2_use):
*/
.p2align 4
LABEL(ashr_3):
- pslldq $13, D(%xmm2)
+ pslldq $13, %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -568,7 +538,7 @@ LABEL(loop_ashr_3_use):

LABEL(nibble_ashr_3_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $3, -16(%rdi, %rdx), D(%xmm0)
+ palignr $3, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -587,7 +557,7 @@ LABEL(nibble_ashr_3_restart_use):
jg LABEL(nibble_ashr_3_use)

movdqa (%rdi, %rdx), %xmm0
- palignr $3, -16(%rdi, %rdx), D(%xmm0)
+ palignr $3, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -607,7 +577,7 @@ LABEL(nibble_ashr_3_restart_use):
LABEL(nibble_ashr_3_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $3, D(%xmm0)
+ psrldq $3, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -625,10 +595,10 @@ LABEL(nibble_ashr_3_use):
*/
.p2align 4
LABEL(ashr_4):
- pslldq $12, D(%xmm2)
+ pslldq $12, %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -657,7 +627,7 @@ LABEL(loop_ashr_4_use):

LABEL(nibble_ashr_4_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $4, -16(%rdi, %rdx), D(%xmm0)
+ palignr $4, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -676,7 +646,7 @@ LABEL(nibble_ashr_4_restart_use):
jg LABEL(nibble_ashr_4_use)

movdqa (%rdi, %rdx), %xmm0
- palignr $4, -16(%rdi, %rdx), D(%xmm0)
+ palignr $4, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -696,7 +666,7 @@ LABEL(nibble_ashr_4_restart_use):
LABEL(nibble_ashr_4_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $4, D(%xmm0)
+ psrldq $4, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -714,10 +684,10 @@ LABEL(nibble_ashr_4_use):
*/
.p2align 4
LABEL(ashr_5):
- pslldq $11, D(%xmm2)
+ pslldq $11, %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -746,7 +716,7 @@ LABEL(loop_ashr_5_use):

LABEL(nibble_ashr_5_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $5, -16(%rdi, %rdx), D(%xmm0)
+ palignr $5, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -766,7 +736,7 @@ LABEL(nibble_ashr_5_restart_use):

movdqa (%rdi, %rdx), %xmm0

- palignr $5, -16(%rdi, %rdx), D(%xmm0)
+ palignr $5, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -786,7 +756,7 @@ LABEL(nibble_ashr_5_restart_use):
LABEL(nibble_ashr_5_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $5, D(%xmm0)
+ psrldq $5, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -804,10 +774,10 @@ LABEL(nibble_ashr_5_use):
*/
.p2align 4
LABEL(ashr_6):
- pslldq $10, D(%xmm2)
+ pslldq $10, %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -836,7 +806,7 @@ LABEL(loop_ashr_6_use):

LABEL(nibble_ashr_6_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $6, -16(%rdi, %rdx), D(%xmm0)
+ palignr $6, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -855,7 +825,7 @@ LABEL(nibble_ashr_6_restart_use):
jg LABEL(nibble_ashr_6_use)

movdqa (%rdi, %rdx), %xmm0
- palignr $6, -16(%rdi, %rdx), D(%xmm0)
+ palignr $6, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -875,7 +845,7 @@ LABEL(nibble_ashr_6_restart_use):
LABEL(nibble_ashr_6_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $6, D(%xmm0)
+ psrldq $6, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -893,10 +863,10 @@ LABEL(nibble_ashr_6_use):
*/
.p2align 4
LABEL(ashr_7):
- pslldq $9, D(%xmm2)
+ pslldq $9, %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -925,7 +895,7 @@ LABEL(loop_ashr_7_use):

LABEL(nibble_ashr_7_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $7, -16(%rdi, %rdx), D(%xmm0)
+ palignr $7, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -944,7 +914,7 @@ LABEL(nibble_ashr_7_restart_use):
jg LABEL(nibble_ashr_7_use)

movdqa (%rdi, %rdx), %xmm0
- palignr $7, -16(%rdi, %rdx), D(%xmm0)
+ palignr $7, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -964,7 +934,7 @@ LABEL(nibble_ashr_7_restart_use):
LABEL(nibble_ashr_7_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $7, D(%xmm0)
+ psrldq $7, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -982,10 +952,10 @@ LABEL(nibble_ashr_7_use):
*/
.p2align 4
LABEL(ashr_8):
- pslldq $8, D(%xmm2)
+ pslldq $8, %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1014,7 +984,7 @@ LABEL(loop_ashr_8_use):

LABEL(nibble_ashr_8_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $8, -16(%rdi, %rdx), D(%xmm0)
+ palignr $8, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1033,7 +1003,7 @@ LABEL(nibble_ashr_8_restart_use):
jg LABEL(nibble_ashr_8_use)

movdqa (%rdi, %rdx), %xmm0
- palignr $8, -16(%rdi, %rdx), D(%xmm0)
+ palignr $8, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1053,7 +1023,7 @@ LABEL(nibble_ashr_8_restart_use):
LABEL(nibble_ashr_8_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $8, D(%xmm0)
+ psrldq $8, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1071,10 +1041,10 @@ LABEL(nibble_ashr_8_use):
*/
.p2align 4
LABEL(ashr_9):
- pslldq $7, D(%xmm2)
+ pslldq $7, %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1104,7 +1074,7 @@ LABEL(loop_ashr_9_use):
LABEL(nibble_ashr_9_restart_use):
movdqa (%rdi, %rdx), %xmm0

- palignr $9, -16(%rdi, %rdx), D(%xmm0)
+ palignr $9, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1123,7 +1093,7 @@ LABEL(nibble_ashr_9_restart_use):
jg LABEL(nibble_ashr_9_use)

movdqa (%rdi, %rdx), %xmm0
- palignr $9, -16(%rdi, %rdx), D(%xmm0)
+ palignr $9, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1143,7 +1113,7 @@ LABEL(nibble_ashr_9_restart_use):
LABEL(nibble_ashr_9_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $9, D(%xmm0)
+ psrldq $9, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1161,10 +1131,10 @@ LABEL(nibble_ashr_9_use):
*/
.p2align 4
LABEL(ashr_10):
- pslldq $6, D(%xmm2)
+ pslldq $6, %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1193,7 +1163,7 @@ LABEL(loop_ashr_10_use):

LABEL(nibble_ashr_10_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $10, -16(%rdi, %rdx), D(%xmm0)
+ palignr $10, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1212,7 +1182,7 @@ LABEL(nibble_ashr_10_restart_use):
jg LABEL(nibble_ashr_10_use)

movdqa (%rdi, %rdx), %xmm0
- palignr $10, -16(%rdi, %rdx), D(%xmm0)
+ palignr $10, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1232,7 +1202,7 @@ LABEL(nibble_ashr_10_restart_use):
LABEL(nibble_ashr_10_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $10, D(%xmm0)
+ psrldq $10, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1250,10 +1220,10 @@ LABEL(nibble_ashr_10_use):
*/
.p2align 4
LABEL(ashr_11):
- pslldq $5, D(%xmm2)
+ pslldq $5, %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1282,7 +1252,7 @@ LABEL(loop_ashr_11_use):

LABEL(nibble_ashr_11_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $11, -16(%rdi, %rdx), D(%xmm0)
+ palignr $11, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1301,7 +1271,7 @@ LABEL(nibble_ashr_11_restart_use):
jg LABEL(nibble_ashr_11_use)

movdqa (%rdi, %rdx), %xmm0
- palignr $11, -16(%rdi, %rdx), D(%xmm0)
+ palignr $11, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1321,7 +1291,7 @@ LABEL(nibble_ashr_11_restart_use):
LABEL(nibble_ashr_11_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $11, D(%xmm0)
+ psrldq $11, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1339,10 +1309,10 @@ LABEL(nibble_ashr_11_use):
*/
.p2align 4
LABEL(ashr_12):
- pslldq $4, D(%xmm2)
+ pslldq $4, %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1371,7 +1341,7 @@ LABEL(loop_ashr_12_use):

LABEL(nibble_ashr_12_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $12, -16(%rdi, %rdx), D(%xmm0)
+ palignr $12, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1390,7 +1360,7 @@ LABEL(nibble_ashr_12_restart_use):
jg LABEL(nibble_ashr_12_use)

movdqa (%rdi, %rdx), %xmm0
- palignr $12, -16(%rdi, %rdx), D(%xmm0)
+ palignr $12, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1410,7 +1380,7 @@ LABEL(nibble_ashr_12_restart_use):
LABEL(nibble_ashr_12_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $12, D(%xmm0)
+ psrldq $12, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1428,10 +1398,10 @@ LABEL(nibble_ashr_12_use):
*/
.p2align 4
LABEL(ashr_13):
- pslldq $3, D(%xmm2)
+ pslldq $3, %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1461,7 +1431,7 @@ LABEL(loop_ashr_13_use):

LABEL(nibble_ashr_13_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $13, -16(%rdi, %rdx), D(%xmm0)
+ palignr $13, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1480,7 +1450,7 @@ LABEL(nibble_ashr_13_restart_use):
jg LABEL(nibble_ashr_13_use)

movdqa (%rdi, %rdx), %xmm0
- palignr $13, -16(%rdi, %rdx), D(%xmm0)
+ palignr $13, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1500,7 +1470,7 @@ LABEL(nibble_ashr_13_restart_use):
LABEL(nibble_ashr_13_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $13, D(%xmm0)
+ psrldq $13, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1518,10 +1488,10 @@ LABEL(nibble_ashr_13_use):
*/
.p2align 4
LABEL(ashr_14):
- pslldq $2, D(%xmm2)
+ pslldq $2, %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1551,7 +1521,7 @@ LABEL(loop_ashr_14_use):

LABEL(nibble_ashr_14_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $14, -16(%rdi, %rdx), D(%xmm0)
+ palignr $14, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1570,7 +1540,7 @@ LABEL(nibble_ashr_14_restart_use):
jg LABEL(nibble_ashr_14_use)

movdqa (%rdi, %rdx), %xmm0
- palignr $14, -16(%rdi, %rdx), D(%xmm0)
+ palignr $14, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1590,7 +1560,7 @@ LABEL(nibble_ashr_14_restart_use):
LABEL(nibble_ashr_14_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $14, D(%xmm0)
+ psrldq $14, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1608,10 +1578,10 @@ LABEL(nibble_ashr_14_use):
*/
.p2align 4
LABEL(ashr_15):
- pslldq $1, D(%xmm2)
+ pslldq $1, %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1643,7 +1613,7 @@ LABEL(loop_ashr_15_use):

LABEL(nibble_ashr_15_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $15, -16(%rdi, %rdx), D(%xmm0)
+ palignr $15, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1662,7 +1632,7 @@ LABEL(nibble_ashr_15_restart_use):
jg LABEL(nibble_ashr_15_use)

movdqa (%rdi, %rdx), %xmm0
- palignr $15, -16(%rdi, %rdx), D(%xmm0)
+ palignr $15, -16(%rdi, %rdx), %xmm0
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1682,7 +1652,7 @@ LABEL(nibble_ashr_15_restart_use):
LABEL(nibble_ashr_15_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $15, D(%xmm0)
+ psrldq $15, %xmm0
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
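The TOLOWER macro retained above (the SSE2 variant) folds case without branches: it shifts the upper-case range into the most negative signed-byte values, uses a signed compare to build a mask, and adds 0x20 only where the mask allows it. A scalar C model of the per-byte operation follows; the shift and threshold constants are illustrative assumptions, since the real LCASE_MIN/LCASE_MAX/CASE_ADD operands are defined elsewhere in strcmp-sse42.S and are not part of this hunk.

#include <stdint.h>

/* Scalar model of what the SSE2 TOLOWER macro does to each byte in
   parallel (paddb / pcmpgtb / pandn / paddb).  Constants are assumed.  */
static inline uint8_t
tolower_branchless (uint8_t c)
{
  const uint8_t shift = 0x80 - 'A';                       /* "paddb LCASE_MIN" */
  int8_t shifted = (int8_t) (uint8_t) (c + shift);        /* 'A'..'Z' -> most negative range */
  int8_t upper_max = (int8_t) (uint8_t) ('Z' + shift);
  uint8_t not_upper = shifted > upper_max ? 0xff : 0x00;  /* "pcmpgtb LCASE_MAX" */
  uint8_t delta = (uint8_t) ~not_upper & 0x20;            /* "pandn CASE_ADD"; 0x20 = 'a' - 'A' */
  return (uint8_t) (c + delta);                           /* final "paddb" */
}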
diff --git a/sysdeps/x86_64/multiarch/strncase_l-avx.S b/sysdeps/x86_64/multiarch/strncase_l-avx.S
deleted file mode 100644
index f1d3fefdd94674b8..0000000000000000
--- a/sysdeps/x86_64/multiarch/strncase_l-avx.S
+++ /dev/null
@@ -1,22 +0,0 @@
-/* strncasecmp_l optimized with AVX.
- Copyright (C) 2017-2021 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-#define STRCMP_SSE42 __strncasecmp_l_avx
-#define USE_AVX 1
-#define USE_AS_STRNCASECMP_L
-#include "strcmp-sse42.S"