glibc/glibc-RHEL-15696-36.patch
Commit 02cfe04e36 ("Import Intel hyperscale improvements (RHEL-15696)")
Author: DJ Delorie
Date:   2023-12-15 17:33:06 -05:00

Resolves: RHEL-15696

Includes two additional (well, 1.5) upstream patches
to resolve roundeven redirects.

From a35a59036ebae3efcdf5e8167610e0656fca9770 Mon Sep 17 00:00:00 2001
From: "H.J. Lu" <hjl.tools@gmail.com>
Date: Thu, 11 Jun 2020 12:41:18 -0700
Subject: [PATCH] x86_64: Use %xmmN with vpxor to clear a vector register
Content-type: text/plain; charset=UTF-8

Since "vpxor %xmmN, %xmmN, %xmmN" clears the whole vector register, use
%xmmN, instead of %ymmN, with vpxor to clear a vector register.
---
 sysdeps/x86_64/multiarch/strcmp-avx2.S  | 4 ++--
 sysdeps/x86_64/multiarch/strrchr-avx2.S | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/sysdeps/x86_64/multiarch/strcmp-avx2.S b/sysdeps/x86_64/multiarch/strcmp-avx2.S
index 433ae047..70d8499b 100644
--- a/sysdeps/x86_64/multiarch/strcmp-avx2.S
+++ b/sysdeps/x86_64/multiarch/strcmp-avx2.S
@@ -105,8 +105,8 @@ ENTRY (STRCMP)
 # endif
 	movl	%edi, %eax
 	xorl	%edx, %edx
-	/* Make %ymm7 all zeros in this function.  */
-	vpxor	%ymm7, %ymm7, %ymm7
+	/* Make %xmm7 (%ymm7) all zeros in this function.  */
+	vpxor	%xmm7, %xmm7, %xmm7
 	orl	%esi, %eax
 	andl	$(PAGE_SIZE - 1), %eax
 	cmpl	$(PAGE_SIZE - (VEC_SIZE * 4)), %eax
diff --git a/sysdeps/x86_64/multiarch/strrchr-avx2.S b/sysdeps/x86_64/multiarch/strrchr-avx2.S
index 9f22a15e..c949410b 100644
--- a/sysdeps/x86_64/multiarch/strrchr-avx2.S
+++ b/sysdeps/x86_64/multiarch/strrchr-avx2.S
@@ -48,7 +48,7 @@ ENTRY (STRRCHR)
 	movl	%edi, %ecx
 	/* Broadcast CHAR to YMM4.  */
 	VPBROADCAST %xmm4, %ymm4
-	vpxor	%ymm0, %ymm0, %ymm0
+	vpxor	%xmm0, %xmm0, %xmm0
 
 	/* Check if we may cross page boundary with one vector load.  */
 	andl	$(2 * VEC_SIZE - 1), %ecx
--
GitLab
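
Note on the change above: it relies on the VEX zero-upper rule, i.e. any
VEX-encoded instruction that writes an %xmmN destination also clears bits
255:128 of the corresponding %ymmN, so the 128-bit vpxor already leaves the
full vector register zeroed. The standalone sketch below illustrates that
behaviour; it is not part of the patch, the file name and build command are
only illustrative, and it assumes an AVX2-capable Linux x86_64 machine.
Build and run with, for example:
    gcc -nostdlib -o zero-upper-demo zero-upper-demo.S && ./zero-upper-demo; echo $?
which should print 0 when %ymm0 is fully cleared by the %xmm0 form of vpxor.

	.text
	.globl	_start
_start:
	/* Set every bit of %ymm0 so the upper 128 bits are known to be
	   non-zero before the test.  */
	vpcmpeqb %ymm0, %ymm0, %ymm0
	/* The form the patch switches to: a VEX-encoded write to %xmm0
	   also zeroes bits 255:128 of %ymm0, so the whole register ends
	   up zero.  */
	vpxor	%xmm0, %xmm0, %xmm0
	/* ZF is set iff all 256 bits of %ymm0 are zero.  */
	vptest	%ymm0, %ymm0
	setnz	%dil			/* exit status 0 if fully cleared, 1 otherwise */
	movzbl	%dil, %edi
	movl	$60, %eax		/* SYS_exit on Linux x86_64 */
	syscall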