glibc/glibc-RHEL-118273-25.patch
Yuki Inoguchi 9dd92cac18 aarch64: Add GLIBC_2.40 vector functions and performance fixes (RHEL-118273)
This combines the following upstream commits:

e45af510bc AArch64: Fix instability in AdvSIMD sinh
6c22823da5 AArch64: Fix instability in AdvSIMD tan
aebaeb2c33 AArch64: Update math-vector-fortran.h
e20ca759af AArch64: add optimised strspn/strcspn
aac077645a AArch64: Fix SVE powf routine [BZ #33299]
1e3d1ddf97 AArch64: Optimize SVE exp functions
dee22d2a81 AArch64: Optimise SVE FP64 Hyperbolics
6849c5b791 AArch64: Improve codegen SVE log1p helper
09795c5612 AArch64: Fix build error with GCC 12.1/12.2
aa18367c11 AArch64: Improve enabling of SVE for libmvec
691edbdf77 aarch64: fix unwinding in longjmp
4352e2cc93 aarch64: Fix _dl_tlsdesc_dynamic unwind for pac-ret (BZ 32612)
cf56eb28fa AArch64: Optimize algorithm in users of SVE expf helper
ce2f26a22e AArch64: Remove PTR_ARG/SIZE_ARG defines
8f0e7fe61e Aarch64: Improve codegen in SVE asinh
c0ff447edf Aarch64: Improve codegen in SVE exp and users, and update expf_inline
f5ff34cb3c AArch64: Improve codegen for SVE erfcf
0b195651db AArch64: Improve codegen for SVE pow
95e807209b AArch64: Improve codegen for SVE powf
d3f2b71ef1 aarch64: Fix tests not compatible with targets supporting GCS
f86b4cf875 AArch64: Improve codegen in SVE expm1f and users
140b985e5a AArch64: Improve codegen in AdvSIMD asinh
91c1fadba3 AArch64: Improve codegen for SVE log1pf users
cff9648d0b AArch64: Improve codegen of AdvSIMD expf family
569cfaaf49 AArch64: Improve codegen in AdvSIMD pow
ca0c0d0f26 AArch64: Improve codegen in users of ADVSIMD log1p helper
13a7ef5999 AArch64: Improve codegen in users of ADVSIMD expm1 helper
2d82d781a5 AArch64: Remove SVE erf and erfc tables
1cf29fbc5b AArch64: Small optimisation in AdvSIMD erf and erfc
7b8c134b54 AArch64: Improve codegen in SVE expf & related routines
a15b1394b5 AArch64: Improve codegen in SVE F32 logs
5bc100bd4b AArch64: Improve codegen in users of AdvSIMD log1pf helper
7900ac490d AArch64: Improve codegen in users of ADVSIMD expm1f helper
0fed0b250f aarch64/fpu: Add vector variants of pow
75207bde68 aarch64/fpu: Add vector variants of cbrt
157f89fa3d aarch64/fpu: Add vector variants of hypot
90a6ca8b28 aarch64: Fix AdvSIMD libmvec routines for big-endian
87cb1dfcd6 aarch64/fpu: Add vector variants of erfc
3d3a4fb8e4 aarch64/fpu: Add vector variants of tanh
eedbbca0bf aarch64/fpu: Add vector variants of sinh
8b67920528 aarch64/fpu: Add vector variants of atanh
81406ea3c5 aarch64/fpu: Add vector variants of asinh
b09fee1d21 aarch64/fpu: Add vector variants of acosh
bdb5705b7b aarch64/fpu: Add vector variants of cosh
cb5d84f1f8 aarch64/fpu: Add vector variants of erf

Resolves: RHEL-118273
2025-12-05 16:24:54 +01:00


commit f86b4cf87581cf1e45702b07880679ffa0b1f47a
Author: Luna Lamb <luna.lamb@arm.com>
Date:   Fri Jan 3 20:15:17 2025 +0000

    AArch64: Improve codegen in SVE expm1f and users

    Use unpredicated muls, use absolute compare and improve memory access.
    Expm1f, sinhf and tanhf show 7%, 5% and 1% improvement in throughput
    microbenchmark on Neoverse V1.
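
The headline change below swaps the classic shift trick for rounding (add, then subtract, 0x1.8p23f) for a single round instruction (svrinta), and builds 2^i with svscale instead of exponent-field bit surgery. A minimal scalar sketch of the two rounding idioms, for illustration only; the helper names are ours, not part of the patch:

    #include <math.h>

    /* Old idiom: adding 1.5 * 2^23 pushes the fraction out of the
       mantissa, so the add/subtract pair rounds x to an integer in the
       current rounding mode.  Valid only for |x| < 2^22, and it costs a
       dependent add, subtract and convert.  */
    static float
    round_via_shift (float x)
    {
      const float shift = 0x1.8p23f;
      return (x + shift) - shift;
    }

    /* New idiom: one round-to-nearest, ties-away instruction
       (FRINTA, svrinta in ACLE); roundf has the same tie behaviour.  */
    static float
    round_direct (float x)
    {
      return roundf (x);
    }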
diff --git a/sysdeps/aarch64/fpu/expm1f_sve.c b/sysdeps/aarch64/fpu/expm1f_sve.c
index 7c852125cdbd0a2b..05a66400d477b819 100644
--- a/sysdeps/aarch64/fpu/expm1f_sve.c
+++ b/sysdeps/aarch64/fpu/expm1f_sve.c
@@ -18,7 +18,6 @@
<https://www.gnu.org/licenses/>. */
#include "sv_math.h"
-#include "poly_sve_f32.h"
/* Largest value of x for which expm1(x) should round to -1. */
#define SpecialBound 0x1.5ebc4p+6f
@@ -28,20 +27,17 @@ static const struct data
/* These 4 are grouped together so they can be loaded as one quadword, then
used with _lane forms of svmla/svmls. */
float c2, c4, ln2_hi, ln2_lo;
- float c0, c1, c3, inv_ln2, special_bound, shift;
+ float c0, inv_ln2, c1, c3, special_bound;
} data = {
/* Generated using fpminimax. */
.c0 = 0x1.fffffep-2, .c1 = 0x1.5554aep-3,
.c2 = 0x1.555736p-5, .c3 = 0x1.12287cp-7,
- .c4 = 0x1.6b55a2p-10,
- .special_bound = SpecialBound, .shift = 0x1.8p23f,
- .inv_ln2 = 0x1.715476p+0f, .ln2_hi = 0x1.62e4p-1f,
- .ln2_lo = 0x1.7f7d1cp-20f,
+ .c4 = 0x1.6b55a2p-10, .inv_ln2 = 0x1.715476p+0f,
+ .special_bound = SpecialBound, .ln2_lo = 0x1.7f7d1cp-20f,
+ .ln2_hi = 0x1.62e4p-1f,
};
-#define C(i) sv_f32 (d->c##i)
-
static svfloat32_t NOINLINE
special_case (svfloat32_t x, svbool_t pg)
{
@@ -71,9 +67,8 @@ svfloat32_t SV_NAME_F1 (expm1) (svfloat32_t x, svbool_t pg)
and f = x - i * ln2, then f is in [-ln2/2, ln2/2].
exp(x) - 1 = 2^i * (expm1(f) + 1) - 1
where 2^i is exact because i is an integer. */
- svfloat32_t j = svmla_x (pg, sv_f32 (d->shift), x, d->inv_ln2);
- j = svsub_x (pg, j, d->shift);
- svint32_t i = svcvt_s32_x (pg, j);
+ svfloat32_t j = svmul_x (svptrue_b32 (), x, d->inv_ln2);
+ j = svrinta_x (pg, j);
svfloat32_t f = svmls_lane (x, j, lane_constants, 2);
f = svmls_lane (f, j, lane_constants, 3);
@@ -83,17 +78,17 @@ svfloat32_t SV_NAME_F1 (expm1) (svfloat32_t x, svbool_t pg)
x + ax^2 + bx^3 + cx^4 ....
So we calculate the polynomial P(f) = a + bf + cf^2 + ...
and assemble the approximation expm1(f) ~= f + f^2 * P(f). */
- svfloat32_t p12 = svmla_lane (C (1), f, lane_constants, 0);
- svfloat32_t p34 = svmla_lane (C (3), f, lane_constants, 1);
- svfloat32_t f2 = svmul_x (pg, f, f);
+ svfloat32_t p12 = svmla_lane (sv_f32 (d->c1), f, lane_constants, 0);
+ svfloat32_t p34 = svmla_lane (sv_f32 (d->c3), f, lane_constants, 1);
+ svfloat32_t f2 = svmul_x (svptrue_b32 (), f, f);
svfloat32_t p = svmla_x (pg, p12, f2, p34);
- p = svmla_x (pg, C (0), f, p);
+
+ p = svmla_x (pg, sv_f32 (d->c0), f, p);
p = svmla_x (pg, f, f2, p);
/* Assemble the result.
expm1(x) ~= 2^i * (p + 1) - 1
Let t = 2^i. */
- svfloat32_t t = svreinterpret_f32 (
- svadd_x (pg, svreinterpret_u32 (svlsl_x (pg, i, 23)), 0x3f800000));
- return svmla_x (pg, svsub_x (pg, t, 1), p, t);
+ svfloat32_t t = svscale_x (pg, sv_f32 (1.0f), svcvt_s32_x (pg, j));
+ return svmla_x (pg, svsub_x (pg, t, 1.0f), p, t);
}
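
Stripped of the SVE plumbing, the kernel above computes expm1f by the range reduction and polynomial described in its comments. A hypothetical scalar transcription, with coefficients copied from the data table above (special-case handling for |x| >= SpecialBound omitted):

    #include <math.h>

    static float
    expm1f_sketch (float x)
    {
      /* Reduce: x = j*ln2 + f with j integral, |f| <= ln2/2.  ln2 is
         split hi/lo so the subtraction stays accurate.  */
      const float inv_ln2 = 0x1.715476p+0f;
      const float ln2_hi = 0x1.62e4p-1f, ln2_lo = 0x1.7f7d1cp-20f;
      float j = roundf (x * inv_ln2);
      float f = x - j * ln2_hi;
      f = f - j * ln2_lo;

      /* expm1(f) ~= f + f^2 * P(f), P generated with fpminimax.  */
      const float c0 = 0x1.fffffep-2f, c1 = 0x1.5554aep-3f,
                  c2 = 0x1.555736p-5f, c3 = 0x1.12287cp-7f,
                  c4 = 0x1.6b55a2p-10f;
      float f2 = f * f;
      float p = c0 + f * (c1 + f * (c2 + f * (c3 + f * c4)));
      float e = f + f2 * p;

      /* expm1(x) = 2^j * (expm1(f) + 1) - 1; 2^j via ldexpf, the
         scalar analogue of svscale.  */
      float t = ldexpf (1.0f, (int) j);
      return t * e + (t - 1.0f);
    }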
diff --git a/sysdeps/aarch64/fpu/sinhf_sve.c b/sysdeps/aarch64/fpu/sinhf_sve.c
index 6c204b57a2aa18d3..50dd386774b005ca 100644
--- a/sysdeps/aarch64/fpu/sinhf_sve.c
+++ b/sysdeps/aarch64/fpu/sinhf_sve.c
@@ -63,5 +63,5 @@ svfloat32_t SV_NAME_F1 (sinh) (svfloat32_t x, const svbool_t pg)
if (__glibc_unlikely (svptest_any (pg, special)))
return special_case (x, svmul_x (pg, t, halfsign), special);
- return svmul_x (pg, t, halfsign);
+ return svmul_x (svptrue_b32 (), t, halfsign);
}
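
The sinhf hunk is the same optimisation in miniature: the final multiply feeds straight into the return value, so no lane of the result depends on pg and the operation can run unpredicated. A small ACLE sketch of the two forms, assuming an SVE-enabled aarch64 toolchain (function names are ours):

    #include <arm_sve.h>

    /* With _x predication, inactive lanes are undefined anyway, so
       passing svptrue_b32 () lets the compiler emit a plain FMUL with
       no dependency on the incoming predicate.  */
    svfloat32_t
    mul_predicated (svbool_t pg, svfloat32_t t, svfloat32_t halfsign)
    {
      return svmul_x (pg, t, halfsign);
    }

    svfloat32_t
    mul_unpredicated (svfloat32_t t, svfloat32_t halfsign)
    {
      return svmul_x (svptrue_b32 (), t, halfsign);
    }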
diff --git a/sysdeps/aarch64/fpu/sv_expm1f_inline.h b/sysdeps/aarch64/fpu/sv_expm1f_inline.h
index 5b7245122294e1b4..e46ddda5437dc826 100644
--- a/sysdeps/aarch64/fpu/sv_expm1f_inline.h
+++ b/sysdeps/aarch64/fpu/sv_expm1f_inline.h
@@ -27,21 +27,18 @@ struct sv_expm1f_data
/* These 4 are grouped together so they can be loaded as one quadword, then
used with _lane forms of svmla/svmls. */
float32_t c2, c4, ln2_hi, ln2_lo;
- float32_t c0, c1, c3, inv_ln2, shift;
+ float c0, inv_ln2, c1, c3, special_bound;
};
/* Coefficients generated using fpminimax. */
#define SV_EXPM1F_DATA \
{ \
- .c0 = 0x1.fffffep-2, .c1 = 0x1.5554aep-3, .c2 = 0x1.555736p-5, \
- .c3 = 0x1.12287cp-7, .c4 = 0x1.6b55a2p-10, \
+ .c0 = 0x1.fffffep-2, .c1 = 0x1.5554aep-3, .inv_ln2 = 0x1.715476p+0f, \
+ .c2 = 0x1.555736p-5, .c3 = 0x1.12287cp-7, \
\
- .shift = 0x1.8p23f, .inv_ln2 = 0x1.715476p+0f, .ln2_hi = 0x1.62e4p-1f, \
- .ln2_lo = 0x1.7f7d1cp-20f, \
+ .c4 = 0x1.6b55a2p-10, .ln2_lo = 0x1.7f7d1cp-20f, .ln2_hi = 0x1.62e4p-1f, \
}
-#define C(i) sv_f32 (d->c##i)
-
static inline svfloat32_t
expm1f_inline (svfloat32_t x, svbool_t pg, const struct sv_expm1f_data *d)
{
@@ -55,9 +52,8 @@ expm1f_inline (svfloat32_t x, svbool_t pg, const struct sv_expm1f_data *d)
and f = x - i * ln2, then f is in [-ln2/2, ln2/2].
exp(x) - 1 = 2^i * (expm1(f) + 1) - 1
where 2^i is exact because i is an integer. */
- svfloat32_t j = svmla_x (pg, sv_f32 (d->shift), x, d->inv_ln2);
- j = svsub_x (pg, j, d->shift);
- svint32_t i = svcvt_s32_x (pg, j);
+ svfloat32_t j = svmul_x (svptrue_b32 (), x, d->inv_ln2);
+ j = svrinta_x (pg, j);
svfloat32_t f = svmls_lane (x, j, lane_constants, 2);
f = svmls_lane (f, j, lane_constants, 3);
@@ -67,18 +63,18 @@ expm1f_inline (svfloat32_t x, svbool_t pg, const struct sv_expm1f_data *d)
x + ax^2 + bx^3 + cx^4 ....
So we calculate the polynomial P(f) = a + bf + cf^2 + ...
and assemble the approximation expm1(f) ~= f + f^2 * P(f). */
- svfloat32_t p12 = svmla_lane (C (1), f, lane_constants, 0);
- svfloat32_t p34 = svmla_lane (C (3), f, lane_constants, 1);
- svfloat32_t f2 = svmul_x (pg, f, f);
+ svfloat32_t p12 = svmla_lane (sv_f32 (d->c1), f, lane_constants, 0);
+ svfloat32_t p34 = svmla_lane (sv_f32 (d->c3), f, lane_constants, 1);
+ svfloat32_t f2 = svmul_x (svptrue_b32 (), f, f);
svfloat32_t p = svmla_x (pg, p12, f2, p34);
- p = svmla_x (pg, C (0), f, p);
+ p = svmla_x (pg, sv_f32 (d->c0), f, p);
p = svmla_x (pg, f, f2, p);
/* Assemble the result.
expm1(x) ~= 2^i * (p + 1) - 1
Let t = 2^i. */
- svfloat32_t t = svscale_x (pg, sv_f32 (1), i);
- return svmla_x (pg, svsub_x (pg, t, 1), p, t);
+ svfloat32_t t = svscale_x (pg, sv_f32 (1.0f), svcvt_s32_x (pg, j));
+ return svmla_x (pg, svsub_x (pg, t, 1.0f), p, t);
}
#endif
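
Both copies of the kernel also change how t = 2^i is materialised: the old code shifted i into the exponent field and added the bit pattern of 1.0f; the new code uses svscale (the FSCALE instruction). A scalar illustration of the equivalence, names ours, normal-range i only:

    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    /* Old route: plant i in the exponent bits of 1.0f.  Correct only
       while the exponent stays in the normal range.  */
    static float
    pow2_bits (int32_t i)
    {
      uint32_t u = ((uint32_t) i << 23) + 0x3f800000u; /* bits of 1.0f */
      float r;
      memcpy (&r, &u, sizeof r);
      return r;
    }

    /* New route: scale 1.0f by 2^i, as FSCALE/svscale does; ldexpf also
       degrades gracefully to subnormals and infinity at the extremes.  */
    static float
    pow2_scale (int32_t i)
    {
      return ldexpf (1.0f, i);
    }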
diff --git a/sysdeps/aarch64/fpu/tanhf_sve.c b/sysdeps/aarch64/fpu/tanhf_sve.c
index 0b94523cf5074200..80dd679346f13f37 100644
--- a/sysdeps/aarch64/fpu/tanhf_sve.c
+++ b/sysdeps/aarch64/fpu/tanhf_sve.c
@@ -19,20 +19,27 @@
#include "sv_expm1f_inline.h"
+/* Largest value of x for which tanhf(x) rounds to 1 (or -1 for negative). */
+#define BoringBound 0x1.205966p+3f
+
static const struct data
{
struct sv_expm1f_data expm1f_consts;
- uint32_t boring_bound, onef;
+ uint32_t onef, special_bound;
+ float boring_bound;
} data = {
.expm1f_consts = SV_EXPM1F_DATA,
- /* 0x1.205966p+3, above which tanhf rounds to 1 (or -1 for negative). */
- .boring_bound = 0x41102cb3,
.onef = 0x3f800000,
+ .special_bound = 0x7f800000,
+ .boring_bound = BoringBound,
};
static svfloat32_t NOINLINE
-special_case (svfloat32_t x, svfloat32_t y, svbool_t special)
+special_case (svfloat32_t x, svbool_t pg, svbool_t is_boring,
+ svfloat32_t boring, svfloat32_t q, svbool_t special)
{
+ svfloat32_t y
+ = svsel_f32 (is_boring, boring, svdiv_x (pg, q, svadd_x (pg, q, 2.0)));
return sv_call_f32 (tanhf, x, y, special);
}
@@ -47,15 +54,16 @@ svfloat32_t SV_NAME_F1 (tanh) (svfloat32_t x, const svbool_t pg)
svfloat32_t ax = svabs_x (pg, x);
svuint32_t iax = svreinterpret_u32 (ax);
svuint32_t sign = sveor_x (pg, svreinterpret_u32 (x), iax);
- svbool_t is_boring = svcmpgt (pg, iax, d->boring_bound);
svfloat32_t boring = svreinterpret_f32 (svorr_x (pg, sign, d->onef));
-
- svbool_t special = svcmpgt (pg, iax, 0x7f800000);
+ svbool_t special = svcmpgt (pg, iax, d->special_bound);
+ svbool_t is_boring = svacgt (pg, x, d->boring_bound);
/* tanh(x) = (e^2x - 1) / (e^2x + 1). */
- svfloat32_t q = expm1f_inline (svmul_x (pg, x, 2.0), pg, &d->expm1f_consts);
- svfloat32_t y = svdiv_x (pg, q, svadd_x (pg, q, 2.0));
+ svfloat32_t q = expm1f_inline (svmul_x (svptrue_b32 (), x, 2.0), pg,
+ &d->expm1f_consts);
+
if (__glibc_unlikely (svptest_any (pg, special)))
- return special_case (x, svsel_f32 (is_boring, boring, y), special);
+ return special_case (x, pg, is_boring, boring, q, special);
+ svfloat32_t y = svdiv_x (pg, q, svadd_x (pg, q, 2.0));
return svsel_f32 (is_boring, boring, y);
}
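
The tanhf rewrite leaves the underlying identity alone: it moves the division past the special-case test (the slow path now recomputes it inside special_case) and replaces the integer compare against the bit pattern of the bound with an absolute compare (svacgt) on the float itself. The identity, as a hypothetical scalar sketch using the bound from the data table:

    #include <math.h>

    /* tanh(x) = (e^2x - 1) / (e^2x + 1) = q / (q + 2), q = expm1(2x).
       Above BoringBound the result rounds to +/-1 in single precision.  */
    static float
    tanhf_sketch (float x)
    {
      const float boring_bound = 0x1.205966p+3f;
      if (fabsf (x) > boring_bound)
        return copysignf (1.0f, x);
      float q = expm1f (2.0f * x);
      return q / (q + 2.0f);
    }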