glibc/glibc-RHEL-118273-38.patch
Yuki Inoguchi 9dd92cac18 aarch64: Add GLIBC_2.40 vector functions and performance fixes (RHEL-118273)
This combines the following upstream commits:

e45af510bc AArch64: Fix instability in AdvSIMD sinh
6c22823da5 AArch64: Fix instability in AdvSIMD tan
aebaeb2c33 AArch64: Update math-vector-fortran.h
e20ca759af AArch64: add optimised strspn/strcspn
aac077645a AArch64: Fix SVE powf routine [BZ #33299]
1e3d1ddf97 AArch64: Optimize SVE exp functions
dee22d2a81 AArch64: Optimise SVE FP64 Hyperbolics
6849c5b791 AArch64: Improve codegen SVE log1p helper
09795c5612 AArch64: Fix build error with GCC 12.1/12.2
aa18367c11 AArch64: Improve enabling of SVE for libmvec
691edbdf77 aarch64: fix unwinding in longjmp
4352e2cc93 aarch64: Fix _dl_tlsdesc_dynamic unwind for pac-ret (BZ 32612)
cf56eb28fa AArch64: Optimize algorithm in users of SVE expf helper
ce2f26a22e AArch64: Remove PTR_ARG/SIZE_ARG defines
8f0e7fe61e Aarch64: Improve codegen in SVE asinh
c0ff447edf Aarch64: Improve codegen in SVE exp and users, and update expf_inline
f5ff34cb3c AArch64: Improve codegen for SVE erfcf
0b195651db AArch64: Improve codegen for SVE pow
95e807209b AArch64: Improve codegen for SVE powf
d3f2b71ef1 aarch64: Fix tests not compatible with targets supporting GCS
f86b4cf875 AArch64: Improve codegen in SVE expm1f and users
140b985e5a AArch64: Improve codegen in AdvSIMD asinh
91c1fadba3 AArch64: Improve codegen for SVE log1pf users
cff9648d0b AArch64: Improve codegen of AdvSIMD expf family
569cfaaf49 AArch64: Improve codegen in AdvSIMD pow
ca0c0d0f26 AArch64: Improve codegen in users of ADVSIMD log1p helper
13a7ef5999 AArch64: Improve codegen in users of ADVSIMD expm1 helper
2d82d781a5 AArch64: Remove SVE erf and erfc tables
1cf29fbc5b AArch64: Small optimisation in AdvSIMD erf and erfc
7b8c134b54 AArch64: Improve codegen in SVE expf & related routines
a15b1394b5 AArch64: Improve codegen in SVE F32 logs
5bc100bd4b AArch64: Improve codegen in users of AdvSIMD log1pf helper
7900ac490d AArch64: Improve codegen in users of ADVSIMD expm1f helper
0fed0b250f aarch64/fpu: Add vector variants of pow
75207bde68 aarch64/fpu: Add vector variants of cbrt
157f89fa3d aarch64/fpu: Add vector variants of hypot
90a6ca8b28 aarch64: Fix AdvSIMD libmvec routines for big-endian
87cb1dfcd6 aarch64/fpu: Add vector variants of erfc
3d3a4fb8e4 aarch64/fpu: Add vector variants of tanh
eedbbca0bf aarch64/fpu: Add vector variants of sinh
8b67920528 aarch64/fpu: Add vector variants of atanh
81406ea3c5 aarch64/fpu: Add vector variants of asinh
b09fee1d21 aarch64/fpu: Add vector variants of acosh
bdb5705b7b aarch64/fpu: Add vector variants of cosh
cb5d84f1f8 aarch64/fpu: Add vector variants of erf

Resolves: RHEL-118273

commit 6849c5b791edd216f2ec3fdbe4d138bc69b9b333
Author: Luna Lamb <luna.lamb@arm.com>
Date: Wed Jun 18 16:12:19 2025 +0000
AArch64: Improve codegen SVE log1p helper
Improve codegen by packing coefficients.
4% and 2% improvement in throughput microbenchmark on Neoverse V1, for acosh
and atanh respectively.
Reviewed-by: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
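
Aside (this note and the sketch below are not part of the patch): "packing coefficients" here means interleaving the polynomial coefficients in memory so that consecutive odd-order ones share a 128-bit quadword. A single svld1rq then replicates such a pair across the whole vector, and svmla_lane selects either element by immediate index, saving one broadcast per coefficient and freeing registers. A minimal degree-3 sketch of the pattern, with hypothetical names and placeholder values:

#include <arm_sve.h>

/* Hypothetical layout: c1 and c3 are adjacent so they share one
   128-bit quadword and can be loaded with a single svld1rq.  */
static const struct
{
  double c0, c2; /* even coefficients, broadcast individually.  */
  double c1, c3; /* odd coefficients, loaded pairwise.  */
} data = { .c0 = 1.0, .c2 = 0.5, .c1 = -0.25, .c3 = 0.125 };

/* Evaluate c0 + c1*x + c2*x^2 + c3*x^3 with a two-level Estrin scheme.  */
static svfloat64_t
poly3 (svfloat64_t x, svbool_t pg)
{
  svfloat64_t x2 = svmul_x (svptrue_b64 (), x, x);
  /* Replicate { c1, c3 } into every 128-bit segment of the vector.  */
  svfloat64_t c13 = svld1rq (svptrue_b64 (), &data.c1);
  /* FMLA by lane: p01 = c0 + x*c1, p23 = c2 + x*c3.  */
  svfloat64_t p01 = svmla_lane (svdup_f64 (data.c0), x, c13, 0);
  svfloat64_t p23 = svmla_lane (svdup_f64 (data.c2), x, c13, 1);
  return svmla_x (pg, p01, x2, p23);
}

Builds with GCC or Clang targeting -march=armv8-a+sve; the patched helper applies the same idea to all nineteen coefficients c0..c18.
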
diff --git a/sysdeps/aarch64/fpu/acosh_sve.c b/sysdeps/aarch64/fpu/acosh_sve.c
index 3e4faaa5ca686c18..78ebcffbb5737641 100644
--- a/sysdeps/aarch64/fpu/acosh_sve.c
+++ b/sysdeps/aarch64/fpu/acosh_sve.c
@@ -30,10 +30,10 @@ special_case (svfloat64_t x, svfloat64_t y, svbool_t special)
}
/* SVE approximation for double-precision acosh, based on log1p.
- The largest observed error is 3.19 ULP in the region where the
+ The largest observed error is 3.14 ULP in the region where the
argument to log1p falls in the k=0 interval, i.e. x close to 1:
- SV_NAME_D1 (acosh)(0x1.1e4388d4ca821p+0) got 0x1.ed23399f5137p-2
- want 0x1.ed23399f51373p-2. */
+ SV_NAME_D1 (acosh)(0x1.1e80ed12f0ad1p+0) got 0x1.ef0cee7c33ce1p-2
+ want 0x1.ef0cee7c33ce4p-2. */
svfloat64_t SV_NAME_D1 (acosh) (svfloat64_t x, const svbool_t pg)
{
/* (ix - One) >= (BigBound - One). */
diff --git a/sysdeps/aarch64/fpu/atanh_sve.c b/sysdeps/aarch64/fpu/atanh_sve.c
index 7a52728d70f6d226..a4803e5c1305379e 100644
--- a/sysdeps/aarch64/fpu/atanh_sve.c
+++ b/sysdeps/aarch64/fpu/atanh_sve.c
@@ -30,7 +30,7 @@ special_case (svfloat64_t x, svfloat64_t y, svbool_t special)
}
/* SVE approximation for double-precision atanh, based on log1p.
- The greatest observed error is 2.81 ULP:
+ The greatest observed error is 3.3 ULP:
_ZGVsMxv_atanh(0x1.ffae6288b601p-6) got 0x1.ffd8ff31b5019p-6
want 0x1.ffd8ff31b501cp-6. */
svfloat64_t SV_NAME_D1 (atanh) (svfloat64_t x, const svbool_t pg)
@@ -42,7 +42,6 @@ svfloat64_t SV_NAME_D1 (atanh) (svfloat64_t x, const svbool_t pg)
svfloat64_t halfsign = svreinterpret_f64 (svorr_x (pg, sign, Half));
/* It is special if iax >= 1. */
-// svbool_t special = svcmpge (pg, iax, One);
svbool_t special = svacge (pg, x, 1.0);
/* Computation is performed based on the following sequence of equality:
diff --git a/sysdeps/aarch64/fpu/sv_log1p_inline.h b/sysdeps/aarch64/fpu/sv_log1p_inline.h
index da019674f94dbac7..a9ecd75d19e95d39 100644
--- a/sysdeps/aarch64/fpu/sv_log1p_inline.h
+++ b/sysdeps/aarch64/fpu/sv_log1p_inline.h
@@ -21,11 +21,12 @@
#define AARCH64_FPU_SV_LOG1P_INLINE_H
#include "sv_math.h"
-#include "poly_sve_f64.h"
static const struct sv_log1p_data
{
- double poly[19], ln2[2];
+ double c0, c2, c4, c6, c8, c10, c12, c14, c16;
+ double c1, c3, c5, c7, c9, c11, c13, c15, c17, c18;
+ double ln2_lo, ln2_hi;
uint64_t hf_rt2_top;
uint64_t one_m_hf_rt2_top;
uint32_t bottom_mask;
@@ -33,15 +34,30 @@ static const struct sv_log1p_data
} sv_log1p_data = {
/* Coefficients generated using Remez, deg=20, in [sqrt(2)/2-1, sqrt(2)-1].
*/
- .poly = { -0x1.ffffffffffffbp-2, 0x1.55555555551a9p-2, -0x1.00000000008e3p-2,
- 0x1.9999999a32797p-3, -0x1.555555552fecfp-3, 0x1.249248e071e5ap-3,
- -0x1.ffffff8bf8482p-4, 0x1.c71c8f07da57ap-4, -0x1.9999ca4ccb617p-4,
- 0x1.7459ad2e1dfa3p-4, -0x1.554d2680a3ff2p-4, 0x1.3b4c54d487455p-4,
- -0x1.2548a9ffe80e6p-4, 0x1.0f389a24b2e07p-4, -0x1.eee4db15db335p-5,
- 0x1.e95b494d4a5ddp-5, -0x1.15fdf07cb7c73p-4, 0x1.0310b70800fcfp-4,
- -0x1.cfa7385bdb37ep-6 },
- .ln2 = { 0x1.62e42fefa3800p-1, 0x1.ef35793c76730p-45 },
+ .c0 = -0x1.ffffffffffffbp-2,
+ .c1 = 0x1.55555555551a9p-2,
+ .c2 = -0x1.00000000008e3p-2,
+ .c3 = 0x1.9999999a32797p-3,
+ .c4 = -0x1.555555552fecfp-3,
+ .c5 = 0x1.249248e071e5ap-3,
+ .c6 = -0x1.ffffff8bf8482p-4,
+ .c7 = 0x1.c71c8f07da57ap-4,
+ .c8 = -0x1.9999ca4ccb617p-4,
+ .c9 = 0x1.7459ad2e1dfa3p-4,
+ .c10 = -0x1.554d2680a3ff2p-4,
+ .c11 = 0x1.3b4c54d487455p-4,
+ .c12 = -0x1.2548a9ffe80e6p-4,
+ .c13 = 0x1.0f389a24b2e07p-4,
+ .c14 = -0x1.eee4db15db335p-5,
+ .c15 = 0x1.e95b494d4a5ddp-5,
+ .c16 = -0x1.15fdf07cb7c73p-4,
+ .c17 = 0x1.0310b70800fcfp-4,
+ .c18 = -0x1.cfa7385bdb37ep-6,
+ .ln2_lo = 0x1.62e42fefa3800p-1,
+ .ln2_hi = 0x1.ef35793c76730p-45,
+ /* top32(asuint64(sqrt(2)/2)) << 32. */
.hf_rt2_top = 0x3fe6a09e00000000,
+ /* (top32(asuint64(1)) - top32(asuint64(sqrt(2)/2))) << 32. */
.one_m_hf_rt2_top = 0x00095f6200000000,
.bottom_mask = 0xffffffff,
.one_top = 0x3ff
@@ -51,14 +67,14 @@ static inline svfloat64_t
sv_log1p_inline (svfloat64_t x, const svbool_t pg)
{
/* Helper for calculating log(x + 1). Adapted from v_log1p_inline.h, which
- differs from v_log1p_2u5.c by:
+ differs from advsimd/log1p.c by:
- No special-case handling - this should be dealt with by the caller.
- Pairwise Horner polynomial evaluation for improved accuracy.
- Optionally simulate the shortcut for k=0, used in the scalar routine,
using svsel, for improved accuracy when the argument to log1p is close
to 0. This feature is enabled by defining WANT_SV_LOG1P_K0_SHORTCUT as 1
in the source of the caller before including this file.
- See sv_log1p_2u1.c for details of the algorithm. */
+ See sve/log1p.c for details of the algorithm. */
const struct sv_log1p_data *d = ptr_barrier (&sv_log1p_data);
svfloat64_t m = svadd_x (pg, x, 1);
svuint64_t mi = svreinterpret_u64 (m);
@@ -79,7 +95,7 @@ sv_log1p_inline (svfloat64_t x, const svbool_t pg)
svfloat64_t cm;
#ifndef WANT_SV_LOG1P_K0_SHORTCUT
-#error \
+#error \
"Cannot use sv_log1p_inline.h without specifying whether you need the k0 shortcut for greater accuracy close to 0"
#elif WANT_SV_LOG1P_K0_SHORTCUT
/* Shortcut if k is 0 - set correction term to 0 and f to x. The result is
@@ -96,14 +112,46 @@ sv_log1p_inline (svfloat64_t x, const svbool_t pg)
#endif
/* Approximate log1p(f) on the reduced input using a polynomial. */
- svfloat64_t f2 = svmul_x (pg, f, f);
- svfloat64_t p = sv_pw_horner_18_f64_x (pg, f, f2, d->poly);
+ svfloat64_t f2 = svmul_x (svptrue_b64 (), f, f),
+ f4 = svmul_x (svptrue_b64 (), f2, f2),
+ f8 = svmul_x (svptrue_b64 (), f4, f4),
+ f16 = svmul_x (svptrue_b64 (), f8, f8);
+
+ svfloat64_t c13 = svld1rq (svptrue_b64 (), &d->c1);
+ svfloat64_t c57 = svld1rq (svptrue_b64 (), &d->c5);
+ svfloat64_t c911 = svld1rq (svptrue_b64 (), &d->c9);
+ svfloat64_t c1315 = svld1rq (svptrue_b64 (), &d->c13);
+ svfloat64_t c1718 = svld1rq (svptrue_b64 (), &d->c17);
+
+ /* Order-18 Estrin scheme. */
+ svfloat64_t p01 = svmla_lane (sv_f64 (d->c0), f, c13, 0);
+ svfloat64_t p23 = svmla_lane (sv_f64 (d->c2), f, c13, 1);
+ svfloat64_t p45 = svmla_lane (sv_f64 (d->c4), f, c57, 0);
+ svfloat64_t p67 = svmla_lane (sv_f64 (d->c6), f, c57, 1);
+
+ svfloat64_t p03 = svmla_x (pg, p01, f2, p23);
+ svfloat64_t p47 = svmla_x (pg, p45, f2, p67);
+ svfloat64_t p07 = svmla_x (pg, p03, f4, p47);
+
+ svfloat64_t p89 = svmla_lane (sv_f64 (d->c8), f, c911, 0);
+ svfloat64_t p1011 = svmla_lane (sv_f64 (d->c10), f, c911, 1);
+ svfloat64_t p1213 = svmla_lane (sv_f64 (d->c12), f, c1315, 0);
+ svfloat64_t p1415 = svmla_lane (sv_f64 (d->c14), f, c1315, 1);
+
+ svfloat64_t p811 = svmla_x (pg, p89, f2, p1011);
+ svfloat64_t p1215 = svmla_x (pg, p1213, f2, p1415);
+ svfloat64_t p815 = svmla_x (pg, p811, f4, p1215);
+
+ svfloat64_t p015 = svmla_x (pg, p07, f8, p815);
+ svfloat64_t p1617 = svmla_lane (sv_f64 (d->c16), f, c1718, 0);
+ svfloat64_t p1618 = svmla_lane (p1617, f2, c1718, 1);
+ svfloat64_t p = svmla_x (pg, p015, f16, p1618);
/* Assemble log1p(x) = k * log2 + log1p(f) + c/m. */
- svfloat64_t ylo = svmla_x (pg, cm, k, d->ln2[0]);
- svfloat64_t yhi = svmla_x (pg, f, k, d->ln2[1]);
+ svfloat64_t ln2_lo_hi = svld1rq (svptrue_b64 (), &d->ln2_lo);
+ svfloat64_t ylo = svmla_lane (cm, k, ln2_lo_hi, 0);
+ svfloat64_t yhi = svmla_lane (f, k, ln2_lo_hi, 1);
- return svmla_x (pg, svadd_x (pg, ylo, yhi), f2, p);
+ return svmad_x (pg, p, f2, svadd_x (pg, ylo, yhi));
}
-
#endif
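
Aside (not part of the patch): the hunk above swaps the pairwise-Horner helper sv_pw_horner_18_f64_x for a hand-written order-18 Estrin scheme. Estrin evaluates independent pairs, then quads, then octets, so the fused multiply-adds at each level do not depend on one another and can issue in parallel, at the cost of precomputing f2, f4, f8 and f16. A scalar sketch of the same tree for degree 7, using the patch's p01/p23/... naming and caller-supplied coefficients:

/* Degree-7 Estrin evaluation: three levels of independent FMAs.
   The patched helper extends this tree to degree 18 in SVE.  */
static double
estrin_7 (double x, const double c[8])
{
  double x2 = x * x;
  double x4 = x2 * x2;
  double p01 = c[0] + c[1] * x;  /* level 0: independent pairs.  */
  double p23 = c[2] + c[3] * x;
  double p45 = c[4] + c[5] * x;
  double p67 = c[6] + c[7] * x;
  double p03 = p01 + p23 * x2;   /* level 1: combine pairs.  */
  double p47 = p45 + p67 * x2;
  return p03 + p47 * x4;         /* level 2: final combine.  */
}

Horner would need seven serially dependent FMAs at this degree; the tree above has only three on its critical path, with the x2/x4 multiplies computable alongside level 0.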