glibc/glibc-RHEL-118273-33.patch
Yuki Inoguchi 9dd92cac18 aarch64: Add GLIBC_2.40 vector functions and performance fixes (RHEL-118273)
This combines the following upstream commits:

e45af510bc AArch64: Fix instability in AdvSIMD sinh
6c22823da5 AArch64: Fix instability in AdvSIMD tan
aebaeb2c33 AArch64: Update math-vector-fortran.h
e20ca759af AArch64: add optimised strspn/strcspn
aac077645a AArch64: Fix SVE powf routine [BZ #33299]
1e3d1ddf97 AArch64: Optimize SVE exp functions
dee22d2a81 AArch64: Optimise SVE FP64 Hyperbolics
6849c5b791 AArch64: Improve codegen SVE log1p helper
09795c5612 AArch64: Fix build error with GCC 12.1/12.2
aa18367c11 AArch64: Improve enabling of SVE for libmvec
691edbdf77 aarch64: fix unwinding in longjmp
4352e2cc93 aarch64: Fix _dl_tlsdesc_dynamic unwind for pac-ret (BZ 32612)
cf56eb28fa AArch64: Optimize algorithm in users of SVE expf helper
ce2f26a22e AArch64: Remove PTR_ARG/SIZE_ARG defines
8f0e7fe61e Aarch64: Improve codegen in SVE asinh
c0ff447edf Aarch64: Improve codegen in SVE exp and users, and update expf_inline
f5ff34cb3c AArch64: Improve codegen for SVE erfcf
0b195651db AArch64: Improve codegen for SVE pow
95e807209b AArch64: Improve codegen for SVE powf
d3f2b71ef1 aarch64: Fix tests not compatible with targets supporting GCS
f86b4cf875 AArch64: Improve codegen in SVE expm1f and users
140b985e5a AArch64: Improve codegen in AdvSIMD asinh
91c1fadba3 AArch64: Improve codegen for SVE log1pf users
cff9648d0b AArch64: Improve codegen of AdvSIMD expf family
569cfaaf49 AArch64: Improve codegen in AdvSIMD pow
ca0c0d0f26 AArch64: Improve codegen in users of ADVSIMD log1p helper
13a7ef5999 AArch64: Improve codegen in users of ADVSIMD expm1 helper
2d82d781a5 AArch64: Remove SVE erf and erfc tables
1cf29fbc5b AArch64: Small optimisation in AdvSIMD erf and erfc
7b8c134b54 AArch64: Improve codegen in SVE expf & related routines
a15b1394b5 AArch64: Improve codegen in SVE F32 logs
5bc100bd4b AArch64: Improve codegen in users of AdvSIMD log1pf helper
7900ac490d AArch64: Improve codegen in users of ADVSIMD expm1f helper
0fed0b250f aarch64/fpu: Add vector variants of pow
75207bde68 aarch64/fpu: Add vector variants of cbrt
157f89fa3d aarch64/fpu: Add vector variants of hypot
90a6ca8b28 aarch64: Fix AdvSIMD libmvec routines for big-endian
87cb1dfcd6 aarch64/fpu: Add vector variants of erfc
3d3a4fb8e4 aarch64/fpu: Add vector variants of tanh
eedbbca0bf aarch64/fpu: Add vector variants of sinh
8b67920528 aarch64/fpu: Add vector variants of atanh
81406ea3c5 aarch64/fpu: Add vector variants of asinh
b09fee1d21 aarch64/fpu: Add vector variants of acosh
bdb5705b7b aarch64/fpu: Add vector variants of cosh
cb5d84f1f8 aarch64/fpu: Add vector variants of erf

Resolves: RHEL-118273
Date: 2025-12-05 16:24:54 +01:00

commit cf56eb28fa277d9dbb301654682ca89f71c30a48
Author: Pierre Blanchard <pierre.blanchard@arm.com>
Date:   Tue Mar 18 17:07:31 2025 +0000

AArch64: Optimize algorithm in users of SVE expf helper

The polynomial order was unnecessarily high; reducing it unlocks
multiple optimizations.
Max error for the new SVE expf is 0.88 +0.5 ULP.
Max error for the new SVE coshf is 2.56 +0.5 ULP.
Performance improvement on Neoverse V1: expf (30%), coshf (26%).

Reviewed-by: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
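
For context, here is how the slimmed-down algorithm fits together. The
shift constant 1.5*2^17 + 127 makes the float rounding in
z = x/ln2 + shift snap n to a multiple of 2^-6, and svexpa (the SVE
FEXPA instruction) converts the bit pattern of z directly into 2^n; the
reduced argument r = x - n*ln2 then lies in roughly
[-ln2/128, ln2/128], which is why the degree-2 polynomial r + 0.5*r^2
reaches the quoted error bound. The scalar C model below is an
illustrative sketch, not the glibc code: expf_model is a made-up name,
FEXPA is approximated with exp2f, the r range is inferred from the
constants, and special-case handling (overflow, underflow, NaN) is
omitted.

/* Scalar model of the reworked expf_inline algorithm (illustrative
   sketch only; not the glibc implementation).  */
#include <math.h>
#include <stdio.h>

static float
expf_model (float x)
{
  const float inv_ln2 = 0x1.715476p+0f; /* 1/ln2 */
  const float ln2_hi = 0x1.62e4p-1f;    /* high bits of ln2 */
  const float ln2_lo = 0x1.7f7d1cp-20f; /* low bits of ln2 */
  const float shift = 0x1.803f8p17f;    /* 1.5*2^17 + 127 */

  /* n = round(64*x/ln2)/64: adding and subtracting the large shift
     rounds x/ln2 to a multiple of 2^-6 via float rounding.  */
  float z = x * inv_ln2 + shift;
  float n = z - shift;

  /* r = x - n*ln2, in two steps for extra precision; r stays in about
     [-ln2/128, ln2/128].  */
  float r = x - n * ln2_hi;
  r = r - n * ln2_lo;

  /* The SVE code gets 2^n from FEXPA on the bit pattern of z; here we
     simply call exp2f instead.  */
  float scale = exp2f (n);

  /* Degree-2 polynomial: exp(r) - 1 ~= r + 0.5*r^2 on this tiny range.  */
  float poly = r + 0.5f * r * r;
  return scale + scale * poly;
}

int
main (void)
{
  for (float x = -4.0f; x <= 4.0f; x += 0.5f)
    printf ("x=%+.2f model=%a libm=%a\n", x, expf_model (x), expf (x));
  return 0;
}

The diff below shows the actual SVE implementation of this scheme.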
diff --git a/sysdeps/aarch64/fpu/coshf_sve.c b/sysdeps/aarch64/fpu/coshf_sve.c
index 7ad6efa0fc218278..508c0790ee89e0cd 100644
--- a/sysdeps/aarch64/fpu/coshf_sve.c
+++ b/sysdeps/aarch64/fpu/coshf_sve.c
@@ -39,9 +39,9 @@ special_case (svfloat32_t x, svfloat32_t half_e, svfloat32_t half_over_e,
}
/* Single-precision vector cosh, using vector expf.
- Maximum error is 2.77 ULP:
- _ZGVsMxv_coshf(-0x1.5b38f4p+1) got 0x1.e45946p+2
- want 0x1.e4594cp+2. */
+ Maximum error is 2.56 +0.5 ULP:
+ _ZGVsMxv_coshf(-0x1.5b40f4p+1) got 0x1.e47748p+2
+ want 0x1.e4774ep+2. */
svfloat32_t SV_NAME_F1 (cosh) (svfloat32_t x, svbool_t pg)
{
const struct data *d = ptr_barrier (&data);
diff --git a/sysdeps/aarch64/fpu/expf_sve.c b/sysdeps/aarch64/fpu/expf_sve.c
index da93e01b87e0e890..aee86a203379efb3 100644
--- a/sysdeps/aarch64/fpu/expf_sve.c
+++ b/sysdeps/aarch64/fpu/expf_sve.c
@@ -40,9 +40,9 @@ special_case (svfloat32_t x, svbool_t special, const struct sv_expf_data *d)
}
/* Optimised single-precision SVE exp function.
- Worst-case error is 1.04 ulp:
- SV_NAME_F1 (exp)(0x1.a8eda4p+1) got 0x1.ba74bcp+4
- want 0x1.ba74bap+4. */
+ Worst-case error is 0.88 +0.50 ULP:
+ _ZGVsMxv_expf(-0x1.bba276p-6) got 0x1.f25288p-1
+ want 0x1.f2528ap-1. */
svfloat32_t SV_NAME_F1 (exp) (svfloat32_t x, const svbool_t pg)
{
const struct data *d = ptr_barrier (&data);
diff --git a/sysdeps/aarch64/fpu/sv_expf_inline.h b/sysdeps/aarch64/fpu/sv_expf_inline.h
index 75781fb4ddcb9790..01fbb4d4c046eb3b 100644
--- a/sysdeps/aarch64/fpu/sv_expf_inline.h
+++ b/sysdeps/aarch64/fpu/sv_expf_inline.h
@@ -24,50 +24,40 @@
struct sv_expf_data
{
- float c1, c3, inv_ln2;
- float ln2_lo, c0, c2, c4;
- float ln2_hi, shift;
+ float ln2_hi, ln2_lo, c1, null;
+ float inv_ln2, shift;
};
-/* Coefficients copied from the polynomial in AdvSIMD variant, reversed for
- compatibility with polynomial helpers. Shift is 1.5*2^17 + 127. */
+/* Shift is 1.5*2^17 + 127. */
#define SV_EXPF_DATA \
{ \
- /* Coefficients copied from the polynomial in AdvSIMD variant. */ \
- .c0 = 0x1.ffffecp-1f, .c1 = 0x1.fffdb6p-2f, .c2 = 0x1.555e66p-3f, \
- .c3 = 0x1.573e2ep-5f, .c4 = 0x1.0e4020p-7f, .inv_ln2 = 0x1.715476p+0f, \
- .ln2_hi = 0x1.62e4p-1f, .ln2_lo = 0x1.7f7d1cp-20f, \
- .shift = 0x1.803f8p17f, \
+ .c1 = 0.5f, .inv_ln2 = 0x1.715476p+0f, .ln2_hi = 0x1.62e4p-1f, \
+ .ln2_lo = 0x1.7f7d1cp-20f, .shift = 0x1.803f8p17f, \
}
-#define C(i) sv_f32 (d->poly[i])
-
static inline svfloat32_t
expf_inline (svfloat32_t x, const svbool_t pg, const struct sv_expf_data *d)
{
/* exp(x) = 2^n (1 + poly(r)), with 1 + poly(r) in [1/sqrt(2),sqrt(2)]
x = ln2*n + r, with r in [-ln2/2, ln2/2]. */
- svfloat32_t lane_consts = svld1rq (svptrue_b32 (), &d->ln2_lo);
+ svfloat32_t lane_consts = svld1rq (svptrue_b32 (), &d->ln2_hi);
/* n = round(x/(ln2/N)). */
svfloat32_t z = svmad_x (pg, sv_f32 (d->inv_ln2), x, d->shift);
svfloat32_t n = svsub_x (pg, z, d->shift);
/* r = x - n*ln2/N. */
- svfloat32_t r = svmsb_x (pg, sv_f32 (d->ln2_hi), n, x);
+ svfloat32_t r = x;
r = svmls_lane (r, n, lane_consts, 0);
+ r = svmls_lane (r, n, lane_consts, 1);
/* scale = 2^(n/N). */
svfloat32_t scale = svexpa (svreinterpret_u32 (z));
- /* poly(r) = exp(r) - 1 ~= C0 r + C1 r^2 + C2 r^3 + C3 r^4 + C4 r^5. */
- svfloat32_t p12 = svmla_lane (sv_f32 (d->c1), r, lane_consts, 2);
- svfloat32_t p34 = svmla_lane (sv_f32 (d->c3), r, lane_consts, 3);
+ /* poly(r) = exp(r) - 1 ~= r + 0.5 r^2. */
svfloat32_t r2 = svmul_x (svptrue_b32 (), r, r);
- svfloat32_t p14 = svmla_x (pg, p12, p34, r2);
- svfloat32_t p0 = svmul_lane (r, lane_consts, 1);
- svfloat32_t poly = svmla_x (pg, p0, r2, p14);
+ svfloat32_t poly = svmla_lane (r, r2, lane_consts, 2);
return svmla_x (pg, scale, scale, poly);
}
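
To close the loop on the coshf change above: the special_case signature
(x, half_e, half_over_e, ...) in the coshf hunk reflects that the vector
routine computes e^x once via the shared helper and derives both halves
of cosh(x) = (e^x + e^-x)/2 from it. A minimal scalar sketch, using
libm's expf as a stand-in for the vector helper (coshf_model is a
made-up name):

#include <math.h>

/* Illustrative scalar sketch only; the glibc SVE routine vectorises
   this shape and branches to special_case for large |x|.  */
static float
coshf_model (float x)
{
  float e = expf (x);           /* one call to the exp helper */
  float half_e = 0.5f * e;      /* 0.5 * e^x  */
  float half_over_e = 0.5f / e; /* 0.5 * e^-x */
  return half_e + half_over_e;  /* cosh(x) = (e^x + e^-x) / 2 */
}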