Upstream commit: fffc2df8a3e2c8cda2991063d23086360268b777

- i386: Provide GLIBC_ABI_GNU_TLS symbol version [BZ #33221]
- i386: Update ___tls_get_addr to preserve vector registers
- Extend struct r_debug to support multiple namespaces (RHEL-101985)
- Fix a potential crash in the dynamic loader when processing specific symbol versions (RHEL-109683)
- Signal la_objopen for ld.so with dlmopen (RHEL-109693)
- Switch to main malloc after final ld.so self-relocation (RHEL-109703)
- Prevent ld.so from asserting and crashing during audited library loads (RHEL-109702)
- x86-64: Provide GLIBC_ABI_DT_X86_64_PLT symbol version (RHEL-109621)
- x86-64, i386: Provide GLIBC_ABI_GNU2_TLS symbol version (RHEL-109625)
- Ensure fallback initialization of ctype TLS data pointers to fix segfaults in programs using dlmopen or auditors (RHEL-72018)
- Handle load segment gaps in _dl_find_object (RHEL-104854)
- AArch64: Improve codegen in SVE log1p
- AArch64: Optimize inverse trig functions
- AArch64: Avoid memset ifunc in cpu-features.c [BZ #33112]

Resolves: RHEL-109536
Resolves: RHEL-72018
Resolves: RHEL-101985
Resolves: RHEL-104854
Resolves: RHEL-109621
Resolves: RHEL-109625
Resolves: RHEL-109683
Resolves: RHEL-109693
Resolves: RHEL-109702
Resolves: RHEL-109703
commit 392e6cf1e86e29fe155c21351cd7b7a0fd371f5b
Author: Luna Lamb <luna.lamb@arm.com>
Date:   Thu May 29 15:22:51 2025 +0000

AArch64: Improve codegen in SVE log1p

Improves memory access and reformats the evaluation scheme to pack
coefficients. 5% improvement in throughput microbenchmark on Neoverse V1.

Reviewed-by: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
(cherry picked from commit da196e6134ede64728006518352d75b6c3902fec)
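For reference, the Estrin scheme used here splits the polynomial into
independent sub-polynomials that can be evaluated in parallel, shortening the
dependency chain relative to Horner's rule. Following the p01/p23/... names in
the diff below, the grouping for P(f) is (a sketch of the pairing only; lane
selection is omitted):

    p01  = c0  + c1*f          p23  = c2  + c3*f    (and so on up to p1617)
    p03  = p01 + f^2 * p23     p47  = p45 + f^2 * p67
    p07  = p03 + f^4 * p47     p815 = p811 + f^4 * p1215
    p015 = p07 + f^8 * p815
    P(f) = p015 + f^16 * (p1617 + f^2 * c18)

The result is then assembled as f + f^2 * P(f) together with the k*ln2 terms,
exactly as in the final lines of the diff.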
diff --git a/sysdeps/aarch64/fpu/log1p_sve.c b/sysdeps/aarch64/fpu/log1p_sve.c
index 04f7e5720e13c371..5251f3c07566eec3 100644
--- a/sysdeps/aarch64/fpu/log1p_sve.c
+++ b/sysdeps/aarch64/fpu/log1p_sve.c
@@ -22,19 +22,33 @@
 static const struct data
 {
-  double poly[19];
+  float64_t c0, c2, c4, c6, c8, c10, c12, c14, c16;
+  float64_t c1, c3, c5, c7, c9, c11, c13, c15, c17, c18;
   double ln2_hi, ln2_lo;
   uint64_t hfrt2_top, onemhfrt2_top, inf, mone;
 } data = {
   /* Generated using Remez in [ sqrt(2)/2 - 1, sqrt(2) - 1]. Order 20
-     polynomial, however first 2 coefficients are 0 and 1 so are not stored. */
-  .poly = { -0x1.ffffffffffffbp-2, 0x1.55555555551a9p-2, -0x1.00000000008e3p-2,
-            0x1.9999999a32797p-3, -0x1.555555552fecfp-3, 0x1.249248e071e5ap-3,
-            -0x1.ffffff8bf8482p-4, 0x1.c71c8f07da57ap-4, -0x1.9999ca4ccb617p-4,
-            0x1.7459ad2e1dfa3p-4, -0x1.554d2680a3ff2p-4, 0x1.3b4c54d487455p-4,
-            -0x1.2548a9ffe80e6p-4, 0x1.0f389a24b2e07p-4, -0x1.eee4db15db335p-5,
-            0x1.e95b494d4a5ddp-5, -0x1.15fdf07cb7c73p-4, 0x1.0310b70800fcfp-4,
-            -0x1.cfa7385bdb37ep-6, },
+     polynomial, however first 2 coefficients are 0 and 1 so are not
+     stored. */
+  .c0 = -0x1.ffffffffffffbp-2,
+  .c1 = 0x1.55555555551a9p-2,
+  .c2 = -0x1.00000000008e3p-2,
+  .c3 = 0x1.9999999a32797p-3,
+  .c4 = -0x1.555555552fecfp-3,
+  .c5 = 0x1.249248e071e5ap-3,
+  .c6 = -0x1.ffffff8bf8482p-4,
+  .c7 = 0x1.c71c8f07da57ap-4,
+  .c8 = -0x1.9999ca4ccb617p-4,
+  .c9 = 0x1.7459ad2e1dfa3p-4,
+  .c10 = -0x1.554d2680a3ff2p-4,
+  .c11 = 0x1.3b4c54d487455p-4,
+  .c12 = -0x1.2548a9ffe80e6p-4,
+  .c13 = 0x1.0f389a24b2e07p-4,
+  .c14 = -0x1.eee4db15db335p-5,
+  .c15 = 0x1.e95b494d4a5ddp-5,
+  .c16 = -0x1.15fdf07cb7c73p-4,
+  .c17 = 0x1.0310b70800fcfp-4,
+  .c18 = -0x1.cfa7385bdb37ep-6,
   .ln2_hi = 0x1.62e42fefa3800p-1,
   .ln2_lo = 0x1.ef35793c76730p-45,
   /* top32(asuint64(sqrt(2)/2)) << 32. */
@@ -49,7 +63,7 @@ static const struct data
 #define BottomMask 0xffffffff
 
 static svfloat64_t NOINLINE
-special_case (svbool_t special, svfloat64_t x, svfloat64_t y)
+special_case (svfloat64_t x, svfloat64_t y, svbool_t special)
 {
   return sv_call_f64 (log1p, x, y, special);
 }
@@ -91,8 +105,9 @@ svfloat64_t SV_NAME_D1 (log1p) (svfloat64_t x, svbool_t pg)
   /* Reduce x to f in [sqrt(2)/2, sqrt(2)]. */
   svuint64_t utop
       = svadd_x (pg, svand_x (pg, u, 0x000fffff00000000), d->hfrt2_top);
-  svuint64_t u_red = svorr_x (pg, utop, svand_x (pg, mi, BottomMask));
-  svfloat64_t f = svsub_x (pg, svreinterpret_f64 (u_red), 1);
+  svuint64_t u_red
+      = svorr_x (pg, utop, svand_x (svptrue_b64 (), mi, BottomMask));
+  svfloat64_t f = svsub_x (svptrue_b64 (), svreinterpret_f64 (u_red), 1);
 
   /* Correction term c/m. */
   svfloat64_t cm = svdiv_x (pg, svsub_x (pg, x, svsub_x (pg, m, 1)), m);
@@ -103,16 +118,47 @@ svfloat64_t SV_NAME_D1 (log1p) (svfloat64_t x, svbool_t pg)
      Hence approximation has the form f + f^2 * P(f)
      where P(x) = C0 + C1*x + C2x^2 + ...
      Assembling this all correctly is dealt with at the final step. */
-  svfloat64_t f2 = svmul_x (pg, f, f), f4 = svmul_x (pg, f2, f2),
-              f8 = svmul_x (pg, f4, f4), f16 = svmul_x (pg, f8, f8);
-  svfloat64_t p = sv_estrin_18_f64_x (pg, f, f2, f4, f8, f16, d->poly);
+  svfloat64_t f2 = svmul_x (svptrue_b64 (), f, f),
+              f4 = svmul_x (svptrue_b64 (), f2, f2),
+              f8 = svmul_x (svptrue_b64 (), f4, f4),
+              f16 = svmul_x (svptrue_b64 (), f8, f8);
+
+  svfloat64_t c13 = svld1rq (svptrue_b64 (), &d->c1);
+  svfloat64_t c57 = svld1rq (svptrue_b64 (), &d->c5);
+  svfloat64_t c911 = svld1rq (svptrue_b64 (), &d->c9);
+  svfloat64_t c1315 = svld1rq (svptrue_b64 (), &d->c13);
+  svfloat64_t c1718 = svld1rq (svptrue_b64 (), &d->c17);
+
+  /* Order-18 Estrin scheme. */
+  svfloat64_t p01 = svmla_lane (sv_f64 (d->c0), f, c13, 0);
+  svfloat64_t p23 = svmla_lane (sv_f64 (d->c2), f, c13, 1);
+  svfloat64_t p45 = svmla_lane (sv_f64 (d->c4), f, c57, 0);
+  svfloat64_t p67 = svmla_lane (sv_f64 (d->c6), f, c57, 1);
+
+  svfloat64_t p03 = svmla_x (pg, p01, f2, p23);
+  svfloat64_t p47 = svmla_x (pg, p45, f2, p67);
+  svfloat64_t p07 = svmla_x (pg, p03, f4, p47);
+
+  svfloat64_t p89 = svmla_lane (sv_f64 (d->c8), f, c911, 0);
+  svfloat64_t p1011 = svmla_lane (sv_f64 (d->c10), f, c911, 1);
+  svfloat64_t p1213 = svmla_lane (sv_f64 (d->c12), f, c1315, 0);
+  svfloat64_t p1415 = svmla_lane (sv_f64 (d->c14), f, c1315, 1);
+
+  svfloat64_t p811 = svmla_x (pg, p89, f2, p1011);
+  svfloat64_t p1215 = svmla_x (pg, p1213, f2, p1415);
+  svfloat64_t p815 = svmla_x (pg, p811, f4, p1215);
+
+  svfloat64_t p015 = svmla_x (pg, p07, f8, p815);
+  svfloat64_t p1617 = svmla_lane (sv_f64 (d->c16), f, c1718, 0);
+  svfloat64_t p1618 = svmla_lane (p1617, f2, c1718, 1);
+  svfloat64_t p = svmla_x (pg, p015, f16, p1618);
 
   svfloat64_t ylo = svmla_x (pg, cm, k, d->ln2_lo);
   svfloat64_t yhi = svmla_x (pg, f, k, d->ln2_hi);
-  svfloat64_t y = svmla_x (pg, svadd_x (pg, ylo, yhi), f2, p);
 
   if (__glibc_unlikely (svptest_any (pg, special)))
-    return special_case (special, x, y);
-
-  return y;
+    return special_case (
+        x, svmla_x (svptrue_b64 (), svadd_x (svptrue_b64 (), ylo, yhi), f2, p),
+        special);
+  return svmla_x (svptrue_b64 (), svadd_x (svptrue_b64 (), ylo, yhi), f2, p);
 }
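The codegen win comes from how the coefficients are fetched: svld1rq loads one
128-bit pair of adjacent doubles and replicates it across the whole SVE vector,
and svmla_lane then selects lane 0 or 1 inside the fused multiply-add, so each
pair of odd coefficients costs a single load with no per-coefficient broadcast.
That is why the struct stores the even and odd coefficients in two separate
runs. Below is a minimal standalone sketch of the same technique for an
order-3 polynomial; the names and toy coefficients are hypothetical, not from
the patch, and it assumes a compiler with SVE enabled (e.g. -march=armv8-a+sve):

#include <arm_sve.h>

/* Coefficient layout mirroring the patch: even-index coefficients in one
   run, odd-index ones in another, so {c1, c3} are adjacent in memory.  */
static const struct
{
  double c0, c2; /* even coefficients */
  double c1, c3; /* odd coefficients, loadable as one 128-bit pair */
} poly = { 1.0, 3.0, 2.0, 4.0 }; /* P(x) = 1 + 2*x + 3*x^2 + 4*x^3 */

/* Order-3 Estrin step: (c0 + c1*x) + x^2 * (c2 + c3*x).  */
svfloat64_t
eval_poly (svbool_t pg, svfloat64_t x)
{
  svfloat64_t x2 = svmul_x (pg, x, x);
  /* One load replicates the pair {c1, c3} into every 128-bit quadword.  */
  svfloat64_t c13 = svld1rq (svptrue_b64 (), &poly.c1);
  svfloat64_t p01 = svmla_lane (svdup_f64 (poly.c0), x, c13, 0); /* c0+c1*x */
  svfloat64_t p23 = svmla_lane (svdup_f64 (poly.c2), x, c13, 1); /* c2+c3*x */
  return svmla_x (pg, p01, x2, p23);
}

With the old flat d->poly array, the generic sv_estrin_18_f64_x helper
typically broadcast each of the 19 coefficients individually; the packed
layout roughly halves those loads and lets each FMA pick its coefficient by
lane index instead.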