1a997221e3
Upstream commit: 5d070d12b3a52bc44dd1b71743abc4b6243862ae
Related: RHEL-25850
- x86: Expand the comment on when REP STOSB is used on memset
- x86: Do not prefer ERMS for memset on Zen3+
- x86: Fix Zen3/Zen4 ERMS selection (BZ 30994)
Resolves: RHEL-25530
- Add tst-gnu2-tls2mod1 to test-internal-extras
- elf: Enable TLS descriptor tests on aarch64
- arm: Update _dl_tlsdesc_dynamic to preserve caller-saved registers (BZ 31372)
- Ignore undefined symbols for -mtls-dialect=gnu2
- x86-64: Allocate state buffer space for RDI, RSI and RBX
- x86-64: Update _dl_tlsdesc_dynamic to preserve AMX registers
- x86: Update _dl_tlsdesc_dynamic to preserve caller-saved registers
Resolves: RHEL-29179
- x86-64: Save APX registers in ld.so trampoline
Resolves: RHEL-25045
- LoongArch: Correct {__ieee754, _}_scalb -> {__ieee754, _}_scalbf
- powerpc: Placeholder and infrastructure/build support to add Power11 related changes.
- powerpc: Add HWCAP3/HWCAP4 data to TCB for Power Architecture.
Resolves: RHEL-24761
Fedora 40 commit: 24af28d49b
commit aa4249266e9906c4bc833e4847f4d8feef59504f
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Thu Feb 8 10:08:38 2024 -0300

    x86: Fix Zen3/Zen4 ERMS selection (BZ 30994)

    The REP MOVSB usage on memcpy/memmove does not show much performance
    improvement on Zen3/Zen4 cores compared to the vectorized loops.  Also,
    as reported in BZ 30994, if the source is aligned and the destination
    is not, the performance can be 20x slower.

    The performance difference is noticeable with small buffer sizes,
    closer to the lower bound limit where memcpy/memmove starts to use
    ERMS.  The performance of REP MOVSB is similar to the vectorized
    instructions at the size limit (the L2 cache).  Also, there is no
    drawback to multiple cores sharing the cache.

    Checked on x86_64-linux-gnu on Zen3.
    Reviewed-by: H.J. Lu <hjl.tools@gmail.com>

    (cherry picked from commit 0c0d39fe4aeb0f69b26e76337c5dfd5530d5d44e)
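To make the selection logic concrete, the sketch below mirrors the size-based
dispatch that rep_movsb_threshold and rep_movsb_stop_threshold control.  It is
only an illustration, not glibc's actual implementation (which lives in the
memmove-vec-unaligned-erms assembly); copy_vectorized and copy_non_temporal
are hypothetical stand-ins, and copy_rep_movsb is a plain inline-asm rendering
of the ERMS path.

#include <stddef.h>
#include <string.h>

/* Hypothetical stand-ins for glibc's internal copy loops.  */
static void
copy_vectorized (void *dst, const void *src, size_t n)
{
  memcpy (dst, src, n);		/* Placeholder for the SIMD loop.  */
}

static void
copy_non_temporal (void *dst, const void *src, size_t n)
{
  memcpy (dst, src, n);		/* Placeholder for the NT-store loop.  */
}

/* The ERMS path: a bare REP MOVSB (x86-64 inline asm; the direction
   flag is clear per the ABI).  */
static void
copy_rep_movsb (void *dst, const void *src, size_t n)
{
  __asm__ volatile ("rep movsb"
                    : "+D" (dst), "+S" (src), "+c" (n)
                    : : "memory");
}

/* Size-based selection controlled by the two tunables.  After this
   patch, AMD Zen3+ gets rep_movsb_threshold == rep_movsb_stop_threshold
   (both equal to the non-temporal threshold), so the middle branch is
   never taken and the vectorized loop covers everything below the
   non-temporal threshold.  */
static void
copy_dispatch (void *dst, const void *src, size_t n,
               size_t rep_movsb_threshold, size_t rep_movsb_stop_threshold)
{
  if (n < rep_movsb_threshold)
    copy_vectorized (dst, src, n);
  else if (n < rep_movsb_stop_threshold)
    copy_rep_movsb (dst, src, n);
  else
    copy_non_temporal (dst, src, n);
}

int
main (void)
{
  char src[64] = "example payload";
  char dst[64];
  /* With threshold == stop threshold (the AMD case after this patch),
     only the vectorized or non-temporal paths can be chosen.  */
  copy_dispatch (dst, src, sizeof src, 0x300000, 0x300000);
  return 0;
}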
diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
index d5101615e348e5c2..f34d12846caf9422 100644
--- a/sysdeps/x86/dl-cacheinfo.h
+++ b/sysdeps/x86/dl-cacheinfo.h
@@ -791,7 +791,6 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
   long int data = -1;
   long int shared = -1;
   long int shared_per_thread = -1;
-  long int core = -1;
   unsigned int threads = 0;
   unsigned long int level1_icache_size = -1;
   unsigned long int level1_icache_linesize = -1;
@@ -809,7 +808,6 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
   if (cpu_features->basic.kind == arch_kind_intel)
     {
       data = handle_intel (_SC_LEVEL1_DCACHE_SIZE, cpu_features);
-      core = handle_intel (_SC_LEVEL2_CACHE_SIZE, cpu_features);
       shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, cpu_features);
       shared_per_thread = shared;

@@ -822,7 +820,8 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
         = handle_intel (_SC_LEVEL1_DCACHE_ASSOC, cpu_features);
       level1_dcache_linesize
         = handle_intel (_SC_LEVEL1_DCACHE_LINESIZE, cpu_features);
-      level2_cache_size = core;
+      level2_cache_size
+        = handle_intel (_SC_LEVEL2_CACHE_SIZE, cpu_features);
       level2_cache_assoc
         = handle_intel (_SC_LEVEL2_CACHE_ASSOC, cpu_features);
       level2_cache_linesize
@@ -835,12 +834,12 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
       level4_cache_size
         = handle_intel (_SC_LEVEL4_CACHE_SIZE, cpu_features);

-      get_common_cache_info (&shared, &shared_per_thread, &threads, core);
+      get_common_cache_info (&shared, &shared_per_thread, &threads,
+                             level2_cache_size);
     }
   else if (cpu_features->basic.kind == arch_kind_zhaoxin)
     {
       data = handle_zhaoxin (_SC_LEVEL1_DCACHE_SIZE);
-      core = handle_zhaoxin (_SC_LEVEL2_CACHE_SIZE);
       shared = handle_zhaoxin (_SC_LEVEL3_CACHE_SIZE);
       shared_per_thread = shared;

@@ -849,19 +848,19 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
       level1_dcache_size = data;
       level1_dcache_assoc = handle_zhaoxin (_SC_LEVEL1_DCACHE_ASSOC);
       level1_dcache_linesize = handle_zhaoxin (_SC_LEVEL1_DCACHE_LINESIZE);
-      level2_cache_size = core;
+      level2_cache_size = handle_zhaoxin (_SC_LEVEL2_CACHE_SIZE);
       level2_cache_assoc = handle_zhaoxin (_SC_LEVEL2_CACHE_ASSOC);
       level2_cache_linesize = handle_zhaoxin (_SC_LEVEL2_CACHE_LINESIZE);
       level3_cache_size = shared;
       level3_cache_assoc = handle_zhaoxin (_SC_LEVEL3_CACHE_ASSOC);
       level3_cache_linesize = handle_zhaoxin (_SC_LEVEL3_CACHE_LINESIZE);

-      get_common_cache_info (&shared, &shared_per_thread, &threads, core);
+      get_common_cache_info (&shared, &shared_per_thread, &threads,
+                             level2_cache_size);
     }
   else if (cpu_features->basic.kind == arch_kind_amd)
     {
       data = handle_amd (_SC_LEVEL1_DCACHE_SIZE);
-      core = handle_amd (_SC_LEVEL2_CACHE_SIZE);
       shared = handle_amd (_SC_LEVEL3_CACHE_SIZE);

       level1_icache_size = handle_amd (_SC_LEVEL1_ICACHE_SIZE);
@@ -869,7 +868,7 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
       level1_dcache_size = data;
       level1_dcache_assoc = handle_amd (_SC_LEVEL1_DCACHE_ASSOC);
       level1_dcache_linesize = handle_amd (_SC_LEVEL1_DCACHE_LINESIZE);
-      level2_cache_size = core;
+      level2_cache_size = handle_amd (_SC_LEVEL2_CACHE_SIZE);
       level2_cache_assoc = handle_amd (_SC_LEVEL2_CACHE_ASSOC);
       level2_cache_linesize = handle_amd (_SC_LEVEL2_CACHE_LINESIZE);
       level3_cache_size = shared;
@@ -880,12 +879,12 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
       if (shared <= 0)
         {
           /* No shared L3 cache.  All we have is the L2 cache.  */
-          shared = core;
+          shared = level2_cache_size;
         }
       else if (cpu_features->basic.family < 0x17)
         {
           /* Account for exclusive L2 and L3 caches.  */
-          shared += core;
+          shared += level2_cache_size;
         }

       shared_per_thread = shared;
@@ -987,6 +986,12 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
   if (CPU_FEATURE_USABLE_P (cpu_features, FSRM))
     rep_movsb_threshold = 2112;

+  /* For AMD CPUs that support ERMS (Zen3+), REP MOVSB is in a lot of
+     cases slower than the vectorized path (and for some alignments,
+     it is really slow, check BZ #30994).  */
+  if (cpu_features->basic.kind == arch_kind_amd)
+    rep_movsb_threshold = non_temporal_threshold;
+
   /* The default threshold to use Enhanced REP STOSB.  */
   unsigned long int rep_stosb_threshold = 2048;

@@ -1028,16 +1033,9 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
                            SIZE_MAX);

   unsigned long int rep_movsb_stop_threshold;
-  /* ERMS feature is implemented from AMD Zen3 architecture and it is
-     performing poorly for data above L2 cache size. Henceforth, adding
-     an upper bound threshold parameter to limit the usage of Enhanced
-     REP MOVSB operations and setting its value to L2 cache size.  */
-  if (cpu_features->basic.kind == arch_kind_amd)
-    rep_movsb_stop_threshold = core;
   /* Setting the upper bound of ERMS to the computed value of
-     non-temporal threshold for architectures other than AMD.  */
-  else
-    rep_movsb_stop_threshold = non_temporal_threshold;
+     non-temporal threshold for all architectures.  */
+  rep_movsb_stop_threshold = non_temporal_threshold;

   cpu_features->data_cache_size = data;
   cpu_features->shared_cache_size = shared;
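
For a sense of the net effect of the two threshold hunks above, this
standalone sketch recomputes both thresholds the way the patched
dl_init_cacheinfo does.  All concrete values here (the 8192 default, the
~3 MiB non-temporal threshold) are made up for illustration; the real ones
are derived from CPUID and the cache hierarchy at startup.

#include <stdbool.h>
#include <stdio.h>

int
main (void)
{
  /* Illustrative values only; glibc derives these at startup.  */
  unsigned long non_temporal_threshold = 0x300000;  /* ~3 MiB.  */
  bool has_fsrm = false;	/* Fast Short REP MOVSB.  */
  bool is_amd = true;

  /* Illustrative default; the real one scales with vector width.  */
  unsigned long rep_movsb_threshold = 8192;
  if (has_fsrm)
    rep_movsb_threshold = 2112;
  /* New in this patch: on AMD the start threshold is raised to the
     non-temporal threshold.  */
  if (is_amd)
    rep_movsb_threshold = non_temporal_threshold;

  /* Also new: the stop threshold is the non-temporal threshold on all
     architectures, instead of the L2 size on AMD.  */
  unsigned long rep_movsb_stop_threshold = non_temporal_threshold;

  printf ("REP MOVSB window: [%lu, %lu) -> %s\n",
          rep_movsb_threshold, rep_movsb_stop_threshold,
          rep_movsb_threshold < rep_movsb_stop_threshold
          ? "non-empty" : "empty (ERMS effectively disabled)");
  return 0;
}

Before this change, AMD capped the window with the L2 size (the removed
core variable), so sizes between the default start threshold and the L2
size still took REP MOVSB; after it, the window is empty on AMD and runs
up to the non-temporal threshold everywhere else.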