Update to 4.18.0-477.27.2.el8_8
parent a35919e2be
commit a8f8575300
@ -0,0 +1,243 @@
From 38871bad2601515be96d1c3eb2ca1dbe2293bfff Mon Sep 17 00:00:00 2001
From: Jithu Joseph <jithu.joseph@intel.com>
Date: Fri, 6 May 2022 15:53:59 -0700
Subject: [PATCH 01/36] x86/microcode/intel: Expose collect_cpu_info_early()
 for IFS

IFS is a CPU feature that allows a binary blob, similar to microcode,
to be loaded and consumed to perform low level validation of CPU
circuitry. In fact, it carries the same Processor Signature
(family/model/stepping) details that are contained in Intel microcode
blobs.

In support of an IFS driver to trigger loading, validation, and running
of these tests blobs, make the functionality of cpu_signatures_match()
and collect_cpu_info_early() available outside of the microcode driver.

Add an "intel_" prefix and drop the "_early" suffix from
collect_cpu_info_early() and EXPORT_SYMBOL_GPL() it. Add
declaration to x86 <asm/cpu.h>

Make cpu_signatures_match() an inline function in x86 <asm/cpu.h>,
and also give it an "intel_" prefix.

No functional change intended.

Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jithu Joseph <jithu.joseph@intel.com>
Co-developed-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Link: https://lore.kernel.org/r/20220506225410.1652287-2-tony.luck@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
(cherry picked from commit d3287fb0d3c8afdfd4870a6cd4a852abc9008b3b)
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/include/asm/cpu.h | 19 +++++++++
 arch/x86/kernel/cpu/intel.c | 32 +++++++++++++++
 arch/x86/kernel/cpu/microcode/intel.c | 59 ++++-----------------------
 3 files changed, 58 insertions(+), 52 deletions(-)

diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 21924730afae..2d62a3f122ee 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -73,4 +73,23 @@ static inline u8 get_this_hybrid_cpu_type(void)
 return 0;
 }
 #endif
+
+struct ucode_cpu_info;
+
+int intel_cpu_collect_info(struct ucode_cpu_info *uci);
+
+static inline bool intel_cpu_signatures_match(unsigned int s1, unsigned int p1,
+ unsigned int s2, unsigned int p2)
+{
+ if (s1 != s2)
+ return false;
+
+ /* Processor flags are either both 0 ... */
+ if (!p1 && !p2)
+ return true;
+
+ /* ... or they intersect. */
+ return p1 & p2;
+}
+
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 7579e773e29b..cf85431fb09a 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -217,6 +217,38 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 return false;
 }

+int intel_cpu_collect_info(struct ucode_cpu_info *uci)
+{
+ unsigned int val[2];
+ unsigned int family, model;
+ struct cpu_signature csig = { 0 };
+ unsigned int eax, ebx, ecx, edx;
+
+ memset(uci, 0, sizeof(*uci));
+
+ eax = 0x00000001;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+ csig.sig = eax;
+
+ family = x86_family(eax);
+ model = x86_model(eax);
+
+ if (model >= 5 || family > 6) {
+ /* get processor flags from MSR 0x17 */
+ native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
+ csig.pf = 1 << ((val[1] >> 18) & 7);
+ }
+
+ csig.rev = intel_get_microcode_revision();
+
+ uci->cpu_sig = csig;
+ uci->valid = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_cpu_collect_info);
+
 static void early_init_intel(struct cpuinfo_x86 *c)
 {
 u64 misc_enable;
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 6a99535d7f37..5f8223909db4 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -45,20 +45,6 @@ static struct microcode_intel *intel_ucode_patch;
 /* last level cache size per core */
 static int llc_size_per_core;

-static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
- unsigned int s2, unsigned int p2)
-{
- if (s1 != s2)
- return false;
-
- /* Processor flags are either both 0 ... */
- if (!p1 && !p2)
- return true;
-
- /* ... or they intersect. */
- return p1 & p2;
-}
-
 /*
 * Returns 1 if update has been found, 0 otherwise.
 */
@@ -69,7 +55,7 @@ static int find_matching_signature(void *mc, unsigned int csig, int cpf)
 struct extended_signature *ext_sig;
 int i;

- if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
+ if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
 return 1;

 /* Look for ext. headers: */
@@ -80,7 +66,7 @@ static int find_matching_signature(void *mc, unsigned int csig, int cpf)
 ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

 for (i = 0; i < ext_hdr->count; i++) {
- if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
+ if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
 return 1;
 ext_sig++;
 }
@@ -385,37 +371,6 @@ next:
 return patch;
 }

-static int collect_cpu_info_early(struct ucode_cpu_info *uci)
-{
- unsigned int val[2];
- unsigned int family, model;
- struct cpu_signature csig = { 0 };
- unsigned int eax, ebx, ecx, edx;
-
- memset(uci, 0, sizeof(*uci));
-
- eax = 0x00000001;
- ecx = 0;
- native_cpuid(&eax, &ebx, &ecx, &edx);
- csig.sig = eax;
-
- family = x86_family(eax);
- model = x86_model(eax);
-
- if ((model >= 5) || (family > 6)) {
- /* get processor flags from MSR 0x17 */
- native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
- csig.pf = 1 << ((val[1] >> 18) & 7);
- }
-
- csig.rev = intel_get_microcode_revision();
-
- uci->cpu_sig = csig;
- uci->valid = 1;
-
- return 0;
-}
-
 static void show_saved_mc(void)
 {
 #ifdef DEBUG
@@ -429,7 +384,7 @@ static void show_saved_mc(void)
 return;
 }

- collect_cpu_info_early(&uci);
+ intel_cpu_collect_info(&uci);

 sig = uci.cpu_sig.sig;
 pf = uci.cpu_sig.pf;
@@ -538,7 +493,7 @@ void show_ucode_info_early(void)
 struct ucode_cpu_info uci;

 if (delay_ucode_info) {
- collect_cpu_info_early(&uci);
+ intel_cpu_collect_info(&uci);
 print_ucode_info(&uci, current_mc_date);
 delay_ucode_info = 0;
 }
@@ -640,7 +595,7 @@ int __init save_microcode_in_initrd_intel(void)
 if (!(cp.data && cp.size))
 return 0;

- collect_cpu_info_early(&uci);
+ intel_cpu_collect_info(&uci);

 scan_microcode(cp.data, cp.size, &uci, true);

@@ -673,7 +628,7 @@ static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
 if (!(cp.data && cp.size))
 return NULL;

- collect_cpu_info_early(uci);
+ intel_cpu_collect_info(uci);

 return scan_microcode(cp.data, cp.size, uci, false);
 }
@@ -748,7 +703,7 @@ void reload_ucode_intel(void)
 struct microcode_intel *p;
 struct ucode_cpu_info uci;

- collect_cpu_info_early(&uci);
+ intel_cpu_collect_info(&uci);

 p = find_patch(&uci);
 if (!p)
--
2.39.3

@ -0,0 +1,129 @@
From be1cba911af4925ef0f42bdfee9682b01c94dfcb Mon Sep 17 00:00:00 2001
From: Borislav Petkov <bp@suse.de>
Date: Tue, 19 Apr 2022 09:52:41 -0700
Subject: [PATCH 02/36] x86/cpu: Load microcode during
 restore_processor_state()

When resuming from system sleep state, restore_processor_state()
restores the boot CPU MSRs. These MSRs could be emulated by microcode.
If microcode is not loaded yet, writing to emulated MSRs leads to
unchecked MSR access error:

  ...
  PM: Calling lapic_suspend+0x0/0x210
  unchecked MSR access error: WRMSR to 0x10f (tried to write 0x0...0) at rIP: ... (native_write_msr)
  Call Trace:
  <TASK>
  ? restore_processor_state
  x86_acpi_suspend_lowlevel
  acpi_suspend_enter
  suspend_devices_and_enter
  pm_suspend.cold
  state_store
  kobj_attr_store
  sysfs_kf_write
  kernfs_fop_write_iter
  new_sync_write
  vfs_write
  ksys_write
  __x64_sys_write
  do_syscall_64
  entry_SYSCALL_64_after_hwframe
  RIP: 0033:0x7fda13c260a7

To ensure microcode emulated MSRs are available for restoration, load
the microcode on the boot CPU before restoring these MSRs.

[ Pawan: write commit message and productize it. ]

Fixes: e2a1256b17b1 ("x86/speculation: Restore speculation related MSRs during S3 resume")
Reported-by: Kyle D. Pelton <kyle.d.pelton@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Tested-by: Kyle D. Pelton <kyle.d.pelton@intel.com>
Cc: stable@vger.kernel.org
Link: https://bugzilla.kernel.org/show_bug.cgi?id=215841
Link: https://lore.kernel.org/r/4350dfbf785cd482d3fafa72b2b49c83102df3ce.1650386317.git.pawan.kumar.gupta@linux.intel.com
(cherry picked from commit f9e14dbbd454581061c736bf70bf5cbb15ac927c)
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/include/asm/microcode.h | 2 ++
 arch/x86/kernel/cpu/microcode/core.c | 6 +++---
 arch/x86/power/cpu.c | 8 ++++++++
 3 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 2b7cc5397f80..a930a63d1260 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -133,6 +133,7 @@ extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
 extern bool initrd_gone;
+void microcode_bsp_resume(void);
 #else
 static inline int __init microcode_init(void) { return 0; };
 static inline void __init load_ucode_bsp(void) { }
@@ -140,6 +141,7 @@ static inline void load_ucode_ap(void) { }
 static inline void reload_early_microcode(void) { }
 static inline bool
 get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
+static inline void microcode_bsp_resume(void) { }
 #endif

 #endif /* _ASM_X86_MICROCODE_H */
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index e263da7247af..f0946658f885 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -778,9 +778,9 @@ static struct subsys_interface mc_cpu_interface = {
 };

 /**
- * mc_bp_resume - Update boot CPU microcode during resume.
+ * microcode_bsp_resume - Update boot CPU microcode during resume.
 */
-static void mc_bp_resume(void)
+void microcode_bsp_resume(void)
 {
 int cpu = smp_processor_id();
 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
@@ -792,7 +792,7 @@ static void mc_bp_resume(void)
 }

 static struct syscore_ops mc_syscore_ops = {
- .resume = mc_bp_resume,
+ .resume = microcode_bsp_resume,
 };

 static int mc_cpu_starting(unsigned int cpu)
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index f48756c903d4..dd5a29553697 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -25,6 +25,7 @@
 #include <asm/cpu.h>
 #include <asm/mmu_context.h>
 #include <linux/dmi.h>
+#include <asm/microcode.h>

 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@@ -266,6 +267,13 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 x86_platform.restore_sched_clock_state();
 mtrr_bp_restore();
 perf_restore_debug_store();
+
+ microcode_bsp_resume();
+
+ /*
+ * This needs to happen after the microcode has been updated upon resume
+ * because some of the MSRs are "emulated" in microcode.
+ */
 msr_restore_context(ctxt);
 }

--
2.39.3

@ -0,0 +1,51 @@
From b31d4c149df44c050b12b7aca2c535e06f1b1237 Mon Sep 17 00:00:00 2001
From: Borislav Petkov <bp@suse.de>
Date: Fri, 5 Apr 2019 06:28:11 +0200
Subject: [PATCH 03/36] x86/microcode: Deprecate MICROCODE_OLD_INTERFACE

This is the ancient loading interface which requires special tools to be
used. The bigger problem with it is that it is as inadequate for proper
loading of microcode as the late microcode loading interface is because
it happens just as late.

iucode_tool's manpage already warns people that it is deprecated.

Deprecate it and turn it off by default along with a big fat warning in
the Kconfig help. It will go away sometime in the future.

Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
Link: https://lkml.kernel.org/r/20190405133010.24249-4-bp@alien8.de
(cherry picked from commit c02f48e070bde326f55bd94544ca82291f7396e3)
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/Kconfig | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f26d1d596e9a..4f762b807bd4 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1361,8 +1361,16 @@ config MICROCODE_AMD
 processors will be enabled.

 config MICROCODE_OLD_INTERFACE
- def_bool y
+ bool "Ancient loading interface (DEPRECATED)"
+ default n
 depends on MICROCODE
+ ---help---
+ DO NOT USE THIS! This is the ancient /dev/cpu/microcode interface
+ which was used by userspace tools like iucode_tool and microcode.ctl.
+ It is inadequate because it runs too late to be able to properly
+ load microcode on a machine and it needs special tools. Instead, you
+ should've switched to the early loading method with the initrd or
+ builtin microcode by now: Documentation/x86/microcode.txt

 config X86_MSR
 tristate "/dev/cpu/*/msr - Model-specific register support"
--
2.39.3

SOURCES/1018-x86-microcode-Rip-out-the-OLD_INTERFACE.patch (new file, 170 lines)
@ -0,0 +1,170 @@
From c11a8e91d637901133fad441470d2240cc9d05e7 Mon Sep 17 00:00:00 2001
From: Borislav Petkov <bp@suse.de>
Date: Wed, 25 May 2022 18:12:29 +0200
Subject: [PATCH 04/36] x86/microcode: Rip out the OLD_INTERFACE

Everything should be using the early initrd loading by now.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20220525161232.14924-2-bp@alien8.de

(cherry picked from commit 181b6f40e9ea80c76756d4d0cdeed396016c487e)
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/Kconfig | 12 ----
 arch/x86/kernel/cpu/microcode/core.c | 100 ---------------------------
 2 files changed, 112 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4f762b807bd4..b9bcde5c538f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1360,18 +1360,6 @@ config MICROCODE_AMD
 If you select this option, microcode patch loading support for AMD
 processors will be enabled.

-config MICROCODE_OLD_INTERFACE
- bool "Ancient loading interface (DEPRECATED)"
- default n
- depends on MICROCODE
- ---help---
- DO NOT USE THIS! This is the ancient /dev/cpu/microcode interface
- which was used by userspace tools like iucode_tool and microcode.ctl.
- It is inadequate because it runs too late to be able to properly
- load microcode on a machine and it needs special tools. Instead, you
- should've switched to the early loading method with the initrd or
- builtin microcode by now: Documentation/x86/microcode.txt
-
 config X86_MSR
 tristate "/dev/cpu/*/msr - Model-specific register support"
 ---help---
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index f0946658f885..bbc262a5fc0d 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -392,98 +392,6 @@ static int apply_microcode_on_target(int cpu)
 return ret;
 }

-#ifdef CONFIG_MICROCODE_OLD_INTERFACE
-static int do_microcode_update(const void __user *buf, size_t size)
-{
- int error = 0;
- int cpu;
-
- for_each_online_cpu(cpu) {
- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- enum ucode_state ustate;
-
- if (!uci->valid)
- continue;
-
- ustate = microcode_ops->request_microcode_user(cpu, buf, size);
- if (ustate == UCODE_ERROR) {
- error = -1;
- break;
- } else if (ustate == UCODE_NEW) {
- apply_microcode_on_target(cpu);
- }
- }
-
- return error;
-}
-
-static int microcode_open(struct inode *inode, struct file *file)
-{
- return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
-}
-
-static ssize_t microcode_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
-{
- ssize_t ret = -EINVAL;
- unsigned long nr_pages = totalram_pages();
-
- if ((len >> PAGE_SHIFT) > nr_pages) {
- pr_err("too much data (max %ld pages)\n", nr_pages);
- return ret;
- }
-
- get_online_cpus();
- mutex_lock(&microcode_mutex);
-
- if (do_microcode_update(buf, len) == 0)
- ret = (ssize_t)len;
-
- if (ret > 0)
- perf_check_microcode();
-
- mutex_unlock(&microcode_mutex);
- put_online_cpus();
-
- return ret;
-}
-
-static const struct file_operations microcode_fops = {
- .owner = THIS_MODULE,
- .write = microcode_write,
- .open = microcode_open,
- .llseek = no_llseek,
-};
-
-static struct miscdevice microcode_dev = {
- .minor = MICROCODE_MINOR,
- .name = "microcode",
- .nodename = "cpu/microcode",
- .fops = &microcode_fops,
-};
-
-static int __init microcode_dev_init(void)
-{
- int error;
-
- error = misc_register(&microcode_dev);
- if (error) {
- pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
- return error;
- }
-
- return 0;
-}
-
-static void __exit microcode_dev_exit(void)
-{
- misc_deregister(&microcode_dev);
-}
-#else
-#define microcode_dev_init() 0
-#define microcode_dev_exit() do { } while (0)
-#endif
-
 /* fake device for request_firmware */
 static struct platform_device *microcode_pdev;

@@ -876,10 +784,6 @@ int __init microcode_init(void)
 goto out_driver;
 }

- error = microcode_dev_init();
- if (error)
- goto out_ucode_group;
-
 register_syscore_ops(&mc_syscore_ops);
 cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
 mc_cpu_starting, NULL);
@@ -890,10 +794,6 @@ int __init microcode_init(void)

 return 0;

- out_ucode_group:
- sysfs_remove_group(&cpu_subsys.dev_root->kobj,
- &cpu_root_microcode_group);
-
 out_driver:
 get_online_cpus();
 mutex_lock(&microcode_mutex);
--
2.39.3

SOURCES/1019-x86-microcode-Default-disable-late-loading.patch (new file, 106 lines)
@ -0,0 +1,106 @@
From 9cc01dc772e684b7864d8c8a1f526035c991aa76 Mon Sep 17 00:00:00 2001
From: Borislav Petkov <bp@suse.de>
Date: Wed, 25 May 2022 18:12:30 +0200
Subject: [PATCH 05/36] x86/microcode: Default-disable late loading

It is dangerous and it should not be used anyway - there's a nice early
loading already.

Requested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20220525161232.14924-3-bp@alien8.de
(cherry picked from commit a77a94f86273ce42a39cb479217dd8d68acfe0ff)
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/Kconfig | 11 +++++++++++
 arch/x86/kernel/cpu/common.c | 2 ++
 arch/x86/kernel/cpu/microcode/core.c | 7 ++++++-
 3 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b9bcde5c538f..5af5427c50ae 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1360,6 +1360,17 @@ config MICROCODE_AMD
 If you select this option, microcode patch loading support for AMD
 processors will be enabled.

+config MICROCODE_LATE_LOADING
+ bool "Late microcode loading (DANGEROUS)"
+ default n
+ depends on MICROCODE
+ help
+ Loading microcode late, when the system is up and executing instructions
+ is a tricky business and should be avoided if possible. Just the sequence
+ of synchronizing all cores and SMT threads is one fragile dance which does
+ not guarantee that cores might not softlock after the loading. Therefore,
+ use this at your own risk. Late loading taints the kernel too.
+
 config X86_MSR
 tristate "/dev/cpu/*/msr - Model-specific register support"
 ---help---
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f59edeef14eb..7be057c7531c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2145,6 +2145,7 @@ void cpu_init_secondary(void)
 }
 #endif

+#ifdef CONFIG_MICROCODE_LATE_LOADING
 /*
 * The microcode loader calls this upon late microcode load to recheck features,
 * only when microcode has been updated. Caller holds microcode_mutex and CPU
@@ -2174,6 +2175,7 @@ void microcode_check(void)
 pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
 pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
 }
+#endif

 /*
 * Invoked from core CPU hotplug code after hotplug operations
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index bbc262a5fc0d..c4d19f4b7afb 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -395,6 +395,7 @@ static int apply_microcode_on_target(int cpu)
 /* fake device for request_firmware */
 static struct platform_device *microcode_pdev;

+#ifdef CONFIG_MICROCODE_LATE_LOADING
 /*
 * Late loading dance. Why the heavy-handed stomp_machine effort?
 *
@@ -563,6 +564,9 @@ put:
 return ret;
 }

+static DEVICE_ATTR_WO(reload);
+#endif
+
 static ssize_t version_show(struct device *dev,
 struct device_attribute *attr, char *buf)
 {
@@ -579,7 +583,6 @@ static ssize_t pf_show(struct device *dev,
 return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
 }

-static DEVICE_ATTR_WO(reload);
 static DEVICE_ATTR(version, 0444, version_show, NULL);
 static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL);

@@ -732,7 +735,9 @@ static int mc_cpu_down_prep(unsigned int cpu)
 }

 static struct attribute *cpu_root_microcode_attrs[] = {
+#ifdef CONFIG_MICROCODE_LATE_LOADING
 &dev_attr_reload.attr,
+#endif
 NULL
 };

--
2.39.3

@ -0,0 +1,47 @@
From abfdcc849a720f36bbb075ee35d64e84314387fe Mon Sep 17 00:00:00 2001
From: Borislav Petkov <bp@suse.de>
Date: Wed, 25 May 2022 18:12:31 +0200
Subject: [PATCH 06/36] x86/microcode: Taint and warn on late loading

Warn before it is attempted and taint the kernel. Late loading microcode
can lead to malfunction of the kernel when the microcode update changes
behaviour. There is no way for the kernel to determine whether its safe or
not.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20220525161232.14924-4-bp@alien8.de

(cherry picked from commit d23d33ea0fcdc4bbb484990bf53867f99c63ccab)
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/kernel/cpu/microcode/core.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index c4d19f4b7afb..a803b839b16e 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -513,6 +513,9 @@ static int microcode_reload_late(void)
 {
 int ret;

+ pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
+ pr_err("You should switch to early loading, if possible.\n");
+
 atomic_set(&late_cpus_in, 0);
 atomic_set(&late_cpus_out, 0);

@@ -561,6 +564,8 @@ put:
 if (ret >= 0)
 ret = size;

+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+
 return ret;
 }

--
2.39.3

@ -0,0 +1,45 @@
From 88686baf053310a4bd81088bd733c8be8a68db49 Mon Sep 17 00:00:00 2001
From: Borislav Petkov <bp@suse.de>
Date: Wed, 25 May 2022 18:12:32 +0200
Subject: [PATCH 07/36] x86/microcode: Remove unnecessary perf callback

c93dc84cbe32 ("perf/x86: Add a microcode revision check for SNB-PEBS")
checks whether the microcode revision has fixed PEBS issues.

This can happen either:

1. At PEBS init time, where the early microcode has been loaded already

2. During late loading, in the microcode_check() callback.

So remove the unnecessary call in the microcode loader init routine.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20220525161232.14924-5-bp@alien8.de

(cherry picked from commit 0c0fe08c76485fe0178ebb0fa1a2052c727abe94)
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/kernel/cpu/microcode/core.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index a803b839b16e..44bac17304ac 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -776,10 +776,7 @@ int __init microcode_init(void)

 get_online_cpus();
 mutex_lock(&microcode_mutex);
-
 error = subsys_interface_register(&mc_cpu_interface);
- if (!error)
- perf_check_microcode();
 mutex_unlock(&microcode_mutex);
 put_online_cpus();

--
2.39.3

@ -0,0 +1,49 @@
From bdaca71f1e3ffeea24c2a02d4eb21879d4dece96 Mon Sep 17 00:00:00 2001
From: Ashok Raj <ashok.raj@intel.com>
Date: Mon, 29 Aug 2022 18:10:30 +0000
Subject: [PATCH 08/36] x86/microcode: Print previous version of microcode
 after reload

Print both old and new versions of microcode after a reload is complete
because knowing the previous microcode version is sometimes important
from a debugging perspective.

[ bp: Massage commit message. ]

Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Tony Luck <tony.luck@intel.com>
Link: https://lore.kernel.org/r/20220829181030.722891-1-ashok.raj@intel.com
(cherry picked from commit 7fce8d6eccbc31a561d07c79f359ad09f0424347)
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/kernel/cpu/microcode/core.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 44bac17304ac..6affa814699d 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -511,7 +511,7 @@ wait_for_siblings:
 */
 static int microcode_reload_late(void)
 {
- int ret;
+ int old = boot_cpu_data.microcode, ret;

 pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
 pr_err("You should switch to early loading, if possible.\n");
@@ -523,7 +523,8 @@ static int microcode_reload_late(void)
 if (ret > 0)
 microcode_check();

- pr_info("Reload completed, microcode revision: 0x%x\n", boot_cpu_data.microcode);
+ pr_info("Reload completed, microcode revision: 0x%x -> 0x%x\n",
+ old, boot_cpu_data.microcode);

 return ret;
 }
--
2.39.3

@ -0,0 +1,179 @@
From 15a76d02ad4df4dbe37ed5bda874deeba2659843 Mon Sep 17 00:00:00 2001
From: Borislav Petkov <bp@suse.de>
Date: Wed, 19 Oct 2022 18:13:06 +0200
Subject: [PATCH 09/36] x86/microcode: Rip out the subsys interface gunk

This is a left-over from the old days when CPU hotplug wasn't as robust
as it is now. Currently, microcode gets loaded early on the CPU init
path and there's no need to attempt to load it again, which that subsys
interface callback is doing.

The only other thing that the subsys interface init path was doing is
adding the

  /sys/devices/system/cpu/cpu*/microcode/

hierarchy.

So add a function which gets called on each CPU after all the necessary
driver setup has happened. Use schedule_on_each_cpu() which can block
because the sysfs creating code does kmem_cache_zalloc() which can block
too and the initial version of this where it did that setup in an IPI
handler of on_each_cpu() can cause a deadlock of the sort:

  lock(fs_reclaim);
  <Interrupt>
  lock(fs_reclaim);

as the IPI handler runs in IRQ context.

Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ashok Raj <ashok.raj@intel.com>
Link: https://lore.kernel.org/r/20221028142638.28498-2-bp@alien8.de
(cherry picked from commit b6f86689d5b740f2cc3ac3a1032c7374b24381cc)
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/kernel/cpu/microcode/core.c | 82 +++++++---------------------
 1 file changed, 21 insertions(+), 61 deletions(-)

diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 6affa814699d..8a7fc5d8db6e 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -621,8 +621,8 @@ static enum ucode_state microcode_resume_cpu(int cpu)

 static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
 {
- enum ucode_state ustate;
 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+ enum ucode_state ustate;

 if (uci->valid)
 return UCODE_OK;
@@ -656,44 +656,6 @@ static enum ucode_state microcode_update_cpu(int cpu)
 return microcode_init_cpu(cpu, false);
 }

-static int mc_device_add(struct device *dev, struct subsys_interface *sif)
-{
- int err, cpu = dev->id;
-
- if (!cpu_online(cpu))
- return 0;
-
- pr_debug("CPU%d added\n", cpu);
-
- err = sysfs_create_group(&dev->kobj, &mc_attr_group);
- if (err)
- return err;
-
- if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
- return -EINVAL;
-
- return err;
-}
-
-static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
-{
- int cpu = dev->id;
-
- if (!cpu_online(cpu))
- return;
-
- pr_debug("CPU%d removed\n", cpu);
- microcode_fini_cpu(cpu);
- sysfs_remove_group(&dev->kobj, &mc_attr_group);
-}
-
-static struct subsys_interface mc_cpu_interface = {
- .name = "microcode",
- .subsys = &cpu_subsys,
- .add_dev = mc_device_add,
- .remove_dev = mc_device_remove,
-};
-
 /**
 * microcode_bsp_resume - Update boot CPU microcode during resume.
 */
@@ -733,6 +695,9 @@ static int mc_cpu_down_prep(unsigned int cpu)
 struct device *dev;

 dev = get_cpu_device(cpu);
+
+ microcode_fini_cpu(cpu);
+
 /* Suspend is in progress, only remove the interface */
 sysfs_remove_group(&dev->kobj, &mc_attr_group);
 pr_debug("CPU%d removed\n", cpu);
@@ -740,6 +705,18 @@ static int mc_cpu_down_prep(unsigned int cpu)
 return 0;
 }

+static void setup_online_cpu(struct work_struct *work)
+{
+ int cpu = smp_processor_id();
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+
+ memset(uci, 0, sizeof(*uci));
+
+ microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
+
+ mc_cpu_online(cpu);
+}
+
 static struct attribute *cpu_root_microcode_attrs[] = {
 #ifdef CONFIG_MICROCODE_LATE_LOADING
 &dev_attr_reload.attr,
@@ -771,27 +748,19 @@ int __init microcode_init(void)
 return -ENODEV;

 microcode_pdev = platform_device_register_simple("microcode", -1,
- NULL, 0);
- if (IS_ERR(microcode_pdev))
- return PTR_ERR(microcode_pdev);
-
- get_online_cpus();
- mutex_lock(&microcode_mutex);
- error = subsys_interface_register(&mc_cpu_interface);
- mutex_unlock(&microcode_mutex);
- put_online_cpus();
-
- if (error)
- goto out_pdev;
+ NULL, 0);

 error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
 &cpu_root_microcode_group);

 if (error) {
 pr_err("Error creating microcode group!\n");
- goto out_driver;
+ goto out_pdev;
 }

+ /* Do per-CPU setup */
+ schedule_on_each_cpu(setup_online_cpu);
+
 register_syscore_ops(&mc_syscore_ops);
 cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
 mc_cpu_starting, NULL);
@@ -802,15 +771,6 @@ int __init microcode_init(void)

 return 0;

- out_driver:
- get_online_cpus();
- mutex_lock(&microcode_mutex);
-
- subsys_interface_unregister(&mc_cpu_interface);
-
- mutex_unlock(&microcode_mutex);
- put_online_cpus();
-
 out_pdev:
 platform_device_unregister(microcode_pdev);
 return error;
--
2.39.3

SOURCES/1024-x86-microcode-Simplify-init-path-even-more.patch (new file, 212 lines)
@ -0,0 +1,212 @@
From 1a37aa866d76aaab6901bb331ea1be202ccad636 Mon Sep 17 00:00:00 2001
From: Borislav Petkov <bp@suse.de>
Date: Wed, 19 Oct 2022 19:07:30 +0200
Subject: [PATCH 10/36] x86/microcode: Simplify init path even more

Get rid of all the IPI-sending functions and their wrappers and use
those which are supposed to be called on each CPU.

Thus:

- microcode_init_cpu() gets called on each CPU on init, applying any new
  microcode that the driver might've found on the filesystem.

- mc_cpu_starting() simply tries to apply cached microcode as this is
  the cpuhp starting callback which gets called on CPU resume too.

Even if the driver init function is a late initcall, there is no
filesystem by then (not even a hdd driver has been loaded yet) so a new
firmware load attempt cannot simply be done.

It is pointless anyway - for that there's late loading if one really
needs it.

Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ashok Raj <ashok.raj@intel.com>
Link: https://lore.kernel.org/r/20221028142638.28498-3-bp@alien8.de
(cherry picked from commit 2071c0aeda228107bf1b9e870b6187c90fbeef1d)
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/kernel/cpu/microcode/core.c | 120 ++++-----------------------
 1 file changed, 16 insertions(+), 104 deletions(-)

diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 8a7fc5d8db6e..50321a43da01 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -338,60 +338,6 @@ void reload_early_microcode(void)
 }
 }

-static void collect_cpu_info_local(void *arg)
-{
- struct cpu_info_ctx *ctx = arg;
-
- ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
- ctx->cpu_sig);
-}
-
-static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
-{
- struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 };
- int ret;
-
- ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
- if (!ret)
- ret = ctx.err;
-
- return ret;
-}
-
-static int collect_cpu_info(int cpu)
-{
- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- int ret;
-
- memset(uci, 0, sizeof(*uci));
-
- ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
- if (!ret)
- uci->valid = 1;
-
- return ret;
-}
-
-static void apply_microcode_local(void *arg)
-{
- enum ucode_state *err = arg;
-
- *err = microcode_ops->apply_microcode(smp_processor_id());
-}
-
-static int apply_microcode_on_target(int cpu)
-{
- enum ucode_state err;
- int ret;
-
- ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
- if (!ret) {
- if (err == UCODE_ERROR)
- ret = 1;
- }
- return ret;
-}
-
 /* fake device for request_firmware */
 static struct platform_device *microcode_pdev;

@@ -478,7 +424,7 @@ static int __reload_late(void *info)
 * below.
 */
 if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
- apply_microcode_local(&err);
+ err = microcode_ops->apply_microcode(cpu);
 else
 goto wait_for_siblings;

@@ -500,7 +446,7 @@ wait_for_siblings:
 * revision.
 */
 if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
- apply_microcode_local(&err);
+ err = microcode_ops->apply_microcode(cpu);

 return ret;
 }
@@ -609,51 +555,15 @@ static void microcode_fini_cpu(int cpu)
 microcode_ops->microcode_fini_cpu(cpu);
 }

-static enum ucode_state microcode_resume_cpu(int cpu)
-{
- if (apply_microcode_on_target(cpu))
- return UCODE_ERROR;
-
- pr_debug("CPU%d updated upon resume\n", cpu);
-
- return UCODE_OK;
-}
-
-static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
-{
- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- enum ucode_state ustate;
-
- if (uci->valid)
- return UCODE_OK;
-
- if (collect_cpu_info(cpu))
- return UCODE_ERROR;
-
- /* --dimm. Trigger a delayed update? */
- if (system_state != SYSTEM_RUNNING)
- return UCODE_NFOUND;
-
- ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
- if (ustate == UCODE_NEW) {
- pr_debug("CPU%d updated upon init\n", cpu);
- apply_microcode_on_target(cpu);
- }
-
- return ustate;
-}
-
-static enum ucode_state microcode_update_cpu(int cpu)
+static enum ucode_state microcode_init_cpu(int cpu)
 {
 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

- /* Refresh CPU microcode revision after resume. */
- collect_cpu_info(cpu);
+ memset(uci, 0, sizeof(*uci));

- if (uci->valid)
- return microcode_resume_cpu(cpu);
+ microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);

- return microcode_init_cpu(cpu, false);
+ return microcode_ops->apply_microcode(cpu);
 }

 /**
@@ -671,14 +581,14 @@ void microcode_bsp_resume(void)
 }

 static struct syscore_ops mc_syscore_ops = {
- .resume = microcode_bsp_resume,
+ .resume = microcode_bsp_resume,
 };

 static int mc_cpu_starting(unsigned int cpu)
 {
- microcode_update_cpu(cpu);
- pr_debug("CPU%d added\n", cpu);
- return 0;
+ enum ucode_state err = microcode_ops->apply_microcode(cpu);
+
+ return err == UCODE_ERROR;
 }

 static int mc_cpu_online(unsigned int cpu)
@@ -708,11 +618,13 @@ static int mc_cpu_down_prep(unsigned int cpu)
 static void setup_online_cpu(struct work_struct *work)
 {
 int cpu = smp_processor_id();
- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-
- memset(uci, 0, sizeof(*uci));
+ enum ucode_state err;

- microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
+ err = microcode_init_cpu(cpu);
+ if (err == UCODE_ERROR) {
+ pr_err("Error applying microcode on CPU%d\n", cpu);
+ return;
+ }

 mc_cpu_online(cpu);
 }
--
2.39.3

@ -0,0 +1,88 @@
From a385d6a4761819826f5fb234415f4f1ad43fc141 Mon Sep 17 00:00:00 2001
From: Borislav Petkov <bp@suse.de>
Date: Mon, 19 Dec 2022 22:06:55 +0100
Subject: [PATCH 11/36] x86/microcode/AMD: Rename a couple of functions

- Rename apply_microcode_early_amd() to early_apply_microcode():
  simplify the name so that it is clear what it does and when does it do
  it.

- Rename __load_ucode_amd() to find_blobs_in_containers(): the new name
  actually explains what it does.

Document some.

No functional changes.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20221219210656.5140-1-bp@alien8.de
(cherry picked from commit 61de9b7036f26448a1916291c456f62dd6bf07ea)
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/kernel/cpu/microcode/amd.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 750889d77b56..0b557d04be22 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -415,8 +415,7 @@ static int __apply_microcode_amd(struct microcode_amd *mc)
 *
 * Returns true if container found (sets @desc), false otherwise.
 */
-static bool
-apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
+static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
 {
 struct cont_desc desc = { 0 };
 u8 (*patch)[PATCH_MAX_SIZE];
@@ -476,7 +475,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
 #endif
 }

-static void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
+static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
 {
 struct ucode_cpu_info *uci;
 struct cpio_data cp;
@@ -506,11 +505,11 @@ void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
 {
 struct cpio_data cp = { };

- __load_ucode_amd(cpuid_1_eax, &cp);
+ find_blobs_in_containers(cpuid_1_eax, &cp);
 if (!(cp.data && cp.size))
 return;

- apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true);
+ early_apply_microcode(cpuid_1_eax, cp.data, cp.size, true);
 }

 void load_ucode_amd_ap(unsigned int cpuid_1_eax)
@@ -541,11 +540,11 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 }
 }

- __load_ucode_amd(cpuid_1_eax, &cp);
+ find_blobs_in_containers(cpuid_1_eax, &cp);
 if (!(cp.data && cp.size))
 return;

- apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
+ early_apply_microcode(cpuid_1_eax, cp.data, cp.size, false);
 }

 static enum ucode_state
@@ -810,6 +809,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
 return 0;
 }

+/* Scan the blob in @data and add microcode patches to the cache. */
 static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 size_t size)
 {
--
2.39.3

@ -0,0 +1,79 @@
From f19ca27f91db6320d02ba1cd28751a217616e282 Mon Sep 17 00:00:00 2001
From: Mihai Carabas <mihai.carabas@oracle.com>
Date: Tue, 21 Apr 2020 22:28:38 +0300
Subject: [PATCH 12/36] x86/microcode: Fix return value for microcode late
 loading

The return value from stop_machine() might not be consistent.

stop_machine_cpuslocked() returns:
- zero if all functions have returned 0.
- a non-zero value if at least one of the functions returned
  a non-zero value.

There is no way to know if it is negative or positive. So make
__reload_late() return 0 on success or negative otherwise.

[ bp: Unify ret val check and touch up. ]

Signed-off-by: Mihai Carabas <mihai.carabas@oracle.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/1587497318-4438-1-git-send-email-mihai.carabas@oracle.com
(cherry picked from commit 9adbf3c609af92a57a73000a3cb8f4c2d307dfa3)
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/kernel/cpu/microcode/core.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 50321a43da01..36f78ceca5f2 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -400,8 +400,7 @@ static int __wait_for_cpus(atomic_t *t, long long timeout)
 /*
 * Returns:
 * < 0 - on error
- * 0 - no update done
- * 1 - microcode was updated
+ * 0 - success (no update done or microcode was updated)
 */
 static int __reload_late(void *info)
 {
@@ -428,11 +427,11 @@ static int __reload_late(void *info)
 else
 goto wait_for_siblings;

- if (err > UCODE_NFOUND) {
- pr_warn("Error reloading microcode on CPU %d\n", cpu);
+ if (err >= UCODE_NFOUND) {
+ if (err == UCODE_ERROR)
+ pr_warn("Error reloading microcode on CPU %d\n", cpu);
+
 ret = -1;
- } else if (err == UCODE_UPDATED || err == UCODE_OK) {
- ret = 1;
 }

 wait_for_siblings:
@@ -466,7 +465,7 @@ static int microcode_reload_late(void)
 atomic_set(&late_cpus_out, 0);

 ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
- if (ret > 0)
+ if (ret == 0)
 microcode_check();

 pr_info("Reload completed, microcode revision: 0x%x -> 0x%x\n",
@@ -508,7 +507,7 @@ static ssize_t reload_store(struct device *dev,
 put:
 put_online_cpus();

- if (ret >= 0)
+ if (ret == 0)
 ret = size;

 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
--
2.39.3

@ -0,0 +1,110 @@
From e298b1596a0ace2d3b272747a645856f6eb61128 Mon Sep 17 00:00:00 2001
From: Ashok Raj <ashok.raj@intel.com>
Date: Mon, 9 Jan 2023 07:35:50 -0800
Subject: [PATCH 13/36] x86/microcode: Add a parameter to microcode_check() to
 store CPU capabilities

Add a parameter to store CPU capabilities before performing a microcode
update so that CPU capabilities can be compared before and after update.

[ bp: Massage. ]

Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230109153555.4986-2-ashok.raj@intel.com
(cherry picked from commit ab31c74455c64e69342ddab21fd9426fcbfefde7)

CVE: CVE-2023-20593
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/include/asm/processor.h | 2 +-
 arch/x86/kernel/cpu/common.c | 21 +++++++++++++--------
 arch/x86/kernel/cpu/microcode/core.c | 3 ++-
 3 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 605740dfd017..9968d456d7e8 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -919,7 +919,7 @@ bool xen_set_default_idle(void);

 void stop_this_cpu(void *dummy);
 void df_debug(struct pt_regs *regs, long error_code);
-void microcode_check(void);
+void microcode_check(struct cpuinfo_x86 *prev_info);

 enum l1tf_mitigations {
 L1TF_MITIGATION_OFF,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 7be057c7531c..437fe55acce8 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2146,30 +2146,35 @@ void cpu_init_secondary(void)
 #endif

 #ifdef CONFIG_MICROCODE_LATE_LOADING
-/*
+/**
+ * microcode_check() - Check if any CPU capabilities changed after an update.
+ * @prev_info: CPU capabilities stored before an update.
+ *
 * The microcode loader calls this upon late microcode load to recheck features,
 * only when microcode has been updated. Caller holds microcode_mutex and CPU
 * hotplug lock.
+ *
+ * Return: None
 */
-void microcode_check(void)
+void microcode_check(struct cpuinfo_x86 *prev_info)
 {
- struct cpuinfo_x86 info;
-
 perf_check_microcode();

 /* Reload CPUID max function as it might've changed. */
- info.cpuid_level = cpuid_eax(0);
+ prev_info->cpuid_level = cpuid_eax(0);

 /*
 * Copy all capability leafs to pick up the synthetic ones so that
 * memcmp() below doesn't fail on that. The ones coming from CPUID will
 * get overwritten in get_cpu_cap().
 */
- memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
+ memcpy(&prev_info->x86_capability, &boot_cpu_data.x86_capability,
+ sizeof(prev_info->x86_capability));

- get_cpu_cap(&info);
+ get_cpu_cap(prev_info);

- if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
+ if (!memcmp(&prev_info->x86_capability, &boot_cpu_data.x86_capability,
+ sizeof(prev_info->x86_capability)))
 return;

 pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 36f78ceca5f2..41a90074df21 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -457,6 +457,7 @@ wait_for_siblings:
 static int microcode_reload_late(void)
 {
 int old = boot_cpu_data.microcode, ret;
+ struct cpuinfo_x86 prev_info;

 pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
 pr_err("You should switch to early loading, if possible.\n");
@@ -466,7 +467,7 @@ static int microcode_reload_late(void)

 ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
 if (ret == 0)
- microcode_check();
+ microcode_check(&prev_info);

 pr_info("Reload completed, microcode revision: 0x%x -> 0x%x\n",
 old, boot_cpu_data.microcode);
--
2.39.3

@ -0,0 +1,135 @@
From a8cd7508b4dfd8cedd394da8cdb0253f18bdb1a8 Mon Sep 17 00:00:00 2001
From: Ashok Raj <ashok.raj@intel.com>
Date: Mon, 9 Jan 2023 07:35:51 -0800
Subject: [PATCH 14/36] x86/microcode: Check CPU capabilities after late
 microcode update correctly

The kernel caches each CPU's feature bits at boot in an x86_capability[]
structure. However, the capabilities in the BSP's copy can be turned off
as a result of certain command line parameters or configuration
restrictions, for example the SGX bit. This can cause a mismatch when
comparing the values before and after the microcode update.

microcode_check() is called after an update to report any previously
cached CPUID bits which might have changed due to the update.

Therefore, store the cached CPU caps before the update and compare them
with the CPU caps after the microcode update has succeeded.

Thus, the comparison is done between the CPUID *hardware* bits before
and after the upgrade instead of using the cached, possibly runtime
modified values in BSP's boot_cpu_data copy.

As a result, false warnings about CPUID bits changes are avoided.

[ bp:
  - Massage.
  - Add SRBDS_CTRL example.
  - Add kernel-doc.
  - Incorporate forgotten review feedback from dhansen.
]

Fixes: 1008c52c09dc ("x86/CPU: Add a microcode loader callback")
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230109153555.4986-3-ashok.raj@intel.com
(cherry picked from commit c0dd9245aa9e25a697181f6085692272c9ec61bc)

CVE: CVE-2023-20593
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/include/asm/processor.h | 1 +
 arch/x86/kernel/cpu/common.c | 36 ++++++++++++++++++----------
 arch/x86/kernel/cpu/microcode/core.c | 6 +++++
 3 files changed, 30 insertions(+), 13 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 9968d456d7e8..820104bdee1f 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -920,6 +920,7 @@ bool xen_set_default_idle(void);
 void stop_this_cpu(void *dummy);
 void df_debug(struct pt_regs *regs, long error_code);
 void microcode_check(struct cpuinfo_x86 *prev_info);
+void store_cpu_caps(struct cpuinfo_x86 *info);

 enum l1tf_mitigations {
 L1TF_MITIGATION_OFF,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 437fe55acce8..22763a691b7b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2146,6 +2146,25 @@ void cpu_init_secondary(void)
 #endif

 #ifdef CONFIG_MICROCODE_LATE_LOADING
+/**
+ * store_cpu_caps() - Store a snapshot of CPU capabilities
+ * @curr_info: Pointer where to store it
+ *
+ * Returns: None
+ */
+void store_cpu_caps(struct cpuinfo_x86 *curr_info)
+{
+ /* Reload CPUID max function as it might've changed. */
+ curr_info->cpuid_level = cpuid_eax(0);
+
+ /* Copy all capability leafs and pick up the synthetic ones. */
+ memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability,
+ sizeof(curr_info->x86_capability));
+
+ /* Get the hardware CPUID leafs */
+ get_cpu_cap(curr_info);
+}
+
 /**
 * microcode_check() - Check if any CPU capabilities changed after an update.
|
||||
* @prev_info: CPU capabilities stored before an update.
|
||||
@@ -2158,22 +2177,13 @@ void cpu_init_secondary(void)
|
||||
*/
|
||||
void microcode_check(struct cpuinfo_x86 *prev_info)
|
||||
{
|
||||
- perf_check_microcode();
|
||||
-
|
||||
- /* Reload CPUID max function as it might've changed. */
|
||||
- prev_info->cpuid_level = cpuid_eax(0);
|
||||
+ struct cpuinfo_x86 curr_info;
|
||||
|
||||
- /*
|
||||
- * Copy all capability leafs to pick up the synthetic ones so that
|
||||
- * memcmp() below doesn't fail on that. The ones coming from CPUID will
|
||||
- * get overwritten in get_cpu_cap().
|
||||
- */
|
||||
- memcpy(&prev_info->x86_capability, &boot_cpu_data.x86_capability,
|
||||
- sizeof(prev_info->x86_capability));
|
||||
+ perf_check_microcode();
|
||||
|
||||
- get_cpu_cap(prev_info);
|
||||
+ store_cpu_caps(&curr_info);
|
||||
|
||||
- if (!memcmp(&prev_info->x86_capability, &boot_cpu_data.x86_capability,
|
||||
+ if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
|
||||
sizeof(prev_info->x86_capability)))
|
||||
return;
|
||||
|
||||
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
|
||||
index 41a90074df21..9df2c40a4ddf 100644
|
||||
--- a/arch/x86/kernel/cpu/microcode/core.c
|
||||
+++ b/arch/x86/kernel/cpu/microcode/core.c
|
||||
@@ -465,6 +465,12 @@ static int microcode_reload_late(void)
|
||||
atomic_set(&late_cpus_in, 0);
|
||||
atomic_set(&late_cpus_out, 0);
|
||||
|
||||
+ /*
|
||||
+ * Take a snapshot before the microcode update in order to compare and
|
||||
+ * check whether any bits changed after an update.
|
||||
+ */
|
||||
+ store_cpu_caps(&prev_info);
|
||||
+
|
||||
ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
|
||||
if (ret == 0)
|
||||
microcode_check(&prev_info);
|
||||
--
|
||||
2.39.3
|
||||
|
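The patch above replaces microcode_check()'s in-place capability re-read with a snapshot taken before the update (store_cpu_caps()) and a memcmp() of the two snapshots afterwards. A rough illustration of that snapshot-and-compare pattern follows; this is plain user-space C, not the kernel code, and NCAPINTS and the helper names are illustrative stand-ins for x86_capability[] and the kernel helpers:

#include <stdio.h>
#include <string.h>

#define NCAPINTS 21	/* illustrative number of 32-bit capability words */

struct cpu_caps {
	unsigned int words[NCAPINTS];
};

/* Stand-in for store_cpu_caps(): snapshot the current capability words. */
static void snapshot_caps(struct cpu_caps *out, const unsigned int *current)
{
	memcpy(out->words, current, sizeof(out->words));
}

/* Stand-in for microcode_check(): compare the pre-update snapshot with a fresh one. */
static void check_caps(const struct cpu_caps *prev, const unsigned int *current)
{
	struct cpu_caps curr;

	snapshot_caps(&curr, current);

	if (memcmp(prev->words, curr.words, sizeof(curr.words)) == 0)
		return;		/* nothing changed, stay quiet */

	printf("CPU features changed after the update, but might not take effect.\n");
}

int main(void)
{
	unsigned int hw_caps[NCAPINTS] = { 0 };
	struct cpu_caps before;

	snapshot_caps(&before, hw_caps);	/* before the "update" */
	hw_caps[3] |= 1u << 5;			/* pretend the update exposed a new bit */
	check_caps(&before, hw_caps);		/* after the "update" */
	return 0;
}

Comparing two snapshots of the hardware CPUID bits avoids the false warnings described in the commit message, because runtime-masked bits in boot_cpu_data never enter the comparison.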
@ -0,0 +1,54 @@
|
||||
From 527c5ac4a29d4b854b10586ea877c65174cbd40d Mon Sep 17 00:00:00 2001
|
||||
From: Ashok Raj <ashok.raj@intel.com>
|
||||
Date: Mon, 9 Jan 2023 07:35:52 -0800
|
||||
Subject: [PATCH 15/36] x86/microcode: Adjust late loading result reporting
|
||||
message
|
||||
|
||||
During late microcode loading, the "Reload completed" message is issued
|
||||
unconditionally, regardless of success or failure.
|
||||
|
||||
Adjust the message to report the result of the update.
|
||||
|
||||
[ bp: Massage. ]
|
||||
|
||||
Fixes: 9bd681251b7c ("x86/microcode: Announce reload operation's completion")
|
||||
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
|
||||
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
|
||||
Reviewed-by: Tony Luck <tony.luck@intel.com>
|
||||
Link: https://lore.kernel.org/lkml/874judpqqd.ffs@tglx/
|
||||
(cherry picked from commit 6eab3abac7043226e5375e9ead0c7607ced6767b)
|
||||
|
||||
CVE: CVE-2023-20593
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/kernel/cpu/microcode/core.c | 11 +++++++----
|
||||
1 file changed, 7 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
|
||||
index 9df2c40a4ddf..fb383088bede 100644
|
||||
--- a/arch/x86/kernel/cpu/microcode/core.c
|
||||
+++ b/arch/x86/kernel/cpu/microcode/core.c
|
||||
@@ -472,11 +472,14 @@ static int microcode_reload_late(void)
|
||||
store_cpu_caps(&prev_info);
|
||||
|
||||
ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
|
||||
- if (ret == 0)
|
||||
+ if (!ret) {
|
||||
+ pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n",
|
||||
+ old, boot_cpu_data.microcode);
|
||||
microcode_check(&prev_info);
|
||||
-
|
||||
- pr_info("Reload completed, microcode revision: 0x%x -> 0x%x\n",
|
||||
- old, boot_cpu_data.microcode);
|
||||
+ } else {
|
||||
+ pr_info("Reload failed, current microcode revision: 0x%x\n",
|
||||
+ boot_cpu_data.microcode);
|
||||
+ }
|
||||
|
||||
return ret;
|
||||
}
|
||||
--
|
||||
2.39.3
|
||||
|
@ -0,0 +1,148 @@
|
||||
From e167d6613aef8356261f97e2b50489142f8e1406 Mon Sep 17 00:00:00 2001
|
||||
From: Alexey Kardashevskiy <aik@amd.com>
|
||||
Date: Fri, 20 Jan 2023 14:10:45 +1100
|
||||
Subject: [PATCH 16/36] x86/amd: Cache debug register values in percpu
|
||||
variables
|
||||
|
||||
Reading DR[0-3]_ADDR_MASK MSRs takes about 250 cycles which is going to
|
||||
be noticeable with the AMD KVM SEV-ES DebugSwap feature enabled. KVM is
|
||||
going to store host's DR[0-3] and DR[0-3]_ADDR_MASK before switching to
|
||||
a guest; the hardware is going to swap these on VMRUN and VMEXIT.
|
||||
|
||||
Store MSR values passed to set_dr_addr_mask() in percpu variables
|
||||
(when changed) and return them via new amd_get_dr_addr_mask().
|
||||
The gain here is about 10x.
|
||||
|
||||
As set_dr_addr_mask() uses the array too, change the @dr type to
|
||||
unsigned to avoid checking for <0. And give it the amd_ prefix to match
|
||||
the new helper as the whole DR_ADDR_MASK feature is AMD-specific anyway.
|
||||
|
||||
While at it, replace deprecated boot_cpu_has() with cpu_feature_enabled()
|
||||
in set_dr_addr_mask().
|
||||
|
||||
Signed-off-by: Alexey Kardashevskiy <aik@amd.com>
|
||||
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
|
||||
Link: https://lore.kernel.org/r/20230120031047.628097-2-aik@amd.com
|
||||
(cherry picked from commit 7914695743d598b189d549f2f57af24aa5633705)
|
||||
|
||||
The conflict is due to patch 84b6a34 which is not present
|
||||
in this repo. That patch is an optimization, not a bug fix, hence
|
||||
it is not ported here.
|
||||
|
||||
CVE: CVE-2023-20593
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/include/asm/debugreg.h | 9 +++++--
|
||||
arch/x86/kernel/cpu/amd.c | 47 +++++++++++++++++++++++----------
|
||||
arch/x86/kernel/hw_breakpoint.c | 4 +--
|
||||
3 files changed, 42 insertions(+), 18 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
|
||||
index 1c964123f2dd..ccf6639c8df9 100644
|
||||
--- a/arch/x86/include/asm/debugreg.h
|
||||
+++ b/arch/x86/include/asm/debugreg.h
|
||||
@@ -144,9 +144,14 @@ static __always_inline void local_db_restore(unsigned long dr7)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CPU_SUP_AMD
|
||||
-extern void set_dr_addr_mask(unsigned long mask, int dr);
|
||||
+extern void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr);
|
||||
+extern unsigned long amd_get_dr_addr_mask(unsigned int dr);
|
||||
#else
|
||||
-static inline void set_dr_addr_mask(unsigned long mask, int dr) { }
|
||||
+static inline void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr) { }
|
||||
+static inline unsigned long amd_get_dr_addr_mask(unsigned int dr)
|
||||
+{
|
||||
+ return 0;
|
||||
+}
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_X86_DEBUGREG_H */
|
||||
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
|
||||
index efa49b3f87b9..6eb7923f5f08 100644
|
||||
--- a/arch/x86/kernel/cpu/amd.c
|
||||
+++ b/arch/x86/kernel/cpu/amd.c
|
||||
@@ -1146,24 +1146,43 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
|
||||
return false;
|
||||
}
|
||||
|
||||
-void set_dr_addr_mask(unsigned long mask, int dr)
|
||||
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);
|
||||
+
|
||||
+static unsigned int amd_msr_dr_addr_masks[] = {
|
||||
+ MSR_F16H_DR0_ADDR_MASK,
|
||||
+ MSR_F16H_DR1_ADDR_MASK,
|
||||
+ MSR_F16H_DR1_ADDR_MASK + 1,
|
||||
+ MSR_F16H_DR1_ADDR_MASK + 2
|
||||
+};
|
||||
+
|
||||
+void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
|
||||
{
|
||||
- if (!boot_cpu_has(X86_FEATURE_BPEXT))
|
||||
+ int cpu = smp_processor_id();
|
||||
+
|
||||
+ if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
|
||||
return;
|
||||
|
||||
- switch (dr) {
|
||||
- case 0:
|
||||
- wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
|
||||
- break;
|
||||
- case 1:
|
||||
- case 2:
|
||||
- case 3:
|
||||
- wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
|
||||
- break;
|
||||
- default:
|
||||
- break;
|
||||
- }
|
||||
+ if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
|
||||
+ return;
|
||||
+
|
||||
+ if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
|
||||
+ return;
|
||||
+
|
||||
+ wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
|
||||
+ per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
|
||||
+}
|
||||
+
|
||||
+unsigned long amd_get_dr_addr_mask(unsigned int dr)
|
||||
+{
|
||||
+ if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
|
||||
+ return 0;
|
||||
+
|
||||
+ if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
|
||||
+ return 0;
|
||||
+
|
||||
+ return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
|
||||
}
|
||||
+EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);
|
||||
|
||||
u32 amd_get_highest_perf(void)
|
||||
{
|
||||
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
|
||||
index 3871950432d6..d38d4648512d 100644
|
||||
--- a/arch/x86/kernel/hw_breakpoint.c
|
||||
+++ b/arch/x86/kernel/hw_breakpoint.c
|
||||
@@ -129,7 +129,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
|
||||
|
||||
set_debugreg(*dr7, 7);
|
||||
if (info->mask)
|
||||
- set_dr_addr_mask(info->mask, i);
|
||||
+ amd_set_dr_addr_mask(info->mask, i);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -166,7 +166,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
|
||||
|
||||
set_debugreg(*dr7, 7);
|
||||
if (info->mask)
|
||||
- set_dr_addr_mask(0, i);
|
||||
+ amd_set_dr_addr_mask(0, i);
|
||||
}
|
||||
|
||||
static int arch_bp_generic_len(int x86_len)
|
||||
--
|
||||
2.39.3
|
||||
|
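The caching scheme in the patch above is a simple write-through cache: the last value written to each DR[0-3]_ADDR_MASK MSR is kept in a per-CPU array, the MSR is only rewritten when the value actually changes, and reads are served from the array instead of the slow rdmsr. A minimal single-threaded sketch of that idea follows; fake_wrmsr() stands in for the real MSR write and there is no per-CPU handling here, so the names and sizes are illustrative only:

#include <stdio.h>

#define NUM_DR_MASKS 4

/* Cached copy of what was last written to each (hypothetical) mask register. */
static unsigned long dr_addr_mask_cache[NUM_DR_MASKS];
static unsigned long msr_write_count;	/* counts how many expensive writes happened */

/* Stand-in for wrmsr(): the slow hardware access we want to avoid. */
static void fake_wrmsr(unsigned int dr, unsigned long mask)
{
	msr_write_count++;
	(void)dr;
	(void)mask;
}

static void set_dr_addr_mask(unsigned long mask, unsigned int dr)
{
	if (dr >= NUM_DR_MASKS)
		return;
	if (dr_addr_mask_cache[dr] == mask)	/* unchanged: skip the slow write */
		return;
	fake_wrmsr(dr, mask);
	dr_addr_mask_cache[dr] = mask;
}

static unsigned long get_dr_addr_mask(unsigned int dr)
{
	return dr < NUM_DR_MASKS ? dr_addr_mask_cache[dr] : 0;	/* served from the cache */
}

int main(void)
{
	set_dr_addr_mask(0xfff, 0);
	set_dr_addr_mask(0xfff, 0);	/* second call hits the cache and is free */
	printf("writes: %lu, mask0: %#lx\n", msr_write_count, get_dr_addr_mask(0));
	return 0;
}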
@ -0,0 +1,94 @@
|
||||
From a53f36400559990c1a3716f11ecb46c03cc7cde7 Mon Sep 17 00:00:00 2001
|
||||
From: Borislav Petkov <bp@suse.de>
|
||||
Date: Thu, 25 Aug 2022 09:51:57 +0200
|
||||
Subject: [PATCH 17/36] x86/microcode: Remove ->request_microcode_user()
|
||||
|
||||
181b6f40e9ea ("x86/microcode: Rip out the OLD_INTERFACE")
|
||||
|
||||
removed the old microcode loading interface but forgot to remove the
|
||||
related ->request_microcode_user() functionality which it uses.
|
||||
|
||||
Rip it out now too.
|
||||
|
||||
Signed-off-by: Borislav Petkov <bp@suse.de>
|
||||
Link: https://lore.kernel.org/r/20220825075445.28171-1-bp@alien8.de
|
||||
(cherry picked from commit 8c61eafd22d7207039bff85c6e1d386f15abd17e)
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/include/asm/microcode.h | 3 ---
|
||||
arch/x86/kernel/cpu/microcode/amd.c | 7 -------
|
||||
arch/x86/kernel/cpu/microcode/intel.c | 17 -----------------
|
||||
3 files changed, 27 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
|
||||
index a930a63d1260..215ea340e7f9 100644
|
||||
--- a/arch/x86/include/asm/microcode.h
|
||||
+++ b/arch/x86/include/asm/microcode.h
|
||||
@@ -32,9 +32,6 @@ enum ucode_state {
|
||||
};
|
||||
|
||||
struct microcode_ops {
|
||||
- enum ucode_state (*request_microcode_user) (int cpu,
|
||||
- const void __user *buf, size_t size);
|
||||
-
|
||||
enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
|
||||
bool refresh_fw);
|
||||
|
||||
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
|
||||
index 0b557d04be22..9d15b7e5a13e 100644
|
||||
--- a/arch/x86/kernel/cpu/microcode/amd.c
|
||||
+++ b/arch/x86/kernel/cpu/microcode/amd.c
|
||||
@@ -929,12 +929,6 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
|
||||
return ret;
|
||||
}
|
||||
|
||||
-static enum ucode_state
|
||||
-request_microcode_user(int cpu, const void __user *buf, size_t size)
|
||||
-{
|
||||
- return UCODE_ERROR;
|
||||
-}
|
||||
-
|
||||
static void microcode_fini_cpu_amd(int cpu)
|
||||
{
|
||||
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
|
||||
@@ -943,7 +937,6 @@ static void microcode_fini_cpu_amd(int cpu)
|
||||
}
|
||||
|
||||
static struct microcode_ops microcode_amd_ops = {
|
||||
- .request_microcode_user = request_microcode_user,
|
||||
.request_microcode_fw = request_microcode_amd,
|
||||
.collect_cpu_info = collect_cpu_info_amd,
|
||||
.apply_microcode = apply_microcode_amd,
|
||||
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
|
||||
index 5f8223909db4..6fa667192bd9 100644
|
||||
--- a/arch/x86/kernel/cpu/microcode/intel.c
|
||||
+++ b/arch/x86/kernel/cpu/microcode/intel.c
|
||||
@@ -952,24 +952,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
|
||||
return ret;
|
||||
}
|
||||
|
||||
-static enum ucode_state
|
||||
-request_microcode_user(int cpu, const void __user *buf, size_t size)
|
||||
-{
|
||||
- struct iov_iter iter;
|
||||
- struct iovec iov;
|
||||
-
|
||||
- if (is_blacklisted(cpu))
|
||||
- return UCODE_NFOUND;
|
||||
-
|
||||
- iov.iov_base = (void __user *)buf;
|
||||
- iov.iov_len = size;
|
||||
- iov_iter_init(&iter, WRITE, &iov, 1, size);
|
||||
-
|
||||
- return generic_load_microcode(cpu, &iter);
|
||||
-}
|
||||
-
|
||||
static struct microcode_ops microcode_intel_ops = {
|
||||
- .request_microcode_user = request_microcode_user,
|
||||
.request_microcode_fw = request_microcode_fw,
|
||||
.collect_cpu_info = collect_cpu_info,
|
||||
.apply_microcode = apply_microcode_intel,
|
||||
--
|
||||
2.39.3
|
||||
|
88
SOURCES/1032-x86-microcode-Kill-refresh_fw.patch
Normal file
@ -0,0 +1,88 @@
|
||||
From f93ce31159692d4be33e05f6a63361c7b9a29292 Mon Sep 17 00:00:00 2001
|
||||
From: Borislav Petkov <bp@suse.de>
|
||||
Date: Wed, 19 Oct 2022 19:16:20 +0200
|
||||
Subject: [PATCH 18/36] x86/microcode: Kill refresh_fw
|
||||
|
||||
request_microcode_fw() can always request firmware now so drop this
|
||||
superfluous argument.
|
||||
|
||||
Signed-off-by: Borislav Petkov <bp@suse.de>
|
||||
Reviewed-by: Ashok Raj <ashok.raj@intel.com>
|
||||
Link: https://lore.kernel.org/r/20221028142638.28498-4-bp@alien8.de
|
||||
(cherry picked from commit a61ac80ae52ea349416472cd52005f9988537208)
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/include/asm/microcode.h | 3 +--
|
||||
arch/x86/kernel/cpu/microcode/amd.c | 5 ++---
|
||||
arch/x86/kernel/cpu/microcode/core.c | 2 +-
|
||||
arch/x86/kernel/cpu/microcode/intel.c | 3 +--
|
||||
4 files changed, 5 insertions(+), 8 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
|
||||
index 215ea340e7f9..af1ba544c800 100644
|
||||
--- a/arch/x86/include/asm/microcode.h
|
||||
+++ b/arch/x86/include/asm/microcode.h
|
||||
@@ -32,8 +32,7 @@ enum ucode_state {
|
||||
};
|
||||
|
||||
struct microcode_ops {
|
||||
- enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
|
||||
- bool refresh_fw);
|
||||
+ enum ucode_state (*request_microcode_fw) (int cpu, struct device *);
|
||||
|
||||
void (*microcode_fini_cpu) (int cpu);
|
||||
|
||||
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
|
||||
index 9d15b7e5a13e..381addff7fc8 100644
|
||||
--- a/arch/x86/kernel/cpu/microcode/amd.c
|
||||
+++ b/arch/x86/kernel/cpu/microcode/amd.c
|
||||
@@ -895,8 +895,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
|
||||
*
|
||||
* These might be larger than 2K.
|
||||
*/
|
||||
-static enum ucode_state request_microcode_amd(int cpu, struct device *device,
|
||||
- bool refresh_fw)
|
||||
+static enum ucode_state request_microcode_amd(int cpu, struct device *device)
|
||||
{
|
||||
char fw_name[36] = "amd-ucode/microcode_amd.bin";
|
||||
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
||||
@@ -905,7 +904,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
|
||||
const struct firmware *fw;
|
||||
|
||||
/* reload ucode container only on the boot cpu */
|
||||
- if (!refresh_fw || !bsp)
|
||||
+ if (!bsp)
|
||||
return UCODE_OK;
|
||||
|
||||
if (c->x86 >= 0x15)
|
||||
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
|
||||
index fb383088bede..3d4c2c1aae11 100644
|
||||
--- a/arch/x86/kernel/cpu/microcode/core.c
|
||||
+++ b/arch/x86/kernel/cpu/microcode/core.c
|
||||
@@ -506,7 +506,7 @@ static ssize_t reload_store(struct device *dev,
|
||||
if (ret)
|
||||
goto put;
|
||||
|
||||
- tmp_ret = microcode_ops->request_microcode_fw(bsp, µcode_pdev->dev, true);
|
||||
+ tmp_ret = microcode_ops->request_microcode_fw(bsp, µcode_pdev->dev);
|
||||
if (tmp_ret != UCODE_NEW)
|
||||
goto put;
|
||||
|
||||
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
|
||||
index 6fa667192bd9..818d5fa8ca20 100644
|
||||
--- a/arch/x86/kernel/cpu/microcode/intel.c
|
||||
+++ b/arch/x86/kernel/cpu/microcode/intel.c
|
||||
@@ -921,8 +921,7 @@ static bool is_blacklisted(unsigned int cpu)
|
||||
return false;
|
||||
}
|
||||
|
||||
-static enum ucode_state request_microcode_fw(int cpu, struct device *device,
|
||||
- bool refresh_fw)
|
||||
+static enum ucode_state request_microcode_fw(int cpu, struct device *device)
|
||||
{
|
||||
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
||||
const struct firmware *firmware;
|
||||
--
|
||||
2.39.3
|
||||
|
@ -0,0 +1,92 @@
|
||||
From cb425451860828e0ac0516f58a37b131f0476b3c Mon Sep 17 00:00:00 2001
|
||||
From: "Borislav Petkov (AMD)" <bp@alien8.de>
|
||||
Date: Tue, 17 Jan 2023 23:59:24 +0100
|
||||
Subject: [PATCH 19/36] x86/microcode/amd: Remove load_microcode_amd()'s bsp
|
||||
parameter
|
||||
|
||||
It is always the BSP.
|
||||
|
||||
No functional changes.
|
||||
|
||||
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
|
||||
Link: https://lore.kernel.org/r/20230130161709.11615-2-bp@alien8.de
|
||||
(cherry picked from commit 2355370cd941cbb20882cc3f34460f9f2b8f9a18)
|
||||
|
||||
CVE: CVE-2023-20593
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/kernel/cpu/microcode/amd.c | 19 ++++---------------
|
||||
1 file changed, 4 insertions(+), 15 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
|
||||
index 381addff7fc8..68643a68fa11 100644
|
||||
--- a/arch/x86/kernel/cpu/microcode/amd.c
|
||||
+++ b/arch/x86/kernel/cpu/microcode/amd.c
|
||||
@@ -547,8 +547,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
|
||||
early_apply_microcode(cpuid_1_eax, cp.data, cp.size, false);
|
||||
}
|
||||
|
||||
-static enum ucode_state
|
||||
-load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
|
||||
+static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
|
||||
|
||||
int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
|
||||
{
|
||||
@@ -566,7 +565,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
|
||||
if (!desc.mc)
|
||||
return -EINVAL;
|
||||
|
||||
- ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
|
||||
+ ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
|
||||
if (ret > UCODE_UPDATED)
|
||||
return -EINVAL;
|
||||
|
||||
@@ -844,8 +843,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
|
||||
return UCODE_OK;
|
||||
}
|
||||
|
||||
-static enum ucode_state
|
||||
-load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
|
||||
+static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
|
||||
{
|
||||
struct ucode_patch *p;
|
||||
enum ucode_state ret;
|
||||
@@ -869,10 +867,6 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
|
||||
ret = UCODE_NEW;
|
||||
}
|
||||
|
||||
- /* save BSP's matching patch for early load */
|
||||
- if (!save)
|
||||
- return ret;
|
||||
-
|
||||
memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
|
||||
memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
|
||||
|
||||
@@ -899,14 +893,9 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
|
||||
{
|
||||
char fw_name[36] = "amd-ucode/microcode_amd.bin";
|
||||
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
||||
- bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
|
||||
enum ucode_state ret = UCODE_NFOUND;
|
||||
const struct firmware *fw;
|
||||
|
||||
- /* reload ucode container only on the boot cpu */
|
||||
- if (!bsp)
|
||||
- return UCODE_OK;
|
||||
-
|
||||
if (c->x86 >= 0x15)
|
||||
snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
|
||||
|
||||
@@ -919,7 +908,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
|
||||
if (!verify_container(fw->data, fw->size, false))
|
||||
goto fw_release;
|
||||
|
||||
- ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
|
||||
+ ret = load_microcode_amd(c->x86, fw->data, fw->size);
|
||||
|
||||
fw_release:
|
||||
release_firmware(fw);
|
||||
--
|
||||
2.39.3
|
||||
|
@ -0,0 +1,62 @@
|
||||
From fbc8e41bf83779c3c6727075c5d131a3aa9f7a2b Mon Sep 17 00:00:00 2001
|
||||
From: Borislav Petkov <bp@suse.de>
|
||||
Date: Wed, 19 Oct 2022 19:25:27 +0200
|
||||
Subject: [PATCH 20/36] x86/microcode: Drop struct ucode_cpu_info.valid
|
||||
|
||||
It is not needed anymore.
|
||||
|
||||
Signed-off-by: Borislav Petkov <bp@suse.de>
|
||||
Reviewed-by: Ashok Raj <ashok.raj@intel.com>
|
||||
Link: https://lore.kernel.org/r/20221028142638.28498-6-bp@alien8.de
|
||||
(cherry picked from commit 254ed7cf4dd79a18bbc496ab53f6c82d45431c78)
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/include/asm/microcode.h | 1 -
|
||||
arch/x86/kernel/cpu/intel.c | 1 -
|
||||
arch/x86/kernel/cpu/microcode/core.c | 4 ++--
|
||||
3 files changed, 2 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
|
||||
index af1ba544c800..e65c39a1f5fa 100644
|
||||
--- a/arch/x86/include/asm/microcode.h
|
||||
+++ b/arch/x86/include/asm/microcode.h
|
||||
@@ -48,7 +48,6 @@ struct microcode_ops {
|
||||
|
||||
struct ucode_cpu_info {
|
||||
struct cpu_signature cpu_sig;
|
||||
- int valid;
|
||||
void *mc;
|
||||
};
|
||||
extern struct ucode_cpu_info ucode_cpu_info[];
|
||||
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
|
||||
index cf85431fb09a..c7935482c58a 100644
|
||||
--- a/arch/x86/kernel/cpu/intel.c
|
||||
+++ b/arch/x86/kernel/cpu/intel.c
|
||||
@@ -243,7 +243,6 @@ int intel_cpu_collect_info(struct ucode_cpu_info *uci)
|
||||
csig.rev = intel_get_microcode_revision();
|
||||
|
||||
uci->cpu_sig = csig;
|
||||
- uci->valid = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
|
||||
index 3d4c2c1aae11..6cd0db1e7b96 100644
|
||||
--- a/arch/x86/kernel/cpu/microcode/core.c
|
||||
+++ b/arch/x86/kernel/cpu/microcode/core.c
|
||||
@@ -583,9 +583,9 @@ void microcode_bsp_resume(void)
|
||||
int cpu = smp_processor_id();
|
||||
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
|
||||
|
||||
- if (uci->valid && uci->mc)
|
||||
+ if (uci->mc)
|
||||
microcode_ops->apply_microcode(cpu);
|
||||
- else if (!uci->mc)
|
||||
+ else
|
||||
reload_early_microcode();
|
||||
}
|
||||
|
||||
--
|
||||
2.39.3
|
||||
|
@ -0,0 +1,110 @@
|
||||
From 077f4f2e376a6489736ee5994db3539593941c0c Mon Sep 17 00:00:00 2001
|
||||
From: "Borislav Petkov (AMD)" <bp@alien8.de>
|
||||
Date: Thu, 26 Jan 2023 00:08:03 +0100
|
||||
Subject: [PATCH 21/36] x86/microcode/AMD: Add a @cpu parameter to the
|
||||
reloading functions
|
||||
|
||||
Will be used in a subsequent change.
|
||||
|
||||
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
|
||||
Link: https://lore.kernel.org/r/20230130161709.11615-3-bp@alien8.de
|
||||
(cherry picked from commit a5ad92134bd153a9ccdcddf09a95b088f36c3cce)
|
||||
|
||||
CVE: CVE-2023-20593
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/include/asm/microcode.h | 4 ++--
|
||||
arch/x86/include/asm/microcode_amd.h | 4 ++--
|
||||
arch/x86/kernel/cpu/microcode/amd.c | 2 +-
|
||||
arch/x86/kernel/cpu/microcode/core.c | 6 +++---
|
||||
4 files changed, 8 insertions(+), 8 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
|
||||
index e65c39a1f5fa..67051a58a18b 100644
|
||||
--- a/arch/x86/include/asm/microcode.h
|
||||
+++ b/arch/x86/include/asm/microcode.h
|
||||
@@ -125,7 +125,7 @@ static inline unsigned int x86_cpuid_family(void)
|
||||
int __init microcode_init(void);
|
||||
extern void __init load_ucode_bsp(void);
|
||||
extern void load_ucode_ap(void);
|
||||
-void reload_early_microcode(void);
|
||||
+void reload_early_microcode(unsigned int cpu);
|
||||
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
|
||||
extern bool initrd_gone;
|
||||
void microcode_bsp_resume(void);
|
||||
@@ -133,7 +133,7 @@ void microcode_bsp_resume(void);
|
||||
static inline int __init microcode_init(void) { return 0; };
|
||||
static inline void __init load_ucode_bsp(void) { }
|
||||
static inline void load_ucode_ap(void) { }
|
||||
-static inline void reload_early_microcode(void) { }
|
||||
+static inline void reload_early_microcode(unsigned int cpu) { }
|
||||
static inline bool
|
||||
get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
|
||||
static inline void microcode_bsp_resume(void) { }
|
||||
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
|
||||
index 7063b5a43220..a645b25ee442 100644
|
||||
--- a/arch/x86/include/asm/microcode_amd.h
|
||||
+++ b/arch/x86/include/asm/microcode_amd.h
|
||||
@@ -47,12 +47,12 @@ struct microcode_amd {
|
||||
extern void __init load_ucode_amd_bsp(unsigned int family);
|
||||
extern void load_ucode_amd_ap(unsigned int family);
|
||||
extern int __init save_microcode_in_initrd_amd(unsigned int family);
|
||||
-void reload_ucode_amd(void);
|
||||
+void reload_ucode_amd(unsigned int cpu);
|
||||
#else
|
||||
static inline void __init load_ucode_amd_bsp(unsigned int family) {}
|
||||
static inline void load_ucode_amd_ap(unsigned int family) {}
|
||||
static inline int __init
|
||||
save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
|
||||
-static inline void reload_ucode_amd(void) {}
|
||||
+static inline void reload_ucode_amd(unsigned int cpu) {}
|
||||
#endif
|
||||
#endif /* _ASM_X86_MICROCODE_AMD_H */
|
||||
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
|
||||
index 68643a68fa11..6ab27650f8a7 100644
|
||||
--- a/arch/x86/kernel/cpu/microcode/amd.c
|
||||
+++ b/arch/x86/kernel/cpu/microcode/amd.c
|
||||
@@ -572,7 +572,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
|
||||
return 0;
|
||||
}
|
||||
|
||||
-void reload_ucode_amd(void)
|
||||
+void reload_ucode_amd(unsigned int cpu)
|
||||
{
|
||||
struct microcode_amd *mc;
|
||||
u32 rev, dummy __always_unused;
|
||||
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
|
||||
index 6cd0db1e7b96..681df41e5b69 100644
|
||||
--- a/arch/x86/kernel/cpu/microcode/core.c
|
||||
+++ b/arch/x86/kernel/cpu/microcode/core.c
|
||||
@@ -317,7 +317,7 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
|
||||
#endif
|
||||
}
|
||||
|
||||
-void reload_early_microcode(void)
|
||||
+void reload_early_microcode(unsigned int cpu)
|
||||
{
|
||||
int vendor, family;
|
||||
|
||||
@@ -331,7 +331,7 @@ void reload_early_microcode(void)
|
||||
break;
|
||||
case X86_VENDOR_AMD:
|
||||
if (family >= 0x10)
|
||||
- reload_ucode_amd();
|
||||
+ reload_ucode_amd(cpu);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
@@ -586,7 +586,7 @@ void microcode_bsp_resume(void)
|
||||
if (uci->mc)
|
||||
microcode_ops->apply_microcode(cpu);
|
||||
else
|
||||
- reload_early_microcode();
|
||||
+ reload_early_microcode(cpu);
|
||||
}
|
||||
|
||||
static struct syscore_ops mc_syscore_ops = {
|
||||
--
|
||||
2.39.3
|
||||
|
@ -0,0 +1,63 @@
|
||||
From d9032189bbe791846b2c3e224ae164e106e2be1b Mon Sep 17 00:00:00 2001
|
||||
From: Kees Cook <keescook@chromium.org>
|
||||
Date: Wed, 21 Sep 2022 20:10:10 -0700
|
||||
Subject: [PATCH 22/36] x86/microcode/AMD: Track patch allocation size
|
||||
explicitly
|
||||
|
||||
In preparation for reducing the use of ksize(), record the actual
|
||||
allocation size for later memcpy(). This avoids copying extra
|
||||
(uninitialized!) bytes into the patch buffer when the requested
|
||||
allocation size isn't exactly the size of a kmalloc bucket.
|
||||
Additionally, fix potential future issues where runtime bounds checking
|
||||
will notice that the buffer was allocated to a smaller value than
|
||||
returned by ksize().
|
||||
|
||||
Fixes: 757885e94a22 ("x86, microcode, amd: Early microcode patch loading support for AMD")
|
||||
Suggested-by: Daniel Micay <danielmicay@gmail.com>
|
||||
Signed-off-by: Kees Cook <keescook@chromium.org>
|
||||
Signed-off-by: Borislav Petkov <bp@suse.de>
|
||||
Link: https://lore.kernel.org/lkml/CA+DvKQ+bp7Y7gmaVhacjv9uF6Ar-o4tet872h4Q8RPYPJjcJQA@mail.gmail.com/
|
||||
(cherry picked from commit 712f210a457d9c32414df246a72781550bc23ef6)
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/include/asm/microcode.h | 1 +
|
||||
arch/x86/kernel/cpu/microcode/amd.c | 3 ++-
|
||||
2 files changed, 3 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
|
||||
index 67051a58a18b..629330986955 100644
|
||||
--- a/arch/x86/include/asm/microcode.h
|
||||
+++ b/arch/x86/include/asm/microcode.h
|
||||
@@ -9,6 +9,7 @@
|
||||
struct ucode_patch {
|
||||
struct list_head plist;
|
||||
void *data; /* Intel uses only this one */
|
||||
+ unsigned int size;
|
||||
u32 patch_id;
|
||||
u16 equiv_cpu;
|
||||
};
|
||||
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
|
||||
index 6ab27650f8a7..030f69f93c00 100644
|
||||
--- a/arch/x86/kernel/cpu/microcode/amd.c
|
||||
+++ b/arch/x86/kernel/cpu/microcode/amd.c
|
||||
@@ -791,6 +791,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
|
||||
kfree(patch);
|
||||
return -EINVAL;
|
||||
}
|
||||
+ patch->size = *patch_size;
|
||||
|
||||
mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
|
||||
proc_id = mc_hdr->processor_rev_id;
|
||||
@@ -868,7 +869,7 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
|
||||
}
|
||||
|
||||
memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
|
||||
- memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
|
||||
+ memcpy(amd_ucode_patch, p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
|
||||
|
||||
return ret;
|
||||
}
|
||||
--
|
||||
2.39.3
|
||||
|
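The fix above follows a general pattern: record the size you asked for at allocation time and use that for later copies, rather than asking the allocator how large the bucket turned out to be (ksize()), which can exceed the initialized region. A small user-space sketch of the same idea; make_patch() and the sizes are illustrative, not the kernel API:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define PATCH_MAX_SIZE 4096

struct ucode_patch {
	void *data;
	unsigned int size;	/* requested allocation size, recorded explicitly */
};

static struct ucode_patch *make_patch(const void *blob, unsigned int blob_size)
{
	struct ucode_patch *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	p->data = malloc(blob_size);
	if (!p->data) {
		free(p);
		return NULL;
	}
	memcpy(p->data, blob, blob_size);
	p->size = blob_size;		/* remember exactly how many bytes are valid */
	return p;
}

int main(void)
{
	unsigned char blob[100] = { 0xaa };
	unsigned char staging[PATCH_MAX_SIZE] = { 0 };
	struct ucode_patch *p = make_patch(blob, sizeof(blob));

	if (!p)
		return 1;
	/* Copy only the bytes known to be initialized, never the allocator's bucket size. */
	memcpy(staging, p->data, p->size < PATCH_MAX_SIZE ? p->size : PATCH_MAX_SIZE);
	printf("copied %u bytes\n", p->size);
	free(p->data);
	free(p);
	return 0;
}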
111
SOURCES/1037-x86-microcode-AMD-Fix-mixed-steppings-support.patch
Normal file
@ -0,0 +1,111 @@
|
||||
From 34d3d5458f58c88954bb3087219610f35e2b5c37 Mon Sep 17 00:00:00 2001
|
||||
From: "Borislav Petkov (AMD)" <bp@alien8.de>
|
||||
Date: Thu, 26 Jan 2023 16:26:17 +0100
|
||||
Subject: [PATCH 23/36] x86/microcode/AMD: Fix mixed steppings support
|
||||
|
||||
The AMD side of the loader has always claimed to support mixed
|
||||
steppings. But somewhere along the way, it broke that by assuming that
|
||||
the cached patch blob is a single one instead of it being one per
|
||||
*node*.
|
||||
|
||||
So turn it into a per-node one so that each node can stash the blob
|
||||
relevant for it.
|
||||
|
||||
[ NB: The Fixes tag is not exactly the correct one but it is good
|
||||
enough. ]
|
||||
|
||||
Fixes: fe055896c040 ("x86/microcode: Merge the early microcode loader")
|
||||
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
|
||||
Cc: <stable@kernel.org> # 2355370cd941 ("x86/microcode/amd: Remove load_microcode_amd()'s bsp parameter")
|
||||
Cc: <stable@kernel.org> # a5ad92134bd1 ("x86/microcode/AMD: Add a @cpu parameter to the reloading functions")
|
||||
Link: https://lore.kernel.org/r/20230130161709.11615-4-bp@alien8.de
|
||||
(cherry picked from commit 7ff6edf4fef38ab404ee7861f257e28eaaeed35f)
|
||||
|
||||
CVE: CVE-2023-20593
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/kernel/cpu/microcode/amd.c | 34 ++++++++++++++++++-----------
|
||||
1 file changed, 21 insertions(+), 13 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
|
||||
index 030f69f93c00..d1d31011b8fa 100644
|
||||
--- a/arch/x86/kernel/cpu/microcode/amd.c
|
||||
+++ b/arch/x86/kernel/cpu/microcode/amd.c
|
||||
@@ -55,7 +55,9 @@ struct cont_desc {
|
||||
};
|
||||
|
||||
static u32 ucode_new_rev;
|
||||
-static u8 amd_ucode_patch[PATCH_MAX_SIZE];
|
||||
+
|
||||
+/* One blob per node. */
|
||||
+static u8 amd_ucode_patch[MAX_NUMNODES][PATCH_MAX_SIZE];
|
||||
|
||||
/*
|
||||
* Microcode patch container file is prepended to the initrd in cpio
|
||||
@@ -428,7 +430,7 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size, boo
|
||||
patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
|
||||
#else
|
||||
new_rev = &ucode_new_rev;
|
||||
- patch = &amd_ucode_patch;
|
||||
+ patch = &amd_ucode_patch[0];
|
||||
#endif
|
||||
|
||||
desc.cpuid_1_eax = cpuid_1_eax;
|
||||
@@ -574,10 +576,10 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
|
||||
|
||||
void reload_ucode_amd(unsigned int cpu)
|
||||
{
|
||||
- struct microcode_amd *mc;
|
||||
u32 rev, dummy __always_unused;
|
||||
+ struct microcode_amd *mc;
|
||||
|
||||
- mc = (struct microcode_amd *)amd_ucode_patch;
|
||||
+ mc = (struct microcode_amd *)amd_ucode_patch[cpu_to_node(cpu)];
|
||||
|
||||
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
|
||||
|
||||
@@ -846,6 +848,8 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
|
||||
|
||||
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
|
||||
{
|
||||
+ struct cpuinfo_x86 *c;
|
||||
+ unsigned int nid, cpu;
|
||||
struct ucode_patch *p;
|
||||
enum ucode_state ret;
|
||||
|
||||
@@ -858,18 +862,22 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
|
||||
return ret;
|
||||
}
|
||||
|
||||
- p = find_patch(0);
|
||||
- if (!p) {
|
||||
- return ret;
|
||||
- } else {
|
||||
- if (boot_cpu_data.microcode >= p->patch_id)
|
||||
- return ret;
|
||||
+ for_each_node(nid) {
|
||||
+ cpu = cpumask_first(cpumask_of_node(nid));
|
||||
+ c = &cpu_data(cpu);
|
||||
+
|
||||
+ p = find_patch(cpu);
|
||||
+ if (!p)
|
||||
+ continue;
|
||||
+
|
||||
+ if (c->microcode >= p->patch_id)
|
||||
+ continue;
|
||||
|
||||
ret = UCODE_NEW;
|
||||
- }
|
||||
|
||||
- memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
|
||||
- memcpy(amd_ucode_patch, p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
|
||||
+ memset(&amd_ucode_patch[nid], 0, PATCH_MAX_SIZE);
|
||||
+ memcpy(&amd_ucode_patch[nid], p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
|
||||
+ }
|
||||
|
||||
return ret;
|
||||
}
|
||||
--
|
||||
2.39.3
|
||||
|
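The mixed-steppings fix boils down to indexing the cached blob by NUMA node instead of assuming a single global copy. A rough sketch of that per-node stash in plain C; node_of() is a stand-in for cpu_to_node() and MAX_NODES and the buffer size are illustrative:

#include <stdio.h>
#include <string.h>

#define MAX_NODES	4
#define PATCH_MAX_SIZE	64

/* One cached microcode blob per node, as in the fix above. */
static unsigned char node_patch[MAX_NODES][PATCH_MAX_SIZE];

/* Stand-in for cpu_to_node(): map a CPU number onto its node. */
static int node_of(int cpu)
{
	return (cpu / 8) % MAX_NODES;	/* pretend there are 8 CPUs per node */
}

static void stash_patch_for_node(int node, const void *data, size_t size)
{
	memset(node_patch[node], 0, PATCH_MAX_SIZE);
	memcpy(node_patch[node], data, size < PATCH_MAX_SIZE ? size : PATCH_MAX_SIZE);
}

static const unsigned char *patch_for_cpu(int cpu)
{
	return node_patch[node_of(cpu)];	/* each CPU sees its own node's blob */
}

int main(void)
{
	stash_patch_for_node(0, "stepping-A-blob", 16);
	stash_patch_for_node(1, "stepping-B-blob", 16);
	printf("cpu 2  -> %s\n", patch_for_cpu(2));	/* node 0 */
	printf("cpu 10 -> %s\n", patch_for_cpu(10));	/* node 1 */
	return 0;
}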
@ -0,0 +1,40 @@
|
||||
From c6dc156a35536e30bb96df8a1b8c93cb16bd219b Mon Sep 17 00:00:00 2001
|
||||
From: "Borislav Petkov (AMD)" <bp@alien8.de>
|
||||
Date: Mon, 30 Jan 2023 13:48:04 +0100
|
||||
Subject: [PATCH 24/36] x86/microcode/core: Return an error only when necessary
|
||||
|
||||
Return an error from the late loading function which is run on each CPU
|
||||
only when an error has actually been encountered during the update.
|
||||
|
||||
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
|
||||
Link: https://lore.kernel.org/r/20230130161709.11615-5-bp@alien8.de
|
||||
(cherry picked from commit f33e0c893b22bf94d7985f1f2aa3872237560c74)
|
||||
|
||||
CVE: CVE-2023-20593
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/kernel/cpu/microcode/core.c | 6 +++---
|
||||
1 file changed, 3 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
|
||||
index 681df41e5b69..23b88e66fa7c 100644
|
||||
--- a/arch/x86/kernel/cpu/microcode/core.c
|
||||
+++ b/arch/x86/kernel/cpu/microcode/core.c
|
||||
@@ -428,10 +428,10 @@ static int __reload_late(void *info)
|
||||
goto wait_for_siblings;
|
||||
|
||||
if (err >= UCODE_NFOUND) {
|
||||
- if (err == UCODE_ERROR)
|
||||
+ if (err == UCODE_ERROR) {
|
||||
pr_warn("Error reloading microcode on CPU %d\n", cpu);
|
||||
-
|
||||
- ret = -1;
|
||||
+ ret = -1;
|
||||
+ }
|
||||
}
|
||||
|
||||
wait_for_siblings:
|
||||
--
|
||||
2.39.3
|
||||
|
225
SOURCES/1039-x86-apic-Don-t-disable-x2APIC-if-locked.patch
Normal file
@ -0,0 +1,225 @@
|
||||
From ce57ab2e87f50d1176abea086f4aad38ba87886b Mon Sep 17 00:00:00 2001
|
||||
From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
|
||||
Date: Tue, 16 Aug 2022 16:19:42 -0700
|
||||
Subject: [PATCH 25/36] x86/apic: Don't disable x2APIC if locked
|
||||
|
||||
The APIC supports two modes, legacy APIC (or xAPIC), and Extended APIC
|
||||
(or x2APIC). X2APIC mode is mostly compatible with legacy APIC, but
|
||||
it disables the memory-mapped APIC interface in favor of one that uses
|
||||
MSRs. The APIC mode is controlled by the EXT bit in the APIC MSR.
|
||||
|
||||
The MMIO/xAPIC interface has some problems, most notably the APIC LEAK
|
||||
[1]. This bug allows an attacker to use the APIC MMIO interface to
|
||||
extract data from the SGX enclave.
|
||||
|
||||
Introduce support for a new feature that will allow the BIOS to lock
|
||||
the APIC in x2APIC mode. If the APIC is locked in x2APIC mode and the
|
||||
kernel tries to disable the APIC or revert to legacy APIC mode a GP
|
||||
fault will occur.
|
||||
|
||||
Introduce support for a new MSR (IA32_XAPIC_DISABLE_STATUS) and handle
|
||||
the new locked mode when the LEGACY_XAPIC_DISABLED bit is set by
|
||||
preventing the kernel from trying to disable the x2APIC.
|
||||
|
||||
On platforms with the IA32_XAPIC_DISABLE_STATUS MSR, if SGX or TDX are
|
||||
enabled, the LEGACY_XAPIC_DISABLED bit will be set by the BIOS. If the
|
||||
legacy APIC is required, then SGX and TDX need to be disabled in the
|
||||
BIOS.
|
||||
|
||||
[1]: https://aepicleak.com/aepicleak.pdf
|
||||
|
||||
Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
|
||||
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
|
||||
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
|
||||
Tested-by: Neelima Krishnan <neelima.krishnan@intel.com>
|
||||
Link: https://lkml.kernel.org/r/20220816231943.1152579-1-daniel.sneddon@linux.intel.com
|
||||
(cherry picked from commit b8d1d163604bd1e600b062fb00de5dc42baa355f)
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
.../admin-guide/kernel-parameters.txt | 4 ++
|
||||
arch/x86/Kconfig | 7 ++-
|
||||
arch/x86/include/asm/cpu.h | 2 +
|
||||
arch/x86/include/asm/msr-index.h | 13 ++++++
|
||||
arch/x86/kernel/apic/apic.c | 44 +++++++++++++++++--
|
||||
5 files changed, 65 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
|
||||
index 25f0b3a543e9..73a92fb9e3c6 100644
|
||||
--- a/Documentation/admin-guide/kernel-parameters.txt
|
||||
+++ b/Documentation/admin-guide/kernel-parameters.txt
|
||||
@@ -3355,6 +3355,10 @@
|
||||
|
||||
nox2apic [X86-64,APIC] Do not enable x2APIC mode.
|
||||
|
||||
+ NOTE: this parameter will be ignored on systems with the
|
||||
+ LEGACY_XAPIC_DISABLED bit set in the
|
||||
+ IA32_XAPIC_DISABLE_STATUS MSR.
|
||||
+
|
||||
cpu0_hotplug [X86] Turn on CPU0 hotplug feature when
|
||||
CONFIG_BOOTPARAM_HOTPLUG_CPU0 is off.
|
||||
Some features depend on CPU0. Known dependencies are:
|
||||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
|
||||
index 5af5427c50ae..585893a349b6 100644
|
||||
--- a/arch/x86/Kconfig
|
||||
+++ b/arch/x86/Kconfig
|
||||
@@ -449,6 +449,11 @@ config X86_X2APIC
|
||||
This allows 32-bit apic IDs (so it can support very large systems),
|
||||
and accesses the local apic via MSRs not via mmio.
|
||||
|
||||
+ Some Intel systems circa 2022 and later are locked into x2APIC mode
|
||||
+ and can not fall back to the legacy APIC modes if SGX or TDX are
|
||||
+ enabled in the BIOS. They will be unable to boot without enabling
|
||||
+ this option.
|
||||
+
|
||||
If you don't know what to do here, say N.
|
||||
|
||||
config X86_MPPARSE
|
||||
@@ -1979,7 +1984,7 @@ endchoice
|
||||
|
||||
config X86_SGX
|
||||
bool "Software Guard eXtensions (SGX)"
|
||||
- depends on X86_64 && CPU_SUP_INTEL
|
||||
+ depends on X86_64 && CPU_SUP_INTEL && X86_X2APIC
|
||||
depends on CRYPTO=y
|
||||
depends on CRYPTO_SHA256=y
|
||||
select SRCU
|
||||
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
|
||||
index 2d62a3f122ee..66f9715b01c9 100644
|
||||
--- a/arch/x86/include/asm/cpu.h
|
||||
+++ b/arch/x86/include/asm/cpu.h
|
||||
@@ -92,4 +92,6 @@ static inline bool intel_cpu_signatures_match(unsigned int s1, unsigned int p1,
|
||||
return p1 & p2;
|
||||
}
|
||||
|
||||
+extern u64 x86_read_arch_cap_msr(void);
|
||||
+
|
||||
#endif /* _ASM_X86_CPU_H */
|
||||
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
|
||||
index e30f8e289deb..9b0d407dd1ae 100644
|
||||
--- a/arch/x86/include/asm/msr-index.h
|
||||
+++ b/arch/x86/include/asm/msr-index.h
|
||||
@@ -153,6 +153,11 @@
|
||||
* Return Stack Buffer Predictions.
|
||||
*/
|
||||
|
||||
+#define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
|
||||
+ * IA32_XAPIC_DISABLE_STATUS MSR
|
||||
+ * supported
|
||||
+ */
|
||||
+
|
||||
#define MSR_IA32_FLUSH_CMD 0x0000010b
|
||||
#define L1D_FLUSH BIT(0) /*
|
||||
* Writeback and invalidate the
|
||||
@@ -1015,4 +1020,12 @@
|
||||
#define MSR_IA32_HW_FEEDBACK_PTR 0x17d0
|
||||
#define MSR_IA32_HW_FEEDBACK_CONFIG 0x17d1
|
||||
|
||||
+/* x2APIC locked status */
|
||||
+#define MSR_IA32_XAPIC_DISABLE_STATUS 0xBD
|
||||
+#define LEGACY_XAPIC_DISABLED BIT(0) /*
|
||||
+ * x2APIC mode is locked and
|
||||
+ * disabling x2APIC will cause
|
||||
+ * a #GP
|
||||
+ */
|
||||
+
|
||||
#endif /* _ASM_X86_MSR_INDEX_H */
|
||||
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
|
||||
index b191e575e53c..d73f1037961a 100644
|
||||
--- a/arch/x86/kernel/apic/apic.c
|
||||
+++ b/arch/x86/kernel/apic/apic.c
|
||||
@@ -57,6 +57,7 @@
|
||||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/intel-family.h>
|
||||
#include <asm/irq_regs.h>
|
||||
+#include <asm/cpu.h>
|
||||
|
||||
unsigned int num_processors;
|
||||
|
||||
@@ -1734,11 +1735,26 @@ EXPORT_SYMBOL_GPL(x2apic_mode);
|
||||
|
||||
enum {
|
||||
X2APIC_OFF,
|
||||
- X2APIC_ON,
|
||||
X2APIC_DISABLED,
|
||||
+ /* All states below here have X2APIC enabled */
|
||||
+ X2APIC_ON,
|
||||
+ X2APIC_ON_LOCKED
|
||||
};
|
||||
static int x2apic_state;
|
||||
|
||||
+static bool x2apic_hw_locked(void)
|
||||
+{
|
||||
+ u64 ia32_cap;
|
||||
+ u64 msr;
|
||||
+
|
||||
+ ia32_cap = x86_read_arch_cap_msr();
|
||||
+ if (ia32_cap & ARCH_CAP_XAPIC_DISABLE) {
|
||||
+ rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
|
||||
+ return (msr & LEGACY_XAPIC_DISABLED);
|
||||
+ }
|
||||
+ return false;
|
||||
+}
|
||||
+
|
||||
static void __x2apic_disable(void)
|
||||
{
|
||||
u64 msr;
|
||||
@@ -1776,6 +1792,10 @@ static int __init setup_nox2apic(char *str)
|
||||
apicid);
|
||||
return 0;
|
||||
}
|
||||
+ if (x2apic_hw_locked()) {
|
||||
+ pr_warn("APIC locked in x2apic mode, can't disable\n");
|
||||
+ return 0;
|
||||
+ }
|
||||
pr_warn("x2apic already enabled.\n");
|
||||
__x2apic_disable();
|
||||
}
|
||||
@@ -1790,10 +1810,18 @@ early_param("nox2apic", setup_nox2apic);
|
||||
void x2apic_setup(void)
|
||||
{
|
||||
/*
|
||||
- * If x2apic is not in ON state, disable it if already enabled
|
||||
+ * Try to make the AP's APIC state match that of the BSP, but if the
|
||||
+ * BSP is unlocked and the AP is locked then there is a state mismatch.
|
||||
+ * Warn about the mismatch in case a GP fault occurs due to a locked AP
|
||||
+ * trying to be turned off.
|
||||
+ */
|
||||
+ if (x2apic_state != X2APIC_ON_LOCKED && x2apic_hw_locked())
|
||||
+ pr_warn("x2apic lock mismatch between BSP and AP.\n");
|
||||
+ /*
|
||||
+ * If x2apic is not in ON or LOCKED state, disable it if already enabled
|
||||
* from BIOS.
|
||||
*/
|
||||
- if (x2apic_state != X2APIC_ON) {
|
||||
+ if (x2apic_state < X2APIC_ON) {
|
||||
__x2apic_disable();
|
||||
return;
|
||||
}
|
||||
@@ -1814,6 +1842,11 @@ static __init void x2apic_disable(void)
|
||||
if (x2apic_id >= 255)
|
||||
panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
|
||||
|
||||
+ if (x2apic_hw_locked()) {
|
||||
+ pr_warn("Cannot disable locked x2apic, id: %08x\n", x2apic_id);
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
__x2apic_disable();
|
||||
register_lapic_address(mp_lapic_addr);
|
||||
}
|
||||
@@ -1872,7 +1905,10 @@ void __init check_x2apic(void)
|
||||
if (x2apic_enabled()) {
|
||||
pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
|
||||
x2apic_mode = 1;
|
||||
- x2apic_state = X2APIC_ON;
|
||||
+ if (x2apic_hw_locked())
|
||||
+ x2apic_state = X2APIC_ON_LOCKED;
|
||||
+ else
|
||||
+ x2apic_state = X2APIC_ON;
|
||||
} else if (!boot_cpu_has(X86_FEATURE_X2APIC)) {
|
||||
x2apic_state = X2APIC_DISABLED;
|
||||
}
|
||||
--
|
||||
2.39.3
|
||||
|
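The locked-mode detection added above is two MSR reads: check the enumeration bit in IA32_ARCH_CAPABILITIES and, if present, the LEGACY_XAPIC_DISABLED bit in IA32_XAPIC_DISABLE_STATUS. A sketch of the same decision logic follows; since a real rdmsr needs ring 0 (or /dev/cpu/*/msr), the two MSR values are faked as variables here and only the bit definitions are taken from the patch:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define ARCH_CAP_XAPIC_DISABLE	(1ull << 21)	/* IA32_XAPIC_DISABLE_STATUS MSR exists */
#define LEGACY_XAPIC_DISABLED	(1ull << 0)	/* legacy xAPIC mode is locked off */

/* Stand-ins for rdmsrl(); in the kernel these are real MSR reads. */
static uint64_t fake_arch_capabilities = ARCH_CAP_XAPIC_DISABLE;
static uint64_t fake_xapic_disable_status = LEGACY_XAPIC_DISABLED;

static bool x2apic_hw_locked(void)
{
	/* Only look at the status MSR if the CPU says it exists. */
	if (!(fake_arch_capabilities & ARCH_CAP_XAPIC_DISABLE))
		return false;

	return fake_xapic_disable_status & LEGACY_XAPIC_DISABLED;
}

int main(void)
{
	if (x2apic_hw_locked())
		printf("APIC locked in x2APIC mode, can't disable\n");
	else
		printf("x2APIC may be disabled\n");
	return 0;
}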
@ -0,0 +1,186 @@
|
||||
From 53173ac5885435f751b54a87710e6111a51a819c Mon Sep 17 00:00:00 2001
|
||||
From: "Borislav Petkov (AMD)" <bp@alien8.de>
|
||||
Date: Sat, 15 Jul 2023 13:31:32 +0200
|
||||
Subject: [PATCH 26/36] x86/cpu/amd: Move the errata checking functionality up
|
||||
|
||||
Avoid new and remove old forward declarations.
|
||||
|
||||
No functional changes.
|
||||
|
||||
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
|
||||
(cherry picked from commit 8b6f687743dacce83dbb0c7cfacf88bab00f808a)
|
||||
|
||||
CVE: CVE-2023-20593
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/kernel/cpu/amd.c | 139 ++++++++++++++++++--------------------
|
||||
1 file changed, 67 insertions(+), 72 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
|
||||
index 6eb7923f5f08..2406a71a1eb6 100644
|
||||
--- a/arch/x86/kernel/cpu/amd.c
|
||||
+++ b/arch/x86/kernel/cpu/amd.c
|
||||
@@ -26,11 +26,6 @@
|
||||
|
||||
#include "cpu.h"
|
||||
|
||||
-static const int amd_erratum_383[];
|
||||
-static const int amd_erratum_400[];
|
||||
-static const int amd_erratum_1054[];
|
||||
-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
|
||||
-
|
||||
/*
|
||||
* nodes_per_socket: Stores the number of nodes per socket.
|
||||
* Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
|
||||
@@ -38,6 +33,73 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
|
||||
*/
|
||||
static u32 nodes_per_socket = 1;
|
||||
|
||||
+/*
|
||||
+ * AMD errata checking
|
||||
+ *
|
||||
+ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
|
||||
+ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
|
||||
+ * have an OSVW id assigned, which it takes as first argument. Both take a
|
||||
+ * variable number of family-specific model-stepping ranges created by
|
||||
+ * AMD_MODEL_RANGE().
|
||||
+ *
|
||||
+ * Example:
|
||||
+ *
|
||||
+ * const int amd_erratum_319[] =
|
||||
+ * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
|
||||
+ * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
|
||||
+ * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
|
||||
+ */
|
||||
+
|
||||
+#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
|
||||
+#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
|
||||
+#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
|
||||
+ ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
|
||||
+#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
|
||||
+#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
|
||||
+#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
|
||||
+
|
||||
+static const int amd_erratum_400[] =
|
||||
+ AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
|
||||
+ AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
|
||||
+
|
||||
+static const int amd_erratum_383[] =
|
||||
+ AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
|
||||
+
|
||||
+/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
|
||||
+static const int amd_erratum_1054[] =
|
||||
+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
|
||||
+
|
||||
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
|
||||
+{
|
||||
+ int osvw_id = *erratum++;
|
||||
+ u32 range;
|
||||
+ u32 ms;
|
||||
+
|
||||
+ if (osvw_id >= 0 && osvw_id < 65536 &&
|
||||
+ cpu_has(cpu, X86_FEATURE_OSVW)) {
|
||||
+ u64 osvw_len;
|
||||
+
|
||||
+ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
|
||||
+ if (osvw_id < osvw_len) {
|
||||
+ u64 osvw_bits;
|
||||
+
|
||||
+ rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
|
||||
+ osvw_bits);
|
||||
+ return osvw_bits & (1ULL << (osvw_id & 0x3f));
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ /* OSVW unavailable or ID unknown, match family-model-stepping range */
|
||||
+ ms = (cpu->x86_model << 4) | cpu->x86_stepping;
|
||||
+ while ((range = *erratum++))
|
||||
+ if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
|
||||
+ (ms >= AMD_MODEL_RANGE_START(range)) &&
|
||||
+ (ms <= AMD_MODEL_RANGE_END(range)))
|
||||
+ return true;
|
||||
+
|
||||
+ return false;
|
||||
+}
|
||||
+
|
||||
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
|
||||
{
|
||||
u32 gprs[8] = { 0 };
|
||||
@@ -1079,73 +1141,6 @@ static const struct cpu_dev amd_cpu_dev = {
|
||||
|
||||
cpu_dev_register(amd_cpu_dev);
|
||||
|
||||
-/*
|
||||
- * AMD errata checking
|
||||
- *
|
||||
- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
|
||||
- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
|
||||
- * have an OSVW id assigned, which it takes as first argument. Both take a
|
||||
- * variable number of family-specific model-stepping ranges created by
|
||||
- * AMD_MODEL_RANGE().
|
||||
- *
|
||||
- * Example:
|
||||
- *
|
||||
- * const int amd_erratum_319[] =
|
||||
- * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
|
||||
- * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
|
||||
- * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
|
||||
- */
|
||||
-
|
||||
-#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
|
||||
-#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
|
||||
-#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
|
||||
- ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
|
||||
-#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
|
||||
-#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
|
||||
-#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
|
||||
-
|
||||
-static const int amd_erratum_400[] =
|
||||
- AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
|
||||
- AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
|
||||
-
|
||||
-static const int amd_erratum_383[] =
|
||||
- AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
|
||||
-
|
||||
-/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
|
||||
-static const int amd_erratum_1054[] =
|
||||
- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
|
||||
-
|
||||
-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
|
||||
-{
|
||||
- int osvw_id = *erratum++;
|
||||
- u32 range;
|
||||
- u32 ms;
|
||||
-
|
||||
- if (osvw_id >= 0 && osvw_id < 65536 &&
|
||||
- cpu_has(cpu, X86_FEATURE_OSVW)) {
|
||||
- u64 osvw_len;
|
||||
-
|
||||
- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
|
||||
- if (osvw_id < osvw_len) {
|
||||
- u64 osvw_bits;
|
||||
-
|
||||
- rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
|
||||
- osvw_bits);
|
||||
- return osvw_bits & (1ULL << (osvw_id & 0x3f));
|
||||
- }
|
||||
- }
|
||||
-
|
||||
- /* OSVW unavailable or ID unknown, match family-model-stepping range */
|
||||
- ms = (cpu->x86_model << 4) | cpu->x86_stepping;
|
||||
- while ((range = *erratum++))
|
||||
- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
|
||||
- (ms >= AMD_MODEL_RANGE_START(range)) &&
|
||||
- (ms <= AMD_MODEL_RANGE_END(range)))
|
||||
- return true;
|
||||
-
|
||||
- return false;
|
||||
-}
|
||||
-
|
||||
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);
|
||||
|
||||
static unsigned int amd_msr_dr_addr_masks[] = {
|
||||
--
|
||||
2.39.3
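
For reference, the AMD_MODEL_RANGE() packing used by the erratum table moved above can be exercised in isolation. A minimal userspace sketch follows; the macros are copied from the hunk above, while main() and the sample values are illustrative and not part of the patch:

/*
 * Illustrative sketch, not part of the patch series: exercises the
 * AMD_MODEL_RANGE() packing from the hunk above in plain userspace C.
 */
#include <stdio.h>
#include <stdbool.h>

#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
        ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)

/* Family/model/stepping fallback check, as in cpu_has_amd_erratum(). */
static bool in_range(unsigned int family, unsigned int model,
                     unsigned int stepping, int range)
{
        unsigned int ms = (model << 4) | stepping;

        return family == AMD_MODEL_RANGE_FAMILY(range) &&
               ms >= AMD_MODEL_RANGE_START(range) &&
               ms <= AMD_MODEL_RANGE_END(range);
}

int main(void)
{
        /* Zen2 window used by the Zenbleed erratum later in this series. */
        int range = AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf);

        /* Model 0x31 stepping 0 lies inside 0x300..0x4ff -> prints 1. */
        printf("0x17/0x31/0: %d\n", in_range(0x17, 0x31, 0x0, range));
        /* Model 0x60 stepping 0 lies outside the window  -> prints 0. */
        printf("0x17/0x60/0: %d\n", in_range(0x17, 0x60, 0x0, range));
        return 0;
}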
|
||||
|
@ -0,0 +1,63 @@
|
||||
From d2d2944b80b99ecdcb1c10b0704bbdf78fbf5f79 Mon Sep 17 00:00:00 2001
|
||||
From: Ashok Raj <ashok.raj@intel.com>
|
||||
Date: Mon, 28 Nov 2022 09:24:51 -0800
|
||||
Subject: [PATCH 27/36] x86/cpu: Remove redundant extern
|
||||
x86_read_arch_cap_msr()
|
||||
|
||||
The prototype for the x86_read_arch_cap_msr() function has moved to
|
||||
arch/x86/include/asm/cpu.h - kill the redundant definition in arch/x86/kernel/cpu/cpu.h
|
||||
and include the header.
|
||||
|
||||
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
|
||||
Signed-off-by: Ingo Molnar <mingo@kernel.org>
|
||||
Reviewed-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
|
||||
Link: https://lore.kernel.org/r/20221128172451.792595-1-ashok.raj@intel.com
|
||||
(cherry picked from commit bb5525a50601f8be7d0ffb04740e1714506e60c4)
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/kernel/cpu/bugs.c | 1 +
|
||||
arch/x86/kernel/cpu/cpu.h | 2 --
|
||||
arch/x86/kernel/cpu/tsx.c | 1 +
|
||||
3 files changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
|
||||
index 848a0cce2243..5dc79427d047 100644
|
||||
--- a/arch/x86/kernel/cpu/bugs.c
|
||||
+++ b/arch/x86/kernel/cpu/bugs.c
|
||||
@@ -33,6 +33,7 @@
|
||||
#include <asm/e820/api.h>
|
||||
#include <asm/hypervisor.h>
|
||||
#include <asm/tlbflush.h>
|
||||
+#include <asm/cpu.h>
|
||||
|
||||
#include "cpu.h"
|
||||
|
||||
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
|
||||
index 54afd004624b..1add3618f766 100644
|
||||
--- a/arch/x86/kernel/cpu/cpu.h
|
||||
+++ b/arch/x86/kernel/cpu/cpu.h
|
||||
@@ -84,8 +84,6 @@ unsigned int aperfmperf_get_khz(int cpu);
|
||||
extern void x86_spec_ctrl_setup_ap(void);
|
||||
extern void update_srbds_msr(void);
|
||||
|
||||
-extern u64 x86_read_arch_cap_msr(void);
|
||||
-
|
||||
#ifdef CONFIG_IA32_FEAT_CTL
|
||||
void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
|
||||
#endif
|
||||
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
|
||||
index 39b3e65c9c8d..73b7226d042b 100644
|
||||
--- a/arch/x86/kernel/cpu/tsx.c
|
||||
+++ b/arch/x86/kernel/cpu/tsx.c
|
||||
@@ -11,6 +11,7 @@
|
||||
#include <linux/cpufeature.h>
|
||||
|
||||
#include <asm/cmdline.h>
|
||||
+#include <asm/cpu.h>
|
||||
|
||||
#include "cpu.h"
|
||||
|
||||
--
|
||||
2.39.3
|
||||
|
@ -0,0 +1,77 @@
|
||||
From 58f7b0c78ff579aef5a470804ff76e4ed913056b Mon Sep 17 00:00:00 2001
|
||||
From: Kim Phillips <kim.phillips@amd.com>
|
||||
Date: Tue, 10 Jan 2023 16:46:37 -0600
|
||||
Subject: [PATCH 28/36] x86/cpu, kvm: Add support for CPUID_80000021_EAX
|
||||
|
||||
Add support for CPUID leaf 80000021, EAX. The majority of the features will be
|
||||
used in the kernel and thus a separate leaf is appropriate.
|
||||
|
||||
Include KVM's reverse_cpuid entry because features are used by VM guests, too.
|
||||
|
||||
[ bp: Massage commit message. ]
|
||||
|
||||
Signed-off-by: Kim Phillips <kim.phillips@amd.com>
|
||||
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
|
||||
Acked-by: Sean Christopherson <seanjc@google.com>
|
||||
Link: https://lore.kernel.org/r/20230124163319.2277355-2-kim.phillips@amd.com
|
||||
(cherry picked from commit 8415a74852d7c24795007ee9862d25feb519007c)
|
||||
|
||||
The conflicts in arch/x86/include/asm/cpufeature.h,
|
||||
arch/x86/include/asm/cpufeatures.h, arch/x86/include/asm/disabled-features.h
|
||||
and arch/x86/include/asm/required-features.h are due to
|
||||
the below commit added for kABI compatibility:
|
||||
Repo: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/:
|
||||
commit: 219a085bc8ba328d65a1e12bc1886415525a38d9
|
||||
|
||||
The conflict in arch/x86/kvm/reverse_cpuid.h is due to
|
||||
upstream commit 24d74b9f5f2a972ac9228372adeac62b2dc10ea2
|
||||
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/include/asm/cpufeature.h | 1 +
|
||||
arch/x86/kernel/cpu/common.c | 3 +++
|
||||
arch/x86/kvm/reverse_cpuid.h | 1 +
|
||||
3 files changed, 5 insertions(+)
|
||||
|
||||
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
|
||||
index 925722baef7c..3b2473cdf2ad 100644
|
||||
--- a/arch/x86/include/asm/cpufeature.h
|
||||
+++ b/arch/x86/include/asm/cpufeature.h
|
||||
@@ -31,6 +31,7 @@ enum cpuid_leafs
|
||||
CPUID_8000_0007_EBX,
|
||||
CPUID_7_EDX,
|
||||
CPUID_8000_001F_EAX,
|
||||
+ CPUID_8000_0021_EAX,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_X86_FEATURE_NAMES
|
||||
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
|
||||
index 22763a691b7b..a8dfd75588cd 100644
|
||||
--- a/arch/x86/kernel/cpu/common.c
|
||||
+++ b/arch/x86/kernel/cpu/common.c
|
||||
@@ -962,6 +962,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
|
||||
if (c->extended_cpuid_level >= 0x8000001f)
|
||||
c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
|
||||
|
||||
+ if (c->extended_cpuid_level >= 0x80000021)
|
||||
+ c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021);
|
||||
+
|
||||
init_scattered_cpuid_features(c);
|
||||
init_speculation_control(c);
|
||||
|
||||
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
|
||||
index a19d473d0184..7eeade35a425 100644
|
||||
--- a/arch/x86/kvm/reverse_cpuid.h
|
||||
+++ b/arch/x86/kvm/reverse_cpuid.h
|
||||
@@ -48,6 +48,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
|
||||
[CPUID_7_1_EAX] = { 7, 1, CPUID_EAX},
|
||||
[CPUID_12_EAX] = {0x00000012, 0, CPUID_EAX},
|
||||
[CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX},
|
||||
+ [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
|
||||
};
|
||||
|
||||
/*
|
||||
--
|
||||
2.39.3
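
The new CPUID_8000_0021_EAX capability word is only populated when the CPU's maximum extended CPUID level reaches 0x80000021. The same leaf can be probed from userspace; a small sketch assuming GCC/clang's <cpuid.h> helpers, with bit positions taken from the definitions added later in this series, shown for illustration only:

/*
 * Illustrative sketch, not part of the patch series: probe CPUID leaf
 * 0x80000021 EAX from userspace with GCC/clang's <cpuid.h> helpers.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 0x80000000 EAX holds the highest extended leaf. */
        if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
            eax < 0x80000021) {
                puts("CPUID leaf 0x80000021 not enumerated");
                return 0;
        }

        __get_cpuid_count(0x80000021, 0, &eax, &ebx, &ecx, &edx);
        printf("0x80000021 EAX = 0x%08x\n", eax);
        printf("  NO_NESTED_DATA_BP  (bit 0): %u\n",  eax       & 1);
        printf("  LFENCE serializing (bit 2): %u\n", (eax >> 2) & 1);
        printf("  AUTOIBRS           (bit 8): %u\n", (eax >> 8) & 1);
        printf("  NO_SMM_CTL_MSR     (bit 9): %u\n", (eax >> 9) & 1);
        return 0;
}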
|
||||
|
@ -0,0 +1,43 @@
|
||||
From 35b7cde8ddf1c374c533ec0de2958d2cd74e587d Mon Sep 17 00:00:00 2001
|
||||
From: Jim Mattson <jmattson@google.com>
|
||||
Date: Fri, 7 Oct 2022 15:16:44 -0700
|
||||
Subject: [PATCH 29/36] KVM: x86: Advertise that the SMM_CTL MSR is not
|
||||
supported
|
||||
|
||||
CPUID.80000021H:EAX[bit 9] indicates that the SMM_CTL MSR (0xc0010116) is
|
||||
not supported. This defeature can be advertised by KVM_GET_SUPPORTED_CPUID
|
||||
regardless of whether or not the host enumerates it; currently it will be
|
||||
included only if the host enumerates at least leaf 8000001DH, due to a
|
||||
preexisting bug in QEMU that KVM has to work around (commit f751d8eac176,
|
||||
"KVM: x86: work around QEMU issue with synthetic CPUID leaves", 2022-04-29).
|
||||
|
||||
Signed-off-by: Jim Mattson <jmattson@google.com>
|
||||
Message-Id: <20221007221644.138355-1-jmattson@google.com>
|
||||
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
|
||||
(cherry picked from commit 74bee0cad8dcd8ddec5e763c369239fc5990676a)
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/kvm/cpuid.c | 4 ++++
|
||||
1 file changed, 4 insertions(+)
|
||||
|
||||
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
|
||||
index 6e3f385a3648..3726861ae52a 100644
|
||||
--- a/arch/x86/kvm/cpuid.c
|
||||
+++ b/arch/x86/kvm/cpuid.c
|
||||
@@ -1180,8 +1180,12 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
|
||||
* Other defined bits are for MSRs that KVM does not expose:
|
||||
* EAX 3 SPCL, SMM page configuration lock
|
||||
* EAX 13 PCMSR, Prefetch control MSR
|
||||
+ *
|
||||
+ * KVM doesn't support SMM_CTL.
|
||||
+ * EAX 9 SMM_CTL MSR is not supported
|
||||
*/
|
||||
entry->eax &= BIT(0) | BIT(2) | BIT(6);
|
||||
+ entry->eax |= BIT(9);
|
||||
if (static_cpu_has(X86_FEATURE_LFENCE_RDTSC))
|
||||
entry->eax |= BIT(2);
|
||||
if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
|
||||
--
|
||||
2.39.3
|
||||
|
@ -0,0 +1,84 @@
|
||||
From d8cd9882c866de836235c5761b11e1bc4272508e Mon Sep 17 00:00:00 2001
|
||||
From: Kim Phillips <kim.phillips@amd.com>
|
||||
Date: Tue, 24 Jan 2023 10:33:13 -0600
|
||||
Subject: [PATCH 30/36] KVM: x86: Move open-coded CPUID leaf 0x80000021 EAX bit
|
||||
propagation code
|
||||
|
||||
Move code from __do_cpuid_func() to kvm_set_cpu_caps() in preparation for adding
|
||||
the features in their native leaf.
|
||||
|
||||
Also drop the bit description comments as it will be more self-describing once
|
||||
the individual features are added.
|
||||
|
||||
Whilst there, switch to using the more efficient cpu_feature_enabled() instead
|
||||
of static_cpu_has().
|
||||
|
||||
Note, LFENCE_RDTSC and "NULL selector clears base" are currently synthetic,
|
||||
Linux-defined feature flags as Linux tracking of the features predates AMD's
|
||||
definition. Keep the manual propagation of the flags from their synthetic
|
||||
counterparts until the kernel fully converts to AMD's definition, otherwise KVM
|
||||
would stop synthesizing the flags as intended.
|
||||
|
||||
Signed-off-by: Kim Phillips <kim.phillips@amd.com>
|
||||
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
|
||||
Acked-by: Sean Christopherson <seanjc@google.com>
|
||||
Link: https://lore.kernel.org/r/20230124163319.2277355-3-kim.phillips@amd.com
|
||||
(cherry picked from commit c35ac8c4bf600ee23bacb20f863aa7830efb23fb)
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/kvm/cpuid.c | 31 ++++++++++++-------------------
|
||||
1 file changed, 12 insertions(+), 19 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
|
||||
index 3726861ae52a..2ca5ac683c44 100644
|
||||
--- a/arch/x86/kvm/cpuid.c
|
||||
+++ b/arch/x86/kvm/cpuid.c
|
||||
@@ -682,6 +682,17 @@ void kvm_set_cpu_caps(void)
|
||||
0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
|
||||
F(SME_COHERENT));
|
||||
|
||||
+ kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
|
||||
+ BIT(0) /* NO_NESTED_DATA_BP */ |
|
||||
+ BIT(2) /* LFENCE Always serializing */ | 0 /* SmmPgCfgLock */ |
|
||||
+ BIT(6) /* NULL_SEL_CLR_BASE */ | 0 /* PrefetchCtlMsr */
|
||||
+ );
|
||||
+ if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
|
||||
+ kvm_cpu_caps[CPUID_8000_0021_EAX] |= BIT(2) /* LFENCE Always serializing */;
|
||||
+ if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
|
||||
+ kvm_cpu_caps[CPUID_8000_0021_EAX] |= BIT(6) /* NULL_SEL_CLR_BASE */;
|
||||
+ kvm_cpu_caps[CPUID_8000_0021_EAX] |= BIT(9) /* NO_SMM_CTL_MSR */;
|
||||
+
|
||||
kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
|
||||
F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
|
||||
F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
|
||||
@@ -1171,25 +1182,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
|
||||
break;
|
||||
case 0x80000021:
|
||||
entry->ebx = entry->ecx = entry->edx = 0;
|
||||
- /*
|
||||
- * Pass down these bits:
|
||||
- * EAX 0 NNDBP, Processor ignores nested data breakpoints
|
||||
- * EAX 2 LAS, LFENCE always serializing
|
||||
- * EAX 6 NSCB, Null selector clear base
|
||||
- *
|
||||
- * Other defined bits are for MSRs that KVM does not expose:
|
||||
- * EAX 3 SPCL, SMM page configuration lock
|
||||
- * EAX 13 PCMSR, Prefetch control MSR
|
||||
- *
|
||||
- * KVM doesn't support SMM_CTL.
|
||||
- * EAX 9 SMM_CTL MSR is not supported
|
||||
- */
|
||||
- entry->eax &= BIT(0) | BIT(2) | BIT(6);
|
||||
- entry->eax |= BIT(9);
|
||||
- if (static_cpu_has(X86_FEATURE_LFENCE_RDTSC))
|
||||
- entry->eax |= BIT(2);
|
||||
- if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
|
||||
- entry->eax |= BIT(6);
|
||||
+ cpuid_entry_override(entry, CPUID_8000_0021_EAX);
|
||||
break;
|
||||
/*Add support for Centaur's CPUID instruction*/
|
||||
case 0xC0000000:
|
||||
--
|
||||
2.39.3
|
||||
|
@ -0,0 +1,51 @@
|
||||
From a1116fd67a3178ce220ee6c2c8abe6bf792e26f2 Mon Sep 17 00:00:00 2001
|
||||
From: Kim Phillips <kim.phillips@amd.com>
|
||||
Date: Tue, 24 Jan 2023 10:33:14 -0600
|
||||
Subject: [PATCH 31/36] x86/cpu, kvm: Add the NO_NESTED_DATA_BP feature
|
||||
|
||||
The "Processor ignores nested data breakpoints" feature was being
|
||||
open-coded for KVM. Add the feature to its newly introduced CPUID leaf
|
||||
0x80000021 EAX proper.
|
||||
|
||||
Signed-off-by: Kim Phillips <kim.phillips@amd.com>
|
||||
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
|
||||
Acked-by: Sean Christopherson <seanjc@google.com>
|
||||
Link: https://lore.kernel.org/r/20230124163319.2277355-4-kim.phillips@amd.com
|
||||
(cherry picked from commit a9dc9ec5a1fafc3d2fe7a7b594eefaeaccf89a6b)
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/include/asm/cpufeatures.h | 3 +++
|
||||
arch/x86/kvm/cpuid.c | 2 +-
|
||||
2 files changed, 4 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
|
||||
index ffc806d3a863..ac6e58a79fad 100644
|
||||
--- a/arch/x86/include/asm/cpufeatures.h
|
||||
+++ b/arch/x86/include/asm/cpufeatures.h
|
||||
@@ -421,6 +421,9 @@
|
||||
#define X86_FEATURE_IBRS_EXIT_SET (22*32+ 0) /* "" Set IBRS on kernel exit */
|
||||
#define X86_FEATURE_IBRS_EXIT_SKIP (22*32+ 1) /* "" Skip SPEC_CTRL MSR write on exit */
|
||||
|
||||
+/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
|
||||
+#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */
|
||||
+
|
||||
/*
|
||||
* BUG word(s)
|
||||
*/
|
||||
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
|
||||
index 2ca5ac683c44..1a8dbad7e2d9 100644
|
||||
--- a/arch/x86/kvm/cpuid.c
|
||||
+++ b/arch/x86/kvm/cpuid.c
|
||||
@@ -683,7 +683,7 @@ void kvm_set_cpu_caps(void)
|
||||
F(SME_COHERENT));
|
||||
|
||||
kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
|
||||
- BIT(0) /* NO_NESTED_DATA_BP */ |
|
||||
+ F(NO_NESTED_DATA_BP) |
|
||||
BIT(2) /* LFENCE Always serializing */ | 0 /* SmmPgCfgLock */ |
|
||||
BIT(6) /* NULL_SEL_CLR_BASE */ | 0 /* PrefetchCtlMsr */
|
||||
);
|
||||
--
|
||||
2.39.3
|
||||
|
@ -0,0 +1,148 @@
|
||||
From 97e9891f4f6253ec70fc870e639d7e4d07b9e361 Mon Sep 17 00:00:00 2001
|
||||
From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
|
||||
Date: Wed, 30 Nov 2022 07:25:51 -0800
|
||||
Subject: [PATCH 32/36] x86/bugs: Make sure MSR_SPEC_CTRL is updated properly
|
||||
upon resume from S3
|
||||
|
||||
The "force" argument to write_spec_ctrl_current() is currently ambiguous
|
||||
as it does not guarantee the MSR write. This is due to the optimization
|
||||
that writes to the MSR happen only when the new value differs from the
|
||||
cached value.
|
||||
|
||||
This is fine in most cases, but breaks for S3 resume when the cached MSR
|
||||
value gets out of sync with the hardware MSR value due to S3 resetting
|
||||
it.
|
||||
|
||||
When x86_spec_ctrl_current is same as x86_spec_ctrl_base, the MSR write
|
||||
is skipped. Which results in SPEC_CTRL mitigations not getting restored.
|
||||
|
||||
Move the MSR write from write_spec_ctrl_current() to a new function that
|
||||
unconditionally writes to the MSR. Update the callers accordingly and
|
||||
rename functions.
|
||||
|
||||
[ bp: Rework a bit. ]
|
||||
|
||||
Fixes: caa0ff24d5d0 ("x86/bugs: Keep a per-CPU IA32_SPEC_CTRL value")
|
||||
Suggested-by: Borislav Petkov <bp@alien8.de>
|
||||
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
|
||||
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
|
||||
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Cc: <stable@kernel.org>
|
||||
Link: https://lore.kernel.org/r/806d39b0bfec2fe8f50dc5446dff20f5bb24a959.1669821572.git.pawan.kumar.gupta@linux.intel.com
|
||||
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
|
||||
(cherry picked from commit 66065157420c5b9b3f078f43d313c153e1ff7f83)
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/include/asm/nospec-branch.h | 2 +-
|
||||
arch/x86/kernel/cpu/bugs.c | 21 ++++++++++++++-------
|
||||
arch/x86/kernel/process.c | 2 +-
|
||||
3 files changed, 16 insertions(+), 9 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
|
||||
index 442e62e7be47..53e56fc9cf70 100644
|
||||
--- a/arch/x86/include/asm/nospec-branch.h
|
||||
+++ b/arch/x86/include/asm/nospec-branch.h
|
||||
@@ -318,7 +318,7 @@ static inline void indirect_branch_prediction_barrier(void)
|
||||
/* The Intel SPEC CTRL MSR base value cache */
|
||||
extern u64 x86_spec_ctrl_base;
|
||||
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
|
||||
-extern void write_spec_ctrl_current(u64 val, bool force);
|
||||
+extern void update_spec_ctrl_cond(u64 val);
|
||||
extern u64 spec_ctrl_current(void);
|
||||
|
||||
/*
|
||||
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
|
||||
index 5dc79427d047..53623ea69873 100644
|
||||
--- a/arch/x86/kernel/cpu/bugs.c
|
||||
+++ b/arch/x86/kernel/cpu/bugs.c
|
||||
@@ -60,11 +60,18 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
|
||||
|
||||
static DEFINE_MUTEX(spec_ctrl_mutex);
|
||||
|
||||
+/* Update SPEC_CTRL MSR and its cached copy unconditionally */
|
||||
+static void update_spec_ctrl(u64 val)
|
||||
+{
|
||||
+ this_cpu_write(x86_spec_ctrl_current, val);
|
||||
+ wrmsrl(MSR_IA32_SPEC_CTRL, val);
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Keep track of the SPEC_CTRL MSR value for the current task, which may differ
|
||||
* from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
|
||||
*/
|
||||
-void write_spec_ctrl_current(u64 val, bool force)
|
||||
+void update_spec_ctrl_cond(u64 val)
|
||||
{
|
||||
if (this_cpu_read(x86_spec_ctrl_current) == val)
|
||||
return;
|
||||
@@ -76,7 +83,7 @@ void write_spec_ctrl_current(u64 val, bool force)
|
||||
* forced or the exit MSR write is skipped the update can be delayed
|
||||
* until that time.
|
||||
*/
|
||||
- if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
|
||||
+ if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
|
||||
cpu_feature_enabled(X86_FEATURE_IBRS_EXIT_SKIP))
|
||||
wrmsrl(MSR_IA32_SPEC_CTRL, val);
|
||||
}
|
||||
@@ -1308,7 +1315,7 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
|
||||
|
||||
if (ia32_cap & ARCH_CAP_RRSBA) {
|
||||
x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
|
||||
- write_spec_ctrl_current(x86_spec_ctrl_base, true);
|
||||
+ update_spec_ctrl(x86_spec_ctrl_base);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1440,7 +1447,7 @@ static void __init spectre_v2_select_mitigation(void)
|
||||
|
||||
if (spectre_v2_in_ibrs_mode(mode)) {
|
||||
x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
|
||||
- write_spec_ctrl_current(x86_spec_ctrl_base, true);
|
||||
+ update_spec_ctrl(x86_spec_ctrl_base);
|
||||
}
|
||||
|
||||
switch (mode) {
|
||||
@@ -1565,7 +1572,7 @@ static void __init spectre_v2_select_mitigation(void)
|
||||
static void update_stibp_msr(void * __unused)
|
||||
{
|
||||
u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
|
||||
- write_spec_ctrl_current(val, true);
|
||||
+ update_spec_ctrl(val);
|
||||
}
|
||||
|
||||
/* Update x86_spec_ctrl_base in case SMT state changed. */
|
||||
@@ -1798,7 +1805,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
|
||||
x86_amd_ssb_disable();
|
||||
} else {
|
||||
x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
|
||||
- write_spec_ctrl_current(x86_spec_ctrl_base, true);
|
||||
+ update_spec_ctrl(x86_spec_ctrl_base);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2016,7 +2023,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
|
||||
void x86_spec_ctrl_setup_ap(void)
|
||||
{
|
||||
if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
|
||||
- write_spec_ctrl_current(x86_spec_ctrl_base, true);
|
||||
+ update_spec_ctrl(x86_spec_ctrl_base);
|
||||
|
||||
if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
|
||||
x86_amd_ssb_disable();
|
||||
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
|
||||
index 87bcb25ff8b2..a62f200aa736 100644
|
||||
--- a/arch/x86/kernel/process.c
|
||||
+++ b/arch/x86/kernel/process.c
|
||||
@@ -583,7 +583,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
|
||||
}
|
||||
|
||||
if (updmsr)
|
||||
- write_spec_ctrl_current(msr, false);
|
||||
+ update_spec_ctrl_cond(msr);
|
||||
}
|
||||
|
||||
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
|
||||
--
|
||||
2.39.3
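
The S3 bug described above is the classic cached-write pattern: a conditional writer trusts a software copy that can outlive a hardware reset. A standalone sketch that simulates the two helpers (no real MSR access; the variable names are made up to mirror update_spec_ctrl() and update_spec_ctrl_cond()):

/*
 * Illustrative sketch, not part of the patch series: simulates the cached
 * SPEC_CTRL write pattern in plain userspace C.  hw_msr stands in for
 * MSR_IA32_SPEC_CTRL, cached_msr for x86_spec_ctrl_current.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t hw_msr;
static uint64_t cached_msr;

/* Like update_spec_ctrl_cond(): skip the write when the cache matches. */
static void update_cond(uint64_t val)
{
        if (cached_msr == val)
                return;
        cached_msr = val;
        hw_msr = val;
}

/* Like update_spec_ctrl(): always write, as needed on boot/resume paths. */
static void update_always(uint64_t val)
{
        cached_msr = val;
        hw_msr = val;
}

int main(void)
{
        update_cond(0x1);       /* mitigation on: hw = 1, cache = 1       */
        hw_msr = 0;             /* S3 resume resets the hardware register */

        update_cond(0x1);       /* cache still says 1 -> write is skipped */
        printf("conditional restore:   hw = %#llx (mitigation lost)\n",
               (unsigned long long)hw_msr);

        update_always(0x1);     /* the fix: unconditional write on resume */
        printf("unconditional restore: hw = %#llx\n",
               (unsigned long long)hw_msr);
        return 0;
}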
|
||||
|
SOURCES/1047-x86-cpu-Support-AMD-Automatic-IBRS.patch (new file, 209 lines)
@ -0,0 +1,209 @@
|
||||
From 6b575d67beff6c1ce5f7643cb9326327170114a8 Mon Sep 17 00:00:00 2001
|
||||
From: Kim Phillips <kim.phillips@amd.com>
|
||||
Date: Tue, 24 Jan 2023 10:33:18 -0600
|
||||
Subject: [PATCH 33/36] x86/cpu: Support AMD Automatic IBRS
|
||||
|
||||
The AMD Zen4 core supports a new feature called Automatic IBRS.
|
||||
|
||||
It is a "set-and-forget" feature that means that, like Intel's Enhanced IBRS,
|
||||
h/w manages its IBRS mitigation resources automatically across CPL transitions.
|
||||
|
||||
The feature is advertised by CPUID_Fn80000021_EAX bit 8 and is enabled by
|
||||
setting MSR C000_0080 (EFER) bit 21.
|
||||
|
||||
Enable Automatic IBRS by default if the CPU feature is present. It typically
|
||||
provides greater performance over the incumbent generic retpolines mitigation.
|
||||
|
||||
Reuse the SPECTRE_V2_EIBRS spectre_v2_mitigation enum. AMD Automatic IBRS and
|
||||
Intel Enhanced IBRS have similar enablement. Add NO_EIBRS_PBRSB to
|
||||
cpu_vuln_whitelist, since AMD Automatic IBRS isn't affected by PBRSB-eIBRS.
|
||||
|
||||
The kernel command line option spectre_v2=eibrs is used to select AMD Automatic
|
||||
IBRS, if available.
|
||||
|
||||
Signed-off-by: Kim Phillips <kim.phillips@amd.com>
|
||||
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
|
||||
Acked-by: Sean Christopherson <seanjc@google.com>
|
||||
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
|
||||
Link: https://lore.kernel.org/r/20230124163319.2277355-8-kim.phillips@amd.com
|
||||
(cherry picked from commit e7862eda309ecfccc36bb5558d937ed3ace07f3f)
|
||||
|
||||
Also add fix 06cb31cc761823ef444ba4e1df11347342a6e745 upstream found
|
||||
while resolving conflicts. It's a minor doc change.
|
||||
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
Documentation/admin-guide/hw-vuln/spectre.rst | 7 ++++---
|
||||
.../admin-guide/kernel-parameters.txt | 6 +++---
|
||||
arch/x86/include/asm/cpufeatures.h | 1 +
|
||||
arch/x86/include/asm/msr-index.h | 2 ++
|
||||
arch/x86/kernel/cpu/bugs.c | 20 +++++++++++--------
|
||||
arch/x86/kernel/cpu/common.c | 17 +++++++++-------
|
||||
6 files changed, 32 insertions(+), 21 deletions(-)
|
||||
|
||||
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
|
||||
index fb2572ed9674..bf161b5aa98f 100644
|
||||
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
|
||||
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
|
||||
@@ -610,9 +610,10 @@ kernel command line.
|
||||
retpoline,generic Retpolines
|
||||
retpoline,lfence LFENCE; indirect branch
|
||||
retpoline,amd alias for retpoline,lfence
|
||||
- eibrs enhanced IBRS
|
||||
- eibrs,retpoline enhanced IBRS + Retpolines
|
||||
- eibrs,lfence enhanced IBRS + LFENCE
|
||||
+ eibrs Enhanced/Auto IBRS
|
||||
+ eibrs,retpoline Enhanced/Auto IBRS + Retpolines
|
||||
+ eibrs,lfence Enhanced/Auto IBRS + LFENCE
|
||||
+ ibrs use IBRS to protect kernel
|
||||
|
||||
Not specifying this option is equivalent to
|
||||
spectre_v2=auto.
|
||||
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
|
||||
index 73a92fb9e3c6..b6da8048f62c 100644
|
||||
--- a/Documentation/admin-guide/kernel-parameters.txt
|
||||
+++ b/Documentation/admin-guide/kernel-parameters.txt
|
||||
@@ -4968,9 +4968,9 @@
|
||||
retpoline,generic - Retpolines
|
||||
retpoline,lfence - LFENCE; indirect branch
|
||||
retpoline,amd - alias for retpoline,lfence
|
||||
- eibrs - enhanced IBRS
|
||||
- eibrs,retpoline - enhanced IBRS + Retpolines
|
||||
- eibrs,lfence - enhanced IBRS + LFENCE
|
||||
+ eibrs - Enhanced/Auto IBRS
|
||||
+ eibrs,retpoline - Enhanced/Auto IBRS + Retpolines
|
||||
+ eibrs,lfence - Enhanced/Auto IBRS + LFENCE
|
||||
ibrs - use IBRS to protect kernel
|
||||
ibrs_always - use IBRS to protect both kernel
|
||||
and userland
|
||||
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
|
||||
index ac6e58a79fad..316b11ea50ca 100644
|
||||
--- a/arch/x86/include/asm/cpufeatures.h
|
||||
+++ b/arch/x86/include/asm/cpufeatures.h
|
||||
@@ -423,6 +423,7 @@
|
||||
|
||||
/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
|
||||
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */
|
||||
+#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */
|
||||
|
||||
/*
|
||||
* BUG word(s)
|
||||
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
|
||||
index 9b0d407dd1ae..bf54eb786776 100644
|
||||
--- a/arch/x86/include/asm/msr-index.h
|
||||
+++ b/arch/x86/include/asm/msr-index.h
|
||||
@@ -30,6 +30,7 @@
|
||||
#define _EFER_SVME 12 /* Enable virtualization */
|
||||
#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
|
||||
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
|
||||
+#define _EFER_AUTOIBRS 21 /* Enable Automatic IBRS */
|
||||
|
||||
#define EFER_SCE (1<<_EFER_SCE)
|
||||
#define EFER_LME (1<<_EFER_LME)
|
||||
@@ -38,6 +39,7 @@
|
||||
#define EFER_SVME (1<<_EFER_SVME)
|
||||
#define EFER_LMSLE (1<<_EFER_LMSLE)
|
||||
#define EFER_FFXSR (1<<_EFER_FFXSR)
|
||||
+#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS)
|
||||
|
||||
/* Intel MSRs. Some also available on other CPUs */
|
||||
|
||||
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
|
||||
index 53623ea69873..43295a878b8f 100644
|
||||
--- a/arch/x86/kernel/cpu/bugs.c
|
||||
+++ b/arch/x86/kernel/cpu/bugs.c
|
||||
@@ -1177,9 +1177,9 @@ static const char * const spectre_v2_strings[] = {
|
||||
[SPECTRE_V2_NONE] = "Vulnerable",
|
||||
[SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
|
||||
[SPECTRE_V2_LFENCE] = "Mitigation: LFENCE",
|
||||
- [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS",
|
||||
- [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE",
|
||||
- [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines",
|
||||
+ [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS",
|
||||
+ [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
|
||||
+ [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
|
||||
[SPECTRE_V2_IBRS] = "Mitigation: IBRS",
|
||||
[SPECTRE_V2_IBRS_ALWAYS] = "Mitigation: IBRS (kernel and user space)",
|
||||
[SPECTRE_V2_RETPOLINE_IBRS_USER] = "Mitigation: Full retpoline and IBRS (user space)",
|
||||
@@ -1253,7 +1253,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
|
||||
cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
|
||||
cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
|
||||
!boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
|
||||
- pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
|
||||
+ pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
|
||||
mitigation_options[i].option);
|
||||
return SPECTRE_V2_CMD_AUTO;
|
||||
}
|
||||
@@ -1446,8 +1446,12 @@ static void __init spectre_v2_select_mitigation(void)
|
||||
pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
|
||||
|
||||
if (spectre_v2_in_ibrs_mode(mode)) {
|
||||
- x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
|
||||
- update_spec_ctrl(x86_spec_ctrl_base);
|
||||
+ if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
|
||||
+ msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
|
||||
+ } else {
|
||||
+ x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
|
||||
+ update_spec_ctrl(x86_spec_ctrl_base);
|
||||
+ }
|
||||
}
|
||||
|
||||
switch (mode) {
|
||||
@@ -1542,8 +1546,8 @@ static void __init spectre_v2_select_mitigation(void)
|
||||
/*
|
||||
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
|
||||
* and Enhanced IBRS protect firmware too, so enable IBRS around
|
||||
- * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
|
||||
- * enabled.
|
||||
+ * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
|
||||
+ * otherwise enabled.
|
||||
*
|
||||
* Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
|
||||
* the user might select retpoline on the kernel command line and if
|
||||
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
|
||||
index a8dfd75588cd..c5c08a3ace2a 100644
|
||||
--- a/arch/x86/kernel/cpu/common.c
|
||||
+++ b/arch/x86/kernel/cpu/common.c
|
||||
@@ -1091,7 +1091,7 @@ static const __initconst struct x86_cpu_id_v2 cpu_vuln_whitelist[] = {
|
||||
VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
|
||||
|
||||
/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
|
||||
- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
|
||||
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
|
||||
{}
|
||||
};
|
||||
|
||||
@@ -1196,8 +1196,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
|
||||
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
|
||||
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
|
||||
|
||||
- if (ia32_cap & ARCH_CAP_IBRS_ALL)
|
||||
+ /*
|
||||
+ * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
|
||||
+ * flag and protect from vendor-specific bugs via the whitelist.
|
||||
+ */
|
||||
+ if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
|
||||
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
|
||||
+ if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
|
||||
+ !(ia32_cap & ARCH_CAP_PBRSB_NO))
|
||||
+ setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
|
||||
+ }
|
||||
|
||||
if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
|
||||
!(ia32_cap & ARCH_CAP_MDS_NO)) {
|
||||
@@ -1259,11 +1267,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
|
||||
setup_force_cpu_bug(X86_BUG_RETBLEED);
|
||||
}
|
||||
|
||||
- if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
|
||||
- !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
|
||||
- !(ia32_cap & ARCH_CAP_PBRSB_NO))
|
||||
- setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
|
||||
-
|
||||
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
|
||||
return;
|
||||
|
||||
--
|
||||
2.39.3
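
With this patch applied, a CPU running in eIBRS/AutoIBRS mode reports the new mitigation string through the standard sysfs vulnerabilities interface. A quick userspace check; the sysfs path is the stock Linux interface and the matched substring comes from the spectre_v2_strings[] hunk above:

/*
 * Illustrative sketch, not part of the patch series: report the kernel's
 * Spectre v2 mitigation string and flag the Enhanced/Automatic IBRS
 * wording introduced above.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256] = "";
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

        if (!f) {
                perror("spectre_v2");
                return 1;
        }
        if (fgets(line, sizeof(line), f))
                printf("spectre_v2: %s", line);
        fclose(f);

        if (strstr(line, "Enhanced / Automatic IBRS"))
                puts("eIBRS / AutoIBRS mode is active");
        return 0;
}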
|
||||
|
SOURCES/1048-x86-CPU-AMD-Make-sure-EFER-AIBRSE-is-set.patch (new file, 86 lines)
@ -0,0 +1,86 @@
|
||||
From 373fedf7ca5674e2ab511685e183e6e6c6e00d91 Mon Sep 17 00:00:00 2001
|
||||
From: "Borislav Petkov (AMD)" <bp@alien8.de>
|
||||
Date: Sat, 25 Feb 2023 01:11:31 +0100
|
||||
Subject: [PATCH 34/36] x86/CPU/AMD: Make sure EFER[AIBRSE] is set
|
||||
|
||||
The AutoIBRS bit gets set only on the BSP as part of determining which
|
||||
mitigation to enable on AMD. Setting on the APs relies on the
|
||||
circumstance that the APs get booted through the trampoline and EFER
|
||||
- the MSR which contains that bit - gets replicated on every AP from the
|
||||
BSP.
|
||||
|
||||
However, this can change in the future and considering the security
|
||||
implications of this bit not being set on every CPU, make sure it is set
|
||||
by verifying EFER later in the boot process and on every AP.
|
||||
|
||||
Reported-by: Josh Poimboeuf <jpoimboe@kernel.org>
|
||||
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
|
||||
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
|
||||
Link: https://lore.kernel.org/r/20230224185257.o3mcmloei5zqu7wa@treble
|
||||
(cherry picked from commit 8cc68c9c9e92dbaae51a711454c66eb668045508)
|
||||
|
||||
CVE: CVE-2023-20593
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/kernel/cpu/amd.c | 11 +++++++++++
|
||||
arch/x86/kernel/cpu/bugs.c | 3 +--
|
||||
arch/x86/kernel/cpu/cpu.h | 8 ++++++++
|
||||
3 files changed, 20 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
|
||||
index 2406a71a1eb6..c2979e3bed04 100644
|
||||
--- a/arch/x86/kernel/cpu/amd.c
|
||||
+++ b/arch/x86/kernel/cpu/amd.c
|
||||
@@ -1046,6 +1046,17 @@ static void init_amd(struct cpuinfo_x86 *c)
|
||||
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
|
||||
|
||||
check_null_seg_clears_base(c);
|
||||
+
|
||||
+ /*
|
||||
+ * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
|
||||
+ * using the trampoline code and as part of it, MSR_EFER gets prepared there in
|
||||
+ * order to be replicated onto them. Regardless, set it here again, if not set,
|
||||
+ * to protect against any future refactoring/code reorganization which might
|
||||
+ * miss setting this important bit.
|
||||
+ */
|
||||
+ if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
|
||||
+ cpu_has(c, X86_FEATURE_AUTOIBRS))
|
||||
+ WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
|
||||
index 43295a878b8f..d8c1bbea4e90 100644
|
||||
--- a/arch/x86/kernel/cpu/bugs.c
|
||||
+++ b/arch/x86/kernel/cpu/bugs.c
|
||||
@@ -748,8 +748,7 @@ static int __init nospectre_v1_cmdline(char *str)
|
||||
}
|
||||
early_param("nospectre_v1", nospectre_v1_cmdline);
|
||||
|
||||
-static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
|
||||
- SPECTRE_V2_NONE;
|
||||
+enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
|
||||
|
||||
#undef pr_fmt
|
||||
#define pr_fmt(fmt) "RETBleed: " fmt
|
||||
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
|
||||
index 1add3618f766..0af870727bfa 100644
|
||||
--- a/arch/x86/kernel/cpu/cpu.h
|
||||
+++ b/arch/x86/kernel/cpu/cpu.h
|
||||
@@ -88,4 +88,12 @@ extern void update_srbds_msr(void);
|
||||
void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
|
||||
#endif
|
||||
|
||||
+extern enum spectre_v2_mitigation spectre_v2_enabled;
|
||||
+
|
||||
+static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
|
||||
+{
|
||||
+ return mode == SPECTRE_V2_EIBRS ||
|
||||
+ mode == SPECTRE_V2_EIBRS_RETPOLINE ||
|
||||
+ mode == SPECTRE_V2_EIBRS_LFENCE;
|
||||
+}
|
||||
#endif /* ARCH_X86_CPU_H */
|
||||
--
|
||||
2.39.3
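
Whether EFER[AIBRSE] really ended up set on a given CPU can also be double-checked from userspace through the msr driver. A hedged sketch: it requires root and a loaded msr module, MSR 0xc0000080 is EFER, and bit 21 is the _EFER_AUTOIBRS bit added earlier in this series:

/*
 * Illustrative sketch, not part of the patch series: read EFER (0xc0000080)
 * on CPU 0 through the msr driver and report bit 21 (AIBRSE).  Needs root
 * and 'modprobe msr'.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        uint64_t efer;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/cpu/0/msr");
                return 1;
        }
        /* The msr driver maps the file offset to the MSR index. */
        if (pread(fd, &efer, sizeof(efer), (off_t)0xc0000080) != sizeof(efer)) {
                perror("pread EFER");
                close(fd);
                return 1;
        }
        close(fd);

        printf("EFER = %#llx, AIBRSE (bit 21) = %llu\n",
               (unsigned long long)efer,
               (unsigned long long)((efer >> 21) & 1));
        return 0;
}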
|
||||
|
SOURCES/1049-x86-cpu-amd-Add-a-Zenbleed-fix.patch (new file, 175 lines)
@ -0,0 +1,175 @@
|
||||
From 661c6baa7f0dd4ef365c1cfd5aee9e999304fab2 Mon Sep 17 00:00:00 2001
|
||||
From: "Borislav Petkov (AMD)" <bp@alien8.de>
|
||||
Date: Sat, 15 Jul 2023 13:41:28 +0200
|
||||
Subject: [PATCH 35/36] x86/cpu/amd: Add a Zenbleed fix
|
||||
|
||||
Add a fix for the Zen2 VZEROUPPER data corruption bug where under
|
||||
certain circumstances executing VZEROUPPER can cause register
|
||||
corruption or leak data.
|
||||
|
||||
The optimal fix is through microcode but in the case the proper
|
||||
microcode revision has not been applied, enable a fallback fix using
|
||||
a chicken bit.
|
||||
|
||||
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
|
||||
(cherry picked from commit 522b1d69219d8f083173819fde04f994aa051a98)
|
||||
|
||||
CVE: CVE-2023-20593
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
|
||||
---
|
||||
arch/x86/include/asm/microcode.h | 1 +
|
||||
arch/x86/include/asm/microcode_amd.h | 2 +
|
||||
arch/x86/include/asm/msr-index.h | 2 +
|
||||
arch/x86/kernel/cpu/amd.c | 60 ++++++++++++++++++++++++++++
|
||||
arch/x86/kernel/cpu/common.c | 2 +
|
||||
5 files changed, 67 insertions(+)
|
||||
|
||||
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
|
||||
index 629330986955..2e23f658c69b 100644
|
||||
--- a/arch/x86/include/asm/microcode.h
|
||||
+++ b/arch/x86/include/asm/microcode.h
|
||||
@@ -5,6 +5,7 @@
|
||||
#include <asm/cpu.h>
|
||||
#include <linux/earlycpio.h>
|
||||
#include <linux/initrd.h>
|
||||
+#include <asm/microcode_amd.h>
|
||||
|
||||
struct ucode_patch {
|
||||
struct list_head plist;
|
||||
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
|
||||
index a645b25ee442..403a8e76b310 100644
|
||||
--- a/arch/x86/include/asm/microcode_amd.h
|
||||
+++ b/arch/x86/include/asm/microcode_amd.h
|
||||
@@ -48,11 +48,13 @@ extern void __init load_ucode_amd_bsp(unsigned int family);
|
||||
extern void load_ucode_amd_ap(unsigned int family);
|
||||
extern int __init save_microcode_in_initrd_amd(unsigned int family);
|
||||
void reload_ucode_amd(unsigned int cpu);
|
||||
+extern void amd_check_microcode(void);
|
||||
#else
|
||||
static inline void __init load_ucode_amd_bsp(unsigned int family) {}
|
||||
static inline void load_ucode_amd_ap(unsigned int family) {}
|
||||
static inline int __init
|
||||
save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
|
||||
static inline void reload_ucode_amd(unsigned int cpu) {}
|
||||
+static inline void amd_check_microcode(void) {}
|
||||
#endif
|
||||
#endif /* _ASM_X86_MICROCODE_AMD_H */
|
||||
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
|
||||
index bf54eb786776..bc38cbf0ac20 100644
|
||||
--- a/arch/x86/include/asm/msr-index.h
|
||||
+++ b/arch/x86/include/asm/msr-index.h
|
||||
@@ -498,6 +498,8 @@
|
||||
#define MSR_AMD_PPIN 0xc00102f1
|
||||
#define MSR_AMD64_LS_CFG 0xc0011020
|
||||
#define MSR_AMD64_DC_CFG 0xc0011022
|
||||
+#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9
|
||||
+
|
||||
#define MSR_AMD64_BU_CFG2 0xc001102a
|
||||
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
|
||||
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
|
||||
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
|
||||
index c2979e3bed04..384e484cbbdc 100644
|
||||
--- a/arch/x86/kernel/cpu/amd.c
|
||||
+++ b/arch/x86/kernel/cpu/amd.c
|
||||
@@ -69,6 +69,11 @@ static const int amd_erratum_383[] =
|
||||
static const int amd_erratum_1054[] =
|
||||
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
|
||||
|
||||
+static const int amd_zenbleed[] =
|
||||
+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
|
||||
+ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
|
||||
+ AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
|
||||
+
|
||||
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
|
||||
{
|
||||
int osvw_id = *erratum++;
|
||||
@@ -957,6 +962,47 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
|
||||
}
|
||||
}
|
||||
|
||||
+static bool cpu_has_zenbleed_microcode(void)
|
||||
+{
|
||||
+ u32 good_rev = 0;
|
||||
+
|
||||
+ switch (boot_cpu_data.x86_model) {
|
||||
+ case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
|
||||
+ case 0x60 ... 0x67: good_rev = 0x0860010b; break;
|
||||
+ case 0x68 ... 0x6f: good_rev = 0x08608105; break;
|
||||
+ case 0x70 ... 0x7f: good_rev = 0x08701032; break;
|
||||
+ case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;
|
||||
+
|
||||
+ default:
|
||||
+ return false;
|
||||
+ break;
|
||||
+ }
|
||||
+
|
||||
+ if (boot_cpu_data.microcode < good_rev)
|
||||
+ return false;
|
||||
+
|
||||
+ return true;
|
||||
+}
|
||||
+
|
||||
+static void zenbleed_check(struct cpuinfo_x86 *c)
|
||||
+{
|
||||
+ if (!cpu_has_amd_erratum(c, amd_zenbleed))
|
||||
+ return;
|
||||
+
|
||||
+ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
|
||||
+ return;
|
||||
+
|
||||
+ if (!cpu_has(c, X86_FEATURE_AVX))
|
||||
+ return;
|
||||
+
|
||||
+ if (!cpu_has_zenbleed_microcode()) {
|
||||
+ pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
|
||||
+ msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
|
||||
+ } else {
|
||||
+ msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
static void init_amd(struct cpuinfo_x86 *c)
|
||||
{
|
||||
early_init_amd(c);
|
||||
@@ -1057,6 +1103,8 @@ static void init_amd(struct cpuinfo_x86 *c)
|
||||
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
|
||||
cpu_has(c, X86_FEATURE_AUTOIBRS))
|
||||
WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
|
||||
+
|
||||
+ zenbleed_check(c);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
@@ -1205,3 +1253,15 @@ u32 amd_get_highest_perf(void)
|
||||
return 255;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(amd_get_highest_perf);
|
||||
+
|
||||
+static void zenbleed_check_cpu(void *unused)
|
||||
+{
|
||||
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
|
||||
+
|
||||
+ zenbleed_check(c);
|
||||
+}
|
||||
+
|
||||
+void amd_check_microcode(void)
|
||||
+{
|
||||
+ on_each_cpu(zenbleed_check_cpu, NULL, 1);
|
||||
+}
|
||||
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
|
||||
index c5c08a3ace2a..61039cf3d6eb 100644
|
||||
--- a/arch/x86/kernel/cpu/common.c
|
||||
+++ b/arch/x86/kernel/cpu/common.c
|
||||
@@ -2187,6 +2187,8 @@ void microcode_check(struct cpuinfo_x86 *prev_info)
|
||||
|
||||
perf_check_microcode();
|
||||
|
||||
+ amd_check_microcode();
|
||||
+
|
||||
store_cpu_caps(&curr_info);
|
||||
|
||||
if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
|
||||
--
|
||||
2.39.3
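
The Zenbleed decision above boils down to a per-model minimum microcode revision. A userspace sketch that reproduces the cpu_has_zenbleed_microcode() table, reading family/model via CPUID and the loaded revision from the standard sysfs microcode interface; the revision values are copied from the hunk above, everything else is illustrative:

/*
 * Illustrative sketch, not part of the patch series: re-implements the
 * cpu_has_zenbleed_microcode() revision table in userspace using standard
 * Linux sysfs paths and GCC/clang <cpuid.h> helpers.
 */
#include <stdio.h>
#include <cpuid.h>

static unsigned int good_rev(unsigned int model)
{
        if (model >= 0x30 && model <= 0x3f) return 0x0830107a;
        if (model >= 0x60 && model <= 0x67) return 0x0860010b;
        if (model >= 0x68 && model <= 0x6f) return 0x08608105;
        if (model >= 0x70 && model <= 0x7f) return 0x08701032;
        if (model >= 0xa0 && model <= 0xaf) return 0x08a00008;
        return 0;
}

int main(void)
{
        unsigned int eax, ebx, ecx, edx, rev = 0;
        FILE *f = fopen("/sys/devices/system/cpu/cpu0/microcode/version", "r");

        if (!f || fscanf(f, "%x", &rev) != 1) {
                fprintf(stderr, "cannot read microcode revision\n");
                return 1;
        }
        fclose(f);

        /* Leaf 1 EAX: stitch together family and model the usual way. */
        __get_cpuid(1, &eax, &ebx, &ecx, &edx);
        unsigned int family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
        unsigned int model  = ((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0);

        if (family != 0x17 || !good_rev(model)) {
                printf("not an affected Zen2 model (family %#x, model %#x)\n",
                       family, model);
                return 0;
        }

        printf("microcode %#x vs fixed %#x: %s\n", rev, good_rev(model),
               rev >= good_rev(model) ? "microcode fix present"
                                      : "chicken-bit fallback needed");
        return 0;
}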
|
||||
|
@ -0,0 +1,76 @@
|
||||
From 516c2ee4fa03f50738843aae224410f1c532ddd3 Mon Sep 17 00:00:00 2001
|
||||
From: Pablo Neira Ayuso <pablo@netfilter.org>
|
||||
Date: Thu, 8 Jun 2023 02:32:02 +0200
|
||||
Subject: [PATCH 36/36] netfilter: nf_tables: incorrect error path handling with
|
||||
NFT_MSG_NEWRULE
|
||||
|
||||
In case of error when adding a new rule that refers to an anonymous set,
|
||||
deactivate expressions via NFT_TRANS_PREPARE state, not NFT_TRANS_RELEASE.
|
||||
Thus, the lookup expression marks anonymous sets as inactive in the next
|
||||
generation to ensure it is not reachable in this transaction anymore and
|
||||
decrement the set refcount as introduced by c1592a89942e ("netfilter:
|
||||
nf_tables: deactivate anonymous set from preparation phase"). The abort
|
||||
step takes care of undoing the anonymous set.
|
||||
|
||||
This is also consistent with rule deletion, where NFT_TRANS_PREPARE is
|
||||
used. Note that this error path is exercised in the preparation step of
|
||||
the commit protocol. This patch replaces nf_tables_rule_release() by the
|
||||
deactivate and destroy calls, this time with NFT_TRANS_PREPARE.
|
||||
|
||||
Due to this incorrect error handling, it is possible to access a
|
||||
dangling pointer to the anonymous set that remains in the transaction
|
||||
list.
|
||||
|
||||
[1009.379054] BUG: KASAN: use-after-free in nft_set_lookup_global+0x147/0x1a0 [nf_tables]
|
||||
[1009.379106] Read of size 8 at addr ffff88816c4c8020 by task nft-rule-add/137110
|
||||
[1009.379116] CPU: 7 PID: 137110 Comm: nft-rule-add Not tainted 6.4.0-rc4+ #256
|
||||
[1009.379128] Call Trace:
|
||||
[1009.379132] <TASK>
|
||||
[1009.379135] dump_stack_lvl+0x33/0x50
|
||||
[1009.379146] ? nft_set_lookup_global+0x147/0x1a0 [nf_tables]
|
||||
[1009.379191] print_address_description.constprop.0+0x27/0x300
|
||||
[1009.379201] kasan_report+0x107/0x120
|
||||
[1009.379210] ? nft_set_lookup_global+0x147/0x1a0 [nf_tables]
|
||||
[1009.379255] nft_set_lookup_global+0x147/0x1a0 [nf_tables]
|
||||
[1009.379302] nft_lookup_init+0xa5/0x270 [nf_tables]
|
||||
[1009.379350] nf_tables_newrule+0x698/0xe50 [nf_tables]
|
||||
[1009.379397] ? nf_tables_rule_release+0xe0/0xe0 [nf_tables]
|
||||
[1009.379441] ? kasan_unpoison+0x23/0x50
|
||||
[1009.379450] nfnetlink_rcv_batch+0x97c/0xd90 [nfnetlink]
|
||||
[1009.379470] ? nfnetlink_rcv_msg+0x480/0x480 [nfnetlink]
|
||||
[1009.379485] ? __alloc_skb+0xb8/0x1e0
|
||||
[1009.379493] ? __alloc_skb+0xb8/0x1e0
|
||||
[1009.379502] ? entry_SYSCALL_64_after_hwframe+0x46/0xb0
|
||||
[1009.379509] ? unwind_get_return_address+0x2a/0x40
|
||||
[1009.379517] ? write_profile+0xc0/0xc0
|
||||
[1009.379524] ? avc_lookup+0x8f/0xc0
|
||||
[1009.379532] ? __rcu_read_unlock+0x43/0x60
|
||||
|
||||
Fixes: 958bee14d071 ("netfilter: nf_tables: use new transaction infrastructure to handle sets")
|
||||
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
|
||||
(cherry picked from commit 1240eb93f0616b21c675416516ff3d74798fdc97)
|
||||
|
||||
CVE: CVE-2023-3390
|
||||
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
|
||||
Reviewed-by: Venkat Venkatsubra <venkat.x.venkatsubra@oracle.com>
|
||||
---
|
||||
net/netfilter/nf_tables_api.c | 3 ++-
|
||||
1 file changed, 2 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
|
||||
index c7d98b011cd6..f9bf6e156051 100644
|
||||
--- a/net/netfilter/nf_tables_api.c
|
||||
+++ b/net/netfilter/nf_tables_api.c
|
||||
@@ -3260,7 +3260,8 @@ err_destroy_flow_rule:
|
||||
if (flow)
|
||||
nft_flow_rule_destroy(flow);
|
||||
err_release_rule:
|
||||
- nf_tables_rule_release(&ctx, rule);
|
||||
+ nft_rule_expr_deactivate(&ctx, rule, NFT_TRANS_PREPARE);
|
||||
+ nf_tables_rule_destroy(&ctx, rule);
|
||||
err_release_expr:
|
||||
for (i = 0; i < n; i++) {
|
||||
if (info[i].ops) {
|
||||
--
|
||||
2.39.3
|
||||
|
@ -38,11 +38,11 @@
|
||||
# define buildid .local
|
||||
|
||||
%define rpmversion 4.18.0
|
||||
-%define pkgrelease 477.27.1.el8_8
+%define pkgrelease 477.27.2.el8_8
|
||||
%define tarfile_release 477.13.1.el8_8
|
||||
|
||||
# allow pkg_release to have configurable %%{?dist} tag
|
||||
-%define specrelease 477.27.1%{?dist}
+%define specrelease 477.27.2%{?dist}
|
||||
|
||||
%define pkg_release %{specrelease}%{?buildid}
|
||||
|
||||
@ -540,6 +540,42 @@ Patch1011: 1011-net-sched-cls_fw-Fix-improper-refcount-update-leads-.patch
|
||||
Patch1012: 1012-netfilter-nft_set_pipapo-fix-improper-element-remova.patch
|
||||
Patch1013: 1013-netfilter-nf_tables-prevent-OOB-access-in-nft_byteor.patch
|
||||
Patch1014: 1014-net-sched-flower-fix-possible-OOB-write-in-fl_set_ge.patch
|
||||
Patch1015: 1015-x86-microcode-intel-Expose-collect_cpu_info_early-fo.patch
|
||||
Patch1016: 1016-x86-cpu-Load-microcode-during-restore_processor_stat.patch
|
||||
Patch1017: 1017-x86-microcode-Deprecate-MICROCODE_OLD_INTERFACE.patch
|
||||
Patch1018: 1018-x86-microcode-Rip-out-the-OLD_INTERFACE.patch
|
||||
Patch1019: 1019-x86-microcode-Default-disable-late-loading.patch
|
||||
Patch1020: 1020-x86-microcode-Taint-and-warn-on-late-loading.patch
|
||||
Patch1021: 1021-x86-microcode-Remove-unnecessary-perf-callback.patch
|
||||
Patch1022: 1022-x86-microcode-Print-previous-version-of-microcode-af.patch
|
||||
Patch1023: 1023-x86-microcode-Rip-out-the-subsys-interface-gunk.patch
|
||||
Patch1024: 1024-x86-microcode-Simplify-init-path-even-more.patch
|
||||
Patch1025: 1025-x86-microcode-AMD-Rename-a-couple-of-functions.patch
|
||||
Patch1026: 1026-x86-microcode-Fix-return-value-for-microcode-late-lo.patch
|
||||
Patch1027: 1027-x86-microcode-Add-a-parameter-to-microcode_check-to-.patch
|
||||
Patch1028: 1028-x86-microcode-Check-CPU-capabilities-after-late-micr.patch
|
||||
Patch1029: 1029-x86-microcode-Adjust-late-loading-result-reporting-m.patch
|
||||
Patch1030: 1030-x86-amd-Cache-debug-register-values-in-percpu-variab.patch
|
||||
Patch1031: 1031-x86-microcode-Remove-request_microcode_user.patch
|
||||
Patch1032: 1032-x86-microcode-Kill-refresh_fw.patch
|
||||
Patch1033: 1033-x86-microcode-amd-Remove-load_microcode_amd-s-bsp-pa.patch
|
||||
Patch1034: 1034-x86-microcode-Drop-struct-ucode_cpu_info.valid.patch
|
||||
Patch1035: 1035-x86-microcode-AMD-Add-a-cpu-parameter-to-the-reloadi.patch
|
||||
Patch1036: 1036-x86-microcode-AMD-Track-patch-allocation-size-explic.patch
|
||||
Patch1037: 1037-x86-microcode-AMD-Fix-mixed-steppings-support.patch
|
||||
Patch1038: 1038-x86-microcode-core-Return-an-error-only-when-necessa.patch
|
||||
Patch1039: 1039-x86-apic-Don-t-disable-x2APIC-if-locked.patch
|
||||
Patch1040: 1040-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch
|
||||
Patch1041: 1041-x86-cpu-Remove-redundant-extern-x86_read_arch_cap_ms.patch
|
||||
Patch1042: 1042-x86-cpu-kvm-Add-support-for-CPUID_80000021_EAX.patch
|
||||
Patch1043: 1043-KVM-x86-Advertise-that-the-SMM_CTL-MSR-is-not-suppor.patch
|
||||
Patch1044: 1044-KVM-x86-Move-open-coded-CPUID-leaf-0x80000021-EAX-bi.patch
|
||||
Patch1045: 1045-x86-cpu-kvm-Add-the-NO_NESTED_DATA_BP-feature.patch
|
||||
Patch1046: 1046-x86-bugs-Make-sure-MSR_SPEC_CTRL-is-updated-properly.patch
|
||||
Patch1047: 1047-x86-cpu-Support-AMD-Automatic-IBRS.patch
|
||||
Patch1048: 1048-x86-CPU-AMD-Make-sure-EFER-AIBRSE-is-set.patch
|
||||
Patch1049: 1049-x86-cpu-amd-Add-a-Zenbleed-fix.patch
|
||||
Patch1050: 1050-netfilter-nf_tables-incorrect-error-path-handling-wi.patch
|
||||
|
||||
Patch9001: 9001-x86-xen-Split-HVM-vector-callback-setup-and-interrup.patch
|
||||
Patch9002: 9002-x86-microcode-AMD-Load-late-on-both-threads-too.patch
|
||||
@ -1120,6 +1156,42 @@ ApplyPatch 1011-net-sched-cls_fw-Fix-improper-refcount-update-leads-.patch
|
||||
ApplyPatch 1012-netfilter-nft_set_pipapo-fix-improper-element-remova.patch
|
||||
ApplyPatch 1013-netfilter-nf_tables-prevent-OOB-access-in-nft_byteor.patch
|
||||
ApplyPatch 1014-net-sched-flower-fix-possible-OOB-write-in-fl_set_ge.patch
|
||||
ApplyPatch 1015-x86-microcode-intel-Expose-collect_cpu_info_early-fo.patch
|
||||
ApplyPatch 1016-x86-cpu-Load-microcode-during-restore_processor_stat.patch
|
||||
ApplyPatch 1017-x86-microcode-Deprecate-MICROCODE_OLD_INTERFACE.patch
|
||||
ApplyPatch 1018-x86-microcode-Rip-out-the-OLD_INTERFACE.patch
|
||||
ApplyPatch 1019-x86-microcode-Default-disable-late-loading.patch
|
||||
ApplyPatch 1020-x86-microcode-Taint-and-warn-on-late-loading.patch
|
||||
ApplyPatch 1021-x86-microcode-Remove-unnecessary-perf-callback.patch
|
||||
ApplyPatch 1022-x86-microcode-Print-previous-version-of-microcode-af.patch
|
||||
ApplyPatch 1023-x86-microcode-Rip-out-the-subsys-interface-gunk.patch
|
||||
ApplyPatch 1024-x86-microcode-Simplify-init-path-even-more.patch
|
||||
ApplyPatch 1025-x86-microcode-AMD-Rename-a-couple-of-functions.patch
|
||||
ApplyPatch 1026-x86-microcode-Fix-return-value-for-microcode-late-lo.patch
|
||||
ApplyPatch 1027-x86-microcode-Add-a-parameter-to-microcode_check-to-.patch
|
||||
ApplyPatch 1028-x86-microcode-Check-CPU-capabilities-after-late-micr.patch
|
||||
ApplyPatch 1029-x86-microcode-Adjust-late-loading-result-reporting-m.patch
|
||||
ApplyPatch 1030-x86-amd-Cache-debug-register-values-in-percpu-variab.patch
|
||||
ApplyPatch 1031-x86-microcode-Remove-request_microcode_user.patch
|
||||
ApplyPatch 1032-x86-microcode-Kill-refresh_fw.patch
|
||||
ApplyPatch 1033-x86-microcode-amd-Remove-load_microcode_amd-s-bsp-pa.patch
|
||||
ApplyPatch 1034-x86-microcode-Drop-struct-ucode_cpu_info.valid.patch
|
||||
ApplyPatch 1035-x86-microcode-AMD-Add-a-cpu-parameter-to-the-reloadi.patch
|
||||
ApplyPatch 1036-x86-microcode-AMD-Track-patch-allocation-size-explic.patch
|
||||
ApplyPatch 1037-x86-microcode-AMD-Fix-mixed-steppings-support.patch
|
||||
ApplyPatch 1038-x86-microcode-core-Return-an-error-only-when-necessa.patch
|
||||
ApplyPatch 1039-x86-apic-Don-t-disable-x2APIC-if-locked.patch
|
||||
ApplyPatch 1040-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch
|
||||
ApplyPatch 1041-x86-cpu-Remove-redundant-extern-x86_read_arch_cap_ms.patch
|
||||
ApplyPatch 1042-x86-cpu-kvm-Add-support-for-CPUID_80000021_EAX.patch
|
||||
ApplyPatch 1043-KVM-x86-Advertise-that-the-SMM_CTL-MSR-is-not-suppor.patch
|
||||
ApplyPatch 1044-KVM-x86-Move-open-coded-CPUID-leaf-0x80000021-EAX-bi.patch
|
||||
ApplyPatch 1045-x86-cpu-kvm-Add-the-NO_NESTED_DATA_BP-feature.patch
|
||||
ApplyPatch 1046-x86-bugs-Make-sure-MSR_SPEC_CTRL-is-updated-properly.patch
|
||||
ApplyPatch 1047-x86-cpu-Support-AMD-Automatic-IBRS.patch
|
||||
ApplyPatch 1048-x86-CPU-AMD-Make-sure-EFER-AIBRSE-is-set.patch
|
||||
ApplyPatch 1049-x86-cpu-amd-Add-a-Zenbleed-fix.patch
|
||||
ApplyPatch 1050-netfilter-nf_tables-incorrect-error-path-handling-wi.patch
|
||||
|
||||
ApplyPatch 9001-x86-xen-Split-HVM-vector-callback-setup-and-interrup.patch
|
||||
ApplyPatch 9002-x86-microcode-AMD-Load-late-on-both-threads-too.patch
|
||||
@ -2727,6 +2799,43 @@ fi
|
||||
#
|
||||
#
|
||||
%changelog
|
||||
* Thu Sep 29 2023 Andrew Lukoshko <alukoshko@almalinux.org> [4.18.0-477.27.2.el8_8]
|
||||
- x86/microcode/intel: Expose collect_cpu_info_early() for IFS
|
||||
- x86/cpu: Load microcode during restore_processor_state()
|
||||
- x86/microcode: Deprecate MICROCODE_OLD_INTERFACE
|
||||
- x86/microcode: Rip out the OLD_INTERFACE
|
||||
- x86/microcode: Default-disable late loading
|
||||
- x86/microcode: Taint and warn on late loading
|
||||
- x86/microcode: Remove unnecessary perf callback
|
||||
- x86/microcode: Print previous version of microcode after reload
|
||||
- x86/microcode: Rip out the subsys interface gunk
|
||||
- x86/microcode: Simplify init path even more
|
||||
- x86/microcode/AMD: Rename a couple of functions {CVE-2023-20593}
|
||||
- x86/microcode: Add a parameter to microcode_check() to store CPU capabilities {CVE-2023-20593}
|
||||
- x86/microcode: Check CPU capabilities after late microcode update correctly {CVE-2023-20593}
|
||||
- x86/microcode: Adjust late loading result reporting message {CVE-2023-20593}
|
||||
- x86/amd: Cache debug register values in percpu variables {CVE-2023-20593}
|
||||
- x86/microcode: Remove ->request_microcode_user()
|
||||
- x86/microcode: Kill refresh_fw
|
||||
- x86/microcode/amd: Remove load_microcode_amd()'s bsp parameter {CVE-2023-20593}
|
||||
- x86/microcode: Drop struct ucode_cpu_info.valid
|
||||
- x86/microcode/AMD: Add a @cpu parameter to the reloading functions {CVE-2023-20593}
|
||||
- x86/microcode/AMD: Track patch allocation size explicitly
|
||||
- x86/microcode/AMD: Fix mixed steppings support {CVE-2023-20593}
|
||||
- x86/microcode/core: Return an error only when necessary {CVE-2023-20593}
|
||||
- x86/apic: Don't disable x2APIC if locked
|
||||
- x86/cpu/amd: Move the errata checking functionality up {CVE-2023-20593}
|
||||
- x86/cpu: Remove redundant extern x86_read_arch_cap_msr()
|
||||
- x86/cpu, kvm: Add support for CPUID_80000021_EAX
|
||||
- KVM: x86: Advertise that the SMM_CTL MSR is not supported
|
||||
- KVM: x86: Move open-coded CPUID leaf 0x80000021 EAX bit propagation code
|
||||
- x86/cpu, kvm: Add the NO_NESTED_DATA_BP feature
|
||||
- x86/bugs: Make sure MSR_SPEC_CTRL is updated properly upon resume from S3
|
||||
- x86/cpu: Support AMD Automatic IBRS
|
||||
- x86/CPU/AMD: Make sure EFER[AIBRSE] is set
|
||||
- x86/cpu/amd: Add a Zenbleed fix {CVE-2023-20593}
|
||||
- netfilter: nf_tables: incorrect error path handling with NFT_MSG_NEWRULE {CVE-2023-3390}
|
||||
|
||||
* Thu Sep 21 2023 Andrew Lukoshko <alukoshko@almalinux.org> [4.18.0-477.27.1.el8_8]
|
||||
- bluetooth: Perform careful capability checks in hci_sock_ioctl() {CVE-2023-2002}
|
||||
- ipvlan:Fix out-of-bounds caused by unclear skb->cb {CVE-2023-3090}
|
||||
|