kernel-rt/SOURCES/1019-x86-microcode-Default-...

From 9cc01dc772e684b7864d8c8a1f526035c991aa76 Mon Sep 17 00:00:00 2001
From: Borislav Petkov <bp@suse.de>
Date: Wed, 25 May 2022 18:12:30 +0200
Subject: [PATCH 05/36] x86/microcode: Default-disable late loading

It is dangerous and it should not be used anyway - there's a nice early
loading already.

Requested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20220525161232.14924-3-bp@alien8.de
(cherry picked from commit a77a94f86273ce42a39cb479217dd8d68acfe0ff)
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
arch/x86/Kconfig | 11 +++++++++++
arch/x86/kernel/cpu/common.c | 2 ++
arch/x86/kernel/cpu/microcode/core.c | 7 ++++++-
3 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b9bcde5c538f..5af5427c50ae 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1360,6 +1360,17 @@ config MICROCODE_AMD
If you select this option, microcode patch loading support for AMD
processors will be enabled.

+config MICROCODE_LATE_LOADING
+ bool "Late microcode loading (DANGEROUS)"
+ default n
+ depends on MICROCODE
+ help
+ Loading microcode late, when the system is up and executing instructions
+ is a tricky business and should be avoided if possible. Just the sequence
+ of synchronizing all cores and SMT threads is one fragile dance which does
+ not guarantee that cores might not softlock after the loading. Therefore,
+ use this at your own risk. Late loading taints the kernel too.
+
config X86_MSR
tristate "/dev/cpu/*/msr - Model-specific register support"
---help---
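
The Kconfig hunk above only introduces the MICROCODE_LATE_LOADING symbol
(default n); the hunks that follow compile the late-loading paths out
whenever it is unset. As a minimal sketch of how code can key off such a
bool symbol without a full #ifdef block (this is not part of the patch, and
the helper name is hypothetical), IS_ENABLED() from <linux/kconfig.h>
evaluates to 1 when the option is enabled and 0 otherwise:

    #include <linux/kconfig.h>
    #include <linux/types.h>

    /* Hypothetical helper: true only on kernels built with
     * CONFIG_MICROCODE_LATE_LOADING=y. */
    static bool late_loading_built_in(void)
    {
            return IS_ENABLED(CONFIG_MICROCODE_LATE_LOADING);
    }

The patch uses plain #ifdef/#endif instead, which also keeps references to
symbols that only exist in the enabled configuration (such as
dev_attr_reload below) out of the preprocessed source.
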
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f59edeef14eb..7be057c7531c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2145,6 +2145,7 @@ void cpu_init_secondary(void)
}
#endif

+#ifdef CONFIG_MICROCODE_LATE_LOADING
/*
* The microcode loader calls this upon late microcode load to recheck features,
* only when microcode has been updated. Caller holds microcode_mutex and CPU
@@ -2174,6 +2175,7 @@ void microcode_check(void)
pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
}
+#endif

/*
* Invoked from core CPU hotplug code after hotplug operations
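
In the two hunks above, microcode_check() is now built only when
CONFIG_MICROCODE_LATE_LOADING=y. Its only caller in this patch is the
late-reload path in microcode/core.c, which ends up under the same guard, so
no fallback is required here. For illustration only (this shows the usual
kernel idiom, not a hunk of this patch), a declaration that must stay
visible to unguarded callers would be paired with an inline no-op stub:

    /* Header-side sketch: keep unguarded callers building when the
     * real definition is compiled out. */
    #ifdef CONFIG_MICROCODE_LATE_LOADING
    void microcode_check(void);
    #else
    static inline void microcode_check(void) { }
    #endif
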
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index bbc262a5fc0d..c4d19f4b7afb 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -395,6 +395,7 @@ static int apply_microcode_on_target(int cpu)
/* fake device for request_firmware */
static struct platform_device *microcode_pdev;

+#ifdef CONFIG_MICROCODE_LATE_LOADING
/*
* Late loading dance. Why the heavy-handed stomp_machine effort?
*
@@ -563,6 +564,9 @@ put:
return ret;
}

+static DEVICE_ATTR_WO(reload);
+#endif
+
static ssize_t version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -579,7 +583,6 @@ static ssize_t pf_show(struct device *dev,
return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

-static DEVICE_ATTR_WO(reload);
static DEVICE_ATTR(version, 0444, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL);

@@ -732,7 +735,9 @@ static int mc_cpu_down_prep(unsigned int cpu)
}

static struct attribute *cpu_root_microcode_attrs[] = {
+#ifdef CONFIG_MICROCODE_LATE_LOADING
&dev_attr_reload.attr,
+#endif
NULL
};
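
A write to the reload attribute (exposed as
/sys/devices/system/cpu/microcode/reload) is what triggers a late load, so
its definition and its entry in cpu_root_microcode_attrs[] must sit under
the same guard: DEVICE_ATTR_WO(reload) is what creates the dev_attr_reload
symbol referenced in the array. As a rough, illustrative expansion of that
macro (the real definition lives in the driver core headers), it amounts to:

    /* Write-only (mode 0200) sysfs attribute named "reload" whose
     * ->store callback is reload_store(); &dev_attr_reload.attr is the
     * pointer listed in cpu_root_microcode_attrs[] above. */
    static struct device_attribute dev_attr_reload = {
            .attr  = { .name = "reload", .mode = 0200 },
            .store = reload_store,
    };

If only one of the two sites were guarded, a CONFIG_MICROCODE_LATE_LOADING=n
build would fail: either the array would reference a dev_attr_reload that no
longer exists, or dev_attr_reload would reference a reload_store() that has
been compiled out.
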
--
2.39.3