From 97e9891f4f6253ec70fc870e639d7e4d07b9e361 Mon Sep 17 00:00:00 2001
From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Date: Wed, 30 Nov 2022 07:25:51 -0800
Subject: [PATCH 32/36] x86/bugs: Make sure MSR_SPEC_CTRL is updated properly
 upon resume from S3

The "force" argument to write_spec_ctrl_current() is currently ambiguous
|
|
as it does not guarantee the MSR write. This is due to the optimization
|
|
that writes to the MSR happen only when the new value differs from the
|
|
cached value.
|
|
|
|
This is fine in most cases, but breaks for S3 resume when the cached MSR
|
|
value gets out of sync with the hardware MSR value due to S3 resetting
|
|
it.
|
|
|
|
When x86_spec_ctrl_current is the same as x86_spec_ctrl_base, the MSR
write is skipped, which results in SPEC_CTRL mitigations not getting
restored.

Move the MSR write from write_spec_ctrl_current() to a new function that
unconditionally writes to the MSR. Update the callers accordingly and
rename functions.

[ bp: Rework a bit. ]

Fixes: caa0ff24d5d0 ("x86/bugs: Keep a per-CPU IA32_SPEC_CTRL value")
Suggested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: <stable@kernel.org>
Link: https://lore.kernel.org/r/806d39b0bfec2fe8f50dc5446dff20f5bb24a959.1669821572.git.pawan.kumar.gupta@linux.intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 66065157420c5b9b3f078f43d313c153e1ff7f83)
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/include/asm/nospec-branch.h |  2 +-
 arch/x86/kernel/cpu/bugs.c           | 21 ++++++++++++++-------
 arch/x86/kernel/process.c            |  2 +-
 3 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 442e62e7be47..53e56fc9cf70 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -318,7 +318,7 @@ static inline void indirect_branch_prediction_barrier(void)
 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;
 DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
-extern void write_spec_ctrl_current(u64 val, bool force);
+extern void update_spec_ctrl_cond(u64 val);
 extern u64 spec_ctrl_current(void);
 
 /*
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 5dc79427d047..53623ea69873 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -60,11 +60,18 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
 
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
+/* Update SPEC_CTRL MSR and its cached copy unconditionally */
+static void update_spec_ctrl(u64 val)
+{
+	this_cpu_write(x86_spec_ctrl_current, val);
+	wrmsrl(MSR_IA32_SPEC_CTRL, val);
+}
+
 /*
  * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
  * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
  */
-void write_spec_ctrl_current(u64 val, bool force)
+void update_spec_ctrl_cond(u64 val)
 {
 	if (this_cpu_read(x86_spec_ctrl_current) == val)
 		return;
@@ -76,7 +83,7 @@ void write_spec_ctrl_current(u64 val, bool force)
 	 * forced or the exit MSR write is skipped the update can be delayed
 	 * until that time.
 	 */
-	if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
+	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
 	    cpu_feature_enabled(X86_FEATURE_IBRS_EXIT_SKIP))
 		wrmsrl(MSR_IA32_SPEC_CTRL, val);
 }
@@ -1308,7 +1315,7 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
 
 	if (ia32_cap & ARCH_CAP_RRSBA) {
 		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
-		write_spec_ctrl_current(x86_spec_ctrl_base, true);
+		update_spec_ctrl(x86_spec_ctrl_base);
 	}
 }
 
@@ -1440,7 +1447,7 @@ static void __init spectre_v2_select_mitigation(void)
 
 	if (spectre_v2_in_ibrs_mode(mode)) {
 		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-		write_spec_ctrl_current(x86_spec_ctrl_base, true);
+		update_spec_ctrl(x86_spec_ctrl_base);
 	}
 
 	switch (mode) {
@@ -1565,7 +1572,7 @@ static void __init spectre_v2_select_mitigation(void)
 static void update_stibp_msr(void * __unused)
 {
 	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
-	write_spec_ctrl_current(val, true);
+	update_spec_ctrl(val);
 }
 
 /* Update x86_spec_ctrl_base in case SMT state changed. */
@@ -1798,7 +1805,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
 		x86_amd_ssb_disable();
 	} else {
 		x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-		write_spec_ctrl_current(x86_spec_ctrl_base, true);
+		update_spec_ctrl(x86_spec_ctrl_base);
 	}
 }
 
@@ -2016,7 +2023,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 void x86_spec_ctrl_setup_ap(void)
 {
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-		write_spec_ctrl_current(x86_spec_ctrl_base, true);
+		update_spec_ctrl(x86_spec_ctrl_base);
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 		x86_amd_ssb_disable();
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 87bcb25ff8b2..a62f200aa736 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -583,7 +583,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
 	}
 
 	if (updmsr)
-		write_spec_ctrl_current(msr, false);
+		update_spec_ctrl_cond(msr);
 }
 
 static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
-- 
2.39.3
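
Editor's illustration (not part of the patch, and not kernel code): a minimal,
self-contained C sketch of the caching pitfall described in the commit message.
All names below (hw_msr, cached_msr, update_cond, update_uncond) are invented
for this example; only the pattern mirrors the old write_spec_ctrl_current()
versus the new update_spec_ctrl().

/* Why a cache-equality check can skip restoring a register that the
 * hardware silently reset across suspend/resume. */
#include <stdint.h>
#include <stdio.h>

static uint64_t hw_msr;      /* stand-in for the real MSR_IA32_SPEC_CTRL */
static uint64_t cached_msr;  /* stand-in for x86_spec_ctrl_current */

/* Mirrors the old conditional path: skip the write when the cached
 * value already matches the requested value. */
static void update_cond(uint64_t val)
{
	if (cached_msr == val)
		return;          /* after "S3": hw_msr was reset, cache was not */
	cached_msr = val;
	hw_msr = val;
}

/* Mirrors the new unconditional helper: always write both. */
static void update_uncond(uint64_t val)
{
	cached_msr = val;
	hw_msr = val;
}

int main(void)
{
	update_uncond(0x5);  /* boot-time mitigation setup */
	hw_msr = 0;          /* simulated S3 resume: hardware forgets, cache does not */

	update_cond(0x5);    /* old path: cache matches, restore is skipped */
	printf("conditional restore:   hw=0x%llx\n", (unsigned long long)hw_msr);

	update_uncond(0x5);  /* new path: the register is really restored */
	printf("unconditional restore: hw=0x%llx\n", (unsigned long long)hw_msr);
	return 0;
}

Running the sketch prints hw=0x0 for the conditional restore and hw=0x5 for
the unconditional one, which is the behavioral difference the patch relies on
when it routes S3/AP/mitigation-setup callers through update_spec_ctrl().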