From e167d6613aef8356261f97e2b50489142f8e1406 Mon Sep 17 00:00:00 2001
From: Alexey Kardashevskiy <aik@amd.com>
Date: Fri, 20 Jan 2023 14:10:45 +1100
Subject: [PATCH 16/36] x86/amd: Cache debug register values in percpu
 variables

Reading DR[0-3]_ADDR_MASK MSRs takes about 250 cycles which is going to
be noticeable with the AMD KVM SEV-ES DebugSwap feature enabled. KVM is
going to store host's DR[0-3] and DR[0-3]_ADDR_MASK before switching to
a guest; the hardware is going to swap these on VMRUN and VMEXIT.

Store MSR values passed to set_dr_addr_mask() in percpu variables
(when changed) and return them via new amd_get_dr_addr_mask().
The gain here is about 10x.
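
For illustration only, and not part of this patch: a sketch of how a
DebugSwap-aware host path could snapshot the debug registers and their
address masks before VMRUN via the new helper. The structure and
function names below are hypothetical placeholders, not existing kernel
API; only get_debugreg() and amd_get_dr_addr_mask() come from this
series.

  /* Hypothetical host-state snapshot; assumes <asm/debugreg.h>. */
  struct debugswap_host_state {
          unsigned long dr[4];
          unsigned long dr_addr_mask[4];
  };

  static void debugswap_save_host_state(struct debugswap_host_state *s)
  {
          unsigned int i;

          get_debugreg(s->dr[0], 0);
          get_debugreg(s->dr[1], 1);
          get_debugreg(s->dr[2], 2);
          get_debugreg(s->dr[3], 3);

          /*
           * The masks come from the percpu cache rather than the
           * ~250-cycle MSR reads, so doing this on every VM entry
           * stays cheap.
           */
          for (i = 0; i < 4; i++)
                  s->dr_addr_mask[i] = amd_get_dr_addr_mask(i);
  }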

As set_dr_addr_mask() uses the array too, change the @dr type to
unsigned to avoid checking for <0. And give it the amd_ prefix to match
the new helper as the whole DR_ADDR_MASK feature is AMD-specific anyway.

While at it, replace deprecated boot_cpu_has() with cpu_feature_enabled()
in set_dr_addr_mask().

Signed-off-by: Alexey Kardashevskiy <aik@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230120031047.628097-2-aik@amd.com
(cherry picked from commit 7914695743d598b189d549f2f57af24aa5633705)

The conflict is due to patch 84b6a34, which is not present in this repo.
That patch is an optimization, not a bug fix, hence it is not being
ported.

CVE: CVE-2023-20593
Signed-off-by: Mridula Shastry <mridula.c.shastry@oracle.com>
Reviewed-by: Todd Vierling <todd.vierling@oracle.com>
---
 arch/x86/include/asm/debugreg.h |  9 +++++--
 arch/x86/kernel/cpu/amd.c       | 47 +++++++++++++++++++++++----------
 arch/x86/kernel/hw_breakpoint.c |  4 +--
 3 files changed, 42 insertions(+), 18 deletions(-)

diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 1c964123f2dd..ccf6639c8df9 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -144,9 +144,14 @@ static __always_inline void local_db_restore(unsigned long dr7)
 }
 
 #ifdef CONFIG_CPU_SUP_AMD
-extern void set_dr_addr_mask(unsigned long mask, int dr);
+extern void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr);
+extern unsigned long amd_get_dr_addr_mask(unsigned int dr);
 #else
-static inline void set_dr_addr_mask(unsigned long mask, int dr) { }
+static inline void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr) { }
+static inline unsigned long amd_get_dr_addr_mask(unsigned int dr)
+{
+	return 0;
+}
 #endif
 
 #endif /* _ASM_X86_DEBUGREG_H */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index efa49b3f87b9..6eb7923f5f08 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1146,24 +1146,43 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 	return false;
 }
 
-void set_dr_addr_mask(unsigned long mask, int dr)
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);
+
+static unsigned int amd_msr_dr_addr_masks[] = {
+	MSR_F16H_DR0_ADDR_MASK,
+	MSR_F16H_DR1_ADDR_MASK,
+	MSR_F16H_DR1_ADDR_MASK + 1,
+	MSR_F16H_DR1_ADDR_MASK + 2
+};
+
+void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
 {
-	if (!boot_cpu_has(X86_FEATURE_BPEXT))
+	int cpu = smp_processor_id();
+
+	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
 		return;
 
-	switch (dr) {
-	case 0:
-		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
-		break;
-	case 1:
-	case 2:
-	case 3:
-		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
-		break;
-	default:
-		break;
-	}
+	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
+		return;
+
+	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
+		return;
+
+	wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
+	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
+}
+
+unsigned long amd_get_dr_addr_mask(unsigned int dr)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
+		return 0;
+
+	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
+		return 0;
+
+	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
 }
+EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);
 
 u32 amd_get_highest_perf(void)
 {
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 3871950432d6..d38d4648512d 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -129,7 +129,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 
 	set_debugreg(*dr7, 7);
 	if (info->mask)
-		set_dr_addr_mask(info->mask, i);
+		amd_set_dr_addr_mask(info->mask, i);
 
 	return 0;
 }
@@ -166,7 +166,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 
 	set_debugreg(*dr7, 7);
 	if (info->mask)
-		set_dr_addr_mask(0, i);
+		amd_set_dr_addr_mask(0, i);
 }
 
 static int arch_bp_generic_len(int x86_len)
-- 
2.39.3