forked from rpms/kernel
Drop old patches
This commit is contained in:
parent ed917183a5
commit 15cccd797f
@@ -1,76 +0,0 @@
From 82d811ff566594de3676f35808e8a9e19c5c864c Mon Sep 17 00:00:00 2001
From: Sean Christopherson <seanjc@google.com>
Date: Wed, 23 Aug 2023 18:01:04 -0700
Subject: [PATCH] KVM: x86/mmu: Fix an sign-extension bug with mmu_seq that
 hangs vCPUs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Upstream commit ba6e3fe25543 ("KVM: x86/mmu: Grab mmu_invalidate_seq in
kvm_faultin_pfn()") unknowingly fixed the bug in v6.3 when refactoring
how KVM tracks the sequence counter snapshot.

Take the vCPU's mmu_seq snapshot as an "unsigned long" instead of an "int"
when checking to see if a page fault is stale, as the sequence count is
stored as an "unsigned long" everywhere else in KVM. This fixes a bug
where KVM will effectively hang vCPUs due to always thinking page faults
are stale, which results in KVM refusing to "fix" faults.

mmu_invalidate_seq (née mmu_notifier_seq) is a sequence counter used when
KVM is handling page faults to detect if userspace mappings relevant to
the guest were invalidated between snapshotting the counter and acquiring
mmu_lock, i.e. to ensure that the userspace mapping KVM is using to
resolve the page fault is fresh. If KVM sees that the counter has
changed, KVM simply resumes the guest without fixing the fault.

What _should_ happen is that the source of the mmu_notifier invalidations
eventually goes away, mmu_invalidate_seq becomes stable, and KVM can once
again fix guest page fault(s).

But for a long-lived VM and/or a VM that the host just doesn't particularly
like, it's possible for a VM to be on the receiving end of 2 billion (with
a B) mmu_notifier invalidations. When that happens, bit 31 will be set in
mmu_invalidate_seq. This causes the value to be turned into a 32-bit
negative value when implicitly cast to an "int" by is_page_fault_stale(),
and then sign-extended into a 64-bit unsigned when the signed "int" is
implicitly cast back to an "unsigned long" on the call to
mmu_invalidate_retry_hva().

As a result of the casting and sign-extension, given a sequence counter of
e.g. 0x8002dc25, mmu_invalidate_retry_hva() ends up doing

	if (0x8002dc25 != 0xffffffff8002dc25)

and signals that the page fault is stale and needs to be retried even
though the sequence counter is stable, and KVM effectively hangs any vCPU
that takes a page fault (EPT violation or #NPF when TDP is enabled).

Reported-by: Brian Rak <brak@vultr.com>
Reported-by: Amaan Cheval <amaan.cheval@gmail.com>
Reported-by: Eric Wheeler <kvm@lists.ewheeler.net>
Closes: https://lore.kernel.org/all/f023d927-52aa-7e08-2ee5-59a2fbc65953@gameservers.com
Fixes: a955cad84cda ("KVM: x86/mmu: Retry page fault if root is invalidated by memslot update")
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/kvm/mmu/mmu.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 230108a90cf3..beca03556379 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4212,7 +4212,8 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
  * root was invalidated by a memslot update or a relevant mmu_notifier fired.
  */
 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
-				struct kvm_page_fault *fault, int mmu_seq)
+				struct kvm_page_fault *fault,
+				unsigned long mmu_seq)
 {
 	struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);

--
2.27.0
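For reference, the hang described in the dropped patch above comes down to ordinary C integer conversion rules. The following standalone sketch is not KVM code; the variable names and the 0x8002dc25 value are taken from the commit message only for illustration. It reproduces the 0x8002dc25 vs 0xffffffff8002dc25 mismatch by snapshotting an "unsigned long" counter into an "int" and comparing it back:

#include <stdio.h>

int main(void)
{
	/* Counter value with bit 31 set, as after ~2 billion invalidations. */
	unsigned long mmu_invalidate_seq = 0x8002dc25UL;

	/* Snapshot taken as an "int", as in the pre-fix is_page_fault_stale(). */
	int mmu_seq = mmu_invalidate_seq;     /* truncated to a negative 32-bit value */

	/* Converted back to "unsigned long" for the comparison: sign-extended. */
	unsigned long compared = mmu_seq;     /* 0xffffffff8002dc25 on a 64-bit host */

	printf("%#lx != %#lx -> %d\n",
	       mmu_invalidate_seq, compared, mmu_invalidate_seq != compared);
	return 0;
}

On a 64-bit build this prints a mismatch every time, which is why every page fault looked stale; widening the parameter to "unsigned long" (the one-line change in the diff above) removes both conversions.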
@@ -1,34 +0,0 @@
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index c52a1dd1b..3ff44747e 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -8583,16 +8583,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
 	data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
 
 	switch (data->verdict.code) {
-	default:
-		switch (data->verdict.code & NF_VERDICT_MASK) {
-		case NF_ACCEPT:
-		case NF_DROP:
-		case NF_QUEUE:
-			break;
-		default:
-			return -EINVAL;
-		}
-		/* fall through */
+	case NF_ACCEPT:
+	case NF_DROP:
+	case NF_QUEUE:
+		break;
 	case NFT_CONTINUE:
 	case NFT_BREAK:
 	case NFT_RETURN:
@@ -8611,6 +8605,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
 		chain->use++;
 		data->verdict.chain = chain;
 		break;
+	default:
+		return -EINVAL;
 	}
 
 	desc->len = sizeof(data->verdict);
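For context on what these dropped nf_tables hunks (the CVE-2024-1086.patch referenced in the spec below) change: the old switch accepted any verdict whose low byte, after masking with NF_VERDICT_MASK, was NF_ACCEPT, NF_DROP or NF_QUEUE, while the new one accepts only the bare codes and returns -EINVAL for anything else. A rough standalone sketch of that difference, with constants assumed to mirror include/uapi/linux/netfilter.h and the helper names invented here:

#include <errno.h>
#include <stdio.h>

/* Assumed values, mirroring include/uapi/linux/netfilter.h. */
#define NF_DROP          0
#define NF_ACCEPT        1
#define NF_QUEUE         3
#define NF_VERDICT_MASK  0x000000ff

/* Old behaviour: verdicts with extra high bits still pass after masking. */
static int verdict_base_ok_old(unsigned int code)
{
	switch (code & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_DROP:
	case NF_QUEUE:
		return 0;
	default:
		return -EINVAL;
	}
}

/* New behaviour: only the exact base codes are accepted. */
static int verdict_base_ok_new(unsigned int code)
{
	switch (code) {
	case NF_ACCEPT:
	case NF_DROP:
	case NF_QUEUE:
		return 0;
	default:
		return -EINVAL;
	}
}

int main(void)
{
	/* Low byte is NF_DROP, but extra bits are set above the mask. */
	unsigned int masked_drop = 0x00010000 | NF_DROP;

	printf("old: %d, new: %d\n",
	       verdict_base_ok_old(masked_drop),   /* 0: passes the masked check */
	       verdict_base_ok_new(masked_drop));  /* -EINVAL: rejected outright */
	return 0;
}

In the real nft_verdict_init() the NFT_* verdicts keep their own cases ahead of the final default, so only userspace-supplied base verdicts carrying extra bits are newly rejected.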
@@ -527,9 +527,6 @@ Patch999999: linux-kernel-test.patch

Patch1000: debrand-single-cpu.patch
Patch1002: debrand-rh-i686-cpu.patch
Patch1003: CVE-2024-1086.patch

Patch1100: 1100-KVM-x86-mmu-Fix-an-sign-extension-bug-with-mmu_seq-t.patch

# END OF PATCH DEFINITIONS
@@ -1094,12 +1091,8 @@ mv linux-%{specversion}-%{pkgrelease} linux-%{KVERREL}

cd linux-%{KVERREL}

ApplyPatch CVE-2024-1086.patch

ApplyOptionalPatch debrand-single-cpu.patch
ApplyOptionalPatch debrand-rh-i686-cpu.patch
# Already applied in the source tarball
# ApplyOptionalPatch 1100-KVM-x86-mmu-Fix-an-sign-extension-bug-with-mmu_seq-t.patch
ApplyOptionalPatch linux-kernel-test.patch

# END OF PATCH APPLICATIONS