19bc18cc9e
- kvm-vdpa-Skip-the-maps-not-in-the-iova-tree.patch [RHELX-57] - kvm-vdpa-do-not-save-failed-dma-maps-in-SVQ-iova-tree.patch [RHELX-57] - kvm-util-accept-iova_tree_remove_parameter-by-value.patch [RHELX-57] - kvm-vdpa-Remove-SVQ-vring-from-iova_tree-at-shutdown.patch [RHELX-57] - kvm-vdpa-Make-SVQ-vring-unmapping-return-void.patch [RHELX-57] - kvm-vhost-Always-store-new-kick-fd-on-vhost_svq_set_svq_.patch [RHELX-57] - kvm-vdpa-Use-ring-hwaddr-at-vhost_vdpa_svq_unmap_ring.patch [RHELX-57] - kvm-vhost-stop-transfer-elem-ownership-in-vhost_handle_g.patch [RHELX-57] - kvm-vhost-use-SVQ-element-ndescs-instead-of-opaque-data-.patch [RHELX-57] - kvm-vhost-Delete-useless-read-memory-barrier.patch [RHELX-57] - kvm-vhost-Do-not-depend-on-NULL-VirtQueueElement-on-vhos.patch [RHELX-57] - kvm-vhost_net-Add-NetClientInfo-start-callback.patch [RHELX-57] - kvm-vhost_net-Add-NetClientInfo-stop-callback.patch [RHELX-57] - kvm-vdpa-add-net_vhost_vdpa_cvq_info-NetClientInfo.patch [RHELX-57] - kvm-vdpa-Move-command-buffers-map-to-start-of-net-device.patch [RHELX-57] - kvm-vdpa-extract-vhost_vdpa_net_cvq_add-from-vhost_vdpa_.patch [RHELX-57] - kvm-vhost_net-add-NetClientState-load-callback.patch [RHELX-57] - kvm-vdpa-Add-virtio-net-mac-address-via-CVQ-at-start.patch [RHELX-57] - kvm-vdpa-Delete-CVQ-migration-blocker.patch [RHELX-57] - kvm-vdpa-Make-VhostVDPAState-cvq_cmd_in_buffer-control-a.patch [RHELX-57] - kvm-vdpa-extract-vhost_vdpa_net_load_mac-from-vhost_vdpa.patch [RHELX-57] - kvm-vdpa-Add-vhost_vdpa_net_load_mq.patch [RHELX-57] - kvm-vdpa-validate-MQ-CVQ-commands.patch [RHELX-57] - kvm-virtio-net-Update-virtio-net-curr_queue_pairs-in-vdp.patch [RHELX-57] - kvm-vdpa-Allow-MQ-feature-in-SVQ.patch [RHELX-57] - kvm-i386-reset-KVM-nested-state-upon-CPU-reset.patch [bz#2125281] - kvm-i386-do-kvm_put_msr_feature_control-first-thing-when.patch [bz#2125281] - kvm-Revert-Re-enable-capstone-internal-build.patch [bz#2127825] - kvm-spec-Use-capstone-package.patch [bz#2127825] - Resolves: 
RHELX-57 (vDPA SVQ Multiqueue support) - Resolves: bz#2125281 ([RHEL9.1] Guests in VMX root operation fail to reboot with QEMU's 'system_reset' command [rhel-9.2.0]) - Resolves: bz#2127825 (Use capstone for qemu-kvm build)
94 lines
3.0 KiB
Diff
94 lines
3.0 KiB
Diff
From 6f650e08efc35cc04730bf99cea7be8d4faa6e74 Mon Sep 17 00:00:00 2001
|
|
From: Vitaly Kuznetsov <vkuznets@redhat.com>
|
|
Date: Thu, 18 Aug 2022 17:01:12 +0200
|
|
Subject: [PATCH 26/29] i386: reset KVM nested state upon CPU reset
|
|
|
|
RH-Author: Miroslav Rezanina <mrezanin@redhat.com>
|
|
RH-MergeRequest: 118: Synchronize qemu-kvm-7.0.0-13.el9
|
|
RH-Bugzilla: 2125281
|
|
RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
|
|
RH-Commit: [1/2] b34da74a40fe32ef210c8127ba8bb032aaab6381 (mrezanin/centos-src-qemu-kvm)
|
|
|
|
Make sure env->nested_state is cleaned up when a vCPU is reset, it may
|
|
be stale after an incoming migration, kvm_arch_put_registers() may
|
|
end up failing or putting vCPU in a weird state.
|
|
|
|
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
|
|
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
|
|
Message-Id: <20220818150113.479917-2-vkuznets@redhat.com>
|
|
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
|
|
(cherry picked from commit 3cafdb67504a34a0305260f0c86a73d5a3fb000b)
|
|
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
|
|
---
|
|
target/i386/kvm/kvm.c | 37 +++++++++++++++++++++++++++----------
|
|
1 file changed, 27 insertions(+), 10 deletions(-)
|
|
|
|
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
|
|
index 4e5d4bafc4..fd3237310b 100644
|
|
--- a/target/i386/kvm/kvm.c
|
|
+++ b/target/i386/kvm/kvm.c
|
|
@@ -1695,6 +1695,30 @@ static void kvm_init_xsave(CPUX86State *env)
|
|
env->xsave_buf_len);
|
|
}
|
|
|
|
+static void kvm_init_nested_state(CPUX86State *env)
|
|
+{
|
|
+ struct kvm_vmx_nested_state_hdr *vmx_hdr;
|
|
+ uint32_t size;
|
|
+
|
|
+ if (!env->nested_state) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ size = env->nested_state->size;
|
|
+
|
|
+ memset(env->nested_state, 0, size);
|
|
+ env->nested_state->size = size;
|
|
+
|
|
+ if (cpu_has_vmx(env)) {
|
|
+ env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
|
|
+ vmx_hdr = &env->nested_state->hdr.vmx;
|
|
+ vmx_hdr->vmxon_pa = -1ull;
|
|
+ vmx_hdr->vmcs12_pa = -1ull;
|
|
+ } else if (cpu_has_svm(env)) {
|
|
+ env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM;
|
|
+ }
|
|
+}
|
|
+
|
|
int kvm_arch_init_vcpu(CPUState *cs)
|
|
{
|
|
struct {
|
|
@@ -2122,19 +2146,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
|
|
assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
|
|
|
|
if (cpu_has_vmx(env) || cpu_has_svm(env)) {
|
|
- struct kvm_vmx_nested_state_hdr *vmx_hdr;
|
|
-
|
|
env->nested_state = g_malloc0(max_nested_state_len);
|
|
env->nested_state->size = max_nested_state_len;
|
|
|
|
- if (cpu_has_vmx(env)) {
|
|
- env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
|
|
- vmx_hdr = &env->nested_state->hdr.vmx;
|
|
- vmx_hdr->vmxon_pa = -1ull;
|
|
- vmx_hdr->vmcs12_pa = -1ull;
|
|
- } else {
|
|
- env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM;
|
|
- }
|
|
+ kvm_init_nested_state(env);
|
|
}
|
|
}
|
|
|
|
@@ -2199,6 +2214,8 @@ void kvm_arch_reset_vcpu(X86CPU *cpu)
|
|
/* enabled by default */
|
|
env->poll_control_msr = 1;
|
|
|
|
+ kvm_init_nested_state(env);
|
|
+
|
|
sev_es_set_reset_vector(CPU(cpu));
|
|
}
|
|
|
|
--
|
|
2.31.1
|
|
|