import qemu-kvm-4.2.0-60.module+el8.5.0+14545+9e40c7b1.2
This commit is contained in:
parent 4cda9e4498
commit 03df4f4cb3
@@ -0,0 +1,47 @@
From 4da53939a51a5a834fae3fb8687603598d811269 Mon Sep 17 00:00:00 2001
From: David Edmondson <david.edmondson@oracle.com>
Date: Mon, 5 Jul 2021 11:46:27 +0100
Subject: [PATCH 3/7] target/i386: Clarify the padding requirements of
X86XSaveArea

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
RH-MergeRequest: 113: non-av 8.5z: Fix XSAVE on newer CPUs
RH-Commit: [3/7] 789cb79b4ae08bd78479b0399821adba67139c76
RH-Bugzilla: 2065239
RH-Acked-by: Jon Maloy <jmaloy@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Bandan Das <None>

Replace the hard-coded size of offsets or structure elements with
defined constants or sizeof().

Signed-off-by: David Edmondson <david.edmondson@oracle.com>
Message-Id: <20210705104632.2902400-4-david.edmondson@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit fde74821006472f40fee9a094e6da86cd39b5623)
---
target/i386/cpu.h | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 7c81d4bd6d..d586b5508d 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1296,7 +1296,13 @@ typedef struct X86XSaveArea {

/* AVX State: */
XSaveAVX avx_state;
- uint8_t padding[960 - 576 - sizeof(XSaveAVX)];
+
+ /* Ensure that XSaveBNDREG is properly aligned. */
+ uint8_t padding[XSAVE_BNDREG_OFFSET
+ - sizeof(X86LegacyXSaveArea)
+ - sizeof(X86XSaveHeader)
+ - sizeof(XSaveAVX)];
+
/* MPX State: */
XSaveBNDREG bndreg_state;
XSaveBNDCSR bndcsr_state;
--
2.27.0
@@ -0,0 +1,118 @@
From 007e162cc8332faa132568656e0defc509ba658c Mon Sep 17 00:00:00 2001
From: David Edmondson <david.edmondson@oracle.com>
Date: Mon, 5 Jul 2021 11:46:26 +0100
Subject: [PATCH 2/7] target/i386: Consolidate the X86XSaveArea offset checks

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
RH-MergeRequest: 113: non-av 8.5z: Fix XSAVE on newer CPUs
RH-Commit: [2/7] 308e45b88a5e8501947466d4cf11d1ae0a68e0d8
RH-Bugzilla: 2065239
RH-Acked-by: Jon Maloy <jmaloy@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Bandan Das <None>

Rather than having similar but different checks in cpu.h and kvm.c,
move them all to cpu.h.
Message-Id: <20210705104632.2902400-3-david.edmondson@oracle.com>

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 436463b84b75fad6ff962e45a2220a7d1d17557e)
---
target/i386/cpu.h | 22 +++++++++++++++-------
target/i386/kvm.c | 39 ---------------------------------------
2 files changed, 15 insertions(+), 46 deletions(-)

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 1fac5170a6..7c81d4bd6d 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1308,21 +1308,29 @@ typedef struct X86XSaveArea {
XSavePKRU pkru_state;
} X86XSaveArea;

-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != XSAVE_AVX_OFFSET);
QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != XSAVE_BNDREG_OFFSET);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != XSAVE_BNDCSR_OFFSET);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != XSAVE_OPMASK_OFFSET);
QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != XSAVE_ZMM_HI256_OFFSET);
QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != XSAVE_HI16_ZMM_OFFSET);
QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != XSAVE_PKRU_OFFSET);
QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);

+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fcw) != XSAVE_FCW_FSW_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.ftw) != XSAVE_FTW_FOP_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fpip) != XSAVE_CWD_RIP_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fpdp) != XSAVE_CWD_RDP_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.mxcsr) != XSAVE_MXCSR_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fpregs) != XSAVE_ST_SPACE_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.xmm_regs) != XSAVE_XMM_SPACE_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != XSAVE_AVX_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != XSAVE_BNDREG_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != XSAVE_BNDCSR_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != XSAVE_OPMASK_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != XSAVE_ZMM_HI256_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != XSAVE_HI16_ZMM_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != XSAVE_PKRU_OFFSET);
+
typedef enum TPRAccess {
TPR_ACCESS_READ,
TPR_ACCESS_WRITE,
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index f5c17e0028..215487b17d 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -2350,45 +2350,6 @@ static int kvm_put_fpu(X86CPU *cpu)
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
}

-#define XSAVE_FCW_FSW 0
-#define XSAVE_FTW_FOP 1
-#define XSAVE_CWD_RIP 2
-#define XSAVE_CWD_RDP 4
-#define XSAVE_MXCSR 6
-#define XSAVE_ST_SPACE 8
-#define XSAVE_XMM_SPACE 40
-#define XSAVE_XSTATE_BV 128
-#define XSAVE_YMMH_SPACE 144
-#define XSAVE_BNDREGS 240
-#define XSAVE_BNDCSR 256
-#define XSAVE_OPMASK 272
-#define XSAVE_ZMM_Hi256 288
-#define XSAVE_Hi16_ZMM 416
-#define XSAVE_PKRU 672
-
-#define XSAVE_BYTE_OFFSET(word_offset) \
- ((word_offset) * sizeof_field(struct kvm_xsave, region[0]))
-
-#define ASSERT_OFFSET(word_offset, field) \
- QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
- offsetof(X86XSaveArea, field))
-
-ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
-ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
-ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
-ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
-ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
-ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
-ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
-ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
-ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
-ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
-ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
-ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
-ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
-ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
-ASSERT_OFFSET(XSAVE_PKRU, pkru_state);
-
static int kvm_put_xsave(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
--
2.27.0
@@ -0,0 +1,82 @@
From 2d657f560ad94d6a2459d6318d2d0a6847e6bba4 Mon Sep 17 00:00:00 2001
From: David Edmondson <david.edmondson@oracle.com>
Date: Mon, 5 Jul 2021 11:46:25 +0100
Subject: [PATCH 1/7] target/i386: Declare constants for XSAVE offsets

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
RH-MergeRequest: 113: non-av 8.5z: Fix XSAVE on newer CPUs
RH-Commit: [1/7] 85ec92995a1f2488dea8f2c3b527b1b42f1ed882
RH-Bugzilla: 2065239
RH-Acked-by: Jon Maloy <jmaloy@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Bandan Das <None>

Declare and use manifest constants for the XSAVE state component
offsets.

Signed-off-by: David Edmondson <david.edmondson@oracle.com>
Message-Id: <20210705104632.2902400-2-david.edmondson@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit ac7b7cae4e8598359a2a7557899904c9563a776a)
(cherry picked from commit 2644e6189fdf2ae5f19b5ac8cc2939e0d4e4c92f)
---
target/i386/cpu.h | 30 +++++++++++++++++++++++-------
1 file changed, 23 insertions(+), 7 deletions(-)

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 7a3aa40201..1fac5170a6 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1272,6 +1272,22 @@ typedef struct XSavePKRU {
uint32_t padding;
} XSavePKRU;

+#define XSAVE_FCW_FSW_OFFSET 0x000
+#define XSAVE_FTW_FOP_OFFSET 0x004
+#define XSAVE_CWD_RIP_OFFSET 0x008
+#define XSAVE_CWD_RDP_OFFSET 0x010
+#define XSAVE_MXCSR_OFFSET 0x018
+#define XSAVE_ST_SPACE_OFFSET 0x020
+#define XSAVE_XMM_SPACE_OFFSET 0x0a0
+#define XSAVE_XSTATE_BV_OFFSET 0x200
+#define XSAVE_AVX_OFFSET 0x240
+#define XSAVE_BNDREG_OFFSET 0x3c0
+#define XSAVE_BNDCSR_OFFSET 0x400
+#define XSAVE_OPMASK_OFFSET 0x440
+#define XSAVE_ZMM_HI256_OFFSET 0x480
+#define XSAVE_HI16_ZMM_OFFSET 0x680
+#define XSAVE_PKRU_OFFSET 0xa80
+
typedef struct X86XSaveArea {
X86LegacyXSaveArea legacy;
X86XSaveHeader header;
@@ -1292,19 +1308,19 @@ typedef struct X86XSaveArea {
XSavePKRU pkru_state;
} X86XSaveArea;

-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != 0x240);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != XSAVE_AVX_OFFSET);
QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != 0x3c0);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != XSAVE_BNDREG_OFFSET);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != 0x400);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != XSAVE_BNDCSR_OFFSET);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != 0x440);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != XSAVE_OPMASK_OFFSET);
QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != 0x480);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != XSAVE_ZMM_HI256_OFFSET);
QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != 0x680);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != XSAVE_HI16_ZMM_OFFSET);
QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != 0xA80);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != XSAVE_PKRU_OFFSET);
QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);

typedef enum TPRAccess {
--
2.27.0
@@ -0,0 +1,67 @@
From ea372e888f6fec3a34fb23975538fbdc9f14e3c6 Mon Sep 17 00:00:00 2001
From: David Edmondson <david.edmondson@oracle.com>
Date: Mon, 5 Jul 2021 11:46:29 +0100
Subject: [PATCH 5/7] target/i386: Make x86_ext_save_areas visible outside
cpu.c

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
RH-MergeRequest: 113: non-av 8.5z: Fix XSAVE on newer CPUs
RH-Commit: [5/7] a643aa829daf4b985db081fc7cd9907331e4fb3f
RH-Bugzilla: 2065239
RH-Acked-by: Jon Maloy <jmaloy@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Bandan Das <None>

Provide visibility of the x86_ext_save_areas array and associated type
outside of cpu.c.

Signed-off-by: David Edmondson <david.edmondson@oracle.com>
Message-Id: <20210705104632.2902400-6-david.edmondson@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 5aa10ab1a08e4123dee214a2f854909efb07b45b)
---
target/i386/cpu.c | 7 +------
target/i386/cpu.h | 9 +++++++++
2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index d5b0d4b7f0..a030030299 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -1504,12 +1504,7 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
};
#undef REGISTER

-typedef struct ExtSaveArea {
- uint32_t feature, bits;
- uint32_t offset, size;
-} ExtSaveArea;
-
-static const ExtSaveArea x86_ext_save_areas[] = {
+const ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
[XSTATE_FP_BIT] = {
/* x87 FP state component is always enabled if XSAVE is supported */
.feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 8daa83a6a3..cff2914203 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1337,6 +1337,15 @@ QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != XSAVE_ZMM_HI256_OFF
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != XSAVE_HI16_ZMM_OFFSET);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != XSAVE_PKRU_OFFSET);

+typedef struct ExtSaveArea {
+ uint32_t feature, bits;
+ uint32_t offset, size;
+} ExtSaveArea;
+
+#define XSAVE_STATE_AREA_COUNT (XSTATE_PKRU_BIT + 1)
+
+extern const ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT];
+
typedef enum TPRAccess {
TPR_ACCESS_READ,
TPR_ACCESS_WRITE,
--
2.27.0
341
SOURCES/kvm-target-i386-Observe-XSAVE-state-area-offsets.patch
Normal file
@@ -0,0 +1,341 @@
From d6f29071cce7c162df253a8fbfd6de691be5fff9 Mon Sep 17 00:00:00 2001
From: David Edmondson <david.edmondson@oracle.com>
Date: Mon, 5 Jul 2021 11:46:30 +0100
Subject: [PATCH 6/7] target/i386: Observe XSAVE state area offsets

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
RH-MergeRequest: 113: non-av 8.5z: Fix XSAVE on newer CPUs
RH-Commit: [6/7] 3741a121957cd10e4d160da22c056ff81b6bc62f
RH-Bugzilla: 2065239
RH-Acked-by: Jon Maloy <jmaloy@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Bandan Das <None>

Rather than relying on the X86XSaveArea structure definition directly,
the routines that manipulate the XSAVE state area should observe the
offsets declared in the x86_ext_save_areas array.

Currently the offsets declared in the array are derived from the
structure definition, resulting in no functional change.

Signed-off-by: David Edmondson <david.edmondson@oracle.com>
Message-Id: <20210705104632.2902400-7-david.edmondson@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 3568987f78faff90829ea6c885bbdd5b083dc86c)
---
target/i386/xsave_helper.c | 262 ++++++++++++++++++++++++++++---------
1 file changed, 200 insertions(+), 62 deletions(-)

diff --git a/target/i386/xsave_helper.c b/target/i386/xsave_helper.c
index b16c6ac0fe..ac61a96344 100644
--- a/target/i386/xsave_helper.c
+++ b/target/i386/xsave_helper.c
@@ -9,13 +9,20 @@
void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
{
CPUX86State *env = &cpu->env;
- X86XSaveArea *xsave = buf;
- uint16_t cwd, swd, twd;
+ const ExtSaveArea *e, *f;
int i;

- assert(buflen >= sizeof(*xsave));
+ X86LegacyXSaveArea *legacy;
+ X86XSaveHeader *header;
+ uint16_t cwd, swd, twd;
+
+ memset(buf, 0, buflen);
+
+ e = &x86_ext_save_areas[XSTATE_FP_BIT];
+
+ legacy = buf + e->offset;
+ header = buf + e->offset + sizeof(*legacy);

- memset(xsave, 0, buflen);
twd = 0;
swd = env->fpus & ~(7 << 11);
swd |= (env->fpstt & 7) << 11;
@@ -23,91 +30,222 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
for (i = 0; i < 8; ++i) {
twd |= (!env->fptags[i]) << i;
}
- xsave->legacy.fcw = cwd;
- xsave->legacy.fsw = swd;
- xsave->legacy.ftw = twd;
- xsave->legacy.fpop = env->fpop;
- xsave->legacy.fpip = env->fpip;
- xsave->legacy.fpdp = env->fpdp;
- memcpy(&xsave->legacy.fpregs, env->fpregs,
- sizeof env->fpregs);
- xsave->legacy.mxcsr = env->mxcsr;
- xsave->header.xstate_bv = env->xstate_bv;
- memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs,
- sizeof env->bnd_regs);
- xsave->bndcsr_state.bndcsr = env->bndcs_regs;
- memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs,
- sizeof env->opmask_regs);
+ legacy->fcw = cwd;
+ legacy->fsw = swd;
+ legacy->ftw = twd;
+ legacy->fpop = env->fpop;
+ legacy->fpip = env->fpip;
+ legacy->fpdp = env->fpdp;
+ memcpy(&legacy->fpregs, env->fpregs,
+ sizeof(env->fpregs));
+ legacy->mxcsr = env->mxcsr;

for (i = 0; i < CPU_NB_REGS; i++) {
- uint8_t *xmm = xsave->legacy.xmm_regs[i];
- uint8_t *ymmh = xsave->avx_state.ymmh[i];
- uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
+ uint8_t *xmm = legacy->xmm_regs[i];
+
stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
- stq_p(xmm+8, env->xmm_regs[i].ZMM_Q(1));
- stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
- stq_p(ymmh+8, env->xmm_regs[i].ZMM_Q(3));
- stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
- stq_p(zmmh+8, env->xmm_regs[i].ZMM_Q(5));
- stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
- stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
+ stq_p(xmm + 8, env->xmm_regs[i].ZMM_Q(1));
+ }
+
+ header->xstate_bv = env->xstate_bv;
+
+ e = &x86_ext_save_areas[XSTATE_YMM_BIT];
+ if (e->size && e->offset) {
+ XSaveAVX *avx;
+
+ avx = buf + e->offset;
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ uint8_t *ymmh = avx->ymmh[i];
+
+ stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
+ stq_p(ymmh + 8, env->xmm_regs[i].ZMM_Q(3));
+ }
+ }
+
+ e = &x86_ext_save_areas[XSTATE_BNDREGS_BIT];
+ if (e->size && e->offset) {
+ XSaveBNDREG *bndreg;
+ XSaveBNDCSR *bndcsr;
+
+ f = &x86_ext_save_areas[XSTATE_BNDCSR_BIT];
+ assert(f->size);
+ assert(f->offset);
+
+ bndreg = buf + e->offset;
+ bndcsr = buf + f->offset;
+
+ memcpy(&bndreg->bnd_regs, env->bnd_regs,
+ sizeof(env->bnd_regs));
+ bndcsr->bndcsr = env->bndcs_regs;
}

+ e = &x86_ext_save_areas[XSTATE_OPMASK_BIT];
+ if (e->size && e->offset) {
+ XSaveOpmask *opmask;
+ XSaveZMM_Hi256 *zmm_hi256;
+#ifdef TARGET_X86_64
+ XSaveHi16_ZMM *hi16_zmm;
+#endif
+
+ f = &x86_ext_save_areas[XSTATE_ZMM_Hi256_BIT];
+ assert(f->size);
+ assert(f->offset);
+
+ opmask = buf + e->offset;
+ zmm_hi256 = buf + f->offset;
+
+ memcpy(&opmask->opmask_regs, env->opmask_regs,
+ sizeof(env->opmask_regs));
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ uint8_t *zmmh = zmm_hi256->zmm_hi256[i];
+
+ stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
+ stq_p(zmmh + 8, env->xmm_regs[i].ZMM_Q(5));
+ stq_p(zmmh + 16, env->xmm_regs[i].ZMM_Q(6));
+ stq_p(zmmh + 24, env->xmm_regs[i].ZMM_Q(7));
+ }
+
#ifdef TARGET_X86_64
- memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
- 16 * sizeof env->xmm_regs[16]);
- memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
+ f = &x86_ext_save_areas[XSTATE_Hi16_ZMM_BIT];
+ assert(f->size);
+ assert(f->offset);
+
+ hi16_zmm = buf + f->offset;
+
+ memcpy(&hi16_zmm->hi16_zmm, &env->xmm_regs[16],
+ 16 * sizeof(env->xmm_regs[16]));
+#endif
+ }
+
+#ifdef TARGET_X86_64
+ e = &x86_ext_save_areas[XSTATE_PKRU_BIT];
+ if (e->size && e->offset) {
+ XSavePKRU *pkru = buf + e->offset;
+
+ memcpy(pkru, &env->pkru, sizeof(env->pkru));
+ }
#endif
}

void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
{
CPUX86State *env = &cpu->env;
- const X86XSaveArea *xsave = buf;
+ const ExtSaveArea *e, *f, *g;
int i;
+
+ const X86LegacyXSaveArea *legacy;
+ const X86XSaveHeader *header;
uint16_t cwd, swd, twd;

- assert(buflen >= sizeof(*xsave));
+ e = &x86_ext_save_areas[XSTATE_FP_BIT];

- cwd = xsave->legacy.fcw;
- swd = xsave->legacy.fsw;
- twd = xsave->legacy.ftw;
- env->fpop = xsave->legacy.fpop;
+ legacy = buf + e->offset;
+ header = buf + e->offset + sizeof(*legacy);
+
+ cwd = legacy->fcw;
+ swd = legacy->fsw;
+ twd = legacy->ftw;
+ env->fpop = legacy->fpop;
env->fpstt = (swd >> 11) & 7;
env->fpus = swd;
env->fpuc = cwd;
for (i = 0; i < 8; ++i) {
env->fptags[i] = !((twd >> i) & 1);
}
- env->fpip = xsave->legacy.fpip;
- env->fpdp = xsave->legacy.fpdp;
- env->mxcsr = xsave->legacy.mxcsr;
- memcpy(env->fpregs, &xsave->legacy.fpregs,
- sizeof env->fpregs);
- env->xstate_bv = xsave->header.xstate_bv;
- memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs,
- sizeof env->bnd_regs);
- env->bndcs_regs = xsave->bndcsr_state.bndcsr;
- memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs,
- sizeof env->opmask_regs);
+ env->fpip = legacy->fpip;
+ env->fpdp = legacy->fpdp;
+ env->mxcsr = legacy->mxcsr;
+ memcpy(env->fpregs, &legacy->fpregs,
+ sizeof(env->fpregs));

for (i = 0; i < CPU_NB_REGS; i++) {
- const uint8_t *xmm = xsave->legacy.xmm_regs[i];
- const uint8_t *ymmh = xsave->avx_state.ymmh[i];
- const uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
+ const uint8_t *xmm = legacy->xmm_regs[i];
+
env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
- env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
- env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
- env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
- env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
- env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
- env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
- env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
+ env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm + 8);
+ }
+
+ env->xstate_bv = header->xstate_bv;
+
+ e = &x86_ext_save_areas[XSTATE_YMM_BIT];
+ if (e->size && e->offset) {
+ const XSaveAVX *avx;
+
+ avx = buf + e->offset;
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ const uint8_t *ymmh = avx->ymmh[i];
+
+ env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
+ env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh + 8);
+ }
+ }
+
+ e = &x86_ext_save_areas[XSTATE_BNDREGS_BIT];
+ if (e->size && e->offset) {
+ const XSaveBNDREG *bndreg;
+ const XSaveBNDCSR *bndcsr;
+
+ f = &x86_ext_save_areas[XSTATE_BNDCSR_BIT];
+ assert(f->size);
+ assert(f->offset);
+
+ bndreg = buf + e->offset;
+ bndcsr = buf + f->offset;
+
+ memcpy(env->bnd_regs, &bndreg->bnd_regs,
+ sizeof(env->bnd_regs));
+ env->bndcs_regs = bndcsr->bndcsr;
}

+ e = &x86_ext_save_areas[XSTATE_OPMASK_BIT];
+ if (e->size && e->offset) {
+ const XSaveOpmask *opmask;
+ const XSaveZMM_Hi256 *zmm_hi256;
#ifdef TARGET_X86_64
- memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm,
- 16 * sizeof env->xmm_regs[16]);
- memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
+ const XSaveHi16_ZMM *hi16_zmm;
+#endif
+
+ f = &x86_ext_save_areas[XSTATE_ZMM_Hi256_BIT];
+ assert(f->size);
+ assert(f->offset);
+
+ g = &x86_ext_save_areas[XSTATE_Hi16_ZMM_BIT];
+ assert(g->size);
+ assert(g->offset);
+
+ opmask = buf + e->offset;
+ zmm_hi256 = buf + f->offset;
+#ifdef TARGET_X86_64
+ hi16_zmm = buf + g->offset;
+#endif
+
+ memcpy(env->opmask_regs, &opmask->opmask_regs,
+ sizeof(env->opmask_regs));
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ const uint8_t *zmmh = zmm_hi256->zmm_hi256[i];
+
+ env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
+ env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh + 8);
+ env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh + 16);
+ env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh + 24);
+ }
+
+#ifdef TARGET_X86_64
+ memcpy(&env->xmm_regs[16], &hi16_zmm->hi16_zmm,
+ 16 * sizeof(env->xmm_regs[16]));
+#endif
+ }
+
+#ifdef TARGET_X86_64
+ e = &x86_ext_save_areas[XSTATE_PKRU_BIT];
+ if (e->size && e->offset) {
+ const XSavePKRU *pkru;
+
+ pkru = buf + e->offset;
+ memcpy(&env->pkru, pkru, sizeof(env->pkru));
+ }
#endif
}
--
2.27.0
@@ -0,0 +1,213 @@
From 8a8d1ef278933a3c3a11f8d8c2985e0af71741b4 Mon Sep 17 00:00:00 2001
From: David Edmondson <david.edmondson@oracle.com>
Date: Mon, 5 Jul 2021 11:46:28 +0100
Subject: [PATCH 4/7] target/i386: Pass buffer and length to XSAVE helper

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
RH-MergeRequest: 113: non-av 8.5z: Fix XSAVE on newer CPUs
RH-Commit: [4/7] 77e093a6ed3928b9191b37faad3cb50b4bdd65e3
RH-Bugzilla: 2065239
RH-Acked-by: Jon Maloy <jmaloy@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Bandan Das <None>

In preparation for removing assumptions about XSAVE area offsets, pass
a buffer pointer and buffer length to the XSAVE helper functions.

Signed-off-by: David Edmondson <david.edmondson@oracle.com>
Message-Id: <20210705104632.2902400-5-david.edmondson@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit c0198c5f87b6db25712672292e01ab710d6ef631)
dgilbert: Manual merge in target/i386/hvf/x86hvf.c
---
target/i386/cpu.h | 5 +++--
target/i386/hvf/hvf.c | 3 ++-
target/i386/hvf/x86hvf.c | 19 ++++++++-----------
target/i386/kvm.c | 13 +++++++------
target/i386/xsave_helper.c | 17 +++++++++--------
5 files changed, 29 insertions(+), 28 deletions(-)

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index d586b5508d..8daa83a6a3 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1626,6 +1626,7 @@ typedef struct CPUX86State {
int64_t user_tsc_khz; /* for sanity check only */
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
void *xsave_buf;
+ uint32_t xsave_buf_len;
#endif
#if defined(CONFIG_KVM)
struct kvm_nested_state *nested_state;
@@ -2254,8 +2255,8 @@ void x86_cpu_dump_local_apic_state(CPUState *cs, int flags);
/* cpu.c */
bool cpu_is_bsp(X86CPU *cpu);

-void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf);
-void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf);
+void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen);
+void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen);
void x86_update_hflags(CPUX86State* env);

static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index d72543dc31..bbede52fb7 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -609,7 +609,8 @@ int hvf_init_vcpu(CPUState *cpu)
wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);

x86cpu = X86_CPU(cpu);
- x86cpu->env.xsave_buf = qemu_memalign(4096, 4096);
+ x86cpu->env.xsave_buf_len = 4096;
+ x86cpu->env.xsave_buf = qemu_memalign(4096, x86cpu->env.xsave_buf_len);

hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index edefe5319a..7be0582f28 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -72,14 +72,12 @@ void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg)

void hvf_put_xsave(CPUState *cpu_state)
{
+ void *xsave = X86_CPU(cpu_state)->env.xsave_buf;
+ uint32_t xsave_len = X86_CPU(cpu_state)->env.xsave_buf_len;

- struct X86XSaveArea *xsave;
+ x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave, xsave_len);

- xsave = X86_CPU(cpu_state)->env.xsave_buf;
-
- x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave);
-
- if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
+ if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, xsave, xsave_len)) {
abort();
}
}
@@ -157,15 +155,14 @@ void hvf_put_msrs(CPUState *cpu_state)

void hvf_get_xsave(CPUState *cpu_state)
{
- struct X86XSaveArea *xsave;
-
- xsave = X86_CPU(cpu_state)->env.xsave_buf;
+ void *xsave = X86_CPU(cpu_state)->env.xsave_buf;
+ uint32_t xsave_len = X86_CPU(cpu_state)->env.xsave_buf_len;

- if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
+ if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, xsave, xsave_len)) {
abort();
}

- x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave);
+ x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave, xsave_len);
}

void hvf_get_segments(CPUState *cpu_state)
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 215487b17d..8167587445 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -1826,8 +1826,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
}

if (has_xsave) {
- env->xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
- memset(env->xsave_buf, 0, sizeof(struct kvm_xsave));
+ env->xsave_buf_len = sizeof(struct kvm_xsave);
+ env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
+ memset(env->xsave_buf, 0, env->xsave_buf_len);
}

max_nested_state_len = kvm_max_nested_state_length();
@@ -2353,12 +2354,12 @@ static int kvm_put_fpu(X86CPU *cpu)
static int kvm_put_xsave(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
- X86XSaveArea *xsave = env->xsave_buf;
+ void *xsave = env->xsave_buf;

if (!has_xsave) {
return kvm_put_fpu(cpu);
}
- x86_cpu_xsave_all_areas(cpu, xsave);
+ x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len);

return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
}
@@ -2977,7 +2978,7 @@ static int kvm_get_fpu(X86CPU *cpu)
static int kvm_get_xsave(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
- X86XSaveArea *xsave = env->xsave_buf;
+ void *xsave = env->xsave_buf;
int ret;

if (!has_xsave) {
@@ -2988,7 +2989,7 @@ static int kvm_get_xsave(X86CPU *cpu)
if (ret < 0) {
return ret;
}
- x86_cpu_xrstor_all_areas(cpu, xsave);
+ x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len);

return 0;
}
diff --git a/target/i386/xsave_helper.c b/target/i386/xsave_helper.c
index 818115e7d2..b16c6ac0fe 100644
--- a/target/i386/xsave_helper.c
+++ b/target/i386/xsave_helper.c
@@ -6,14 +6,16 @@

#include "cpu.h"

-void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf)
+void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
{
CPUX86State *env = &cpu->env;
X86XSaveArea *xsave = buf;
-
uint16_t cwd, swd, twd;
int i;
- memset(xsave, 0, sizeof(X86XSaveArea));
+
+ assert(buflen >= sizeof(*xsave));
+
+ memset(xsave, 0, buflen);
twd = 0;
swd = env->fpus & ~(7 << 11);
swd |= (env->fpstt & 7) << 11;
@@ -56,17 +58,17 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf)
16 * sizeof env->xmm_regs[16]);
memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
#endif
-
}

-void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf)
+void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
{
-
CPUX86State *env = &cpu->env;
const X86XSaveArea *xsave = buf;
-
int i;
uint16_t cwd, swd, twd;
+
+ assert(buflen >= sizeof(*xsave));
+
cwd = xsave->legacy.fcw;
swd = xsave->legacy.fsw;
twd = xsave->legacy.ftw;
@@ -108,5 +110,4 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf)
16 * sizeof env->xmm_regs[16]);
memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
#endif
-
}
--
2.27.0
@@ -0,0 +1,213 @@
From ae580956fc0ed8feb794b6392df59ff5de673565 Mon Sep 17 00:00:00 2001
From: David Edmondson <david.edmondson@oracle.com>
Date: Mon, 5 Jul 2021 11:46:31 +0100
Subject: [PATCH 7/7] target/i386: Populate x86_ext_save_areas offsets using
cpuid where possible

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
RH-MergeRequest: 113: non-av 8.5z: Fix XSAVE on newer CPUs
RH-Commit: [7/7] 145fc1dd5232673e2070423a027cad38fa4c4890
RH-Bugzilla: 2065239
RH-Acked-by: Jon Maloy <jmaloy@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Bandan Das <None>

Rather than relying on the X86XSaveArea structure definition,
determine the offset of XSAVE state areas using CPUID leaf 0xd where
possible (KVM and HVF).

Signed-off-by: David Edmondson <david.edmondson@oracle.com>
Message-Id: <20210705104632.2902400-8-david.edmondson@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit fea4500841024195ec701713e05b92ebf667f192)
dgilbert: Hairy backport, since we've not got Claudio's cpu
accelerator split
---
target/i386/cpu.c | 65 +++++++++++++++++++++++++++++++++++--------
target/i386/cpu.h | 2 +-
target/i386/hvf/hvf.c | 17 ++++-------
target/i386/kvm.c | 7 +++++
4 files changed, 66 insertions(+), 25 deletions(-)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index a030030299..c52eae1f0d 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -1504,48 +1504,37 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
};
#undef REGISTER

-const ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
+ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
[XSTATE_FP_BIT] = {
/* x87 FP state component is always enabled if XSAVE is supported */
.feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
- /* x87 state is in the legacy region of the XSAVE area */
- .offset = 0,
.size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
},
[XSTATE_SSE_BIT] = {
/* SSE state component is always enabled if XSAVE is supported */
.feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
- /* SSE state is in the legacy region of the XSAVE area */
- .offset = 0,
.size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
},
[XSTATE_YMM_BIT] =
{ .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
- .offset = offsetof(X86XSaveArea, avx_state),
.size = sizeof(XSaveAVX) },
[XSTATE_BNDREGS_BIT] =
{ .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
- .offset = offsetof(X86XSaveArea, bndreg_state),
.size = sizeof(XSaveBNDREG) },
[XSTATE_BNDCSR_BIT] =
{ .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
- .offset = offsetof(X86XSaveArea, bndcsr_state),
.size = sizeof(XSaveBNDCSR) },
[XSTATE_OPMASK_BIT] =
{ .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
- .offset = offsetof(X86XSaveArea, opmask_state),
.size = sizeof(XSaveOpmask) },
[XSTATE_ZMM_Hi256_BIT] =
{ .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
- .offset = offsetof(X86XSaveArea, zmm_hi256_state),
.size = sizeof(XSaveZMM_Hi256) },
[XSTATE_Hi16_ZMM_BIT] =
{ .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
- .offset = offsetof(X86XSaveArea, hi16_zmm_state),
.size = sizeof(XSaveHi16_ZMM) },
[XSTATE_PKRU_BIT] =
{ .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
- .offset = offsetof(X86XSaveArea, pkru_state),
.size = sizeof(XSavePKRU) },
};

@@ -5237,6 +5226,52 @@ static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
assert(vdef->version == version);
}

+static void kvm_cpu_xsave_init(void)
+{
+ static bool first = true;
+ KVMState *s = kvm_state;
+ int i;
+
+ if (!first) {
+ return;
+ }
+ first = false;
+
+ /* x87 and SSE states are in the legacy region of the XSAVE area. */
+ x86_ext_save_areas[XSTATE_FP_BIT].offset = 0;
+ x86_ext_save_areas[XSTATE_SSE_BIT].offset = 0;
+
+ for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) {
+ ExtSaveArea *esa = &x86_ext_save_areas[i];
+
+ if (esa->size) {
+ int sz = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EAX);
+ if (sz != 0) {
+ assert(esa->size == sz);
+ esa->offset = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EBX);
+ }
+ }
+ }
+}
+
+static void tcg_cpu_xsave_init(void)
+{
+#define XO(bit, field) \
+ x86_ext_save_areas[bit].offset = offsetof(X86XSaveArea, field);
+
+ XO(XSTATE_FP_BIT, legacy);
+ XO(XSTATE_SSE_BIT, legacy);
+ XO(XSTATE_YMM_BIT, avx_state);
+ XO(XSTATE_BNDREGS_BIT, bndreg_state);
+ XO(XSTATE_BNDCSR_BIT, bndcsr_state);
+ XO(XSTATE_OPMASK_BIT, opmask_state);
+ XO(XSTATE_ZMM_Hi256_BIT, zmm_hi256_state);
+ XO(XSTATE_Hi16_ZMM_BIT, hi16_zmm_state);
+ XO(XSTATE_PKRU_BIT, pkru_state);
+
+#undef XO
+}
+
/* Load data from X86CPUDefinition into a X86CPU object
*/
static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp)
@@ -7147,6 +7182,12 @@ static void x86_cpu_initfn(Object *obj)
if (xcc->model) {
x86_cpu_load_model(cpu, xcc->model, &error_abort);
}
+
+ if (kvm_enabled()) {
+ kvm_cpu_xsave_init();
+ } else if (tcg_enabled()) {
+ tcg_cpu_xsave_init();
+ }
}

static int64_t x86_cpu_get_arch_id(CPUState *cs)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index cff2914203..53895a97dd 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1344,7 +1344,7 @@ typedef struct ExtSaveArea {

#define XSAVE_STATE_AREA_COUNT (XSTATE_PKRU_BIT + 1)

-extern const ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT];
+extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT];

typedef enum TPRAccess {
TPR_ACCESS_READ,
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index bbede52fb7..de29137bec 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -612,18 +612,11 @@ int hvf_init_vcpu(CPUState *cpu)
x86cpu->env.xsave_buf_len = 4096;
x86cpu->env.xsave_buf = qemu_memalign(4096, x86cpu->env.xsave_buf_len);

- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_CSTAR, 1);
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FMASK, 1);
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FSBASE, 1);
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);
+ /*
+ * The allocated storage must be large enough for all of the
+ * possible XSAVE state components.
+ */
+ assert(hvf_get_supported_cpuid(0xd, 0, R_ECX) <= x86cpu->env.xsave_buf_len);

return 0;
}
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 8167587445..548c5e94bb 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -1829,6 +1829,13 @@ int kvm_arch_init_vcpu(CPUState *cs)
env->xsave_buf_len = sizeof(struct kvm_xsave);
env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
memset(env->xsave_buf, 0, env->xsave_buf_len);
+
+ /*
+ * The allocated storage must be large enough for all of the
+ * possible XSAVE state components.
+ */
+ assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX)
+ <= env->xsave_buf_len);
}

max_nested_state_len = kvm_max_nested_state_length();
--
2.27.0
@@ -67,7 +67,7 @@ Obsoletes: %1-rhev <= %{epoch}:%{version}-%{release}
Summary: QEMU is a machine emulator and virtualizer
Name: qemu-kvm
Version: 4.2.0
Release: 59%{?dist}.2
Release: 60%{?dist}.2
# Epoch because we pushed a qemu-1.0 package. AIUI this can't ever be dropped
Epoch: 15
License: GPLv2 and GPLv2+ and CC-BY
@@ -1254,6 +1254,20 @@ Patch545: kvm-hw-scsi-scsi-disk-MODE_PAGE_ALLS-not-allowed-in-MODE.patch
Patch546: kvm-e1000-fix-tx-re-entrancy-problem.patch
# For bz#2048627 - CVE-2022-0358 virt:rhel/qemu-kvm: QEMU: virtiofsd: potential privilege escalation via CVE-2018-13405 [rhel-8.5.0.z]
Patch547: kvm-virtiofsd-Drop-membership-of-all-supplementary-group.patch
# For bz#2065239 - KVM Fedora 35 guest x86 programs randomly crash in signal handler [rhel-8.5.0.z]
Patch548: kvm-target-i386-Declare-constants-for-XSAVE-offsets.patch
# For bz#2065239 - KVM Fedora 35 guest x86 programs randomly crash in signal handler [rhel-8.5.0.z]
Patch549: kvm-target-i386-Consolidate-the-X86XSaveArea-offset-chec.patch
# For bz#2065239 - KVM Fedora 35 guest x86 programs randomly crash in signal handler [rhel-8.5.0.z]
Patch550: kvm-target-i386-Clarify-the-padding-requirements-of-X86X.patch
# For bz#2065239 - KVM Fedora 35 guest x86 programs randomly crash in signal handler [rhel-8.5.0.z]
Patch551: kvm-target-i386-Pass-buffer-and-length-to-XSAVE-helper.patch
# For bz#2065239 - KVM Fedora 35 guest x86 programs randomly crash in signal handler [rhel-8.5.0.z]
Patch552: kvm-target-i386-Make-x86_ext_save_areas-visible-outside-.patch
# For bz#2065239 - KVM Fedora 35 guest x86 programs randomly crash in signal handler [rhel-8.5.0.z]
Patch553: kvm-target-i386-Observe-XSAVE-state-area-offsets.patch
# For bz#2065239 - KVM Fedora 35 guest x86 programs randomly crash in signal handler [rhel-8.5.0.z]
Patch554: kvm-target-i386-Populate-x86_ext_save_areas-offsets-usin.patch

BuildRequires: wget
BuildRequires: rpm-build
@@ -2202,6 +2216,17 @@ useradd -r -u 107 -g qemu -G kvm -d / -s /sbin/nologin \


%changelog
* Tue Mar 22 2022 Jon Maloy <jmaloy@redhat.com> - 4.2.0-60.el8_5.2
- kvm-target-i386-Declare-constants-for-XSAVE-offsets.patch [bz#2065239]
- kvm-target-i386-Consolidate-the-X86XSaveArea-offset-chec.patch [bz#2065239]
- kvm-target-i386-Clarify-the-padding-requirements-of-X86X.patch [bz#2065239]
- kvm-target-i386-Pass-buffer-and-length-to-XSAVE-helper.patch [bz#2065239]
- kvm-target-i386-Make-x86_ext_save_areas-visible-outside-.patch [bz#2065239]
- kvm-target-i386-Observe-XSAVE-state-area-offsets.patch [bz#2065239]
- kvm-target-i386-Populate-x86_ext_save_areas-offsets-usin.patch [bz#2065239]
- Resolves: bz#2065239
(KVM Fedora 35 guest x86 programs randomly crash in signal handler [rhel-8.5.0.z])

* Tue Feb 08 2022 Jon Maloy <jmaloy@redhat.com> - 4.2.0-59.el8_5.2
- kvm-virtiofsd-Drop-membership-of-all-supplementary-group.patch [bz#2048627]
- Resolves: bz#2048627