550d33ded2
- kvm-qapi-machine.json-Add-cluster-id.patch [bz#2041823] - kvm-qtest-numa-test-Specify-CPU-topology-in-aarch64_numa.patch [bz#2041823] - kvm-hw-arm-virt-Consider-SMP-configuration-in-CPU-topolo.patch [bz#2041823] - kvm-qtest-numa-test-Correct-CPU-and-NUMA-association-in-.patch [bz#2041823] - kvm-hw-arm-virt-Fix-CPU-s-default-NUMA-node-ID.patch [bz#2041823] - kvm-hw-acpi-aml-build-Use-existing-CPU-topology-to-build.patch [bz#2041823] - kvm-coroutine-Rename-qemu_coroutine_inc-dec_pool_size.patch [bz#2079938] - kvm-coroutine-Revert-to-constant-batch-size.patch [bz#2079938] - kvm-virtio-scsi-fix-ctrl-and-event-handler-functions-in-.patch [bz#2079347] - kvm-virtio-scsi-don-t-waste-CPU-polling-the-event-virtqu.patch [bz#2079347] - kvm-virtio-scsi-clean-up-virtio_scsi_handle_event_vq.patch [bz#2079347] - kvm-virtio-scsi-clean-up-virtio_scsi_handle_ctrl_vq.patch [bz#2079347] - kvm-virtio-scsi-clean-up-virtio_scsi_handle_cmd_vq.patch [bz#2079347] - kvm-virtio-scsi-move-request-related-items-from-.h-to-.c.patch [bz#2079347] - kvm-Revert-virtio-scsi-Reject-scsi-cd-if-data-plane-enab.patch [bz#1995710] - kvm-migration-Fix-operator-type.patch [bz#2064530] - Resolves: bz#2041823 ([aarch64][numa] When there are at least 6 Numa nodes serial log shows 'arch topology borken') - Resolves: bz#2079938 (qemu coredump when boot with multi disks (qemu) failed to set up stack guard page: Cannot allocate memory) - Resolves: bz#2079347 (Guest boot blocked when scsi disks using same iothread and 100% CPU consumption) - Resolves: bz#1995710 (RFE: Allow virtio-scsi CD-ROM media change with IOThreads) - Resolves: bz#2064530 (Rebuild qemu-kvm with clang-14)
101 lines
3.9 KiB
Diff
101 lines
3.9 KiB
Diff
From a039ed652e6d2f5edcef9d5d1d3baec17ce7f929 Mon Sep 17 00:00:00 2001
|
|
From: Gavin Shan <gshan@redhat.com>
|
|
Date: Wed, 11 May 2022 18:01:35 +0800
|
|
Subject: [PATCH 04/16] qtest/numa-test: Correct CPU and NUMA association in
|
|
aarch64_numa_cpu()
|
|
|
|
RH-Author: Gavin Shan <gshan@redhat.com>
|
|
RH-MergeRequest: 86: hw/arm/virt: Fix the default CPU topology
|
|
RH-Commit: [4/6] 64e9908a179eb4fb586d662f70f275a81808e50c (gwshan/qemu-rhel-9)
|
|
RH-Bugzilla: 2041823
|
|
RH-Acked-by: Eric Auger <eric.auger@redhat.com>
|
|
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
|
|
RH-Acked-by: Andrew Jones <drjones@redhat.com>
|
|
|
|
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2041823
|
|
|
|
In aarch64_numa_cpu(), the CPU and NUMA association is something
|
|
like below. Two threads in the same core/cluster/socket are
|
|
associated with two individual NUMA nodes, which is unrealistic, as
|
|
Igor Mammedov mentioned. We don't expect the association to break
|
|
the NUMA-to-socket boundary, which matches the real world.
|
|
|
|
NUMA-node socket cluster core thread
|
|
------------------------------------------
|
|
0 0 0 0 0
|
|
1 0 0 0 1
|
|
|
|
This corrects the topology for CPUs and their association with
|
|
NUMA nodes. After this patch is applied, the CPU and NUMA
|
|
association becomes something like below, which looks realistic.
|
|
Besides, socket/cluster/core/thread IDs are all checked when
|
|
the NUMA node IDs are verified. It helps to check if the CPU
|
|
topology is properly populated or not.
|
|
|
|
NUMA-node socket cluster core thread
|
|
------------------------------------------
|
|
0 1 0 0 0
|
|
1 0 0 0 0
|
|
|
|
Suggested-by: Igor Mammedov <imammedo@redhat.com>
|
|
Signed-off-by: Gavin Shan <gshan@redhat.com>
|
|
Acked-by: Igor Mammedov <imammedo@redhat.com>
|
|
Message-id: 20220503140304.855514-5-gshan@redhat.com
|
|
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
|
|
(cherry picked from commit e280ecb39bc1629f74ea5479d464fd1608dc8f76)
|
|
Signed-off-by: Gavin Shan <gshan@redhat.com>
|
|
---
|
|
tests/qtest/numa-test.c | 18 ++++++++++++------
|
|
1 file changed, 12 insertions(+), 6 deletions(-)
|
|
|
|
diff --git a/tests/qtest/numa-test.c b/tests/qtest/numa-test.c
|
|
index aeda8c774c..32e35daaae 100644
|
|
--- a/tests/qtest/numa-test.c
|
|
+++ b/tests/qtest/numa-test.c
|
|
@@ -224,17 +224,17 @@ static void aarch64_numa_cpu(const void *data)
|
|
g_autofree char *cli = NULL;
|
|
|
|
cli = make_cli(data, "-machine "
|
|
- "smp.cpus=2,smp.sockets=1,smp.clusters=1,smp.cores=1,smp.threads=2 "
|
|
+ "smp.cpus=2,smp.sockets=2,smp.clusters=1,smp.cores=1,smp.threads=1 "
|
|
"-numa node,nodeid=0,memdev=ram -numa node,nodeid=1 "
|
|
- "-numa cpu,node-id=1,thread-id=0 "
|
|
- "-numa cpu,node-id=0,thread-id=1");
|
|
+ "-numa cpu,node-id=0,socket-id=1,cluster-id=0,core-id=0,thread-id=0 "
|
|
+ "-numa cpu,node-id=1,socket-id=0,cluster-id=0,core-id=0,thread-id=0");
|
|
qts = qtest_init(cli);
|
|
cpus = get_cpus(qts, &resp);
|
|
g_assert(cpus);
|
|
|
|
while ((e = qlist_pop(cpus))) {
|
|
QDict *cpu, *props;
|
|
- int64_t thread, node;
|
|
+ int64_t socket, cluster, core, thread, node;
|
|
|
|
cpu = qobject_to(QDict, e);
|
|
g_assert(qdict_haskey(cpu, "props"));
|
|
@@ -242,12 +242,18 @@ static void aarch64_numa_cpu(const void *data)
|
|
|
|
g_assert(qdict_haskey(props, "node-id"));
|
|
node = qdict_get_int(props, "node-id");
|
|
+ g_assert(qdict_haskey(props, "socket-id"));
|
|
+ socket = qdict_get_int(props, "socket-id");
|
|
+ g_assert(qdict_haskey(props, "cluster-id"));
|
|
+ cluster = qdict_get_int(props, "cluster-id");
|
|
+ g_assert(qdict_haskey(props, "core-id"));
|
|
+ core = qdict_get_int(props, "core-id");
|
|
g_assert(qdict_haskey(props, "thread-id"));
|
|
thread = qdict_get_int(props, "thread-id");
|
|
|
|
- if (thread == 0) {
|
|
+ if (socket == 0 && cluster == 0 && core == 0 && thread == 0) {
|
|
g_assert_cmpint(node, ==, 1);
|
|
- } else if (thread == 1) {
|
|
+ } else if (socket == 1 && cluster == 0 && core == 0 && thread == 0) {
|
|
g_assert_cmpint(node, ==, 0);
|
|
} else {
|
|
g_assert(false);
|
|
--
|
|
2.31.1
|
|
|