550d33ded2
- kvm-qapi-machine.json-Add-cluster-id.patch [bz#2041823]
- kvm-qtest-numa-test-Specify-CPU-topology-in-aarch64_numa.patch [bz#2041823]
- kvm-hw-arm-virt-Consider-SMP-configuration-in-CPU-topolo.patch [bz#2041823]
- kvm-qtest-numa-test-Correct-CPU-and-NUMA-association-in-.patch [bz#2041823]
- kvm-hw-arm-virt-Fix-CPU-s-default-NUMA-node-ID.patch [bz#2041823]
- kvm-hw-acpi-aml-build-Use-existing-CPU-topology-to-build.patch [bz#2041823]
- kvm-coroutine-Rename-qemu_coroutine_inc-dec_pool_size.patch [bz#2079938]
- kvm-coroutine-Revert-to-constant-batch-size.patch [bz#2079938]
- kvm-virtio-scsi-fix-ctrl-and-event-handler-functions-in-.patch [bz#2079347]
- kvm-virtio-scsi-don-t-waste-CPU-polling-the-event-virtqu.patch [bz#2079347]
- kvm-virtio-scsi-clean-up-virtio_scsi_handle_event_vq.patch [bz#2079347]
- kvm-virtio-scsi-clean-up-virtio_scsi_handle_ctrl_vq.patch [bz#2079347]
- kvm-virtio-scsi-clean-up-virtio_scsi_handle_cmd_vq.patch [bz#2079347]
- kvm-virtio-scsi-move-request-related-items-from-.h-to-.c.patch [bz#2079347]
- kvm-Revert-virtio-scsi-Reject-scsi-cd-if-data-plane-enab.patch [bz#1995710]
- kvm-migration-Fix-operator-type.patch [bz#2064530]
- Resolves: bz#2041823 ([aarch64][numa] When there are at least 6 Numa nodes serial log shows 'arch topology borken')
- Resolves: bz#2079938 (qemu coredump when boot with multi disks (qemu) failed to set up stack guard page: Cannot allocate memory)
- Resolves: bz#2079347 (Guest boot blocked when scsi disks using same iothread and 100% CPU consumption)
- Resolves: bz#1995710 (RFE: Allow virtio-scsi CD-ROM media change with IOThreads)
- Resolves: bz#2064530 (Rebuild qemu-kvm with clang-14)
From 8a12049e97149056f61f7748d9869606d282d16e Mon Sep 17 00:00:00 2001
From: Gavin Shan <gshan@redhat.com>
Date: Wed, 11 May 2022 18:01:35 +0800
Subject: [PATCH 06/16] hw/acpi/aml-build: Use existing CPU topology to build
 PPTT table

RH-Author: Gavin Shan <gshan@redhat.com>
RH-MergeRequest: 86: hw/arm/virt: Fix the default CPU topology
RH-Commit: [6/6] 53fa376531c204cf706cc1a7a0499019756106cb (gwshan/qemu-rhel-9)
RH-Bugzilla: 2041823
RH-Acked-by: Eric Auger <eric.auger@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: Andrew Jones <drjones@redhat.com>

Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2041823

When the PPTT table is built, the CPU topology is re-calculated, but
it's unnecessary because the CPU topology has already been populated in
virt_possible_cpu_arch_ids() on the arm/virt machine.

This reworks build_pptt() to avoid the re-calculation by reusing the
existing IDs in ms->possible_cpus. Currently, the only user of
build_pptt() is the arm/virt machine.

Signed-off-by: Gavin Shan <gshan@redhat.com>
Tested-by: Yanan Wang <wangyanan55@huawei.com>
Reviewed-by: Yanan Wang <wangyanan55@huawei.com>
Acked-by: Igor Mammedov <imammedo@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Message-id: 20220503140304.855514-7-gshan@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
(cherry picked from commit ae9141d4a3265553503bf07d3574b40f84615a34)
Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 hw/acpi/aml-build.c | 111 +++++++++++++++++++-------------------------
 1 file changed, 48 insertions(+), 63 deletions(-)

diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index 4086879ebf..e6bfac95c7 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -2002,86 +2002,71 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
                 const char *oem_id, const char *oem_table_id)
 {
     MachineClass *mc = MACHINE_GET_CLASS(ms);
-    GQueue *list = g_queue_new();
-    guint pptt_start = table_data->len;
-    guint parent_offset;
-    guint length, i;
-    int uid = 0;
-    int socket;
+    CPUArchIdList *cpus = ms->possible_cpus;
+    int64_t socket_id = -1, cluster_id = -1, core_id = -1;
+    uint32_t socket_offset = 0, cluster_offset = 0, core_offset = 0;
+    uint32_t pptt_start = table_data->len;
+    int n;
     AcpiTable table = { .sig = "PPTT", .rev = 2,
                         .oem_id = oem_id, .oem_table_id = oem_table_id };
 
     acpi_table_begin(&table, table_data);
 
-    for (socket = 0; socket < ms->smp.sockets; socket++) {
-        g_queue_push_tail(list,
-            GUINT_TO_POINTER(table_data->len - pptt_start));
-        build_processor_hierarchy_node(
-            table_data,
-            /*
-             * Physical package - represents the boundary
-             * of a physical package
-             */
-            (1 << 0),
-            0, socket, NULL, 0);
-    }
-
-    if (mc->smp_props.clusters_supported) {
-        length = g_queue_get_length(list);
-        for (i = 0; i < length; i++) {
-            int cluster;
-
-            parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-            for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
-                g_queue_push_tail(list,
-                    GUINT_TO_POINTER(table_data->len - pptt_start));
-                build_processor_hierarchy_node(
-                    table_data,
-                    (0 << 0), /* not a physical package */
-                    parent_offset, cluster, NULL, 0);
-            }
+    /*
+     * This works with the assumption that cpus[n].props.*_id has been
+     * sorted from top to down levels in mc->possible_cpu_arch_ids().
+     * Otherwise, the unexpected and duplicated containers will be
+     * created.
+     */
+    for (n = 0; n < cpus->len; n++) {
+        if (cpus->cpus[n].props.socket_id != socket_id) {
+            assert(cpus->cpus[n].props.socket_id > socket_id);
+            socket_id = cpus->cpus[n].props.socket_id;
+            cluster_id = -1;
+            core_id = -1;
+            socket_offset = table_data->len - pptt_start;
+            build_processor_hierarchy_node(table_data,
+                (1 << 0), /* Physical package */
+                0, socket_id, NULL, 0);
         }
-    }
 
-    length = g_queue_get_length(list);
-    for (i = 0; i < length; i++) {
-        int core;
-
-        parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-        for (core = 0; core < ms->smp.cores; core++) {
-            if (ms->smp.threads > 1) {
-                g_queue_push_tail(list,
-                    GUINT_TO_POINTER(table_data->len - pptt_start));
-                build_processor_hierarchy_node(
-                    table_data,
-                    (0 << 0), /* not a physical package */
-                    parent_offset, core, NULL, 0);
-            } else {
-                build_processor_hierarchy_node(
-                    table_data,
-                    (1 << 1) | /* ACPI Processor ID valid */
-                    (1 << 3), /* Node is a Leaf */
-                    parent_offset, uid++, NULL, 0);
+        if (mc->smp_props.clusters_supported) {
+            if (cpus->cpus[n].props.cluster_id != cluster_id) {
+                assert(cpus->cpus[n].props.cluster_id > cluster_id);
+                cluster_id = cpus->cpus[n].props.cluster_id;
+                core_id = -1;
+                cluster_offset = table_data->len - pptt_start;
+                build_processor_hierarchy_node(table_data,
+                    (0 << 0), /* Not a physical package */
+                    socket_offset, cluster_id, NULL, 0);
            }
+        } else {
+            cluster_offset = socket_offset;
         }
-    }
 
-    length = g_queue_get_length(list);
-    for (i = 0; i < length; i++) {
-        int thread;
+        if (ms->smp.threads == 1) {
+            build_processor_hierarchy_node(table_data,
+                (1 << 1) | /* ACPI Processor ID valid */
+                (1 << 3), /* Node is a Leaf */
+                cluster_offset, n, NULL, 0);
+        } else {
+            if (cpus->cpus[n].props.core_id != core_id) {
+                assert(cpus->cpus[n].props.core_id > core_id);
+                core_id = cpus->cpus[n].props.core_id;
+                core_offset = table_data->len - pptt_start;
+                build_processor_hierarchy_node(table_data,
+                    (0 << 0), /* Not a physical package */
+                    cluster_offset, core_id, NULL, 0);
+            }
 
-        parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-        for (thread = 0; thread < ms->smp.threads; thread++) {
-            build_processor_hierarchy_node(
-                table_data,
+            build_processor_hierarchy_node(table_data,
                 (1 << 1) | /* ACPI Processor ID valid */
                 (1 << 2) | /* Processor is a Thread */
                 (1 << 3), /* Node is a Leaf */
-                parent_offset, uid++, NULL, 0);
+                core_offset, n, NULL, 0);
         }
     }
 
-    g_queue_free(list);
     acpi_table_end(linker, &table);
 }
-- 
2.31.1

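For illustration only, a minimal standalone C sketch of the traversal pattern the reworked build_pptt() relies on: walk CPU entries that are pre-sorted by socket/cluster/core ID and open a new topology container only when an ID changes at that level. This is not QEMU code; the struct, sample data, and printf output are hypothetical, and the per-thread/per-core leaf distinction in the real patch is simplified to one leaf per entry.

/*
 * Illustrative sketch (hypothetical data, not the QEMU API): mirrors the
 * "emit a container only when the ID changes" logic used by build_pptt().
 */
#include <assert.h>
#include <stdio.h>

struct cpu_props {
    long socket_id;
    long cluster_id;
    long core_id;
};

int main(void)
{
    /* Hypothetical possible_cpus list, sorted from socket down to core. */
    struct cpu_props cpus[] = {
        {0, 0, 0}, {0, 0, 1}, {0, 1, 0}, {0, 1, 1},
        {1, 0, 0}, {1, 0, 1}, {1, 1, 0}, {1, 1, 1},
    };
    int len = sizeof(cpus) / sizeof(cpus[0]);
    long socket_id = -1, cluster_id = -1, core_id = -1;

    for (int n = 0; n < len; n++) {
        if (cpus[n].socket_id != socket_id) {
            /* Relies on the list being sorted, as the real code asserts. */
            assert(cpus[n].socket_id > socket_id);
            socket_id = cpus[n].socket_id;
            cluster_id = -1;
            core_id = -1;
            printf("socket container %ld\n", socket_id);
        }
        if (cpus[n].cluster_id != cluster_id) {
            cluster_id = cpus[n].cluster_id;
            core_id = -1;
            printf("  cluster container %ld\n", cluster_id);
        }
        if (cpus[n].core_id != core_id) {
            core_id = cpus[n].core_id;
            printf("    core container %ld\n", core_id);
        }
        printf("      leaf node for cpu %d\n", n);
    }
    return 0;
}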