qemu-kvm/kvm-migration-add-speed-limit-for-multifd-migration.patch
Commit 200e3560ab by Danilo C. L. de Paula, 2019-09-10 19:56:34 +01:00

* Tue Sep 10 2019 Danilo Cesar Lemes de Paula <ddepaula@redhat.com> - 4.1.0-9.el8
- kvm-migration-always-initialise-ram_counters-for-a-new-m.patch [bz#1734316]
- kvm-migration-add-qemu_file_update_transfer-interface.patch [bz#1734316]
- kvm-migration-add-speed-limit-for-multifd-migration.patch [bz#1734316]
- kvm-migration-update-ram_counters-for-multifd-sync-packe.patch [bz#1734316]
- kvm-spapr-pci-Consolidate-de-allocation-of-MSIs.patch [bz#1750200]
- kvm-spapr-pci-Free-MSIs-during-reset.patch [bz#1750200]
- Resolves: bz#1734316
  (multifd migration does not honour speed limits, consumes entire bandwidth of NIC)
- Resolves: bz#1750200
  ([RHEL8.1][QEMU4.1] boot up guest with vf device, then system_reset guest, error prompt (qemu-kvm: Can't allocate MSIs for device 2800: IRQ 4904 is not free))

From d2ade4bec79bdfe6f0867b0672c6731bc1664b42 Mon Sep 17 00:00:00 2001
From: Juan Quintela <quintela@redhat.com>
Date: Wed, 4 Sep 2019 11:23:31 +0100
Subject: [PATCH 3/6] migration: add speed limit for multifd migration

RH-Author: Juan Quintela <quintela@redhat.com>
Message-id: <20190904112332.16160-4-quintela@redhat.com>
Patchwork-id: 90279
O-Subject: [RHEL-AV-8.1 qemu-kvm PATCH v2 3/4] migration: add speed limit for multifd migration
Bugzilla: 1734316
RH-Acked-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
RH-Acked-by: Peter Xu <peterx@redhat.com>
RH-Acked-by: Danilo de Paula <ddepaula@redhat.com>

From: Ivan Ren <ivanren@tencent.com>

Limit the speed of multifd migration through the common speed-limitation
mechanism of the migration QEMUFile.
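
This charges every byte queued on a multifd channel to the migration
QEMUFile via qemu_file_update_transfer(), so the bandwidth check done by
the migration thread now accounts for multifd traffic as well. The
self-contained sketch below only illustrates that accounting idea; the
RateLimit type and rate_limit_* helpers are hypothetical names, not QEMU
APIs:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        uint64_t bytes_xfer;  /* bytes accounted in the current period */
        uint64_t xfer_limit;  /* bytes allowed per period (from max bandwidth) */
    } RateLimit;

    /* Account data that is about to be sent, including multifd packets. */
    static void rate_limit_update_transfer(RateLimit *rl, uint64_t len)
    {
        rl->bytes_xfer += len;
    }

    /* Polled by the sender, which throttles while this returns true. */
    static bool rate_limit_exceeded(const RateLimit *rl)
    {
        return rl->bytes_xfer >= rl->xfer_limit;
    }

    /* Reset once per accounting period. */
    static void rate_limit_reset(RateLimit *rl)
    {
        rl->bytes_xfer = 0;
    }

With multifd bytes accounted this way, the usual bandwidth cap (for
example HMP "migrate_set_parameter max-bandwidth 128M", or the
equivalent QMP migrate-set-parameters command) is honoured on the
multifd channels too.
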
Signed-off-by: Ivan Ren <ivanren@tencent.com>
Message-Id: <1564464816-21804-3-git-send-email-ivanren@tencent.com>
Reviewed-by: Wei Yang <richardw.yang@linux.intel.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 1b81c974ccfd536aceef840e220912b142a7dda0)
Signed-off-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 migration/ram.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 889148d..88ddd2b 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -922,7 +922,7 @@ struct {
* false.
*/
-static int multifd_send_pages(void)
+static int multifd_send_pages(RAMState *rs)
{
int i;
static int next_channel;
@@ -954,6 +954,7 @@ static int multifd_send_pages(void)
multifd_send_state->pages = p->pages;
p->pages = pages;
transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
+ qemu_file_update_transfer(rs->f, transferred);
ram_counters.multifd_bytes += transferred;
ram_counters.transferred += transferred;;
qemu_mutex_unlock(&p->mutex);
@@ -962,7 +963,7 @@ static int multifd_send_pages(void)
return 1;
}
-static int multifd_queue_page(RAMBlock *block, ram_addr_t offset)
+static int multifd_queue_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
{
MultiFDPages_t *pages = multifd_send_state->pages;
@@ -981,12 +982,12 @@ static int multifd_queue_page(RAMBlock *block, ram_addr_t offset)
}
}
- if (multifd_send_pages() < 0) {
+ if (multifd_send_pages(rs) < 0) {
return -1;
}
if (pages->block != block) {
- return multifd_queue_page(block, offset);
+ return multifd_queue_page(rs, block, offset);
}
return 1;
@@ -1054,7 +1055,7 @@ void multifd_save_cleanup(void)
multifd_send_state = NULL;
}
-static void multifd_send_sync_main(void)
+static void multifd_send_sync_main(RAMState *rs)
{
int i;
@@ -1062,7 +1063,7 @@ static void multifd_send_sync_main(void)
return;
}
if (multifd_send_state->pages->used) {
- if (multifd_send_pages() < 0) {
+ if (multifd_send_pages(rs) < 0) {
error_report("%s: multifd_send_pages fail", __func__);
return;
}
@@ -1083,6 +1084,7 @@ static void multifd_send_sync_main(void)
p->packet_num = multifd_send_state->packet_num++;
p->flags |= MULTIFD_FLAG_SYNC;
p->pending_job++;
+ qemu_file_update_transfer(rs->f, p->packet_len);
qemu_mutex_unlock(&p->mutex);
qemu_sem_post(&p->sem);
}
@@ -2079,7 +2081,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
ram_addr_t offset)
{
- if (multifd_queue_page(block, offset) < 0) {
+ if (multifd_queue_page(rs, block, offset) < 0) {
return -1;
}
ram_counters.normal++;
@@ -3482,7 +3484,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
ram_control_before_iterate(f, RAM_CONTROL_SETUP);
ram_control_after_iterate(f, RAM_CONTROL_SETUP);
- multifd_send_sync_main();
+ multifd_send_sync_main(*rsp);
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
qemu_fflush(f);
@@ -3570,7 +3572,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
ram_control_after_iterate(f, RAM_CONTROL_ROUND);
out:
- multifd_send_sync_main();
+ multifd_send_sync_main(rs);
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
qemu_fflush(f);
ram_counters.transferred += 8;
@@ -3629,7 +3631,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
rcu_read_unlock();
- multifd_send_sync_main();
+ multifd_send_sync_main(rs);
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
qemu_fflush(f);
--
1.8.3.1