From d2ade4bec79bdfe6f0867b0672c6731bc1664b42 Mon Sep 17 00:00:00 2001
From: Juan Quintela
Date: Wed, 4 Sep 2019 11:23:31 +0100
Subject: [PATCH 3/6] migration: add speed limit for multifd migration

RH-Author: Juan Quintela
Message-id: <20190904112332.16160-4-quintela@redhat.com>
Patchwork-id: 90279
O-Subject: [RHEL-AV-8.1 qemu-kvm PATCH v2 3/4] migration: add speed limit for multifd migration
Bugzilla: 1734316
RH-Acked-by: Dr. David Alan Gilbert
RH-Acked-by: Peter Xu
RH-Acked-by: Danilo de Paula

From: Ivan Ren

Limit the speed of multifd migration through common speed limitation
qemu file.

Signed-off-by: Ivan Ren
Message-Id: <1564464816-21804-3-git-send-email-ivanren@tencent.com>
Reviewed-by: Wei Yang
Reviewed-by: Juan Quintela
Signed-off-by: Dr. David Alan Gilbert
(cherry picked from commit 1b81c974ccfd536aceef840e220912b142a7dda0)
Signed-off-by: Juan Quintela
Signed-off-by: Danilo C. L. de Paula
---
 migration/ram.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 889148d..88ddd2b 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -922,7 +922,7 @@ struct {
  * false.
  */

-static int multifd_send_pages(void)
+static int multifd_send_pages(RAMState *rs)
 {
     int i;
     static int next_channel;
@@ -954,6 +954,7 @@ static int multifd_send_pages(void)
     multifd_send_state->pages = p->pages;
     p->pages = pages;
     transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
+    qemu_file_update_transfer(rs->f, transferred);
     ram_counters.multifd_bytes += transferred;
     ram_counters.transferred += transferred;;
     qemu_mutex_unlock(&p->mutex);
@@ -962,7 +963,7 @@ static int multifd_send_pages(void)
     return 1;
 }

-static int multifd_queue_page(RAMBlock *block, ram_addr_t offset)
+static int multifd_queue_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
 {
     MultiFDPages_t *pages = multifd_send_state->pages;

@@ -981,12 +982,12 @@ static int multifd_queue_page(RAMBlock *block, ram_addr_t offset)
         }
     }

-    if (multifd_send_pages() < 0) {
+    if (multifd_send_pages(rs) < 0) {
         return -1;
     }

     if (pages->block != block) {
-        return multifd_queue_page(block, offset);
+        return multifd_queue_page(rs, block, offset);
     }

     return 1;
@@ -1054,7 +1055,7 @@ void multifd_save_cleanup(void)
     multifd_send_state = NULL;
 }

-static void multifd_send_sync_main(void)
+static void multifd_send_sync_main(RAMState *rs)
 {
     int i;

@@ -1062,7 +1063,7 @@ static void multifd_send_sync_main(void)
         return;
     }
     if (multifd_send_state->pages->used) {
-        if (multifd_send_pages() < 0) {
+        if (multifd_send_pages(rs) < 0) {
             error_report("%s: multifd_send_pages fail", __func__);
             return;
         }
@@ -1083,6 +1084,7 @@ static void multifd_send_sync_main(void)
         p->packet_num = multifd_send_state->packet_num++;
         p->flags |= MULTIFD_FLAG_SYNC;
         p->pending_job++;
+        qemu_file_update_transfer(rs->f, p->packet_len);
         qemu_mutex_unlock(&p->mutex);
         qemu_sem_post(&p->sem);
     }
@@ -2079,7 +2081,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
 static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
                                  ram_addr_t offset)
 {
-    if (multifd_queue_page(block, offset) < 0) {
+    if (multifd_queue_page(rs, block, offset) < 0) {
         return -1;
     }
     ram_counters.normal++;
@@ -3482,7 +3484,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
     ram_control_after_iterate(f, RAM_CONTROL_SETUP);

-    multifd_send_sync_main();
+    multifd_send_sync_main(*rsp);
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
     qemu_fflush(f);

@@ -3570,7 +3572,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     ram_control_after_iterate(f, RAM_CONTROL_ROUND);

 out:
-    multifd_send_sync_main();
+    multifd_send_sync_main(rs);
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
     qemu_fflush(f);
     ram_counters.transferred += 8;
@@ -3629,7 +3631,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)

     rcu_read_unlock();

-    multifd_send_sync_main();
+    multifd_send_sync_main(rs);
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
     qemu_fflush(f);

--
1.8.3.1
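
For context on how the two qemu_file_update_transfer() calls above throttle
multifd: before this patch only bytes written through the main QEMUFile were
charged against the migration bandwidth budget, so pages sent over the extra
multifd channels escaped the max-bandwidth limit. The patch threads RAMState
through so the multifd sender can charge its packet bytes to the same counter
that ram_save_iterate() checks. The snippet below is a standalone sketch of
that accounting, not code from this patch or from migration/qemu-file.c; the
struct, field names and helper names are illustrative assumptions based on
what the rest of this series adds.

#include <stdint.h>
#include <stdio.h>

/* Sketch only: stand-in for the rate-limit state kept in the main
 * channel's QEMUFile.  Names are illustrative, not QEMU's API. */
typedef struct {
    int64_t bytes_xfer;  /* bytes charged in the current iteration slice */
    int64_t xfer_limit;  /* per-slice budget derived from max-bandwidth   */
} RateLimitSketch;

/* Assumed behaviour of the qemu_file_update_transfer() helper added
 * earlier in this series: add the bytes to the shared counter, so
 * multifd traffic consumes the same budget as the main channel. */
static void sketch_update_transfer(RateLimitSketch *f, int64_t len)
{
    f->bytes_xfer += len;
}

/* Assumed behaviour of the existing limiter consulted by
 * ram_save_iterate(): once the slice budget is exhausted, page
 * queueing pauses until the counter is reset for the next slice. */
static int sketch_rate_limit(const RateLimitSketch *f)
{
    return f->xfer_limit > 0 && f->bytes_xfer > f->xfer_limit;
}

int main(void)
{
    RateLimitSketch f = { .bytes_xfer = 0, .xfer_limit = 4096 };

    /* One multifd send: page payload plus packet header, as in
     * multifd_send_pages() above. */
    sketch_update_transfer(&f, 4096 + 64);
    printf("throttle now? %s\n", sketch_rate_limit(&f) ? "yes" : "no");
    return 0;
}

With that accounting in place, the existing rate-limit check on the main
channel is enough to slow the whole migration down, which is why the patch
only needs to add the two charge points rather than a separate limiter per
multifd channel.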