qemu-kvm/SOURCES/kvm-libvhost-user-Fix-some-memtable-remap-cases.patch


From ee360b70f179cf540faebe7e55b34e323e2bb179 Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Date: Mon, 27 Jan 2020 19:02:09 +0100
Subject: [PATCH 098/116] libvhost-user: Fix some memtable remap cases
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-id: <20200127190227.40942-95-dgilbert@redhat.com>
Patchwork-id: 93548
O-Subject: [RHEL-AV-8.2 qemu-kvm PATCH 094/112] libvhost-user: Fix some memtable remap cases
Bugzilla: 1694164
RH-Acked-by: Philippe Mathieu-Daudé <philmd@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Sergio Lopez Pascual <slp@redhat.com>
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
If a new setmemtable command comes in once the vhost threads are
running, it will remap the guests address space and the threads
will now be looking in the wrong place.
Fortunately we're running this command under lock, so we can
update the queue mappings so that threads will look in the new-right
place.
Note: This doesn't fix things that the threads might be doing
without a lock (e.g. a readv/writev!) That's for another time.
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 49e9ec749d4db62ae51f76354143cee183912a1d)
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 contrib/libvhost-user/libvhost-user.c | 33 +++++++++++++++++++++++++--------
 contrib/libvhost-user/libvhost-user.h |  3 +++
 2 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 63e4106..b89bf18 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -565,6 +565,21 @@ vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
 }
 
 static bool
+map_ring(VuDev *dev, VuVirtq *vq)
+{
+    vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr);
+    vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr);
+    vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr);
+
+    DPRINT("Setting virtq addresses:\n");
+    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
+    DPRINT("    vring_used  at %p\n", vq->vring.used);
+    DPRINT("    vring_avail at %p\n", vq->vring.avail);
+
+    return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
+}
+
+static bool
 vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
 {
     int i;
@@ -767,6 +782,14 @@ vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
         close(vmsg->fds[i]);
     }
 
+    for (i = 0; i < dev->max_queues; i++) {
+        if (dev->vq[i].vring.desc) {
+            if (map_ring(dev, &dev->vq[i])) {
+                vu_panic(dev, "remaping queue %d during setmemtable", i);
+            }
+        }
+    }
+
     return false;
 }
@@ -853,18 +876,12 @@ vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
DPRINT(" avail_user_addr: 0x%016" PRIx64 "\n", vra->avail_user_addr);
DPRINT(" log_guest_addr: 0x%016" PRIx64 "\n", vra->log_guest_addr);
+ vq->vra = *vra;
vq->vring.flags = vra->flags;
- vq->vring.desc = qva_to_va(dev, vra->desc_user_addr);
- vq->vring.used = qva_to_va(dev, vra->used_user_addr);
- vq->vring.avail = qva_to_va(dev, vra->avail_user_addr);
vq->vring.log_guest_addr = vra->log_guest_addr;
- DPRINT("Setting virtq addresses:\n");
- DPRINT(" vring_desc at %p\n", vq->vring.desc);
- DPRINT(" vring_used at %p\n", vq->vring.used);
- DPRINT(" vring_avail at %p\n", vq->vring.avail);
- if (!(vq->vring.desc && vq->vring.used && vq->vring.avail)) {
+ if (map_ring(dev, vq)) {
vu_panic(dev, "Invalid vring_addr message");
return false;
}
diff --git a/contrib/libvhost-user/libvhost-user.h b/contrib/libvhost-user/libvhost-user.h
index 1844b6f..5cb7708 100644
--- a/contrib/libvhost-user/libvhost-user.h
+++ b/contrib/libvhost-user/libvhost-user.h
@@ -327,6 +327,9 @@ typedef struct VuVirtq {
     int err_fd;
     unsigned int enable;
     bool started;
+
+    /* Guest addresses of our ring */
+    struct vhost_vring_addr vra;
 } VuVirtq;
 
 enum VuWatchCondtion {
--
1.8.3.1
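
As an aside, here is a minimal self-contained sketch (not part of the patch) of the idea behind the fix: cached vring pointers are process-local translations of guest addresses, so they go stale when a new memory table replaces the old mappings, and the cure is to re-translate the saved guest addresses under the same lock. The names Region, Ring, translate() and remap_rings() are invented stand-ins for libvhost-user's memory regions, VuVirtq, qva_to_va() and map_ring().

/*
 * Minimal sketch, not the libvhost-user API: invented types and helpers
 * (Region, Ring, translate, remap_rings) model the remap problem only.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    uint64_t guest_addr;    /* guest-visible base of the region */
    uint64_t size;
    uint8_t *mmap_addr;     /* where this process currently maps it */
} Region;

typedef struct {
    uint64_t desc_guest_addr;   /* address received at ring setup time */
    void *desc;                 /* cached translation; stale after remap */
} Ring;

/* Translate a guest address through the current memory table. */
static void *translate(const Region *table, int nregions, uint64_t gaddr)
{
    for (int i = 0; i < nregions; i++) {
        if (gaddr >= table[i].guest_addr &&
            gaddr < table[i].guest_addr + table[i].size) {
            return table[i].mmap_addr + (gaddr - table[i].guest_addr);
        }
    }
    return NULL;
}

/*
 * After a new memory table is installed, refresh every cached ring
 * pointer from the guest address we kept; this mirrors the idea of
 * calling map_ring() for each configured queue during setmemtable.
 */
static int remap_rings(Ring *rings, int nrings,
                       const Region *table, int nregions)
{
    for (int i = 0; i < nrings; i++) {
        if (!rings[i].desc) {
            continue;   /* ring not set up yet, nothing to refresh */
        }
        rings[i].desc = translate(table, nregions, rings[i].desc_guest_addr);
        if (!rings[i].desc) {
            return -1;  /* the library would panic the device here */
        }
    }
    return 0;
}

int main(void)
{
    static uint8_t old_map[4096], new_map[4096];
    Region table = { .guest_addr = 0x100000, .size = sizeof(old_map),
                     .mmap_addr = old_map };
    Ring ring = { .desc_guest_addr = 0x100040 };

    ring.desc = translate(&table, 1, ring.desc_guest_addr);
    printf("desc before remap: %p\n", ring.desc);

    /* A later memory-table update maps the same guest range elsewhere. */
    table.mmap_addr = new_map;
    if (remap_rings(&ring, 1, &table, 1) != 0) {
        fprintf(stderr, "remap failed\n");
        return EXIT_FAILURE;
    }
    printf("desc after remap:  %p\n", ring.desc);
    return 0;
}

The patch makes the same move inside the library: vu_set_vring_addr_exec() now keeps the raw vhost_vring_addr in vq->vra, so vu_set_mem_table_exec() can call map_ring() and recompute the pointers while the message is still being handled under lock.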