commit 32f6daad4651a748a58a3ab6da0611862175722f
Author: Alex Williamson <alex.williamson@redhat.com>
Date:   Wed Apr 11 09:51:49 2012 -0600

    KVM: unmap pages from the iommu when slots are removed

    We've been adding new mappings, but not destroying old mappings.
    This can lead to a page leak as pages are pinned using
    get_user_pages, but only unpinned with put_page if they still
    exist in the memslots list on vm shutdown. A memslot that is
    destroyed while an iommu domain is enabled for the guest will
    therefore result in an elevated page reference count that is
    never cleared.

    Additionally, without this fix, the iommu is only programmed
    with the first translation for a gpa. This can result in
    peer-to-peer errors if a mapping is destroyed and replaced by a
    new mapping at the same gpa as the iommu will still be pointing
    to the original, pinned memory address.

    Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
    Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 665a260..72cbf08 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -596,6 +596,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 
 #ifdef CONFIG_IOMMU_API
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
 int kvm_assign_device(struct kvm *kvm,
@@ -609,6 +610,11 @@ static inline int kvm_iommu_map_pages(struct kvm *kvm,
 	return 0;
 }
 
+static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
+					 struct kvm_memory_slot *slot)
+{
+}
+
 static inline int kvm_iommu_map_guest(struct kvm *kvm)
 {
 	return -ENODEV;
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index a457d21..fec1723 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -310,6 +310,11 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 	}
 }
 
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+	kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
+}
+
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
 	int idx;
@@ -320,7 +325,7 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 	slots = kvm_memslots(kvm);
 
 	kvm_for_each_memslot(memslot, slots)
-		kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
+		kvm_iommu_unmap_pages(kvm, memslot);
 
 	srcu_read_unlock(&kvm->srcu, idx);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 42b7393..9739b53 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -808,12 +808,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (r)
 		goto out_free;
 
-	/* map the pages in iommu page table */
+	/* map/unmap the pages in iommu page table */
 	if (npages) {
 		r = kvm_iommu_map_pages(kvm, &new);
 		if (r)
 			goto out_free;
-	}
+	} else
+		kvm_iommu_unmap_pages(kvm, &old);
 
 	r = -ENOMEM;
 	slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
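
A note on the leak mechanism the changelog describes: mapping a slot pins its pages with get_user_pages, and before this patch nothing dropped those references when the slot itself was removed, so the pins outlived the memslot. The small userspace sketch below is purely illustrative, not KVM code (all names here are made up for the illustration); it only models how a "map" that takes per-page references leaks them unless slot removal performs the matching "unmap".

#include <stdio.h>

#define NPAGES 4

/* Stand-in for per-page reference counts; illustrative, not kernel code. */
static int refcount[NPAGES];

/* Models mapping a slot into the iommu: each page gets pinned. */
static void map_slot(void)
{
	int i;
	for (i = 0; i < NPAGES; i++)
		refcount[i]++;		/* get_user_pages takes a reference */
}

/* Models the unmap the patch adds: each page gets unpinned again. */
static void unmap_slot(void)
{
	int i;
	for (i = 0; i < NPAGES; i++)
		refcount[i]--;		/* put_page drops the reference */
}

int main(void)
{
	map_slot();			/* slot mapped while an iommu domain is active */

	/* Pre-patch behaviour: the slot is deleted without an unmap. */
	printf("delete without unmap: refcount[0] = %d (pin leaked)\n", refcount[0]);

	unmap_slot();			/* post-patch: slot deletion unpins the pages */
	printf("delete with unmap:    refcount[0] = %d\n", refcount[0]);
	return 0;
}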