From 5347b12008842b5c86f766e391c6f3756afbff7d Mon Sep 17 00:00:00 2001
Message-Id: <5347b12008842b5c86f766e391c6f3756afbff7d@dist-git>
From: Daniel Henrique Barboza <danielhb413@gmail.com>
Date: Fri, 3 May 2019 13:54:53 +0200
Subject: [PATCH] PPC64 support for NVIDIA V100 GPU with NVLink2 passthrough

The NVIDIA V100 GPU has onboard RAM that is mapped into the host
memory and is accessible as normal RAM via an NVLink2 bridge. When
passed through to a guest, QEMU puts the NVIDIA RAM window in a
non-contiguous area, above the PCI MMIO area that starts at 32TiB.
This means that the NVIDIA RAM window starts at 64TiB and goes all
the way to 128TiB.

This means that the guest might request a 64-bit window, for each PCI
Host Bridge, that goes all the way to 128TiB. However, the NVIDIA RAM
window isn't counted as regular RAM, so this window is considered
only for the allocation of the Translation Control Entry (TCE) table.
For more information about how NVLink2 support works in QEMU, refer
to the accepted implementation [1].
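
(Illustrative aside, not part of the upstream commit message: the
1/512 ratio used by the formulas below falls out of the TCE table
itself. With 4KiB IOMMU pages, each 8-byte TCE maps one 4KiB page,
so a TCE table is 1/512 the size of the DMA window it maps; for the
full 128TiB window that is 128TiB / 512 = 256GiB of TCE table per
PHB.)
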
This memory layout differs from the existing VFIO case, requiring its
own formula. This patch changes the PPC64 code of
@qemuDomainGetMemLockLimitBytes to:

- detect if we have an NVLink2 bridge being passed through to the
guest. This is done by using the @ppc64VFIODeviceIsNV2Bridge function
added in the previous patch. The existence of the NVLink2 bridge in
the guest means that we are dealing with the NVLink2 memory layout;

- if an IBM NVLink2 bridge exists, passthroughLimit is calculated in a
different way to account for the extra memory the TCE table can
allocate. The 64TiB..128TiB window is more than enough to fit all
possible GPUs, so the memLimit is the same regardless of passing
through one or multiple V100 GPUs (see the sketch below).
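
To make the formula concrete, here is a minimal standalone C sketch
(illustrative only, not libvirt code; the 32GiB guest size is a
made-up example). All values are in KiB, mirroring
getPPC64MemLockLimitBytes, and note that the GPU count never appears
as an input:

    #include <stdio.h>

    static unsigned long long
    nvlink2PassthroughLimitKiB(unsigned long long maxMemoryKiB,
                               size_t nPCIHostBridges)
    {
        /* 128TiB/512 is one TCE table per PHB for a 64-bit DMA
         * window covering 0..128TiB with 4KiB pages; 8192 KiB
         * (8MiB) covers second- and further-level overheads. */
        return maxMemoryKiB +
               128 * (1ULL << 30) / 512 * nPCIHostBridges +
               8192;
    }

    int main(void)
    {
        /* Hypothetical 32GiB guest with one PHB: the limit is the
         * same whether one or four V100 GPUs are passed through. */
        printf("%llu KiB\n", nvlink2PassthroughLimitKiB(32ULL << 20, 1));
        return 0;
    }
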
Further reading explaining the background:
[1] https://lists.gnu.org/archive/html/qemu-devel/2019-03/msg03700.html
[2] https://www.redhat.com/archives/libvir-list/2019-March/msg00660.html
[3] https://www.redhat.com/archives/libvir-list/2019-April/msg00527.html

Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
(cherry picked from commit 1a922648f67f56c4374d647feebf2adb9a642f96)

https://bugzilla.redhat.com/show_bug.cgi?id=1505998

Conflicts:
    The upstream commit relied on:
    - v4.7.0-37-gb72183223f
    - v4.7.0-38-ga14f597266
    which were not backported, so virPCIDeviceAddressAsString had to be
    swapped for the older virDomainPCIAddressAsString in order to
    compile.

Signed-off-by: Erik Skultety <eskultet@redhat.com>
Message-Id: <03c00ebf46d85b0615134ef8655e67a4c909b7da.1556884443.git.eskultet@redhat.com>
Reviewed-by: Andrea Bolognani <abologna@redhat.com>
---
 src/qemu/qemu_domain.c | 80 ++++++++++++++++++++++++++++++++----------
 1 file changed, 61 insertions(+), 19 deletions(-)

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index a8bc618389..21f0722495 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -9813,7 +9813,7 @@ qemuDomainUpdateCurrentMemorySize(virQEMUDriverPtr driver,
  * such as '0004:04:00.0', and tells if the device is a NVLink2
  * bridge.
  */
-static ATTRIBUTE_UNUSED bool
+static bool
 ppc64VFIODeviceIsNV2Bridge(const char *device)
 {
     const char *nvlink2Files[] = {"ibm,gpu", "ibm,nvlink",
@@ -9851,7 +9851,9 @@ getPPC64MemLockLimitBytes(virDomainDefPtr def)
     unsigned long long maxMemory = 0;
     unsigned long long passthroughLimit = 0;
     size_t i, nPCIHostBridges = 0;
+    virPCIDeviceAddressPtr pciAddr;
     bool usesVFIO = false;
+    bool nvlink2Capable = false;
 
     for (i = 0; i < def->ncontrollers; i++) {
         virDomainControllerDefPtr cont = def->controllers[i];
@@ -9869,7 +9871,17 @@ getPPC64MemLockLimitBytes(virDomainDefPtr def)
             dev->source.subsys.type == VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_PCI &&
             dev->source.subsys.u.pci.backend == VIR_DOMAIN_HOSTDEV_PCI_BACKEND_VFIO) {
             usesVFIO = true;
-            break;
+
+            pciAddr = &dev->source.subsys.u.pci.addr;
+            if (virPCIDeviceAddressIsValid(pciAddr, false)) {
+                VIR_AUTOFREE(char *) pciAddrStr = NULL;
+
+                pciAddrStr = virDomainPCIAddressAsString(pciAddr);
+                if (ppc64VFIODeviceIsNV2Bridge(pciAddrStr)) {
+                    nvlink2Capable = true;
+                    break;
+                }
+            }
         }
     }
 
@@ -9896,29 +9908,59 @@ getPPC64MemLockLimitBytes(virDomainDefPtr def)
                 4096 * nPCIHostBridges +
                 8192;
 
-    /* passthroughLimit := max( 2 GiB * #PHBs,                       (c)
-     *                          memory                               (d)
-     *                          + memory * 1/512 * #PHBs + 8 MiB )   (e)
+    /* NVLink2 support in QEMU is a special case of the passthrough
+     * mechanics explained in the usesVFIO case below. The GPU RAM
+     * is placed with a gap after maxMemory. The current QEMU
+     * implementation puts the NVIDIA RAM above the PCI MMIO, which
+     * starts at 32TiB and is the MMIO reserved for the guest main RAM.
      *
-     * (c) is the pre-DDW VFIO DMA window accounting. We're allowing 2 GiB
-     * rather than 1 GiB
+     * This window ends at 64TiB, and this is where the GPUs are being
+     * placed. The next available window size is at 128TiB, and
+     * 64TiB..128TiB will fit all possible NVIDIA GPUs.
      *
-     * (d) is the with-DDW (and memory pre-registration and related
-     * features) DMA window accounting - assuming that we only account RAM
-     * once, even if mapped to multiple PHBs
+     * The same assumption as the most common case applies here:
+     * the guest will request a 64-bit DMA window, per PHB, that is
+     * big enough to map all its RAM, which is now at 128TiB due
+     * to the GPUs.
      *
-     * (e) is the with-DDW userspace view and overhead for the 64-bit DMA
-     * window. This is based a bit on expected guest behaviour, but there
-     * really isn't a way to completely avoid that. We assume the guest
-     * requests a 64-bit DMA window (per PHB) just big enough to map all
-     * its RAM. 4 kiB page size gives the 1/512; it will be less with 64
-     * kiB pages, less still if the guest is mapped with hugepages (unlike
-     * the default 32-bit DMA window, DDW windows can use large IOMMU
-     * pages). 8 MiB is for second and further level overheads, like (b) */
-    if (usesVFIO)
+     * Note that the NVIDIA RAM window must be accounted for the TCE
+     * table size, but *not* for the main RAM (maxMemory). This gives
+     * us the following passthroughLimit for the NVLink2 case:
+     *
+     * passthroughLimit = maxMemory +
+     *                    128TiB/512KiB * #PHBs + 8 MiB */
+    if (nvlink2Capable) {
+        passthroughLimit = maxMemory +
+                           128 * (1ULL<<30) / 512 * nPCIHostBridges +
+                           8192;
+    } else if (usesVFIO) {
+        /* For regular (non-NVLink2 present) VFIO passthrough, the value
+         * of passthroughLimit is:
+         *
+         * passthroughLimit := max( 2 GiB * #PHBs,                       (c)
+         *                          memory                               (d)
+         *                          + memory * 1/512 * #PHBs + 8 MiB )   (e)
+         *
+         * (c) is the pre-DDW VFIO DMA window accounting. We're allowing 2
+         * GiB rather than 1 GiB
+         *
+         * (d) is the with-DDW (and memory pre-registration and related
+         * features) DMA window accounting - assuming that we only account
+         * RAM once, even if mapped to multiple PHBs
+         *
+         * (e) is the with-DDW userspace view and overhead for the 64-bit
+         * DMA window. This is based a bit on expected guest behaviour, but
+         * there really isn't a way to completely avoid that. We assume the
+         * guest requests a 64-bit DMA window (per PHB) just big enough to
+         * map all its RAM. 4 kiB page size gives the 1/512; it will be
+         * less with 64 kiB pages, less still if the guest is mapped with
+         * hugepages (unlike the default 32-bit DMA window, DDW windows
+         * can use large IOMMU pages). 8 MiB is for second and further level
+         * overheads, like (b) */
         passthroughLimit = MAX(2 * 1024 * 1024 * nPCIHostBridges,
                                memory +
                                memory / 512 * nPCIHostBridges + 8192);
+    }
 
     memKB = baseLimit + passthroughLimit;
 
--
2.21.0
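
For comparison, the pre-existing non-NVLink2 VFIO limit from the last
hunk above can be sketched the same way (illustrative only, not
libvirt code; the guest sizes are made-up examples, values in KiB):

    #include <stdio.h>

    /* passthroughLimit := max( 2 GiB * #PHBs,
     *                          memory + memory/512 * #PHBs + 8 MiB ) */
    static unsigned long long
    vfioPassthroughLimitKiB(unsigned long long memoryKiB,
                            size_t nPCIHostBridges)
    {
        unsigned long long preDDW = 2ULL * 1024 * 1024 * nPCIHostBridges;
        unsigned long long withDDW = memoryKiB +
                                     memoryKiB / 512 * nPCIHostBridges +
                                     8192;
        return preDDW > withDDW ? preDDW : withDDW;
    }

    int main(void)
    {
        /* A 1GiB guest hits the 2GiB-per-PHB floor (c); a 4GiB
         * guest takes the memory-based branch (d)+(e). */
        printf("%llu KiB\n", vfioPassthroughLimitKiB(1ULL << 20, 1));
        printf("%llu KiB\n", vfioPassthroughLimitKiB(4ULL << 20, 1));
        return 0;
    }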